vfs_bio.c revision 1.100

 1 1.100 pk /* $NetBSD: vfs_bio.c,v 1.100 2003/12/30 12:33:23 pk Exp $ */
2 1.31 cgd
3 1.31 cgd /*-
4 1.31 cgd * Copyright (c) 1982, 1986, 1989, 1993
5 1.31 cgd * The Regents of the University of California. All rights reserved.
6 1.31 cgd * (c) UNIX System Laboratories, Inc.
7 1.31 cgd * All or some portions of this file are derived from material licensed
8 1.31 cgd * to the University of California by American Telephone and Telegraph
9 1.31 cgd * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10 1.31 cgd * the permission of UNIX System Laboratories, Inc.
11 1.31 cgd *
12 1.31 cgd * Redistribution and use in source and binary forms, with or without
13 1.31 cgd * modification, are permitted provided that the following conditions
14 1.31 cgd * are met:
15 1.31 cgd * 1. Redistributions of source code must retain the above copyright
16 1.31 cgd * notice, this list of conditions and the following disclaimer.
17 1.31 cgd * 2. Redistributions in binary form must reproduce the above copyright
18 1.31 cgd * notice, this list of conditions and the following disclaimer in the
19 1.31 cgd * documentation and/or other materials provided with the distribution.
20 1.93 agc * 3. Neither the name of the University nor the names of its contributors
21 1.93 agc * may be used to endorse or promote products derived from this software
22 1.93 agc * without specific prior written permission.
23 1.93 agc *
24 1.93 agc * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 1.93 agc * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 1.93 agc * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 1.93 agc * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 1.93 agc * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 1.93 agc * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 1.93 agc * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 1.93 agc * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 1.93 agc * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 1.93 agc * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 1.93 agc * SUCH DAMAGE.
35 1.93 agc *
36 1.93 agc * @(#)vfs_bio.c 8.6 (Berkeley) 1/11/94
37 1.93 agc */
38 1.93 agc
39 1.93 agc /*-
40 1.93 agc * Copyright (c) 1994 Christopher G. Demetriou
41 1.93 agc *
42 1.93 agc * Redistribution and use in source and binary forms, with or without
43 1.93 agc * modification, are permitted provided that the following conditions
44 1.93 agc * are met:
45 1.93 agc * 1. Redistributions of source code must retain the above copyright
46 1.93 agc * notice, this list of conditions and the following disclaimer.
47 1.93 agc * 2. Redistributions in binary form must reproduce the above copyright
48 1.93 agc * notice, this list of conditions and the following disclaimer in the
49 1.93 agc * documentation and/or other materials provided with the distribution.
50 1.31 cgd * 3. All advertising materials mentioning features or use of this software
51 1.31 cgd * must display the following acknowledgement:
52 1.31 cgd * This product includes software developed by the University of
53 1.31 cgd * California, Berkeley and its contributors.
54 1.31 cgd * 4. Neither the name of the University nor the names of its contributors
55 1.31 cgd * may be used to endorse or promote products derived from this software
56 1.31 cgd * without specific prior written permission.
57 1.31 cgd *
58 1.31 cgd * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
59 1.31 cgd * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
60 1.31 cgd * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
61 1.31 cgd * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
62 1.31 cgd * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
63 1.31 cgd * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
64 1.31 cgd * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
65 1.31 cgd * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
66 1.31 cgd * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
67 1.31 cgd * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
68 1.31 cgd * SUCH DAMAGE.
69 1.31 cgd *
70 1.31 cgd * @(#)vfs_bio.c 8.6 (Berkeley) 1/11/94
71 1.31 cgd */
72 1.31 cgd
73 1.31 cgd /*
74 1.31 cgd * Some references:
75 1.31 cgd * Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
76 1.31 cgd * Leffler, et al.: The Design and Implementation of the 4.3BSD
 77 1.31 cgd * UNIX Operating System (Addison-Wesley, 1989)
78 1.31 cgd */
79 1.77 lukem
80 1.100 pk #include "opt_bufcache.h"
81 1.81 matt #include "opt_softdep.h"
82 1.81 matt
83 1.77 lukem #include <sys/cdefs.h>
84 1.100 pk __KERNEL_RCSID(0, "$NetBSD: vfs_bio.c,v 1.100 2003/12/30 12:33:23 pk Exp $");
85 1.31 cgd
86 1.31 cgd #include <sys/param.h>
87 1.31 cgd #include <sys/systm.h>
88 1.100 pk #include <sys/kernel.h>
89 1.31 cgd #include <sys/proc.h>
90 1.31 cgd #include <sys/buf.h>
91 1.31 cgd #include <sys/vnode.h>
92 1.31 cgd #include <sys/mount.h>
93 1.31 cgd #include <sys/malloc.h>
94 1.31 cgd #include <sys/resourcevar.h>
95 1.100 pk #include <sys/sysctl.h>
96 1.35 mycroft #include <sys/conf.h>
97 1.40 christos
98 1.73 chs #include <uvm/uvm.h>
99 1.71 thorpej
100 1.59 fvdl #include <miscfs/specfs/specdev.h>
101 1.59 fvdl
102 1.100 pk #ifndef BUFPAGES
103 1.100 pk # define BUFPAGES 0
104 1.100 pk #endif
105 1.100 pk
106 1.100 pk #ifdef BUFCACHE
107 1.100 pk # if (BUFCACHE < 5) || (BUFCACHE > 95)
108 1.100 pk # error BUFCACHE is not between 5 and 95
109 1.100 pk # endif
110 1.100 pk #else
111 1.100 pk # define BUFCACHE 30
112 1.100 pk #endif
113 1.100 pk
114 1.100 pk u_int nbuf; /* XXX - for softdep_lockedbufs */
115 1.100 pk u_int bufpages = BUFPAGES; /* optional hardwired count */
116 1.100 pk u_int bufcache = BUFCACHE; /* max % of RAM to use for buffer cache */
117 1.100 pk
118 1.100 pk
119 1.31 cgd /* Macros to clear/set/test flags. */
120 1.31 cgd #define SET(t, f) (t) |= (f)
121 1.31 cgd #define CLR(t, f) (t) &= ~(f)
122 1.31 cgd #define ISSET(t, f) ((t) & (f))
123 1.31 cgd
124 1.31 cgd /*
125 1.31 cgd * Definitions for the buffer hash lists.
126 1.31 cgd */
127 1.31 cgd #define BUFHASH(dvp, lbn) \
128 1.73 chs (&bufhashtbl[(((long)(dvp) >> 8) + (int)(lbn)) & bufhash])
129 1.31 cgd LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
130 1.31 cgd u_long bufhash;
131 1.81 matt #ifndef SOFTDEP
132 1.59 fvdl struct bio_ops bioops; /* I/O operation notification */
133 1.81 matt #endif
134 1.31 cgd
135 1.31 cgd /*
136 1.31 cgd * Insq/Remq for the buffer hash lists.
137 1.31 cgd */
138 1.31 cgd #define binshash(bp, dp) LIST_INSERT_HEAD(dp, bp, b_hash)
139 1.31 cgd #define bremhash(bp) LIST_REMOVE(bp, b_hash)
140 1.31 cgd
141 1.31 cgd /*
142 1.31 cgd * Definitions for the buffer free lists.
143 1.31 cgd */
144 1.100 pk #define BQUEUES 3 /* number of free buffer queues */
145 1.31 cgd
146 1.31 cgd #define BQ_LOCKED 0 /* super-blocks &c */
147 1.31 cgd #define BQ_LRU 1 /* lru, useful buffers */
148 1.31 cgd #define BQ_AGE 2 /* rubbish */
149 1.31 cgd
150 1.31 cgd TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
151 1.31 cgd int needbuffer;
152 1.31 cgd
153 1.31 cgd /*
154 1.87 pk * Buffer queue lock.
155 1.87 pk * Take this lock first if also taking some buffer's b_interlock.
156 1.87 pk */
157 1.87 pk struct simplelock bqueue_slock = SIMPLELOCK_INITIALIZER;
158 1.87 pk
159 1.87 pk /*
160 1.65 thorpej * Buffer pool for I/O buffers.
161 1.65 thorpej */
162 1.65 thorpej struct pool bufpool;
163 1.65 thorpej
164 1.100 pk /* XXX - somewhat gross.. */
165 1.100 pk #if MAXBSIZE == 0x2000
166 1.100 pk #define NMEMPOOLS 4
167 1.100 pk #elif MAXBSIZE == 0x4000
168 1.100 pk #define NMEMPOOLS 5
169 1.100 pk #elif MAXBSIZE == 0x8000
170 1.100 pk #define NMEMPOOLS 6
171 1.100 pk #else
172 1.100 pk #define NMEMPOOLS 7
173 1.100 pk #endif
174 1.100 pk
175 1.100 pk #define MEMPOOL_INDEX_OFFSET 10 /* smallest pool is 1k */
176 1.100 pk #if (1 << (NMEMPOOLS + MEMPOOL_INDEX_OFFSET - 1)) != MAXBSIZE
177 1.100 pk #error update vfs_bio buffer memory parameters
178 1.100 pk #endif
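/*
 * Worked example of the invariant above: with MAXBSIZE == 0x10000 (64k),
 * NMEMPOOLS is 7 and the pools are sized 1k, 2k, 4k, 8k, 16k, 32k, 64k.
 * The largest pool then spans 1 << (7 + 10 - 1) == 0x10000 == MAXBSIZE,
 * so the #error above is not triggered.
 */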
179 1.100 pk
180 1.100 pk /* Buffer memory pools */
181 1.100 pk struct pool bmempools[NMEMPOOLS];
182 1.100 pk
183 1.100 pk struct vm_map *buf_map;
184 1.100 pk
185 1.100 pk /*
186 1.100 pk * Buffer memory pool allocator.
187 1.100 pk */
188 1.100 pk static void *bufpool_page_alloc(struct pool *pp, int flags)
189 1.100 pk {
190 1.100 pk return (void *)uvm_km_kmemalloc1(buf_map,
191 1.100 pk uvm.kernel_object, MAXBSIZE, MAXBSIZE,
192 1.100 pk UVM_UNKNOWN_OFFSET,
193 1.100 pk (flags & PR_WAITOK)?0:UVM_KMF_NOWAIT);
194 1.100 pk }
195 1.100 pk
196 1.100 pk static void bufpool_page_free(struct pool *pp, void *v)
197 1.100 pk {
 198 1.100 pk 	uvm_km_free(buf_map, (vaddr_t)v, MAXBSIZE);
199 1.100 pk }
200 1.100 pk
201 1.100 pk struct pool_allocator bufmempool_allocator = {
202 1.100 pk bufpool_page_alloc, bufpool_page_free, MAXBSIZE,
203 1.100 pk };
204 1.100 pk
205 1.100 pk /* Buffer memory management variables */
206 1.100 pk u_long bufmem_valimit;
207 1.100 pk u_long bufmem_hiwater;
208 1.100 pk u_long bufmem_lowater;
209 1.100 pk u_long bufmem;
210 1.100 pk
211 1.100 pk /*
212 1.100 pk * MD code can call this to set a hard limit on the amount
213 1.100 pk * of virtual memory used by the buffer cache.
214 1.100 pk */
215 1.100 pk int buf_setvalimit(vsize_t sz)
216 1.100 pk {
217 1.100 pk
218 1.100 pk /* We need to accommodate at least NMEMPOOLS of MAXBSIZE each */
219 1.100 pk if (sz < NMEMPOOLS * MAXBSIZE)
220 1.100 pk return EINVAL;
221 1.100 pk
222 1.100 pk bufmem_valimit = sz;
223 1.100 pk return 0;
224 1.100 pk }
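/*
 * A sketch of how MD startup code might use buf_setvalimit() before
 * bufinit() runs (illustrative only; the call site and the 32 MB figure
 * are assumptions, not taken from this file):
 */
#if 0
	/* Cap buffer cache KVA at 32 MB; fall back to defaults on error. */
	if (buf_setvalimit(32 * 1024 * 1024) != 0)
		printf("buf_setvalimit refused limit, using defaults\n");
#endif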
225 1.100 pk
226 1.100 pk static int buf_trim(void);
227 1.100 pk
228 1.65 thorpej /*
229 1.87 pk * bread()/breadn() helper.
230 1.87 pk */
231 1.87 pk static __inline struct buf *bio_doread(struct vnode *, daddr_t, int,
232 1.87 pk struct ucred *, int);
233 1.87 pk int count_lock_queue(void);
234 1.87 pk
235 1.87 pk /*
236 1.31 cgd * Insq/Remq for the buffer free lists.
237 1.87 pk * Call with buffer queue locked.
238 1.31 cgd */
239 1.31 cgd #define binsheadfree(bp, dp) TAILQ_INSERT_HEAD(dp, bp, b_freelist)
240 1.31 cgd #define binstailfree(bp, dp) TAILQ_INSERT_TAIL(dp, bp, b_freelist)
241 1.31 cgd
242 1.99 dbj #ifdef DEBUG
243 1.99 dbj int debug_verify_freelist = 0;
244 1.100 pk static int checkfreelist(struct buf *bp, struct bqueues *dp)
245 1.99 dbj {
246 1.100 pk struct buf *b;
247 1.100 pk TAILQ_FOREACH(b, dp, b_freelist) {
248 1.100 pk if (b == bp)
249 1.100 pk return 1;
250 1.100 pk }
251 1.100 pk return 0;
252 1.99 dbj }
253 1.99 dbj #endif
254 1.99 dbj
255 1.31 cgd void
256 1.31 cgd bremfree(bp)
257 1.31 cgd struct buf *bp;
258 1.31 cgd {
259 1.31 cgd struct bqueues *dp = NULL;
260 1.94 yamt
261 1.94 yamt LOCK_ASSERT(simple_lock_held(&bqueue_slock));
262 1.31 cgd
263 1.100 pk KDASSERT(!debug_verify_freelist ||
264 1.100 pk checkfreelist(bp, &bufqueues[BQ_AGE]) ||
265 1.100 pk checkfreelist(bp, &bufqueues[BQ_LRU]) ||
266 1.100 pk checkfreelist(bp, &bufqueues[BQ_LOCKED]) );
267 1.99 dbj
268 1.31 cgd /*
269 1.31 cgd * We only calculate the head of the freelist when removing
270 1.31 cgd * the last element of the list as that is the only time that
271 1.31 cgd * it is needed (e.g. to reset the tail pointer).
272 1.31 cgd *
273 1.31 cgd * NB: This makes an assumption about how tailq's are implemented.
274 1.98 dbj *
275 1.98 dbj * We break the TAILQ abstraction in order to efficiently remove a
276 1.98 dbj * buffer from its freelist without having to know exactly which
277 1.98 dbj * freelist it is on.
278 1.31 cgd */
279 1.84 matt if (TAILQ_NEXT(bp, b_freelist) == NULL) {
280 1.31 cgd for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
281 1.31 cgd if (dp->tqh_last == &bp->b_freelist.tqe_next)
282 1.31 cgd break;
283 1.31 cgd if (dp == &bufqueues[BQUEUES])
284 1.31 cgd panic("bremfree: lost tail");
285 1.31 cgd }
286 1.31 cgd TAILQ_REMOVE(dp, bp, b_freelist);
287 1.31 cgd }
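/*
 * A sketch of why the tail test above works: for the last element of a
 * tail queue, the head's tqh_last points at the address of that
 * element's tqe_next field.  So when bp is last on some free list,
 * exactly one head in bufqueues[] satisfies
 *
 *	dp->tqh_last == &bp->b_freelist.tqe_next
 *
 * which identifies bp's queue without storing a back pointer in the buf.
 */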
288 1.31 cgd
289 1.100 pk u_long buf_memcalc()
290 1.100 pk {
291 1.100 pk u_long n;
292 1.100 pk
293 1.100 pk /*
294 1.100 pk * Determine the upper bound of memory to use for buffers.
295 1.100 pk *
 296 1.100 pk * - If bufpages is specified, use that as the number
 297 1.100 pk * of pages.
298 1.100 pk *
299 1.100 pk * - Otherwise, use bufcache as the percentage of
300 1.100 pk * physical memory.
301 1.100 pk */
302 1.100 pk if (bufpages != 0) {
303 1.100 pk n = bufpages;
304 1.100 pk } else {
305 1.100 pk if (bufcache < 5) {
306 1.100 pk printf("forcing bufcache %d -> 5", bufcache);
307 1.100 pk bufcache = 5;
308 1.100 pk }
309 1.100 pk if (bufcache > 95) {
310 1.100 pk printf("forcing bufcache %d -> 95", bufcache);
311 1.100 pk bufcache = 95;
312 1.100 pk }
313 1.100 pk n = physmem / 100 * bufcache;
314 1.100 pk }
315 1.100 pk
316 1.100 pk n <<= PAGE_SHIFT;
317 1.100 pk if (bufmem_valimit != 0 && n > bufmem_valimit)
318 1.100 pk n = bufmem_valimit;
319 1.100 pk
320 1.100 pk return (n);
321 1.100 pk }
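/*
 * Worked example (illustrative numbers, 4 kB pages assumed): on a
 * 128 MB machine physmem is 32768 pages, so with the default bufcache
 * of 30 the integer arithmetic gives
 *
 *	n = 32768 / 100 * 30 = 9810 pages,
 *
 * about 38 MB once shifted left by PAGE_SHIFT, and then possibly
 * clipped to bufmem_valimit.
 */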
322 1.100 pk
323 1.31 cgd /*
324 1.31 cgd * Initialize buffers and hash links for buffers.
325 1.31 cgd */
326 1.31 cgd void
327 1.31 cgd bufinit()
328 1.31 cgd {
329 1.31 cgd struct bqueues *dp;
330 1.100 pk int smallmem;
331 1.100 pk u_int i;
332 1.100 pk
333 1.100 pk /*
334 1.100 pk * Initialize buffer cache memory parameters.
335 1.100 pk */
336 1.100 pk bufmem = 0;
337 1.100 pk bufmem_hiwater = buf_memcalc();
338 1.100 pk /* lowater is approx. 2% of memory (with bufcache=30) */
339 1.100 pk bufmem_lowater = (bufmem_hiwater >> 4);
340 1.100 pk if (bufmem_lowater < 64 * 1024)
341 1.100 pk /* Ensure a reasonable minimum value */
342 1.100 pk bufmem_lowater = 64 * 1024;
343 1.100 pk
344 1.100 pk if (bufmem_valimit != 0) {
345 1.100 pk vaddr_t minaddr = 0, maxaddr;
346 1.100 pk buf_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
347 1.100 pk bufmem_valimit, VM_MAP_PAGEABLE,
348 1.100 pk FALSE, 0);
349 1.100 pk if (buf_map == NULL)
350 1.100 pk panic("bufinit: cannot allocate submap");
351 1.100 pk } else
352 1.100 pk buf_map = kernel_map;
353 1.65 thorpej
354 1.65 thorpej /*
355 1.100 pk * Initialize the buffer pools.
356 1.65 thorpej */
357 1.79 thorpej pool_init(&bufpool, sizeof(struct buf), 0, 0, 0, "bufpl", NULL);
358 1.31 cgd
359 1.100 pk /* On "small" machines use small pool page sizes where possible */
360 1.100 pk smallmem = (physmem < atop(16*1024*1024));
361 1.100 pk
362 1.100 pk for (i = 0; i < NMEMPOOLS; i++) {
363 1.100 pk struct pool_allocator *pa;
364 1.100 pk struct pool *pp = &bmempools[i];
365 1.100 pk u_int size = 1 << (i + MEMPOOL_INDEX_OFFSET);
366 1.100 pk char *name = malloc(8, M_TEMP, M_WAITOK);
367 1.100 pk snprintf(name, 8, "buf%dk", 1 << i);
368 1.100 pk pa = (size <= PAGE_SIZE && smallmem)
369 1.100 pk ? &pool_allocator_nointr
370 1.100 pk : &bufmempool_allocator;
371 1.100 pk pool_init(pp, size, 0, 0, 0, name, pa);
372 1.100 pk pool_setlowat(pp, 1);
373 1.100 pk }
374 1.100 pk
375 1.100 pk /* Initialize the buffer queues */
376 1.31 cgd for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
377 1.31 cgd TAILQ_INIT(dp);
378 1.100 pk
379 1.100 pk /*
380 1.100 pk * Estimate hash table size based on the amount of memory we
381 1.100 pk * intend to use for the buffer cache. The average buffer
382 1.100 pk * size is dependent on our clients (i.e. filesystems).
383 1.100 pk *
384 1.100 pk * For now, use an empirical 3K per buffer.
385 1.100 pk */
386 1.100 pk nbuf = (bufmem_hiwater / 1024) / 3;
387 1.70 ad bufhashtbl = hashinit(nbuf, HASH_LIST, M_CACHE, M_WAITOK, &bufhash);
388 1.100 pk }
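/*
 * A worked pass through bufinit() (a sketch): with NMEMPOOLS == 7 the
 * pools are named "buf1k" through "buf64k" (at most 7 characters, so
 * the 8-byte name buffer suffices).  On machines with less than 16 MB
 * of RAM, pools no larger than PAGE_SIZE draw from
 * pool_allocator_nointr; all other pools use bufmempool_allocator, so
 * each pool page is a MAXBSIZE-sized chunk of buf_map.  With a
 * bufmem_hiwater of ~38 MB the hash table is sized for roughly
 * 38 MB / 3 kB ~ 13000 buffers.
 */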
389 1.100 pk
390 1.100 pk static int
391 1.100 pk buf_lotsfree(void)
392 1.100 pk {
393 1.100 pk return (bufmem < bufmem_lowater ||
394 1.100 pk (bufmem < bufmem_hiwater && uvmexp.free > 2*uvmexp.freetarg));
395 1.100 pk }
396 1.100 pk
397 1.100 pk /*
398 1.100 pk * Return estimate of # of buffers we think need to be
399 1.100 pk * released to help resolve low memory conditions.
400 1.100 pk */
401 1.100 pk static int
402 1.100 pk buf_canrelease(void)
403 1.100 pk {
404 1.100 pk int n;
405 1.100 pk
406 1.100 pk if (bufmem < bufmem_lowater)
407 1.100 pk return 0;
408 1.100 pk
409 1.100 pk n = uvmexp.freetarg - uvmexp.free;
410 1.100 pk if (n < 0)
411 1.100 pk n = 0;
412 1.100 pk return 2*n;
413 1.100 pk }
414 1.100 pk
415 1.100 pk /*
416 1.100 pk * Buffer memory allocation helper functions
417 1.100 pk */
418 1.100 pk static __inline__ u_long buf_mempoolidx(u_long size)
419 1.100 pk {
420 1.100 pk u_int n = 0;
421 1.100 pk
422 1.100 pk size -= 1;
423 1.100 pk size >>= MEMPOOL_INDEX_OFFSET;
424 1.100 pk while (size) {
425 1.100 pk size >>= 1;
426 1.100 pk n += 1;
427 1.100 pk }
428 1.100 pk if (n >= NMEMPOOLS)
429 1.100 pk panic("buf mem pool index %d", n);
430 1.100 pk return n;
431 1.100 pk }
432 1.100 pk
433 1.100 pk static __inline__ u_long buf_roundsize(u_long size)
434 1.100 pk {
435 1.100 pk /* Round up to nearest power of 2 */
436 1.100 pk return (1 << (buf_mempoolidx(size) + MEMPOOL_INDEX_OFFSET));
437 1.100 pk }
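/*
 * Worked example for the helpers above: for size == 3000,
 * buf_mempoolidx() computes (3000 - 1) >> 10 == 2 and then shifts
 * twice more to reach zero, so n == 2 and the data comes from the
 * 1 << (2 + 10) == 4096-byte pool; buf_roundsize(3000) is thus 4096.
 * A request of exactly 1024 maps to index 0 (the 1k pool), and any
 * size above MAXBSIZE panics in buf_mempoolidx().
 */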
438 1.100 pk
439 1.100 pk static __inline__ caddr_t buf_malloc(size_t size)
440 1.100 pk {
441 1.100 pk u_int n = buf_mempoolidx(size);
442 1.100 pk caddr_t addr;
443 1.100 pk int s;
444 1.100 pk
445 1.100 pk while (1) {
446 1.100 pk addr = pool_get(&bmempools[n], PR_NOWAIT);
447 1.100 pk if (addr != NULL)
448 1.100 pk break;
449 1.100 pk
450 1.100 pk /* No memory, see if we can free some. If so, try again */
451 1.100 pk if (buf_drain(1) > 0)
452 1.100 pk continue;
453 1.100 pk
454 1.100 pk /* Wait for buffers to arrive on the LRU queue */
455 1.100 pk s = splbio();
456 1.100 pk simple_lock(&bqueue_slock);
457 1.100 pk needbuffer = 1;
458 1.100 pk ltsleep(&needbuffer, PNORELOCK | (PRIBIO+1),
459 1.100 pk "buf_malloc", 0, &bqueue_slock);
460 1.100 pk splx(s);
461 1.31 cgd }
462 1.100 pk
463 1.100 pk return addr;
464 1.100 pk }
465 1.100 pk
466 1.100 pk static void buf_mrelease(caddr_t addr, size_t size)
467 1.100 pk {
468 1.100 pk
469 1.100 pk pool_put(&bmempools[buf_mempoolidx(size)], addr);
470 1.31 cgd }
471 1.31 cgd
472 1.100 pk
473 1.40 christos static __inline struct buf *
474 1.34 mycroft bio_doread(vp, blkno, size, cred, async)
475 1.31 cgd struct vnode *vp;
476 1.31 cgd daddr_t blkno;
477 1.31 cgd int size;
478 1.31 cgd struct ucred *cred;
479 1.34 mycroft int async;
480 1.31 cgd {
481 1.66 augustss struct buf *bp;
482 1.86 thorpej struct lwp *l = (curlwp != NULL ? curlwp : &lwp0); /* XXX */
483 1.86 thorpej struct proc *p = l->l_proc;
484 1.31 cgd
485 1.34 mycroft bp = getblk(vp, blkno, size, 0, 0);
486 1.31 cgd
487 1.86 thorpej #ifdef DIAGNOSTIC
488 1.86 thorpej if (bp == NULL) {
489 1.86 thorpej panic("bio_doread: no such buf");
490 1.86 thorpej }
491 1.86 thorpej #endif
492 1.86 thorpej
493 1.31 cgd /*
 494 1.34 mycroft * If the buffer does not have valid data, start a read.
495 1.31 cgd * Note that if buffer is B_INVAL, getblk() won't return it.
496 1.87 pk * Therefore, it's valid if its I/O has completed or been delayed.
497 1.31 cgd */
498 1.34 mycroft if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
499 1.73 chs /* Start I/O for the buffer. */
500 1.34 mycroft SET(bp->b_flags, B_READ | async);
501 1.34 mycroft VOP_STRATEGY(bp);
502 1.31 cgd
503 1.34 mycroft /* Pay for the read. */
504 1.49 cgd p->p_stats->p_ru.ru_inblock++;
505 1.34 mycroft } else if (async) {
506 1.34 mycroft brelse(bp);
507 1.31 cgd }
508 1.31 cgd
509 1.34 mycroft return (bp);
510 1.34 mycroft }
511 1.34 mycroft
512 1.34 mycroft /*
513 1.34 mycroft * Read a disk block.
514 1.34 mycroft * This algorithm described in Bach (p.54).
515 1.34 mycroft */
516 1.40 christos int
517 1.34 mycroft bread(vp, blkno, size, cred, bpp)
518 1.34 mycroft struct vnode *vp;
519 1.34 mycroft daddr_t blkno;
520 1.34 mycroft int size;
521 1.34 mycroft struct ucred *cred;
522 1.34 mycroft struct buf **bpp;
523 1.34 mycroft {
524 1.66 augustss struct buf *bp;
525 1.34 mycroft
526 1.34 mycroft /* Get buffer for block. */
527 1.34 mycroft bp = *bpp = bio_doread(vp, blkno, size, cred, 0);
528 1.31 cgd
529 1.80 chs /* Wait for the read to complete, and return result. */
530 1.31 cgd return (biowait(bp));
531 1.31 cgd }
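/*
 * A minimal usage sketch for bread() (illustrative only; the vnode,
 * block number and block size below are assumptions, not from this
 * file):
 */
#if 0
	struct buf *bp;
	int error;

	/* Read one 8 kB logical block; NOCRED as no credential applies. */
	error = bread(vp, (daddr_t)0, 8192, NOCRED, &bp);
	if (error) {
		brelse(bp);	/* a buffer is returned even on error */
		return (error);
	}
	/* ... consume bp->b_data ... */
	brelse(bp);		/* put it back on the free lists */
#endif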
532 1.31 cgd
533 1.31 cgd /*
534 1.31 cgd * Read-ahead multiple disk blocks. The first is sync, the rest async.
535 1.31 cgd * Trivial modification to the breada algorithm presented in Bach (p.55).
536 1.31 cgd */
537 1.40 christos int
538 1.31 cgd breadn(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp)
539 1.31 cgd struct vnode *vp;
540 1.31 cgd daddr_t blkno; int size;
541 1.31 cgd daddr_t rablks[]; int rasizes[];
542 1.31 cgd int nrablks;
543 1.31 cgd struct ucred *cred;
544 1.31 cgd struct buf **bpp;
545 1.31 cgd {
546 1.66 augustss struct buf *bp;
547 1.31 cgd int i;
548 1.31 cgd
549 1.34 mycroft bp = *bpp = bio_doread(vp, blkno, size, cred, 0);
550 1.31 cgd
551 1.31 cgd /*
552 1.31 cgd * For each of the read-ahead blocks, start a read, if necessary.
553 1.31 cgd */
554 1.31 cgd for (i = 0; i < nrablks; i++) {
555 1.31 cgd /* If it's in the cache, just go on to next one. */
556 1.31 cgd if (incore(vp, rablks[i]))
557 1.31 cgd continue;
558 1.31 cgd
559 1.31 cgd /* Get a buffer for the read-ahead block */
560 1.34 mycroft (void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC);
561 1.31 cgd }
562 1.31 cgd
563 1.80 chs /* Otherwise, we had to start a read for it; wait until it's valid. */
564 1.31 cgd return (biowait(bp));
565 1.31 cgd }
566 1.31 cgd
567 1.31 cgd /*
568 1.31 cgd * Read with single-block read-ahead. Defined in Bach (p.55), but
569 1.31 cgd * implemented as a call to breadn().
570 1.31 cgd * XXX for compatibility with old file systems.
571 1.31 cgd */
572 1.40 christos int
573 1.31 cgd breada(vp, blkno, size, rablkno, rabsize, cred, bpp)
574 1.31 cgd struct vnode *vp;
575 1.31 cgd daddr_t blkno; int size;
576 1.31 cgd daddr_t rablkno; int rabsize;
577 1.31 cgd struct ucred *cred;
578 1.31 cgd struct buf **bpp;
579 1.31 cgd {
580 1.34 mycroft
581 1.31 cgd return (breadn(vp, blkno, size, &rablkno, &rabsize, 1, cred, bpp));
582 1.31 cgd }
583 1.31 cgd
584 1.31 cgd /*
585 1.31 cgd * Block write. Described in Bach (p.56)
586 1.31 cgd */
587 1.40 christos int
588 1.31 cgd bwrite(bp)
589 1.31 cgd struct buf *bp;
590 1.31 cgd {
591 1.44 pk int rv, sync, wasdelayed, s;
592 1.86 thorpej struct lwp *l = (curlwp != NULL ? curlwp : &lwp0); /* XXX */
593 1.86 thorpej struct proc *p = l->l_proc;
594 1.59 fvdl struct vnode *vp;
595 1.59 fvdl struct mount *mp;
596 1.31 cgd
597 1.87 pk KASSERT(ISSET(bp->b_flags, B_BUSY));
598 1.87 pk
599 1.76 chs vp = bp->b_vp;
600 1.76 chs if (vp != NULL) {
601 1.76 chs if (vp->v_type == VBLK)
602 1.76 chs mp = vp->v_specmountpoint;
603 1.76 chs else
604 1.76 chs mp = vp->v_mount;
605 1.76 chs } else {
606 1.76 chs mp = NULL;
607 1.76 chs }
608 1.76 chs
609 1.38 cgd /*
610 1.38 cgd * Remember buffer type, to switch on it later. If the write was
611 1.38 cgd * synchronous, but the file system was mounted with MNT_ASYNC,
612 1.38 cgd * convert it to a delayed write.
613 1.38 cgd * XXX note that this relies on delayed tape writes being converted
614 1.38 cgd * to async, not sync writes (which is safe, but ugly).
615 1.38 cgd */
616 1.31 cgd sync = !ISSET(bp->b_flags, B_ASYNC);
617 1.76 chs if (sync && mp != NULL && ISSET(mp->mnt_flag, MNT_ASYNC)) {
618 1.37 cgd bdwrite(bp);
619 1.37 cgd return (0);
620 1.37 cgd }
621 1.46 mycroft
622 1.59 fvdl /*
623 1.59 fvdl * Collect statistics on synchronous and asynchronous writes.
624 1.59 fvdl * Writes to block devices are charged to their associated
625 1.59 fvdl * filesystem (if any).
626 1.59 fvdl */
627 1.76 chs if (mp != NULL) {
628 1.76 chs if (sync)
629 1.76 chs mp->mnt_stat.f_syncwrites++;
630 1.59 fvdl else
631 1.76 chs mp->mnt_stat.f_asyncwrites++;
632 1.59 fvdl }
633 1.59 fvdl
634 1.44 pk s = splbio();
635 1.87 pk simple_lock(&bp->b_interlock);
636 1.46 mycroft
637 1.97 dbj wasdelayed = ISSET(bp->b_flags, B_DELWRI);
638 1.97 dbj
639 1.60 fvdl CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));
640 1.60 fvdl
641 1.46 mycroft /*
642 1.46 mycroft * Pay for the I/O operation and make sure the buf is on the correct
643 1.46 mycroft * vnode queue.
644 1.46 mycroft */
645 1.46 mycroft if (wasdelayed)
646 1.46 mycroft reassignbuf(bp, bp->b_vp);
647 1.46 mycroft else
648 1.49 cgd p->p_stats->p_ru.ru_oublock++;
649 1.32 mycroft
650 1.31 cgd /* Initiate disk write. Make sure the appropriate party is charged. */
651 1.87 pk V_INCR_NUMOUTPUT(bp->b_vp);
652 1.87 pk simple_unlock(&bp->b_interlock);
653 1.44 pk splx(s);
654 1.46 mycroft
655 1.31 cgd VOP_STRATEGY(bp);
656 1.31 cgd
657 1.34 mycroft if (sync) {
658 1.46 mycroft /* If I/O was synchronous, wait for it to complete. */
659 1.31 cgd rv = biowait(bp);
660 1.31 cgd
661 1.34 mycroft /* Release the buffer. */
662 1.31 cgd brelse(bp);
663 1.34 mycroft
664 1.34 mycroft return (rv);
665 1.34 mycroft } else {
666 1.34 mycroft return (0);
667 1.31 cgd }
668 1.31 cgd }
669 1.31 cgd
670 1.31 cgd int
671 1.40 christos vn_bwrite(v)
672 1.40 christos void *v;
673 1.31 cgd {
674 1.40 christos struct vop_bwrite_args *ap = v;
675 1.34 mycroft
676 1.31 cgd return (bwrite(ap->a_bp));
677 1.31 cgd }
678 1.31 cgd
679 1.31 cgd /*
680 1.31 cgd * Delayed write.
681 1.31 cgd *
682 1.31 cgd * The buffer is marked dirty, but is not queued for I/O.
683 1.31 cgd * This routine should be used when the buffer is expected
684 1.31 cgd * to be modified again soon, typically a small write that
685 1.31 cgd * partially fills a buffer.
686 1.31 cgd *
687 1.31 cgd * NB: magnetic tapes cannot be delayed; they must be
688 1.31 cgd * written in the order that the writes are requested.
689 1.31 cgd *
690 1.31 cgd * Described in Leffler, et al. (pp. 208-213).
691 1.31 cgd */
692 1.31 cgd void
693 1.31 cgd bdwrite(bp)
694 1.31 cgd struct buf *bp;
695 1.31 cgd {
696 1.86 thorpej struct lwp *l = (curlwp != NULL ? curlwp : &lwp0); /* XXX */
697 1.86 thorpej struct proc *p = l->l_proc;
698 1.85 gehenna const struct bdevsw *bdev;
699 1.45 pk int s;
700 1.31 cgd
701 1.46 mycroft /* If this is a tape block, write the block now. */
702 1.90 pk bdev = bdevsw_lookup(bp->b_dev);
703 1.90 pk if (bdev != NULL && bdev->d_type == D_TAPE) {
704 1.90 pk bawrite(bp);
705 1.90 pk return;
706 1.46 mycroft }
707 1.46 mycroft
708 1.31 cgd /*
709 1.31 cgd * If the block hasn't been seen before:
710 1.31 cgd * (1) Mark it as having been seen,
711 1.45 pk * (2) Charge for the write,
712 1.45 pk * (3) Make sure it's on its vnode's correct block list.
713 1.31 cgd */
714 1.60 fvdl s = splbio();
715 1.87 pk simple_lock(&bp->b_interlock);
716 1.60 fvdl
717 1.97 dbj KASSERT(ISSET(bp->b_flags, B_BUSY));
718 1.97 dbj
719 1.31 cgd if (!ISSET(bp->b_flags, B_DELWRI)) {
720 1.31 cgd SET(bp->b_flags, B_DELWRI);
721 1.49 cgd p->p_stats->p_ru.ru_oublock++;
722 1.31 cgd reassignbuf(bp, bp->b_vp);
723 1.31 cgd }
724 1.31 cgd
725 1.31 cgd /* Otherwise, the "write" is done, so mark and release the buffer. */
726 1.92 yamt CLR(bp->b_flags, B_DONE);
727 1.87 pk simple_unlock(&bp->b_interlock);
728 1.60 fvdl splx(s);
729 1.60 fvdl
730 1.31 cgd brelse(bp);
731 1.31 cgd }
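/*
 * A sketch of the read-modify-delay pattern bdwrite() is meant for
 * (illustrative; vp, lbn and size are assumptions):
 */
#if 0
	struct buf *bp;
	int error;

	error = bread(vp, lbn, size, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	/* ... modify a few bytes of bp->b_data ... */
	bdwrite(bp);	/* mark B_DELWRI and release; no I/O started yet */
#endif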
732 1.31 cgd
733 1.31 cgd /*
734 1.31 cgd * Asynchronous block write; just an asynchronous bwrite().
735 1.31 cgd */
736 1.31 cgd void
737 1.31 cgd bawrite(bp)
738 1.31 cgd struct buf *bp;
739 1.31 cgd {
740 1.87 pk int s;
741 1.31 cgd
742 1.97 dbj s = splbio();
743 1.97 dbj simple_lock(&bp->b_interlock);
744 1.97 dbj
745 1.87 pk KASSERT(ISSET(bp->b_flags, B_BUSY));
746 1.87 pk
747 1.31 cgd SET(bp->b_flags, B_ASYNC);
748 1.87 pk simple_unlock(&bp->b_interlock);
749 1.87 pk splx(s);
750 1.31 cgd VOP_BWRITE(bp);
751 1.31 cgd }
752 1.31 cgd
753 1.31 cgd /*
 754 1.59 fvdl * Same as the first half of bdwrite(): mark the buffer dirty, but do not release it.
755 1.88 pk * Call at splbio() and with the buffer interlock locked.
756 1.88 pk * Note: called only from biodone() through ffs softdep's bioops.io_complete()
757 1.59 fvdl */
758 1.59 fvdl void
759 1.59 fvdl bdirty(bp)
760 1.59 fvdl struct buf *bp;
761 1.59 fvdl {
762 1.86 thorpej struct lwp *l = (curlwp != NULL ? curlwp : &lwp0); /* XXX */
763 1.86 thorpej struct proc *p = l->l_proc;
764 1.59 fvdl
765 1.97 dbj LOCK_ASSERT(simple_lock_held(&bp->b_interlock));
766 1.87 pk KASSERT(ISSET(bp->b_flags, B_BUSY));
767 1.61 fvdl
768 1.61 fvdl CLR(bp->b_flags, B_AGE);
769 1.60 fvdl
770 1.59 fvdl if (!ISSET(bp->b_flags, B_DELWRI)) {
771 1.59 fvdl SET(bp->b_flags, B_DELWRI);
772 1.59 fvdl p->p_stats->p_ru.ru_oublock++;
773 1.59 fvdl reassignbuf(bp, bp->b_vp);
774 1.59 fvdl }
775 1.59 fvdl }
776 1.59 fvdl
777 1.59 fvdl /*
778 1.31 cgd * Release a buffer on to the free lists.
779 1.31 cgd * Described in Bach (p. 46).
780 1.31 cgd */
781 1.31 cgd void
782 1.31 cgd brelse(bp)
783 1.31 cgd struct buf *bp;
784 1.31 cgd {
785 1.31 cgd struct bqueues *bufq;
786 1.31 cgd int s;
787 1.31 cgd
788 1.87 pk /* Block disk interrupts. */
789 1.87 pk s = splbio();
790 1.87 pk simple_lock(&bqueue_slock);
791 1.87 pk simple_lock(&bp->b_interlock);
792 1.97 dbj
793 1.97 dbj KASSERT(ISSET(bp->b_flags, B_BUSY));
794 1.97 dbj KASSERT(!ISSET(bp->b_flags, B_CALL));
795 1.87 pk
796 1.31 cgd /* Wake up any processes waiting for any buffer to become free. */
797 1.31 cgd if (needbuffer) {
798 1.31 cgd needbuffer = 0;
799 1.31 cgd wakeup(&needbuffer);
800 1.31 cgd }
801 1.31 cgd
 802 1.31 cgd /* Wake up any processes waiting for _this_ buffer to become free. */
803 1.31 cgd if (ISSET(bp->b_flags, B_WANTED)) {
804 1.57 mycroft CLR(bp->b_flags, B_WANTED|B_AGE);
805 1.31 cgd wakeup(bp);
806 1.31 cgd }
807 1.31 cgd
808 1.31 cgd /*
809 1.31 cgd * Determine which queue the buffer should be on, then put it there.
810 1.31 cgd */
811 1.31 cgd
812 1.31 cgd /* If it's locked, don't report an error; try again later. */
813 1.31 cgd if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
814 1.31 cgd CLR(bp->b_flags, B_ERROR);
815 1.31 cgd
816 1.31 cgd /* If it's not cacheable, or an error, mark it invalid. */
817 1.31 cgd if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
818 1.31 cgd SET(bp->b_flags, B_INVAL);
819 1.31 cgd
820 1.50 mycroft if (ISSET(bp->b_flags, B_VFLUSH)) {
821 1.50 mycroft /*
822 1.50 mycroft * This is a delayed write buffer that was just flushed to
823 1.50 mycroft * disk. It is still on the LRU queue. If it's become
824 1.50 mycroft * invalid, then we need to move it to a different queue;
825 1.50 mycroft * otherwise leave it in its current position.
826 1.50 mycroft */
827 1.50 mycroft CLR(bp->b_flags, B_VFLUSH);
828 1.99 dbj if (!ISSET(bp->b_flags, B_ERROR|B_INVAL|B_LOCKED|B_AGE)) {
829 1.99 dbj KDASSERT(!debug_verify_freelist || checkfreelist(bp, &bufqueues[BQ_LRU]));
830 1.50 mycroft goto already_queued;
831 1.99 dbj } else {
832 1.50 mycroft bremfree(bp);
833 1.99 dbj }
834 1.50 mycroft }
835 1.99 dbj
836 1.99 dbj KDASSERT(!debug_verify_freelist || !checkfreelist(bp, &bufqueues[BQ_AGE]));
837 1.99 dbj KDASSERT(!debug_verify_freelist || !checkfreelist(bp, &bufqueues[BQ_LRU]));
838 1.99 dbj KDASSERT(!debug_verify_freelist || !checkfreelist(bp, &bufqueues[BQ_LOCKED]));
839 1.50 mycroft
840 1.31 cgd if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
841 1.31 cgd /*
842 1.31 cgd * If it's invalid or empty, dissociate it from its vnode
843 1.31 cgd * and put on the head of the appropriate queue.
844 1.31 cgd */
845 1.59 fvdl if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
846 1.59 fvdl (*bioops.io_deallocate)(bp);
847 1.59 fvdl CLR(bp->b_flags, B_DONE|B_DELWRI);
848 1.59 fvdl if (bp->b_vp) {
849 1.59 fvdl reassignbuf(bp, bp->b_vp);
850 1.31 cgd brelvp(bp);
851 1.59 fvdl }
852 1.31 cgd if (bp->b_bufsize <= 0)
853 1.31 cgd /* no data */
854 1.100 pk goto already_queued;
855 1.31 cgd else
856 1.31 cgd /* invalid data */
857 1.31 cgd bufq = &bufqueues[BQ_AGE];
858 1.31 cgd binsheadfree(bp, bufq);
859 1.31 cgd } else {
860 1.31 cgd /*
861 1.31 cgd * It has valid data. Put it on the end of the appropriate
862 1.31 cgd * queue, so that it'll stick around for as long as possible.
 863 1.67 fvdl * If the buf is AGE but has dependencies, we must put it on the
 864 1.67 fvdl * last bufqueue to be scanned, i.e. LRU. This protects against the
865 1.67 fvdl * livelock where BQ_AGE only has buffers with dependencies,
866 1.67 fvdl * and we thus never get to the dependent buffers in BQ_LRU.
867 1.31 cgd */
868 1.31 cgd if (ISSET(bp->b_flags, B_LOCKED))
869 1.31 cgd /* locked in core */
870 1.31 cgd bufq = &bufqueues[BQ_LOCKED];
871 1.67 fvdl else if (!ISSET(bp->b_flags, B_AGE))
872 1.31 cgd /* valid data */
873 1.31 cgd bufq = &bufqueues[BQ_LRU];
874 1.67 fvdl else {
875 1.67 fvdl /* stale but valid data */
876 1.67 fvdl int has_deps;
877 1.67 fvdl
878 1.67 fvdl if (LIST_FIRST(&bp->b_dep) != NULL &&
879 1.67 fvdl bioops.io_countdeps)
880 1.67 fvdl has_deps = (*bioops.io_countdeps)(bp, 0);
881 1.67 fvdl else
882 1.67 fvdl has_deps = 0;
883 1.67 fvdl bufq = has_deps ? &bufqueues[BQ_LRU] :
884 1.67 fvdl &bufqueues[BQ_AGE];
885 1.67 fvdl }
886 1.31 cgd binstailfree(bp, bufq);
887 1.31 cgd }
888 1.31 cgd
889 1.50 mycroft already_queued:
890 1.31 cgd /* Unlock the buffer. */
891 1.83 hannken CLR(bp->b_flags, B_AGE|B_ASYNC|B_BUSY|B_NOCACHE);
892 1.73 chs SET(bp->b_flags, B_CACHE);
893 1.31 cgd
894 1.31 cgd /* Allow disk interrupts. */
895 1.87 pk simple_unlock(&bp->b_interlock);
896 1.87 pk simple_unlock(&bqueue_slock);
897 1.100 pk if (bp->b_bufsize <= 0) {
898 1.100 pk #ifdef DEBUG
899 1.100 pk memset((char *)bp, 0, sizeof(*bp));
900 1.100 pk #endif
901 1.100 pk pool_put(&bufpool, bp);
902 1.100 pk }
903 1.31 cgd splx(s);
904 1.31 cgd }
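/*
 * Summary of the queueing decisions above: B_LOCKED buffers go to
 * BQ_LOCKED; invalid or empty buffers are dissociated from their
 * vnode, with empty ones returned to bufpool and invalid data put at
 * the head of BQ_AGE; valid, un-aged data goes to the tail of BQ_LRU;
 * aged data goes to the tail of BQ_AGE unless it has softdep
 * dependencies, in which case it stays on BQ_LRU to avoid the
 * livelock described above.
 */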
905 1.31 cgd
906 1.31 cgd /*
907 1.31 cgd * Determine if a block is in the cache.
908 1.31 cgd * Just look on what would be its hash chain. If it's there, return
909 1.31 cgd * a pointer to it, unless it's marked invalid. If it's marked invalid,
910 1.31 cgd * we normally don't return the buffer, unless the caller explicitly
911 1.31 cgd * wants us to.
912 1.31 cgd */
913 1.31 cgd struct buf *
914 1.31 cgd incore(vp, blkno)
915 1.31 cgd struct vnode *vp;
916 1.31 cgd daddr_t blkno;
917 1.31 cgd {
918 1.31 cgd struct buf *bp;
919 1.31 cgd
920 1.31 cgd /* Search hash chain */
921 1.84 matt LIST_FOREACH(bp, BUFHASH(vp, blkno), b_hash) {
922 1.31 cgd if (bp->b_lblkno == blkno && bp->b_vp == vp &&
923 1.31 cgd !ISSET(bp->b_flags, B_INVAL))
924 1.31 cgd return (bp);
925 1.31 cgd }
926 1.31 cgd
927 1.73 chs return (NULL);
928 1.31 cgd }
929 1.31 cgd
930 1.31 cgd /*
931 1.31 cgd * Get a block of requested size that is associated with
932 1.31 cgd * a given vnode and block offset. If it is found in the
933 1.31 cgd * block cache, mark it as having been found, make it busy
934 1.31 cgd * and return it. Otherwise, return an empty block of the
 935 1.31 cgd * correct size. It is up to the caller to ensure that the
 936 1.31 cgd * cached blocks are of the correct size.
937 1.31 cgd */
938 1.31 cgd struct buf *
939 1.31 cgd getblk(vp, blkno, size, slpflag, slptimeo)
940 1.66 augustss struct vnode *vp;
941 1.31 cgd daddr_t blkno;
942 1.31 cgd int size, slpflag, slptimeo;
943 1.31 cgd {
944 1.31 cgd struct buf *bp;
945 1.31 cgd int s, err;
946 1.100 pk int preserve;
947 1.31 cgd
948 1.39 cgd start:
949 1.87 pk s = splbio();
950 1.87 pk simple_lock(&bqueue_slock);
951 1.73 chs bp = incore(vp, blkno);
952 1.73 chs if (bp != NULL) {
953 1.87 pk simple_lock(&bp->b_interlock);
954 1.31 cgd if (ISSET(bp->b_flags, B_BUSY)) {
955 1.87 pk simple_unlock(&bqueue_slock);
956 1.73 chs if (curproc == uvm.pagedaemon_proc) {
957 1.87 pk simple_unlock(&bp->b_interlock);
958 1.73 chs splx(s);
959 1.73 chs return NULL;
960 1.73 chs }
961 1.31 cgd SET(bp->b_flags, B_WANTED);
962 1.87 pk err = ltsleep(bp, slpflag | (PRIBIO + 1) | PNORELOCK,
963 1.87 pk "getblk", slptimeo, &bp->b_interlock);
964 1.31 cgd splx(s);
965 1.31 cgd if (err)
966 1.31 cgd return (NULL);
967 1.31 cgd goto start;
968 1.31 cgd }
969 1.57 mycroft #ifdef DIAGNOSTIC
970 1.78 chs if (ISSET(bp->b_flags, B_DONE|B_DELWRI) &&
971 1.78 chs bp->b_bcount < size && vp->v_type != VBLK)
972 1.73 chs panic("getblk: block size invariant failed");
973 1.57 mycroft #endif
974 1.73 chs SET(bp->b_flags, B_BUSY);
975 1.73 chs bremfree(bp);
976 1.100 pk preserve = 1;
977 1.73 chs } else {
978 1.100 pk if ((bp = getnewbuf(slpflag, slptimeo, 0)) == NULL) {
979 1.87 pk simple_unlock(&bqueue_slock);
980 1.87 pk splx(s);
981 1.31 cgd goto start;
982 1.87 pk }
983 1.73 chs
984 1.73 chs binshash(bp, BUFHASH(vp, blkno));
985 1.64 thorpej bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = blkno;
986 1.31 cgd bgetvp(vp, bp);
987 1.100 pk preserve = 0;
988 1.31 cgd }
989 1.87 pk simple_unlock(&bp->b_interlock);
990 1.87 pk simple_unlock(&bqueue_slock);
991 1.87 pk splx(s);
992 1.96 yamt /*
993 1.96 yamt * LFS can't track total size of B_LOCKED buffer (locked_queue_bytes)
994 1.96 yamt * if we re-size buffers here.
995 1.96 yamt */
996 1.96 yamt if (ISSET(bp->b_flags, B_LOCKED)) {
997 1.96 yamt KASSERT(bp->b_bufsize >= size);
998 1.96 yamt } else {
999 1.100 pk allocbuf(bp, size, preserve);
1000 1.96 yamt }
1001 1.31 cgd return (bp);
1002 1.31 cgd }
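/*
 * A sketch of using getblk() to replace a whole block without reading
 * it first (illustrative; vp, lbn and size are assumptions):
 */
#if 0
	struct buf *bp;
	int error;

	bp = getblk(vp, lbn, size, 0, 0);	/* may sleep for a buffer */
	memset(bp->b_data, 0, size);		/* nothing was read for us */
	/* ... fill in the new contents ... */
	error = bwrite(bp);		/* synchronous write, releases bp */
#endif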
1003 1.31 cgd
1004 1.31 cgd /*
1005 1.31 cgd * Get an empty, disassociated buffer of given size.
1006 1.31 cgd */
1007 1.31 cgd struct buf *
1008 1.31 cgd geteblk(size)
1009 1.31 cgd int size;
1010 1.31 cgd {
1011 1.31 cgd struct buf *bp;
1012 1.87 pk int s;
1013 1.31 cgd
1014 1.87 pk s = splbio();
1015 1.87 pk simple_lock(&bqueue_slock);
1016 1.100 pk while ((bp = getnewbuf(0, 0, 0)) == 0)
1017 1.31 cgd ;
1018 1.87 pk
1019 1.31 cgd SET(bp->b_flags, B_INVAL);
1020 1.31 cgd binshash(bp, &invalhash);
1021 1.87 pk simple_unlock(&bqueue_slock);
1022 1.87 pk simple_unlock(&bp->b_interlock);
1023 1.87 pk splx(s);
1024 1.100 pk allocbuf(bp, size, 0);
1025 1.31 cgd return (bp);
1026 1.31 cgd }
1027 1.31 cgd
1028 1.31 cgd /*
1029 1.31 cgd * Expand or contract the actual memory allocated to a buffer.
1030 1.31 cgd *
1031 1.31 cgd * If the buffer shrinks, data is lost, so it's up to the
1032 1.31 cgd * caller to have written it out *first*; this routine will not
 1033 1.31 cgd * start a write. If the buffer grows, it's the caller's
1034 1.31 cgd * responsibility to fill out the buffer's additional contents.
1035 1.31 cgd */
1036 1.40 christos void
1037 1.100 pk allocbuf(bp, size, preserve)
1038 1.31 cgd struct buf *bp;
1039 1.31 cgd int size;
1040 1.100 pk int preserve;
1041 1.31 cgd {
1042 1.100 pk vsize_t oldsize, desired_size;
1043 1.100 pk caddr_t addr;
1044 1.100 pk int s, delta;
1045 1.31 cgd
1046 1.100 pk desired_size = buf_roundsize(size);
1047 1.31 cgd if (desired_size > MAXBSIZE)
1048 1.100 pk printf("allocbuf: buffer larger than MAXBSIZE requested");
1049 1.31 cgd
1050 1.100 pk bp->b_bcount = size;
1051 1.100 pk
1052 1.100 pk oldsize = bp->b_bufsize;
1053 1.100 pk if (oldsize == desired_size)
1054 1.100 pk return;
1055 1.31 cgd
1056 1.31 cgd /*
1057 1.100 pk * If we want a buffer of a different size, re-allocate the
1058 1.100 pk * buffer's memory; copy old content only if needed.
1059 1.31 cgd */
1060 1.100 pk addr = buf_malloc(desired_size);
1061 1.100 pk if (preserve)
1062 1.100 pk memcpy(addr, bp->b_data, MIN(oldsize,desired_size));
1063 1.100 pk if (bp->b_data != NULL)
1064 1.100 pk buf_mrelease(bp->b_data, oldsize);
1065 1.100 pk bp->b_data = addr;
1066 1.100 pk bp->b_bufsize = desired_size;
1067 1.31 cgd
1068 1.31 cgd /*
1069 1.100 pk * Update overall buffer memory counter (protected by bqueue_slock)
1070 1.31 cgd */
1071 1.100 pk delta = (long)desired_size - (long)oldsize;
1072 1.100 pk
1073 1.100 pk s = splbio();
1074 1.100 pk simple_lock(&bqueue_slock);
1075 1.100 pk if ((bufmem += delta) > bufmem_hiwater) {
1076 1.100 pk /*
1077 1.100 pk * Need to trim overall memory usage.
1078 1.100 pk */
1079 1.100 pk while (buf_canrelease()) {
1080 1.100 pk if (buf_trim() == 0)
1081 1.100 pk break;
1082 1.31 cgd }
1083 1.31 cgd }
1084 1.31 cgd
1085 1.100 pk simple_unlock(&bqueue_slock);
1086 1.100 pk splx(s);
1087 1.31 cgd }
1088 1.31 cgd
1089 1.31 cgd /*
1090 1.31 cgd * Find a buffer which is available for use.
1091 1.31 cgd * Select something from a free list.
1092 1.31 cgd * Preference is to AGE list, then LRU list.
1093 1.87 pk *
1094 1.100 pk * Called at splbio and with buffer queues locked.
1095 1.87 pk * Return buffer locked.
1096 1.31 cgd */
1097 1.31 cgd struct buf *
1098 1.100 pk getnewbuf(slpflag, slptimeo, from_bufq)
1099 1.100 pk int slpflag, slptimeo, from_bufq;
1100 1.31 cgd {
1101 1.66 augustss struct buf *bp;
1102 1.31 cgd
1103 1.31 cgd start:
1104 1.87 pk LOCK_ASSERT(simple_lock_held(&bqueue_slock));
1105 1.87 pk
1106 1.100 pk /*
1107 1.100 pk * Get a new buffer from the pool; but use NOWAIT because
1108 1.100 pk * we have the buffer queues locked.
1109 1.100 pk */
1110 1.100 pk if (buf_lotsfree() && !from_bufq &&
1111 1.100 pk (bp = pool_get(&bufpool, PR_NOWAIT)) != NULL) {
1112 1.100 pk memset((char *)bp, 0, sizeof(*bp));
1113 1.100 pk BUF_INIT(bp);
1114 1.100 pk bp->b_dev = NODEV;
1115 1.100 pk bp->b_vnbufs.le_next = NOLIST;
1116 1.100 pk bp->b_flags = B_BUSY;
1117 1.100 pk return (bp);
1118 1.100 pk }
1119 1.100 pk
1120 1.84 matt if ((bp = TAILQ_FIRST(&bufqueues[BQ_AGE])) != NULL ||
1121 1.84 matt (bp = TAILQ_FIRST(&bufqueues[BQ_LRU])) != NULL) {
1122 1.87 pk simple_lock(&bp->b_interlock);
1123 1.31 cgd bremfree(bp);
1124 1.31 cgd } else {
1125 1.31 cgd /* wait for a free buffer of any kind */
1126 1.31 cgd needbuffer = 1;
1127 1.87 pk ltsleep(&needbuffer, slpflag|(PRIBIO+1),
1128 1.87 pk "getnewbuf", slptimeo, &bqueue_slock);
1129 1.73 chs return (NULL);
1130 1.31 cgd }
1131 1.31 cgd
1132 1.100 pk #ifdef DIAGNOSTIC
1133 1.100 pk if (bp->b_bufsize <= 0)
1134 1.100 pk panic("buffer %p: on queue but empty", bp);
1135 1.100 pk #endif
1136 1.100 pk
1137 1.50 mycroft if (ISSET(bp->b_flags, B_VFLUSH)) {
1138 1.50 mycroft /*
1139 1.50 mycroft * This is a delayed write buffer being flushed to disk. Make
1140 1.50 mycroft * sure it gets aged out of the queue when it's finished, and
1141 1.50 mycroft * leave it off the LRU queue.
1142 1.50 mycroft */
1143 1.50 mycroft CLR(bp->b_flags, B_VFLUSH);
1144 1.50 mycroft SET(bp->b_flags, B_AGE);
1145 1.87 pk simple_unlock(&bp->b_interlock);
1146 1.50 mycroft goto start;
1147 1.50 mycroft }
1148 1.50 mycroft
1149 1.31 cgd /* Buffer is no longer on free lists. */
1150 1.31 cgd SET(bp->b_flags, B_BUSY);
1151 1.31 cgd
1152 1.75 chs /*
1153 1.75 chs * If buffer was a delayed write, start it and return NULL
1154 1.75 chs * (since we might sleep while starting the write).
1155 1.75 chs */
1156 1.31 cgd if (ISSET(bp->b_flags, B_DELWRI)) {
1157 1.50 mycroft /*
1158 1.50 mycroft * This buffer has gone through the LRU, so make sure it gets
1159 1.50 mycroft * reused ASAP.
1160 1.50 mycroft */
1161 1.50 mycroft SET(bp->b_flags, B_AGE);
1162 1.87 pk simple_unlock(&bp->b_interlock);
1163 1.89 pk simple_unlock(&bqueue_slock);
1164 1.50 mycroft bawrite(bp);
1165 1.89 pk simple_lock(&bqueue_slock);
1166 1.75 chs return (NULL);
1167 1.31 cgd }
1168 1.31 cgd
1169 1.31 cgd /* disassociate us from our vnode, if we had one... */
1170 1.31 cgd if (bp->b_vp)
1171 1.31 cgd brelvp(bp);
1172 1.31 cgd
1173 1.59 fvdl if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
1174 1.59 fvdl (*bioops.io_deallocate)(bp);
1175 1.59 fvdl
1176 1.31 cgd /* clear out various other fields */
1177 1.31 cgd bp->b_flags = B_BUSY;
1178 1.31 cgd bp->b_dev = NODEV;
1179 1.64 thorpej bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = 0;
1180 1.31 cgd bp->b_iodone = 0;
1181 1.31 cgd bp->b_error = 0;
1182 1.31 cgd bp->b_resid = 0;
1183 1.31 cgd bp->b_bcount = 0;
1184 1.31 cgd
1185 1.34 mycroft bremhash(bp);
1186 1.31 cgd return (bp);
1187 1.31 cgd }
1188 1.31 cgd
1189 1.31 cgd /*
1190 1.100 pk * Attempt to free an aged buffer off the queues.
1191 1.100 pk * Called at splbio and with queue lock held.
1192 1.100 pk * Returns the amount of buffer memory freed.
1193 1.100 pk */
1194 1.100 pk int buf_trim(void)
1195 1.100 pk {
1196 1.100 pk struct buf *bp;
1197 1.100 pk long size = 0;
1198 1.100 pk int wanted;
1199 1.100 pk
1200 1.100 pk /* Instruct getnewbuf() to get buffers off the queues */
1201 1.100 pk if ((bp = getnewbuf(PCATCH,1,1)) == NULL)
1202 1.100 pk return 0;
1203 1.100 pk
1204 1.100 pk wanted = ISSET(bp->b_flags, B_WANTED);
1205 1.100 pk simple_unlock(&bp->b_interlock);
1206 1.100 pk if (wanted) {
1207 1.100 pk printf("buftrim: got WANTED buffer\n");
1208 1.100 pk SET(bp->b_flags, B_INVAL);
1209 1.100 pk binshash(bp, &invalhash);
1210 1.100 pk simple_unlock(&bqueue_slock);
1211 1.100 pk goto out;
1212 1.100 pk }
1213 1.100 pk size = bp->b_bufsize;
1214 1.100 pk bufmem -= size;
1215 1.100 pk simple_unlock(&bqueue_slock);
1216 1.100 pk if (size > 0) {
1217 1.100 pk buf_mrelease(bp->b_data, size);
1218 1.100 pk bp->b_bcount = bp->b_bufsize = 0;
1219 1.100 pk }
1220 1.100 pk
1221 1.100 pk out:
1222 1.100 pk /* brelse() will return the buffer to the global buffer pool */
1223 1.100 pk brelse(bp);
1224 1.100 pk simple_lock(&bqueue_slock);
1225 1.100 pk return size;
1226 1.100 pk }
1227 1.100 pk
1228 1.100 pk int buf_drain(int n)
1229 1.100 pk {
1230 1.100 pk int s, size = 0;
1231 1.100 pk
1232 1.100 pk /* If not asked for a specific amount, make our own estimate */
1233 1.100 pk if (n == 0)
1234 1.100 pk n = buf_canrelease();
1235 1.100 pk
1236 1.100 pk s = splbio();
1237 1.100 pk simple_lock(&bqueue_slock);
1238 1.100 pk while (n-- > 0 && bufmem > bufmem_lowater)
1239 1.100 pk size += buf_trim();
1240 1.100 pk simple_unlock(&bqueue_slock);
1241 1.100 pk splx(s);
1242 1.100 pk return size;
1243 1.100 pk }
1244 1.100 pk
1245 1.100 pk /*
1246 1.31 cgd * Wait for operations on the buffer to complete.
1247 1.31 cgd * When they do, extract and return the I/O's error value.
1248 1.31 cgd */
1249 1.31 cgd int
1250 1.31 cgd biowait(bp)
1251 1.31 cgd struct buf *bp;
1252 1.31 cgd {
1253 1.87 pk int s, error;
1254 1.59 fvdl
1255 1.31 cgd s = splbio();
1256 1.87 pk simple_lock(&bp->b_interlock);
1257 1.80 chs while (!ISSET(bp->b_flags, B_DONE | B_DELWRI))
1258 1.87 pk ltsleep(bp, PRIBIO + 1, "biowait", 0, &bp->b_interlock);
1259 1.31 cgd
1260 1.31 cgd /* check for interruption of I/O (e.g. via NFS), then errors. */
1261 1.31 cgd if (ISSET(bp->b_flags, B_EINTR)) {
1262 1.31 cgd CLR(bp->b_flags, B_EINTR);
1263 1.87 pk error = EINTR;
1264 1.31 cgd } else if (ISSET(bp->b_flags, B_ERROR))
1265 1.87 pk error = bp->b_error ? bp->b_error : EIO;
1266 1.31 cgd else
1267 1.87 pk error = 0;
1268 1.87 pk
1269 1.87 pk simple_unlock(&bp->b_interlock);
1270 1.87 pk splx(s);
1271 1.87 pk return (error);
1272 1.31 cgd }
1273 1.31 cgd
1274 1.31 cgd /*
1275 1.31 cgd * Mark I/O complete on a buffer.
1276 1.31 cgd *
1277 1.31 cgd * If a callback has been requested, e.g. the pageout
1278 1.31 cgd * daemon, do so. Otherwise, awaken waiting processes.
1279 1.31 cgd *
1280 1.31 cgd * [ Leffler, et al., says on p.247:
1281 1.31 cgd * "This routine wakes up the blocked process, frees the buffer
1282 1.31 cgd * for an asynchronous write, or, for a request by the pagedaemon
1283 1.31 cgd * process, invokes a procedure specified in the buffer structure" ]
1284 1.31 cgd *
1285 1.31 cgd * In real life, the pagedaemon (or other system processes) wants
 1286 1.31 cgd * to do async stuff too, and doesn't want the buffer brelse()'d.
1287 1.31 cgd * (for swap pager, that puts swap buffers on the free lists (!!!),
1288 1.31 cgd * for the vn device, that puts malloc'd buffers on the free lists!)
1289 1.31 cgd */
1290 1.31 cgd void
1291 1.31 cgd biodone(bp)
1292 1.31 cgd struct buf *bp;
1293 1.31 cgd {
1294 1.60 fvdl int s = splbio();
1295 1.60 fvdl
1296 1.87 pk simple_lock(&bp->b_interlock);
1297 1.31 cgd if (ISSET(bp->b_flags, B_DONE))
1298 1.31 cgd panic("biodone already");
1299 1.31 cgd SET(bp->b_flags, B_DONE); /* note that it's done */
1300 1.31 cgd
1301 1.59 fvdl if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete)
1302 1.59 fvdl (*bioops.io_complete)(bp);
1303 1.59 fvdl
1304 1.31 cgd if (!ISSET(bp->b_flags, B_READ)) /* wake up reader */
1305 1.31 cgd vwakeup(bp);
1306 1.31 cgd
1307 1.87 pk /*
 1308 1.87 pk * If necessary, call out. Unlock the buffer before calling the
 1309 1.87 pk * b_iodone callback, as the buffer isn't valid any more when it returns.
1310 1.87 pk */
1311 1.87 pk if (ISSET(bp->b_flags, B_CALL)) {
1312 1.31 cgd CLR(bp->b_flags, B_CALL); /* but note callout done */
1313 1.87 pk simple_unlock(&bp->b_interlock);
1314 1.31 cgd (*bp->b_iodone)(bp);
1315 1.59 fvdl } else {
1316 1.87 pk if (ISSET(bp->b_flags, B_ASYNC)) { /* if async, release */
1317 1.87 pk simple_unlock(&bp->b_interlock);
1318 1.59 fvdl brelse(bp);
1319 1.87 pk } else { /* or just wakeup the buffer */
1320 1.59 fvdl CLR(bp->b_flags, B_WANTED);
1321 1.59 fvdl wakeup(bp);
1322 1.87 pk simple_unlock(&bp->b_interlock);
1323 1.59 fvdl }
1324 1.31 cgd }
1325 1.60 fvdl
1326 1.60 fvdl splx(s);
1327 1.31 cgd }
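/*
 * A sketch of the async completion path biodone() serves (illustrative;
 * my_iodone() and its use below are assumptions, not from this file):
 */
#if 0
static void
my_iodone(struct buf *bp)
{
	/* Runs from biodone() with B_CALL already cleared. */
	if (ISSET(bp->b_flags, B_ERROR))
		printf("my_iodone: error %d\n",
		    bp->b_error ? bp->b_error : EIO);
	brelse(bp);		/* the callback now owns the buffer */
}

	/* ... when issuing the I/O ... */
	bp->b_iodone = my_iodone;
	SET(bp->b_flags, B_CALL | B_ASYNC);
	VOP_STRATEGY(bp);
#endif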
1328 1.31 cgd
1329 1.31 cgd /*
1330 1.31 cgd * Return a count of buffers on the "locked" queue.
1331 1.31 cgd */
1332 1.31 cgd int
1333 1.31 cgd count_lock_queue()
1334 1.31 cgd {
1335 1.66 augustss struct buf *bp;
1336 1.66 augustss int n = 0;
1337 1.31 cgd
1338 1.87 pk simple_lock(&bqueue_slock);
1339 1.84 matt TAILQ_FOREACH(bp, &bufqueues[BQ_LOCKED], b_freelist)
1340 1.31 cgd n++;
1341 1.87 pk simple_unlock(&bqueue_slock);
1342 1.31 cgd return (n);
1343 1.31 cgd }
1344 1.31 cgd
1345 1.100 pk /*
1346 1.100 pk * Wait for all buffers to complete I/O
1347 1.100 pk * Return the number of "stuck" buffers.
1348 1.100 pk */
1349 1.100 pk int
1350 1.100 pk buf_syncwait(void)
1351 1.100 pk {
1352 1.100 pk struct buf *bp;
1353 1.100 pk int iter, nbusy, nbusy_prev = 0, dcount, s, ihash;
1354 1.100 pk
1355 1.100 pk dcount = 10000;
1356 1.100 pk for (iter = 0; iter < 20;) {
1357 1.100 pk s = splbio();
1358 1.100 pk simple_lock(&bqueue_slock);
1359 1.100 pk nbusy = 0;
1360 1.100 pk for (ihash = 0; ihash < bufhash+1; ihash++) {
1361 1.100 pk LIST_FOREACH(bp, &bufhashtbl[ihash], b_hash) {
1362 1.100 pk if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
1363 1.100 pk nbusy++;
1364 1.100 pk /*
1365 1.100 pk * With soft updates, some buffers that are
1366 1.100 pk * written will be remarked as dirty until other
1367 1.100 pk * buffers are written.
1368 1.100 pk */
1369 1.100 pk if (bp->b_vp && bp->b_vp->v_mount
1370 1.100 pk && (bp->b_vp->v_mount->mnt_flag & MNT_SOFTDEP)
1371 1.100 pk && (bp->b_flags & B_DELWRI)) {
1372 1.100 pk simple_lock(&bp->b_interlock);
1373 1.100 pk bremfree(bp);
1374 1.100 pk bp->b_flags |= B_BUSY;
1375 1.100 pk nbusy++;
1376 1.100 pk simple_unlock(&bp->b_interlock);
1377 1.100 pk simple_unlock(&bqueue_slock);
1378 1.100 pk bawrite(bp);
1379 1.100 pk if (dcount-- <= 0) {
1380 1.100 pk printf("softdep ");
1381 1.100 pk goto fail;
1382 1.100 pk }
1383 1.100 pk simple_lock(&bqueue_slock);
1384 1.100 pk }
1385 1.100 pk }
1386 1.100 pk }
1387 1.100 pk
1388 1.100 pk simple_unlock(&bqueue_slock);
1389 1.100 pk splx(s);
1390 1.100 pk
1391 1.100 pk if (nbusy == 0)
1392 1.100 pk break;
1393 1.100 pk if (nbusy_prev == 0)
1394 1.100 pk nbusy_prev = nbusy;
1395 1.100 pk printf("%d ", nbusy);
1396 1.100 pk tsleep(&nbusy, PRIBIO, "bflush",
1397 1.100 pk (iter == 0) ? 1 : hz / 25 * iter);
1398 1.100 pk if (nbusy >= nbusy_prev) /* we didn't flush anything */
1399 1.100 pk iter++;
1400 1.100 pk else
1401 1.100 pk nbusy_prev = nbusy;
1402 1.100 pk }
1403 1.100 pk
1404 1.100 pk if (nbusy) {
1405 1.100 pk fail:;
1406 1.100 pk #if defined(DEBUG) || defined(DEBUG_HALT_BUSY)
1407 1.100 pk printf("giving up\nPrinting vnodes for busy buffers\n");
1408 1.100 pk for (ihash = 0; ihash < bufhash+1; ihash++) {
1409 1.100 pk LIST_FOREACH(bp, &bufhashtbl[ihash], b_hash) {
1410 1.100 pk if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
1411 1.100 pk vprint(NULL, bp->b_vp);
1412 1.100 pk }
1413 1.100 pk }
1414 1.100 pk #endif
1415 1.100 pk }
1416 1.100 pk
1417 1.100 pk return nbusy;
1418 1.100 pk }
1419 1.100 pk
1420 1.100 pk #define KERN_BUFSLOP 20
1421 1.100 pk static int
1422 1.100 pk sysctl_dobuf(SYSCTLFN_ARGS)
1423 1.100 pk {
1424 1.100 pk struct buf *bp;
1425 1.100 pk char *dp;
1426 1.100 pk u_int i, elem_size;
1427 1.100 pk size_t len, buflen, needed;
1428 1.100 pk int error, s;
1429 1.100 pk
1430 1.100 pk dp = oldp;
1431 1.100 pk len = buflen = oldp != NULL ? *oldlenp : 0;
1432 1.100 pk error = 0;
1433 1.100 pk needed = 0;
1434 1.100 pk elem_size = sizeof(struct buf);
1435 1.100 pk
1436 1.100 pk s = splbio();
1437 1.100 pk simple_lock(&bqueue_slock);
1438 1.100 pk for (i = 0; i < BQUEUES; i++) {
1439 1.100 pk TAILQ_FOREACH(bp, &bufqueues[i], b_freelist) {
 1440 1.100 pk 			if (len >= elem_size) {
1441 1.100 pk error = copyout(bp, dp, elem_size);
1442 1.100 pk if (error)
1443 1.100 pk goto cleanup;
1444 1.100 pk dp += elem_size;
1445 1.100 pk len -= elem_size;
1446 1.100 pk }
1447 1.100 pk needed += elem_size;
1448 1.100 pk }
1449 1.100 pk }
1450 1.100 pk cleanup:
1451 1.100 pk simple_unlock(&bqueue_slock);
1452 1.100 pk splx(s);
1453 1.100 pk
1454 1.100 pk if (oldp != NULL) {
1455 1.100 pk *oldlenp = (char *)dp - (char *)oldp;
1456 1.100 pk if (needed > *oldlenp)
1457 1.100 pk error = ENOMEM;
1458 1.100 pk } else {
1459 1.100 pk needed += KERN_BUFSLOP;
1460 1.100 pk *oldlenp = needed;
1461 1.100 pk }
1462 1.100 pk
1463 1.100 pk return (error);
1464 1.100 pk }
1465 1.100 pk
1466 1.100 pk static int sysctlnum_bufcache, sysctlnum_bufmemhiwater, sysctlnum_bufmemlowater;
1467 1.100 pk
1468 1.100 pk static int
1469 1.100 pk sysctl_bufvm_update(SYSCTLFN_ARGS)
1470 1.100 pk {
1471 1.100 pk int t, error;
1472 1.100 pk struct sysctlnode node;
1473 1.100 pk
1474 1.100 pk node = *rnode;
1475 1.100 pk node.sysctl_data = &t;
1476 1.100 pk t = *(int*)rnode->sysctl_data;
1477 1.100 pk error = sysctl_lookup(SYSCTLFN_CALL(&node));
1478 1.100 pk if (error || newp == NULL)
1479 1.100 pk return (error);
1480 1.100 pk
1481 1.100 pk if (rnode->sysctl_num == sysctlnum_bufcache) {
1482 1.100 pk if (t < 0 || t > 100)
1483 1.100 pk return (EINVAL);
1484 1.100 pk bufcache = t;
1485 1.100 pk bufmem_hiwater = buf_memcalc();
1486 1.100 pk bufmem_lowater = (bufmem_hiwater >> 4);
1487 1.100 pk } else if (rnode->sysctl_num == sysctlnum_bufmemlowater) {
1488 1.100 pk bufmem_lowater = t;
1489 1.100 pk } else if (rnode->sysctl_num == sysctlnum_bufmemhiwater) {
1490 1.100 pk bufmem_hiwater = t;
1491 1.100 pk } else
1492 1.100 pk return (EINVAL);
1493 1.100 pk
1494 1.100 pk /* Drain until below new high water mark */
1495 1.100 pk while ((t = bufmem - bufmem_hiwater) >= 0) {
1496 1.100 pk if (buf_drain(t / (2*1024)) <= 0)
1497 1.100 pk break;
1498 1.100 pk }
1499 1.100 pk
1500 1.100 pk return 0;
1501 1.100 pk }
1502 1.100 pk
1503 1.100 pk SYSCTL_SETUP(sysctl_kern_buf_setup, "sysctl kern.buf subtree setup")
1504 1.100 pk {
1505 1.100 pk struct sysctlnode *rnode;
1506 1.100 pk
1507 1.100 pk sysctl_createv(SYSCTL_PERMANENT,
1508 1.100 pk CTLTYPE_NODE, "buf", NULL,
1509 1.100 pk sysctl_dobuf, 0, NULL, 0,
1510 1.100 pk CTL_KERN, KERN_BUF, CTL_EOL);
1511 1.100 pk
1512 1.100 pk rnode = NULL;
1513 1.100 pk if (sysctl_createv(SYSCTL_PERMANENT|SYSCTL_READWRITE,
1514 1.100 pk CTLTYPE_INT, "bufcache", &rnode,
1515 1.100 pk sysctl_bufvm_update, 0, &bufcache, 0,
1516 1.100 pk CTL_VM, CTL_CREATE, CTL_EOL) == 0)
1517 1.100 pk sysctlnum_bufcache = rnode->sysctl_num;
1518 1.100 pk
1519 1.100 pk rnode = NULL;
1520 1.100 pk if (sysctl_createv(SYSCTL_PERMANENT|SYSCTL_READWRITE,
1521 1.100 pk CTLTYPE_INT, "bufmem_lowater", &rnode,
1522 1.100 pk sysctl_bufvm_update, 0, &bufmem_lowater, 0,
1523 1.100 pk CTL_VM, CTL_CREATE, CTL_EOL) == 0)
1524 1.100 pk sysctlnum_bufmemlowater = rnode->sysctl_num;
1525 1.100 pk
1526 1.100 pk rnode = NULL;
1527 1.100 pk if (sysctl_createv(SYSCTL_PERMANENT|SYSCTL_READWRITE,
1528 1.100 pk CTLTYPE_INT, "bufmem_hiwater", &rnode,
1529 1.100 pk sysctl_bufvm_update, 0, &bufmem_hiwater, 0,
1530 1.100 pk CTL_VM, CTL_CREATE, CTL_EOL) == 0)
1531 1.100 pk sysctlnum_bufmemhiwater = rnode->sysctl_num;
1532 1.100 pk }
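/*
 * The nodes above make the cache tunable at run time, e.g. from
 * userland (a usage sketch):
 *
 *	sysctl -w vm.bufcache=20	# cap the cache at 20% of RAM
 *	sysctl -w vm.bufmem_hiwater=33554432
 *
 * Shrinking vm.bufcache or vm.bufmem_hiwater makes
 * sysctl_bufvm_update() drain buffers until bufmem falls below the
 * new high water mark.
 */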
1533 1.100 pk
1534 1.36 cgd #ifdef DEBUG
1535 1.31 cgd /*
1536 1.31 cgd * Print out statistics on the current allocation of the buffer pool.
1537 1.31 cgd * Can be enabled to print out on every ``sync'' by setting "syncprt"
1538 1.31 cgd * in vfs_syscalls.c using sysctl.
1539 1.31 cgd */
1540 1.31 cgd void
1541 1.31 cgd vfs_bufstats()
1542 1.31 cgd {
1543 1.31 cgd int s, i, j, count;
1544 1.66 augustss struct buf *bp;
1545 1.66 augustss struct bqueues *dp;
1546 1.72 simonb int counts[(MAXBSIZE / PAGE_SIZE) + 1];
1547 1.100 pk static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE" };
1548 1.71 thorpej
1549 1.31 cgd for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
1550 1.31 cgd count = 0;
1551 1.71 thorpej for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
1552 1.31 cgd counts[j] = 0;
1553 1.31 cgd s = splbio();
1554 1.84 matt TAILQ_FOREACH(bp, dp, b_freelist) {
1555 1.71 thorpej counts[bp->b_bufsize/PAGE_SIZE]++;
1556 1.31 cgd count++;
1557 1.31 cgd }
1558 1.31 cgd splx(s);
1559 1.48 christos printf("%s: total-%d", bname[i], count);
1560 1.71 thorpej for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
1561 1.31 cgd if (counts[j] != 0)
1562 1.71 thorpej printf(", %d-%d", j * PAGE_SIZE, counts[j]);
1563 1.48 christos printf("\n");
1564 1.31 cgd }
1565 1.31 cgd }
1566 1.36 cgd #endif /* DEBUG */