/* $NetBSD: bufcache.c,v 1.10 2000/12/01 02:19:43 simonb Exp $ */

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Simon Burge.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#ifndef lint
__RCSID("$NetBSD: bufcache.c,v 1.10 2000/12/01 02:19:43 simonb Exp $");
#endif /* not lint */

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <uvm/uvm_extern.h>

#include <err.h>
#include <errno.h>
#include <kvm.h>
#include <nlist.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "systat.h"
#include "extern.h"

/*
 * Definitions for the buffer free lists (from sys/kern/vfs_bio.c).
 */
#define BQUEUES         4       /* number of free buffer queues */

#define BQ_LOCKED       0       /* super-blocks &c */
#define BQ_LRU          1       /* lru, useful buffers */
#define BQ_AGE          2       /* rubbish */
#define BQ_EMPTY        3       /* buffer headers with no memory */

#define VCACHE_SIZE     50

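/*
 * Cache of vnodes already copied in from the kernel, keyed on their
 * kernel address.  vc_age is bumped on every lookup and reset to zero
 * on a hit, so the entry with the largest age is the least recently
 * used one and is recycled when the cache is full.
 */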
struct vcache {
        int vc_age;
        struct vnode *vc_addr;
        struct vnode vc_node;
};

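/*
 * One entry per mount point seen while walking the buffer queues,
 * accumulating the number of buffers, the kilobytes of buffer space
 * allocated (ml_size) and the kilobytes holding valid data (ml_valid)
 * for that file system.
 */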
struct ml_entry {
        int ml_count;
        long ml_size;
        long ml_valid;
        struct mount *ml_addr;
        struct mount ml_mount;
        LIST_ENTRY(ml_entry) ml_entries;
};

static struct nlist namelist[] = {
#define X_NBUF          0
        { "_nbuf" },
#define X_BUF           1
        { "_buf" },
#define X_BUFQUEUES     2
        { "_bufqueues" },
#define X_BUFPAGES      3
        { "_bufpages" },
        { "" },
};

static struct vcache vcache[VCACHE_SIZE];
static LIST_HEAD(mount_list, ml_entry) mount_list;

static int nbuf, bufpages, bufkb;
static struct uvmexp_sysctl uvmexp;
static void *bufaddr;
static struct buf *buf = NULL;
static TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];

static void vc_init(void);
static void ml_init(void);
static struct vnode *vc_lookup(struct vnode *);
static struct mount *ml_lookup(struct mount *, int, int);


WINDOW *
openbufcache(void)
{

        return (subwin(stdscr, LINES-5-1, 0, 5, 0));
}

void
closebufcache(WINDOW *w)
{

        if (w == NULL)
                return;
        wclear(w);
        wrefresh(w);
        delwin(w);
        ml_init();      /* Clear out mount list */
}

void
labelbufcache(void)
{
        mvwprintw(wnd, 0, 0,
            "There are %d metadata buffers using %d kBytes of memory.",
            nbuf, bufkb);
        wclrtoeol(wnd);
        wmove(wnd, 1, 0);
        wmove(wnd, 2, 0);
        wclrtoeol(wnd);
        mvwaddstr(wnd, 3, 0,
            "File System          Bufs used   %   kB in use   %  Bufsize kB   %  Util %");
        wclrtoeol(wnd);
}

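/*
 * Display the vnode page cache summary followed by one line per
 * mounted file system showing how many buffers and how much buffer
 * space it is using, with a grand total at the bottom.
 */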
void
showbufcache(void)
{
        int tbuf, i, lastrow;
        long tvalid, tsize;
        struct ml_entry *ml;

        mvwprintw(wnd, 1, 0,
            "There are %lld pages for vnode page cache using %lld kBytes of memory.",
            (long long)uvmexp.vnodepages,
            (long long)uvmexp.vnodepages * getpagesize() / 1024);
        wclrtoeol(wnd);

        tbuf = tvalid = tsize = 0;
        lastrow = 4;    /* Leave room for header. */
        for (i = lastrow, ml = LIST_FIRST(&mount_list); ml != NULL;
            i++, ml = LIST_NEXT(ml, ml_entries)) {

                /* Display in window if enough room. */
                if (i < getmaxy(wnd) - 2) {
                        mvwprintw(wnd, i, 0, "%-20.20s", ml->ml_addr == NULL ?
                            "NULL" : ml->ml_mount.mnt_stat.f_mntonname);
                        wprintw(wnd,
                            " %6d %3d %8ld %3ld %8ld %3ld %3ld",
                            ml->ml_count, (100 * ml->ml_count) / nbuf,
                            ml->ml_valid, (100 * ml->ml_valid) / bufkb,
                            ml->ml_size, (100 * ml->ml_size) / bufkb,
                            ml->ml_size ? (100 * ml->ml_valid) / ml->ml_size : 0);
                        wclrtoeol(wnd);
                        lastrow = i;
                }

                /* Update statistics. */
                tbuf += ml->ml_count;
                tvalid += ml->ml_valid;
                tsize += ml->ml_size;
        }

        wclrtobot(wnd);
        mvwprintw(wnd, lastrow + 2, 0,
            "%-20s %6d %3d %8ld %3ld %8ld %3ld %3ld",
            "Total:", tbuf, (100 * tbuf) / nbuf,
            tvalid, (100 * tvalid) / bufkb,
            tsize, (100 * tsize) / bufkb,
            tsize ? (100 * tvalid) / tsize : 0);
}

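/*
 * Resolve the kernel symbols we need, read the buffer cache geometry
 * (nbuf, bufpages) and allocate a local array large enough to hold a
 * copy of every buffer header.
 */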
int
initbufcache(void)
{
        if (namelist[X_NBUF].n_type == 0) {
                if (kvm_nlist(kd, namelist)) {
                        nlisterr(namelist);
                        return(0);
                }
                if (namelist[X_NBUF].n_type == 0) {
                        error("No namelist");
                        return(0);
                }
        }

        NREAD(X_NBUF, &nbuf, sizeof(nbuf));
        NREAD(X_BUFPAGES, &bufpages, sizeof(bufpages));
        bufkb = bufpages * sysconf(_SC_PAGESIZE) / 1024;

        if ((buf = malloc(nbuf * sizeof(struct buf))) == NULL) {
                error("malloc failed");
                die(0);
        }
        NREAD(X_BUF, &bufaddr, sizeof(bufaddr));

        return(1);
}

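/*
 * Take a fresh snapshot: re-read the vnode page cache counters via
 * sysctl, copy the buffer free lists and all buffer headers out of
 * the kernel, then walk every queued buffer and charge it to the
 * mount point of the vnode it belongs to.  Finally the mount list is
 * sorted by buffer count so the busiest file systems are shown first.
 */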
void
fetchbufcache(void)
{
        int i, count, mib[2];
        size_t size;
        struct buf *bp;
        struct vnode *vn;
        struct ml_entry *ml;

        /* Re-read pages used for vnodes */
        size = sizeof(uvmexp);
        mib[0] = CTL_VM;
        mib[1] = VM_UVMEXP2;
        if (sysctl(mib, 2, &uvmexp, &size, NULL, 0) < 0) {
                error("can't get uvmexp: %s\n", strerror(errno));
                memset(&uvmexp, 0, sizeof(uvmexp));
        }

        /* Re-read bufqueues lists and buffer cache headers */
        NREAD(X_BUFQUEUES, bufqueues, sizeof(bufqueues));
        KREAD(bufaddr, buf, sizeof(struct buf) * nbuf);

        /* Initialise vnode cache and mount list. */
        vc_init();
        ml_init();
        for (i = 0; i < BQUEUES; i++) {
                for (bp = bufqueues[i].tqh_first; bp != NULL;
                    bp = bp->b_freelist.tqe_next) {
                        /*
                         * The queue links hold kernel addresses; convert
                         * this one to its offset in our local copy of the
                         * buffer array before dereferencing it.
                         */
                        bp = (struct buf *)((u_long)bp + (u_long)buf -
                            (u_long)bufaddr);

                        if (bp->b_vp != NULL) {
                                vn = vc_lookup(bp->b_vp);
                                if (vn == NULL)
                                        errx(1, "vc_lookup returns NULL!\n");
                                if (vn->v_mount != NULL)
                                        (void)ml_lookup(vn->v_mount,
                                            bp->b_bufsize, bp->b_bcount);
                        }
                }
        }

        /* simple sort - there's not that many entries */
        do {
                if ((ml = LIST_FIRST(&mount_list)) == NULL ||
                    LIST_NEXT(ml, ml_entries) == NULL)
                        break;

                count = 0;
                for (ml = LIST_FIRST(&mount_list); ml != NULL;
                    ml = LIST_NEXT(ml, ml_entries)) {
                        if (LIST_NEXT(ml, ml_entries) == NULL)
                                break;
                        if (ml->ml_count < LIST_NEXT(ml, ml_entries)->ml_count) {
                                ml = LIST_NEXT(ml, ml_entries);
                                LIST_REMOVE(ml, ml_entries);
                                LIST_INSERT_HEAD(&mount_list, ml, ml_entries);
                                count++;
                        }
                }
        } while (count != 0);
}

static void
vc_init(void)
{
        int i;

        /* vc_addr == NULL for unused cache entry. */
        for (i = 0; i < VCACHE_SIZE; i++)
                vcache[i].vc_addr = NULL;
}

static void
ml_init(void)
{
        struct ml_entry *ml;

        /* Throw out the current mount list and start again. */
        while ((ml = LIST_FIRST(&mount_list)) != NULL) {
                LIST_REMOVE(ml, ml_entries);
                free(ml);
        }
}

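/*
 * Return a local copy of the vnode at kernel address vaddr, reading
 * it with kvm only if it is not already cached.  When the cache is
 * full, the least recently used entry is recycled.
 */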
static struct vnode *
vc_lookup(struct vnode *vaddr)
{
        struct vnode *ret;
        int i, oldest;

        ret = NULL;
        oldest = 0;
        for (i = 0; i < VCACHE_SIZE; i++) {
                vcache[i].vc_age++;
                if (vcache[i].vc_addr == NULL)
                        break;
                /* The entry with the largest age is the least recently used. */
                if (vcache[i].vc_age > vcache[oldest].vc_age)
                        oldest = i;
                if (vcache[i].vc_addr == vaddr) {
                        vcache[i].vc_age = 0;
                        ret = &vcache[i].vc_node;
                }
        }

        /* Found an entry in the cache? */
        if (ret != NULL)
                return(ret);

        /* Went past the end of the cache?  Recycle the oldest entry. */
        if (i >= VCACHE_SIZE)
                i = oldest;

        /* Read in new vnode and reset age counter. */
        KREAD(vaddr, &vcache[i].vc_node, sizeof(struct vnode));
        vcache[i].vc_addr = vaddr;
        vcache[i].vc_age = 0;

        return(&vcache[i].vc_node);
}

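/*
 * Find (or create) the mount list entry for the mount structure at
 * kernel address maddr and add this buffer's allocated and valid byte
 * counts to its totals.  A NULL maddr collects buffers that are not
 * associated with any mount point.
 */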
static struct mount *
ml_lookup(struct mount *maddr, int size, int valid)
{
        struct ml_entry *ml;

        for (ml = LIST_FIRST(&mount_list); ml != NULL;
            ml = LIST_NEXT(ml, ml_entries))
                if (ml->ml_addr == maddr) {
                        ml->ml_count++;
                        ml->ml_size += size / 1024;
                        ml->ml_valid += valid / 1024;
                        if (ml->ml_addr == NULL)
                                return(NULL);
                        else
                                return(&ml->ml_mount);
                }

        if ((ml = malloc(sizeof(struct ml_entry))) == NULL) {
                error("out of memory");
                die(0);
        }
        LIST_INSERT_HEAD(&mount_list, ml, ml_entries);
        ml->ml_count = 1;
        ml->ml_size = size / 1024;
        ml->ml_valid = valid / 1024;
        ml->ml_addr = maddr;
        if (maddr == NULL)
                return(NULL);

        KREAD(maddr, &ml->ml_mount, sizeof(struct mount));
        return(&ml->ml_mount);
}