/*	$NetBSD: bufcache.c,v 1.15 2003/12/30 12:52:48 pk Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Simon Burge.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#ifndef lint
__RCSID("$NetBSD: bufcache.c,v 1.15 2003/12/30 12:52:48 pk Exp $");
#endif /* not lint */

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <uvm/uvm_extern.h>

#include <err.h>
#include <errno.h>
#include <kvm.h>
#include <math.h>
#include <nlist.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <miscfs/specfs/specdev.h>

#include "systat.h"
#include "extern.h"

#define	VCACHE_SIZE	50

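/*
 * Small cache of vnodes copied out of the kernel with kvm(3): each slot
 * records the kernel address it was read from, a local copy of the vnode,
 * and an age counter used to pick a slot for replacement.
 */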
struct vcache {
	int vc_age;
	struct vnode *vc_addr;
	struct vnode vc_node;
};

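/*
 * Per-mount accumulator: the number of buffers seen on the mount, their
 * total allocated size and valid bytes, plus a local copy of the kernel's
 * struct mount (used for the mount point name).
 */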
struct ml_entry {
	u_int ml_count;
	u_long ml_size;
	u_long ml_valid;
	struct mount *ml_addr;
	LIST_ENTRY(ml_entry) ml_entries;
	struct mount ml_mount;
};

static struct nlist namelist[] = {
#define	X_BUFMEM	0
	{ "_bufmem" },
	{ "" },
};

static struct vcache vcache[VCACHE_SIZE];
static LIST_HEAD(mount_list, ml_entry) mount_list;

static u_int nbuf, bufmem, pgwidth, kbwidth;
static struct uvmexp_sysctl uvmexp;

static void vc_init(void);
static void ml_init(void);
static struct vnode *vc_lookup(struct vnode *);
static struct mount *ml_lookup(struct mount *, int, int);
static void fetchuvmexp(void);


WINDOW *
openbufcache(void)
{

	return (subwin(stdscr, -1, 0, 5, 0));
}

void
closebufcache(WINDOW *w)
{

	if (w == NULL)
		return;
	wclear(w);
	wrefresh(w);
	delwin(w);
	ml_init();		/* Clear out mount list */
}

void
labelbufcache(void)
{
	wmove(wnd, 1, 0);
	wclrtoeol(wnd);
	wmove(wnd, 2, 0);
	wclrtoeol(wnd);
	wmove(wnd, 3, 0);
	wclrtoeol(wnd);
	mvwaddstr(wnd, 4, 0,
	    "File System          Bufs used   %   kB in use   %  Bufsize kB   %  Util %");
	wclrtoeol(wnd);
}

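/*
 * Redraw the display: summary lines for metadata buffers, cached file data
 * pages and executable pages, followed by one row per mount point and a
 * total row.
 */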
void
showbufcache(void)
{
	int tbuf, i, lastrow;
	double tvalid, tsize;
	struct ml_entry *ml;

	NREAD(X_BUFMEM, &bufmem, sizeof(bufmem));

	mvwprintw(wnd, 0, 0,
	    "There are %*d metadata buffers using %*d kBytes of memory.",
	    pgwidth, nbuf, kbwidth, bufmem/1024);
	wclrtoeol(wnd);
	mvwprintw(wnd, 1, 0,
	    "There are %*llu pages for cached file data using %*llu kBytes of memory.",
	    pgwidth, (long long)uvmexp.filepages,
	    kbwidth, (long long) uvmexp.filepages * getpagesize() / 1024);
	wclrtoeol(wnd);
	mvwprintw(wnd, 2, 0,
	    "There are %*llu pages for executables using %*llu kBytes of memory.",
	    pgwidth, (long long)uvmexp.execpages,
	    kbwidth, (long long) uvmexp.execpages * getpagesize() / 1024);
	wclrtoeol(wnd);

	if (nbuf == 0 || bufmem == 0) {
		wclrtobot(wnd);
		return;
	}

	tbuf = 0;
	tvalid = tsize = 0;
	lastrow = 5;	/* Leave room for header. */
	for (i = lastrow, ml = LIST_FIRST(&mount_list); ml != NULL;
	    i++, ml = LIST_NEXT(ml, ml_entries)) {

		int c = ml->ml_count;
		double v = ml->ml_valid;
		double s = ml->ml_size;

		/* Display in window if enough room. */
		if (i < getmaxy(wnd) - 2) {
			mvwprintw(wnd, i, 0, "%-20.20s", ml->ml_addr == NULL ?
			    "NULL" : ml->ml_mount.mnt_stat.f_mntonname);
			wprintw(wnd,
			    " %6d %3d %8ld %3.0f %8ld %3.0f %3.0f",
			    c, (100 * c) / nbuf,
			    (long)(v/1024), 100 * v / bufmem,
			    (long)(s/1024), 100 * s / bufmem,
			    100 * v / s);
			wclrtoeol(wnd);
			lastrow = i;
		}

		/* Update statistics. */
		tbuf += c;
		tvalid += v;
		tsize += s;
	}

	wclrtobot(wnd);
	mvwprintw(wnd, lastrow + 2, 0,
	    "%-20s %6d %3d %8ld %3.0f %8ld %3.0f %3.0f",
	    "Total:", tbuf, (100 * tbuf) / nbuf,
	    (long)(tvalid/1024), 100 * tvalid / bufmem,
	    (long)(tsize/1024), 100 * tsize / bufmem,
	    tsize != 0 ? ((100 * tvalid) / tsize) : 0);
}

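/*
 * One-time setup: resolve the kernel's bufmem symbol and size the numeric
 * fields from the total number of physical pages.
 */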
int
initbufcache(void)
{
	if (namelist[0].n_type == 0) {
		if (kvm_nlist(kd, namelist)) {
			nlisterr(namelist);
			return(0);
		}
	}

	fetchuvmexp();
	pgwidth = (int)(floor(log10((double)uvmexp.npages)) + 1);
	kbwidth = (int)(floor(log10(uvmexp.npages * getpagesize() / 1024.0)) + 1);

	return(1);
}

static void
fetchuvmexp(void)
{
	int mib[2];
	size_t size;

	/* Re-read pages used for vnodes & executables */
	size = sizeof(uvmexp);
	mib[0] = CTL_VM;
	mib[1] = VM_UVMEXP2;
	if (sysctl(mib, 2, &uvmexp, &size, NULL, 0) < 0) {
		error("can't get uvmexp: %s\n", strerror(errno));
		memset(&uvmexp, 0, sizeof(uvmexp));
	}
}

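/*
 * Collect a fresh snapshot: re-read the UVM counters, reset the vnode cache
 * and mount list, copy the buffer headers out of the kernel via sysctl,
 * charge each buffer to the file system it belongs to, and finally sort the
 * mount list by buffer count.
 */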
void
fetchbufcache(void)
{
	int count;
	struct buf *bp;
	struct vnode *vn;
	struct mount *mt;
	struct ml_entry *ml;
	int mib[2];
	size_t size;
	struct buf *buffers;
	int extraslop = 0;

	/* Re-read pages used for vnodes & executables */
	fetchuvmexp();

	/* Initialise vnode cache and mount list. */
	vc_init();
	ml_init();

	/* Get metadata buffers */
again:
	size = 0;
	buffers = NULL;
	mib[0] = CTL_KERN;
	mib[1] = KERN_BUF;
	if (sysctl(mib, 2, NULL, &size, NULL, 0) < 0) {
		error("can't get buffers size: %s\n", strerror(errno));
		return;
	}
	if (size == 0)
		return;

	size += extraslop * sizeof(struct buf);
	buffers = malloc(size);
	if (buffers == NULL) {
		error("can't allocate buffers: %s\n", strerror(errno));
		return;
	}
	if (sysctl(mib, 2, buffers, &size, NULL, 0) < 0) {
		free(buffers);
		if (extraslop == 0) {
			extraslop = 100;
			goto again;
		}
		error("can't get buffers: %s\n", strerror(errno));
		return;
	}

	nbuf = size / sizeof(struct buf);
	for (bp = buffers; bp < buffers + nbuf; bp++) {
		if (bp->b_vp != NULL) {
			struct mount *mp;
			vn = vc_lookup(bp->b_vp);
			if (vn == NULL)
				break;

			mp = vn->v_mount;
			/*
			 * References to mounted-on vnodes should be
			 * counted towards the mounted filesystem.
			 */
			if (vn->v_type == VBLK && vn->v_specinfo != NULL) {
				struct specinfo sp;
				if (!KREAD(vn->v_specinfo, &sp, sizeof(sp)))
					continue;
				if (sp.si_mountpoint)
					mp = sp.si_mountpoint;
			}
			if (mp != NULL)
				mt = ml_lookup(mp,
				    bp->b_bufsize,
				    bp->b_bcount);
		}
	}

	/* simple sort - there's not that many entries */
	do {
		if ((ml = LIST_FIRST(&mount_list)) == NULL ||
		    LIST_NEXT(ml, ml_entries) == NULL)
			break;

		count = 0;
		for (ml = LIST_FIRST(&mount_list); ml != NULL;
		    ml = LIST_NEXT(ml, ml_entries)) {
			if (LIST_NEXT(ml, ml_entries) == NULL)
				break;
			if (ml->ml_count < LIST_NEXT(ml, ml_entries)->ml_count) {
				ml = LIST_NEXT(ml, ml_entries);
				LIST_REMOVE(ml, ml_entries);
				LIST_INSERT_HEAD(&mount_list, ml, ml_entries);
				count++;
			}
		}
	} while (count != 0);

	free(buffers);
}

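/* Forget all cached vnodes before taking a new snapshot. */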
static void
vc_init(void)
{
	int i;

	/* vc_addr == NULL for unused cache entry. */
	for (i = 0; i < VCACHE_SIZE; i++)
		vcache[i].vc_addr = NULL;
}

static void
ml_init(void)
{
	struct ml_entry *ml;

	/* Throw out the current mount list and start again. */
	while ((ml = LIST_FIRST(&mount_list)) != NULL) {
		LIST_REMOVE(ml, ml_entries);
		free(ml);
	}
}


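/*
 * Return a pointer to a local copy of the vnode at kernel address vaddr,
 * reading it with kvm on a cache miss and recycling the oldest slot when
 * the cache is full.  Returns NULL if the vnode cannot be read.
 */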
static struct vnode *
vc_lookup(struct vnode *vaddr)
{
	struct vnode *ret;
	size_t i, oldest;

	ret = NULL;
	oldest = 0;
	for (i = 0; i < VCACHE_SIZE; i++) {
		vcache[i].vc_age++;
		if (vcache[i].vc_addr == NULL)
			break;
		/* Remember the least recently used slot for replacement. */
		if (vcache[i].vc_age > vcache[oldest].vc_age)
			oldest = i;
		if (vcache[i].vc_addr == vaddr) {
			vcache[i].vc_age = 0;
			ret = &vcache[i].vc_node;
		}
	}

	/* Found an entry in the cache? */
	if (ret != NULL)
		return(ret);

	/* Went past the end of the cache? */
	if (i >= VCACHE_SIZE)
		i = oldest;

	/* Read in new vnode and reset age counter. */
	if (KREAD(vaddr, &vcache[i].vc_node, sizeof(struct vnode)) == 0)
		return NULL;
	vcache[i].vc_addr = vaddr;
	vcache[i].vc_age = 0;

	return(&vcache[i].vc_node);
}

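/*
 * Find the mount-list entry for the kernel mount at maddr, creating it
 * (and copying in the kernel's struct mount) on first sight, and add this
 * buffer's size and valid byte count to its totals.
 */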
static struct mount *
ml_lookup(struct mount *maddr, int size, int valid)
{
	struct ml_entry *ml;

	for (ml = LIST_FIRST(&mount_list); ml != NULL;
	    ml = LIST_NEXT(ml, ml_entries))
		if (ml->ml_addr == maddr) {
			ml->ml_count++;
			ml->ml_size += size;
			ml->ml_valid += valid;
			if (ml->ml_addr == NULL)
				return(NULL);
			else
				return(&ml->ml_mount);
		}

	if ((ml = malloc(sizeof(struct ml_entry))) == NULL) {
		error("out of memory");
		die(0);
	}
	LIST_INSERT_HEAD(&mount_list, ml, ml_entries);
	ml->ml_count = 1;
	ml->ml_size = size;
	ml->ml_valid = valid;
	ml->ml_addr = maddr;
	if (maddr == NULL)
		return(NULL);

	KREAD(maddr, &ml->ml_mount, sizeof(struct mount));
	return(&ml->ml_mount);
}