bufcache.c revision 1.13 1 /* $NetBSD: bufcache.c,v 1.13 2003/02/24 10:10:00 dsl Exp $ */
2
3 /*-
4 * Copyright (c) 1999 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Simon Burge.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 #include <sys/cdefs.h>
40 #ifndef lint
41 __RCSID("$NetBSD: bufcache.c,v 1.13 2003/02/24 10:10:00 dsl Exp $");
42 #endif /* not lint */
43
44 #include <sys/param.h>
45 #include <sys/buf.h>
46 #include <sys/mount.h>
47 #include <sys/sysctl.h>
48 #include <sys/vnode.h>
49
50 #include <uvm/uvm_extern.h>
51
52 #include <err.h>
53 #include <errno.h>
54 #include <kvm.h>
55 #include <math.h>
56 #include <nlist.h>
57 #include <stdlib.h>
58 #include <string.h>
59 #include <unistd.h>
60
61 #include "systat.h"
62 #include "extern.h"
63
/*
 * Definitions for the buffer free lists (from sys/kern/vfs_bio.c).
 */
#define	BQUEUES		4		/* number of free buffer queues */

#define	BQ_LOCKED	0		/* super-blocks &c */
#define	BQ_LRU		1		/* lru, useful buffers */
#define	BQ_AGE		2		/* rubbish */
#define	BQ_EMPTY	3		/* buffer headers with no memory */

#define	VCACHE_SIZE	50		/* slots in the local vnode cache */

/*
 * One locally cached copy of a kernel vnode, keyed by its kernel
 * address.  vc_addr == NULL marks an unused slot.
 */
struct vcache {
	int vc_age;			/* age counter for LRU replacement */
	struct vnode *vc_addr;		/* kernel address of the vnode */
	struct vnode vc_node;		/* local copy of the vnode */
};

/*
 * Per-mount-point accumulator for buffer cache statistics.
 * Sizes are kept in kilobytes.
 */
struct ml_entry {
	int ml_count;			/* number of buffers on this mount */
	long ml_size;			/* total b_bufsize, in kB */
	long ml_valid;			/* total b_bcount (valid data), in kB */
	struct mount *ml_addr;		/* kernel address of the mount */
	struct mount ml_mount;		/* local copy of the mount structure */
	LIST_ENTRY(ml_entry) ml_entries;
};

/* Kernel symbols resolved via kvm_nlist() in initbufcache(). */
static struct nlist namelist[] = {
#define	X_NBUF		0
	{ "_nbuf" },
#define	X_BUF		1
	{ "_buf" },
#define	X_BUFQUEUES	2
	{ "_bufqueues" },
#define	X_BUFPAGES	3
	{ "_bufpages" },
	{ "" },
};

static struct vcache vcache[VCACHE_SIZE];
static LIST_HEAD(mount_list, ml_entry) mount_list;

static int nbuf, bufpages, bufkb, pgwidth, kbwidth;
static struct uvmexp_sysctl uvmexp;
static void *bufaddr;			/* kernel address of the buf array */
static struct buf *buf = NULL;		/* local copy of the buf array */
static TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];

static void vc_init(void);
static void ml_init(void);
static struct vnode *vc_lookup(struct vnode *);
static struct mount *ml_lookup(struct mount *, int, int);
static void fetchuvmexp(void);
118
119 WINDOW *
120 openbufcache(void)
121 {
122
123 return (subwin(stdscr, -1, 0, 5, 0));
124 }
125
/*
 * Tear down the display window and discard the accumulated
 * per-mount statistics.
 */
void
closebufcache(WINDOW *w)
{

	if (w == NULL)
		return;
	wclear(w);
	wrefresh(w);
	delwin(w);
	ml_init();		/* Clear out mount list */
}
137
/*
 * Draw the static parts of the display: the metadata-buffer summary
 * on line 0 and the per-filesystem column headings on line 4.
 * Lines 1-3 are cleared here and filled in by showbufcache().
 */
void
labelbufcache(void)
{
	mvwprintw(wnd, 0, 0,
	    "There are %*d metadata buffers using %*d kBytes of memory.",
	    pgwidth, nbuf, kbwidth, bufkb);
	wclrtoeol(wnd);
	wmove(wnd, 1, 0);
	wclrtoeol(wnd);
	wmove(wnd, 2, 0);
	wclrtoeol(wnd);
	wmove(wnd, 3, 0);
	wclrtoeol(wnd);
	mvwaddstr(wnd, 4, 0,
	    "File System          Bufs used   %   kB in use   %  Bufsize kB   %  Util %");
	wclrtoeol(wnd);
}
155
156 void
157 showbufcache(void)
158 {
159 int tbuf, i, lastrow;
160 long tvalid, tsize;
161 struct ml_entry *ml;
162
163 mvwprintw(wnd, 1, 0,
164 "There are %*llu pages for cached file data using %*llu kBytes of memory.",
165 pgwidth, (long long)uvmexp.filepages,
166 kbwidth, (long long) uvmexp.filepages * getpagesize() / 1024);
167 wclrtoeol(wnd);
168 mvwprintw(wnd, 2, 0,
169 "There are %*llu pages for executables using %*llu kBytes of memory.",
170 pgwidth, (long long)uvmexp.execpages,
171 kbwidth, (long long) uvmexp.execpages * getpagesize() / 1024);
172 wclrtoeol(wnd);
173
174 tbuf = tvalid = tsize = 0;
175 lastrow = 5; /* Leave room for header. */
176 for (i = lastrow, ml = LIST_FIRST(&mount_list); ml != NULL;
177 i++, ml = LIST_NEXT(ml, ml_entries)) {
178
179 /* Display in window if enough room. */
180 if (i < getmaxy(wnd) - 2) {
181 mvwprintw(wnd, i, 0, "%-20.20s", ml->ml_addr == NULL ?
182 "NULL" : ml->ml_mount.mnt_stat.f_mntonname);
183 wprintw(wnd,
184 " %6d %3d %8ld %3ld %8ld %3ld %3ld",
185 ml->ml_count, (100 * ml->ml_count) / nbuf,
186 ml->ml_valid, (100 * ml->ml_valid) / bufkb,
187 ml->ml_size, (100 * ml->ml_size) / bufkb,
188 (100 * ml->ml_valid) / ml->ml_size);
189 wclrtoeol(wnd);
190 lastrow = i;
191 }
192
193 /* Update statistics. */
194 tbuf += ml->ml_count;
195 tvalid += ml->ml_valid;
196 tsize += ml->ml_size;
197 }
198
199 wclrtobot(wnd);
200 mvwprintw(wnd, lastrow + 2, 0,
201 "%-20s %6d %3d %8ld %3ld %8ld %3ld %3ld",
202 "Total:", tbuf, (100 * tbuf) / nbuf,
203 tvalid, (100 * tvalid) / bufkb,
204 tsize, (100 * tsize) / bufkb, (100 * tvalid) / tsize);
205 }
206
/*
 * One-time setup: resolve the kernel symbols we need, read the
 * buffer-cache sizing variables, and allocate a local copy of the
 * kernel's buf array.  Returns 1 on success, 0 if the namelist
 * could not be read.
 */
int
initbufcache(void)
{
	if (namelist[X_NBUF].n_type == 0) {
		if (kvm_nlist(kd, namelist)) {
			nlisterr(namelist);
			return(0);
		}
		if (namelist[X_NBUF].n_type == 0) {
			error("No namelist");
			return(0);
		}
	}

	NREAD(X_NBUF, &nbuf, sizeof(nbuf));
	NREAD(X_BUFPAGES, &bufpages, sizeof(bufpages));
	bufkb = bufpages * sysconf(_SC_PAGESIZE) / 1024;

	if ((buf = malloc(nbuf * sizeof(struct buf))) == NULL) {
		error("malloc failed");
		die(0);
	}
	NREAD(X_BUF, &bufaddr, sizeof(bufaddr));

	fetchuvmexp();
	/* Column widths wide enough for the largest page/kB counts. */
	pgwidth = (int)(floor(log10((double)uvmexp.npages)) + 1);
	kbwidth = (int)(floor(log10(uvmexp.npages * getpagesize() / 1024.0)) + 1);

	return(1);
}
237
238 static void
239 fetchuvmexp(void)
240 {
241 int mib[2];
242 size_t size;
243
244 /* Re-read pages used for vnodes & executables */
245 size = sizeof(uvmexp);
246 mib[0] = CTL_VM;
247 mib[1] = VM_UVMEXP2;
248 if (sysctl(mib, 2, &uvmexp, &size, NULL, 0) < 0) {
249 error("can't get uvmexp: %s\n", strerror(errno));
250 memset(&uvmexp, 0, sizeof(uvmexp));
251 }
252 }
253
254 void
255 fetchbufcache(void)
256 {
257 int i, count;
258 struct buf *bp;
259 struct vnode *vn;
260 struct mount *mt;
261 struct ml_entry *ml;
262
263 fetchuvmexp();
264 /* Re-read bufqueues lists and buffer cache headers */
265 NREAD(X_BUFQUEUES, bufqueues, sizeof(bufqueues));
266 KREAD(bufaddr, buf, sizeof(struct buf) * nbuf);
267
268 /* Initialise vnode cache and mount list. */
269 vc_init();
270 ml_init();
271 for (i = 0; i < BQUEUES; i++) {
272 for (bp = bufqueues[i].tqh_first; bp != NULL;
273 bp = bp->b_freelist.tqe_next) {
274 if (bp != NULL) {
275 bp = (struct buf *)((u_long)bp + (u_long)buf -
276 (u_long)bufaddr);
277
278 if (bp->b_vp != NULL) {
279 vn = vc_lookup(bp->b_vp);
280 if (vn == NULL)
281 errx(1,
282 "vc_lookup returns NULL!\n");
283 if (vn->v_mount != NULL)
284 mt = ml_lookup(vn->v_mount,
285 bp->b_bufsize,
286 bp->b_bcount);
287 }
288 }
289 }
290 }
291
292 /* simple sort - there's not that many entries */
293 do {
294 if ((ml = LIST_FIRST(&mount_list)) == NULL ||
295 LIST_NEXT(ml, ml_entries) == NULL)
296 break;
297
298 count = 0;
299 for (ml = LIST_FIRST(&mount_list); ml != NULL;
300 ml = LIST_NEXT(ml, ml_entries)) {
301 if (LIST_NEXT(ml, ml_entries) == NULL)
302 break;
303 if (ml->ml_count < LIST_NEXT(ml, ml_entries)->ml_count) {
304 ml = LIST_NEXT(ml, ml_entries);
305 LIST_REMOVE(ml, ml_entries);
306 LIST_INSERT_HEAD(&mount_list, ml, ml_entries);
307 count++;
308 }
309 }
310 } while (count != 0);
311 }
312
313 static void
314 vc_init(void)
315 {
316 int i;
317
318 /* vc_addr == NULL for unused cache entry. */
319 for (i = 0; i < VCACHE_SIZE; i++)
320 vcache[i].vc_addr = NULL;
321 }
322
323 static void
324 ml_init(void)
325 {
326 struct ml_entry *ml;
327
328 /* Throw out the current mount list and start again. */
329 while ((ml = LIST_FIRST(&mount_list)) != NULL) {
330 LIST_REMOVE(ml, ml_entries);
331 free(ml);
332 }
333 }
334
335
336 static struct vnode *
337 vc_lookup(struct vnode *vaddr)
338 {
339 struct vnode *ret;
340 int i, oldest, match;
341
342 ret = NULL;
343 oldest = match = 0;
344 for (i = 0; i < VCACHE_SIZE || vcache[i].vc_addr == NULL; i++) {
345 vcache[i].vc_age++;
346 if (vcache[i].vc_addr == NULL)
347 break;
348 if (vcache[i].vc_age < vcache[oldest].vc_age)
349 oldest = i;
350 if (vcache[i].vc_addr == vaddr) {
351 vcache[i].vc_age = 0;
352 match = i;
353 ret = &vcache[i].vc_node;
354 }
355 }
356
357 /* Find an entry in the cache? */
358 if (ret != NULL)
359 return(ret);
360
361 /* Go past the end of the cache? */
362 if (i >= VCACHE_SIZE)
363 i = oldest;
364
365 /* Read in new vnode and reset age counter. */
366 KREAD(vaddr, &vcache[i].vc_node, sizeof(struct vnode));
367 vcache[i].vc_addr = vaddr;
368 vcache[i].vc_age = 0;
369
370 return(&vcache[i].vc_node);
371 }
372
373 static struct mount *
374 ml_lookup(struct mount *maddr, int size, int valid)
375 {
376 struct ml_entry *ml;
377
378 for (ml = LIST_FIRST(&mount_list); ml != NULL;
379 ml = LIST_NEXT(ml, ml_entries))
380 if (ml->ml_addr == maddr) {
381 ml->ml_count++;
382 ml->ml_size += size / 1024;
383 ml->ml_valid += valid / 1024;
384 if (ml->ml_addr == NULL)
385 return(NULL);
386 else
387 return(&ml->ml_mount);
388 }
389
390 if ((ml = malloc(sizeof(struct ml_entry))) == NULL) {
391 error("out of memory");
392 die(0);
393 }
394 LIST_INSERT_HEAD(&mount_list, ml, ml_entries);
395 ml->ml_count = 1;
396 ml->ml_size = size / 1024;
397 ml->ml_valid = valid / 1024;
398 ml->ml_addr = maddr;
399 if (maddr == NULL)
400 return(NULL);
401
402 KREAD(maddr, &ml->ml_mount, sizeof(struct mount));
403 return(&ml->ml_mount);
404 }
405