/*	$NetBSD: bufcache.c,v 1.4 1999/11/27 05:58:04 mrg Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Simon Burge.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#ifndef lint
__RCSID("$NetBSD: bufcache.c,v 1.4 1999/11/27 05:58:04 mrg Exp $");
#endif /* not lint */

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/queue.h>
#include <sys/time.h>
#include <sys/vnode.h>

#include <err.h>
#include <kvm.h>
#include <nlist.h>
#include <paths.h>
#include <stdlib.h>
#include <unistd.h>

#include "systat.h"
#include "extern.h"


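/*
 * systat(1) "bufcache" display: take a snapshot of the kernel's buffer
 * free lists through kvm(3) and show, per mounted file system, how many
 * buffers and how much buffer memory are in use.
 */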
/*
 * Definitions for the buffer free lists (from sys/kern/vfs_bio.c).
 */
#define	BQUEUES		4	/* number of free buffer queues */

#define	BQ_LOCKED	0	/* super-blocks &c */
#define	BQ_LRU		1	/* lru, useful buffers */
#define	BQ_AGE		2	/* rubbish */
#define	BQ_EMPTY	3	/* buffer headers with no memory */

#define	VCACHE_SIZE	50

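/*
 * Small cache of vnodes recently read from the kernel, so a vnode that
 * is referenced by several buffers does not have to be re-read for each
 * of them.  vc_age is bumped on every lookup and cleared on a hit, so
 * the entry with the largest age is the least recently used.
 */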
struct vcache {
	int vc_age;
	struct vnode *vc_addr;
	struct vnode vc_node;
};

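/*
 * One entry per mount point seen on the free lists: the number of
 * buffers charged to it, the bytes those buffers occupy (from
 * b_bufsize), the bytes of valid data they hold (from b_bcount) and a
 * local copy of the kernel's struct mount for the mount point's name.
 */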
struct ml_entry {
	int ml_count;
	long ml_size;
	long ml_valid;
	struct mount *ml_addr;
	struct mount ml_mount;
	LIST_ENTRY(ml_entry) ml_entries;
};

static struct nlist namelist[] = {
#define	X_NBUF		0
	{ "_nbuf" },
#define	X_BUF		1
	{ "_buf" },
#define	X_BUFQUEUES	2
	{ "_bufqueues" },
#define	X_BUFPAGES	3
	{ "_bufpages" },
	{ "" },
};

static struct vcache vcache[VCACHE_SIZE];
static LIST_HEAD(mount_list, ml_entry) mount_list;

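/*
 * Snapshot of the kernel's buffer bookkeeping: buffer and page counts,
 * the kernel address of the buffer array, a local copy of that array
 * and local copies of the free list heads.
 */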
static int nbuf, bufpages, bufbytes;
static void *bufaddr;
static struct buf *buf = NULL;
static TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];

static void vc_init __P((void));
static void ml_init __P((void));
static struct vnode *vc_lookup __P((struct vnode *));
static struct mount *ml_lookup __P((struct mount *, int, int));


WINDOW *
openbufcache()
{

	return (subwin(stdscr, LINES-5-1, 0, 5, 0));
}

void
closebufcache(w)
	WINDOW *w;
{

	if (w == NULL)
		return;
	wclear(w);
	wrefresh(w);
	delwin(w);
	ml_init();		/* Clear out mount list */
}

void
labelbufcache()
{
	mvwprintw(wnd, 0, 0, "There are %d buffers using %d kBytes of memory.",
	    nbuf, bufbytes / 1024);
	wclrtoeol(wnd);
	wmove(wnd, 1, 0);
	wclrtoeol(wnd);
	mvwaddstr(wnd, 2, 0,
	    "File System       Bufs used  % kB in use % Bufsize kB % Util %");
	wclrtoeol(wnd);
}

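/*
 * Display one line per mount point: the number of buffers charged to
 * it, the valid data they hold and the space they occupy (each in kB
 * and as a percentage of the whole buffer cache), and how much of the
 * occupied space holds valid data (Util %).
 */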
void
showbufcache()
{
	int tbuf, i, lastrow;
	long tvalid, tsize;
	struct ml_entry *ml;

	tbuf = tvalid = tsize = 0;
	lastrow = 3;	/* Leave room for header. */
	for (i = lastrow, ml = LIST_FIRST(&mount_list); ml != NULL;
	    i++, ml = LIST_NEXT(ml, ml_entries)) {

		/* Display in window if enough room. */
		if (i < getmaxy(wnd) - 2) {
			mvwprintw(wnd, i, 0, "%-20.20s", ml->ml_addr == NULL ?
			    "NULL" : ml->ml_mount.mnt_stat.f_mntonname);
			wprintw(wnd,
			    " %6d %3d %8ld %3ld %8ld %3ld %3ld",
			    ml->ml_count,
			    (100 * ml->ml_count) / nbuf,
			    ml->ml_valid / 1024,
			    (100 * ml->ml_valid) / bufbytes,
			    ml->ml_size / 1024,
			    (100 * ml->ml_size) / bufbytes,
			    (100 * ml->ml_valid) / ml->ml_size);
			wclrtoeol(wnd);
			lastrow = i;
		}

		/* Update statistics. */
		tbuf += ml->ml_count;
		tvalid += ml->ml_valid / 1024;
		tsize += ml->ml_size / 1024;
	}

	wclrtobot(wnd);
	mvwprintw(wnd, lastrow + 2, 0,
	    "%-20s %6d %3d %8ld %3ld %8ld %3ld %3ld",
	    "Total:", tbuf, (100 * tbuf) / nbuf,
	    tvalid, (100 * tvalid * 1024) / bufbytes,
	    tsize, (100 * tsize * 1024) / bufbytes, (100 * tvalid) / tsize);
}

int
initbufcache()
{
	if (namelist[X_NBUF].n_type == 0) {
		if (kvm_nlist(kd, namelist)) {
			nlisterr(namelist);
			return(0);
		}
		if (namelist[X_NBUF].n_type == 0) {
			error("namelist on %s failed", _PATH_UNIX);
			return(0);
		}
	}

	NREAD(X_NBUF, &nbuf, sizeof(nbuf));
	NREAD(X_BUFPAGES, &bufpages, sizeof(bufpages));
	bufbytes = bufpages * sysconf(_SC_PAGESIZE);

	buf = (struct buf *)malloc(nbuf * sizeof(struct buf));
	if (buf == NULL)
		errx(1, "malloc failed");
	NREAD(X_BUF, &bufaddr, sizeof(bufaddr));

	return(1);
}

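/*
 * Take a fresh snapshot: re-read the free list heads and the whole
 * buffer array from the kernel, then walk every queued buffer and
 * charge it to the mount point of the vnode it belongs to.
 */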
void
fetchbufcache()
{
	int i, count;
	struct buf *bp;
	struct vnode *vn;
	struct mount *mt;
	struct ml_entry *ml;

	/* Re-read bufqueues lists and buffer cache headers */
	NREAD(X_BUFQUEUES, bufqueues, sizeof(bufqueues));
	KREAD(bufaddr, buf, sizeof(struct buf) * nbuf);

	/* Initialise vnode cache and mount list. */
	vc_init();
	ml_init();
	for (i = 0; i < BQUEUES; i++) {
		for (bp = bufqueues[i].tqh_first; bp != NULL;
		    bp = bp->b_freelist.tqe_next) {
			if (bp != NULL) {
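				/*
				 * The freelist link is a kernel virtual
				 * address; translate it to the matching
				 * element of our local copy of the
				 * buffer array before dereferencing it.
				 */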
				bp = (struct buf *)((u_long)bp + (u_long)buf -
				    (u_long)bufaddr);

				if (bp->b_vp != NULL) {
					vn = vc_lookup(bp->b_vp);
					if (vn == NULL)
						errx(1,
						    "vc_lookup returns NULL!");
					if (vn->v_mount != NULL)
						mt = ml_lookup(vn->v_mount,
						    bp->b_bufsize,
						    bp->b_bcount);
				}
			}
		}
	}

	/*
	 * Simple sort into descending buffer-count order; there aren't
	 * that many entries.
	 */
	do {
		if ((ml = LIST_FIRST(&mount_list)) == NULL ||
		    LIST_NEXT(ml, ml_entries) == NULL)
			break;

		count = 0;
		for (ml = LIST_FIRST(&mount_list); ml != NULL;
		    ml = LIST_NEXT(ml, ml_entries)) {
			if (LIST_NEXT(ml, ml_entries) == NULL)
				break;
			if (ml->ml_count <
			    LIST_NEXT(ml, ml_entries)->ml_count) {
				ml = LIST_NEXT(ml, ml_entries);
				LIST_REMOVE(ml, ml_entries);
				LIST_INSERT_HEAD(&mount_list, ml, ml_entries);
				count++;
			}
		}
	} while (count != 0);
}

static void
vc_init()
{
	int i;

	/* vc_addr == NULL for unused cache entry. */
	for (i = 0; i < VCACHE_SIZE; i++)
		vcache[i].vc_addr = NULL;
}

static void
ml_init()
{
	struct ml_entry *ml;

	/* Throw out the current mount list and start again. */
	while ((ml = LIST_FIRST(&mount_list)) != NULL) {
		LIST_REMOVE(ml, ml_entries);
		free(ml);
	}
}


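/*
 * Translate a kernel vnode address into a local copy of the vnode,
 * reading it with kvm(3) on a cache miss.  When the cache is full, the
 * least recently used entry (the one with the largest vc_age) is
 * recycled.
 */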
static struct vnode *
vc_lookup(vaddr)
	struct vnode *vaddr;
{
	struct vnode *ret;
	int i, oldest, match;

	ret = NULL;
	oldest = match = 0;
	for (i = 0; i < VCACHE_SIZE; i++) {
		vcache[i].vc_age++;
		if (vcache[i].vc_addr == NULL)
			break;
		if (vcache[i].vc_age > vcache[oldest].vc_age)
			oldest = i;
		if (vcache[i].vc_addr == vaddr) {
			vcache[i].vc_age = 0;
			match = i;
			ret = &vcache[i].vc_node;
		}
	}

	/* Found an entry in the cache? */
	if (ret != NULL)
		return(ret);

	/* Ran past the end of the cache with no free slot? */
	if (i >= VCACHE_SIZE)
		i = oldest;

	/* Read in new vnode and reset age counter. */
	KREAD(vaddr, &vcache[i].vc_node, sizeof(struct vnode));
	vcache[i].vc_addr = vaddr;
	vcache[i].vc_age = 0;

	return(&vcache[i].vc_node);
}

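/*
 * Find (or create) the mount list entry for the mount point at kernel
 * address maddr and account one more buffer to it: size bytes
 * allocated, valid of them holding data.  Buffers whose vnode has no
 * mount point are collected under a NULL ml_addr entry, for which no
 * struct mount is read.
 */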
static struct mount *
ml_lookup(maddr, size, valid)
	struct mount *maddr;
	int size, valid;
{
	struct ml_entry *ml;

	for (ml = LIST_FIRST(&mount_list); ml != NULL;
	    ml = LIST_NEXT(ml, ml_entries))
		if (ml->ml_addr == maddr) {
			ml->ml_count++;
			ml->ml_size += size;
			ml->ml_valid += valid;
			if (ml->ml_addr == NULL)
				return(NULL);
			else
				return(&ml->ml_mount);
		}

	if ((ml = malloc(sizeof(struct ml_entry))) == NULL)
		errx(1, "out of memory");
	LIST_INSERT_HEAD(&mount_list, ml, ml_entries);
	ml->ml_count = 1;
	ml->ml_size = size;
	ml->ml_valid = valid;
	ml->ml_addr = maddr;
	if (maddr == NULL)
		return(NULL);

	KREAD(maddr, &ml->ml_mount, sizeof(struct mount));
	return(&ml->ml_mount);
}