/*	$NetBSD: uvm_meter.c,v 1.23 2001/12/09 03:07:19 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *	Washington University, and the University of California, Berkeley
 *	and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_meter.c	8.4 (Berkeley) 1/4/94
 * from: Id: uvm_meter.c,v 1.1.2.1 1997/08/14 19:10:35 chuck Exp
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_meter.c,v 1.23 2001/12/09 03:07:19 chs Exp $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <uvm/uvm_extern.h>
#include <sys/sysctl.h>

/*
 * maxslp: ???? XXXCDC
 */

int maxslp = MAXSLP;	/* patchable ... */
struct loadavg averunnable;

/*
 * constants for averages over 1, 5, and 15 minutes when sampling at
 * 5 second intervals.
 */

static fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};
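
/*
 * Each 5 second sample updates the averages as an exponentially
 * weighted moving average.  In fixed point (FSCALE = 1 << FSHIFT) the
 * update performed in uvm_loadav() below is
 *
 *	ldavg[i] = (cexp[i] * ldavg[i] +
 *	    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT
 *
 * which corresponds to
 *
 *	load(t) = load(t - 5s) * e^(-5/T) + nrun * (1 - e^(-5/T))
 *
 * for T = 60, 300 and 900 seconds (the 1, 5 and 15 minute averages).
 */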

/*
 * prototypes
 */

static void uvm_loadav __P((struct loadavg *));
static void uvm_total __P((struct vmtotal *));
static int sysctl_uvmexp __P((void *, size_t *));

/*
 * uvm_meter: calculate load average and wake up the swapper (if needed)
 */
void
uvm_meter()
{
	if ((time.tv_sec % 5) == 0)
		uvm_loadav(&averunnable);
	if (proc0.p_slptime > (maxslp / 2))
		wakeup(&proc0);
}
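
/*
 * uvm_meter() is intended to be called periodically; the time.tv_sec % 5
 * test above means the load average is only recomputed once every five
 * seconds.  The wakeup() rouses proc0 (the swapper, which sleeps on its
 * own proc structure) once it has been asleep for more than maxslp/2
 * seconds, so it gets a chance to revisit swapping decisions.
 */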

/*
 * uvm_loadav: compute a tenex style load average of a quantity on
 * 1, 5, and 15 minute intervals.
 */
static void
uvm_loadav(avg)
	struct loadavg *avg;
{
	int i, nrun;
	struct proc *p;

	proclist_lock_read();
	nrun = 0;
	LIST_FOREACH(p, &allproc, p_list) {
		switch (p->p_stat) {
		case SSLEEP:
			if (p->p_priority > PZERO || p->p_slptime > 1)
				continue;
			/* fall through */
		case SRUN:
		case SONPROC:
		case SIDL:
			nrun++;
		}
	}
	proclist_unlock_read();
	for (i = 0; i < 3; i++)
		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
}
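
/*
 * The computed averages stay in fixpt_t fixed point.  Userland readers
 * (e.g. getloadavg(3), which fetches the structure through the
 * VM_LOADAVG sysctl below) recover floating point values by dividing by
 * the scale factor, roughly (double)averunnable.ldavg[i] / FSCALE.
 */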

/*
 * uvm_sysctl: sysctl hook into UVM system.
 */
int
uvm_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
	struct proc *p;
{
	struct vmtotal vmtotals;
	int rv, t;

	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case VM_LOADAVG:
		return (sysctl_rdstruct(oldp, oldlenp, newp, &averunnable,
		    sizeof(averunnable)));

	case VM_METER:
		uvm_total(&vmtotals);
		return (sysctl_rdstruct(oldp, oldlenp, newp, &vmtotals,
		    sizeof(vmtotals)));

	case VM_UVMEXP:
		return (sysctl_rdminstruct(oldp, oldlenp, newp, &uvmexp,
		    sizeof(uvmexp)));

	case VM_UVMEXP2:
		if (newp)
			return (EPERM);
		return (sysctl_uvmexp(oldp, oldlenp));

	case VM_NKMEMPAGES:
		return (sysctl_rdint(oldp, oldlenp, newp, nkmempages));

#define UPDATEMIN(a, ap, bp, cp)					\
	{								\
		t = uvmexp.ap;						\
		rv = sysctl_int(oldp, oldlenp, newp, newlen, &t);	\
		if (rv) {						\
			return rv;					\
		}							\
		if (t + uvmexp.bp + uvmexp.cp > 95 || t < 0) {		\
			return EINVAL;					\
		}							\
		uvmexp.ap = t;						\
		uvmexp.a = t * 256 / 100;				\
		return rv;						\
	}
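
/*
 * UPDATEMIN fetches the new minimum percentage with sysctl_int(),
 * rejects values that are negative or that would make the three minima
 * (anon, exec, file) add up to more than 95%, and then stores the value
 * twice: as the raw percentage and, scaled to t * 256 / 100, as a
 * fraction of 256, which is presumably the form the page daemon
 * consumes.
 */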

	case VM_ANONMIN:
		UPDATEMIN(anonmin, anonminpct, fileminpct, execminpct);

	case VM_EXECMIN:
		UPDATEMIN(execmin, execminpct, fileminpct, anonminpct);

	case VM_FILEMIN:
		UPDATEMIN(filemin, fileminpct, execminpct, anonminpct);

#undef UPDATEMIN

#define UPDATEMAX(a, ap)						\
	{								\
		t = uvmexp.ap;						\
		rv = sysctl_int(oldp, oldlenp, newp, newlen, &t);	\
		if (rv) {						\
			return rv;					\
		}							\
		if (t > 100 || t < 0) {					\
			return EINVAL;					\
		}							\
		uvmexp.ap = t;						\
		uvmexp.a = t * 256 / 100;				\
		return rv;						\
	}
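
/*
 * UPDATEMAX is the analogous setter for the per-type maxima; each
 * maximum stands on its own, so it only has to lie in the 0..100 range,
 * and it is likewise stored both as a percentage and as a fraction of
 * 256.
 */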

	case VM_ANONMAX:
		UPDATEMAX(anonmax, anonmaxpct);

	case VM_EXECMAX:
		UPDATEMAX(execmax, execmaxpct);

	case VM_FILEMAX:
		UPDATEMAX(filemax, filemaxpct);

#undef UPDATEMAX

	case VM_MAXSLP:
		return (sysctl_rdint(oldp, oldlenp, newp, maxslp));

	case VM_USPACE:
		return (sysctl_rdint(oldp, oldlenp, newp, USPACE));

	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}
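
/*
 * For reference, the VM_LOADAVG node above can be read from userland
 * with sysctl(3).  A minimal sketch (error handling kept short; the
 * variable names are illustrative only):
 *
 *	#include <sys/param.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct loadavg la;
 *		int mib[2] = { CTL_VM, VM_LOADAVG };
 *		size_t len = sizeof(la);
 *
 *		if (sysctl(mib, 2, &la, &len, NULL, 0) == -1)
 *			return 1;
 *		printf("%.2f %.2f %.2f\n",
 *		    (double)la.ldavg[0] / la.fscale,
 *		    (double)la.ldavg[1] / la.fscale,
 *		    (double)la.ldavg[2] / la.fscale);
 *		return 0;
 *	}
 */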

static int
sysctl_uvmexp(oldp, oldlenp)
	void *oldp;
	size_t *oldlenp;
{
	struct uvmexp_sysctl u;

	memset(&u, 0, sizeof(u));

	/* Entries here are in order of uvmexp_sysctl, not uvmexp */
	u.pagesize = uvmexp.pagesize;
	u.pagemask = uvmexp.pagemask;
	u.pageshift = uvmexp.pageshift;
	u.npages = uvmexp.npages;
	u.free = uvmexp.free;
	u.active = uvmexp.active;
	u.inactive = uvmexp.inactive;
	u.paging = uvmexp.paging;
	u.wired = uvmexp.wired;
	u.zeropages = uvmexp.zeropages;
	u.reserve_pagedaemon = uvmexp.reserve_pagedaemon;
	u.reserve_kernel = uvmexp.reserve_kernel;
	u.freemin = uvmexp.freemin;
	u.freetarg = uvmexp.freetarg;
	u.inactarg = uvmexp.inactarg;
	u.wiredmax = uvmexp.wiredmax;
	u.nswapdev = uvmexp.nswapdev;
	u.swpages = uvmexp.swpages;
	u.swpginuse = uvmexp.swpginuse;
	u.swpgonly = uvmexp.swpgonly;
	u.nswget = uvmexp.nswget;
	u.nanon = uvmexp.nanon;
	u.nanonneeded = uvmexp.nanonneeded;
	u.nfreeanon = uvmexp.nfreeanon;
	u.faults = uvmexp.faults;
	u.traps = uvmexp.traps;
	u.intrs = uvmexp.intrs;
	u.swtch = uvmexp.swtch;
	u.softs = uvmexp.softs;
	u.syscalls = uvmexp.syscalls;
	u.pageins = uvmexp.pageins;
	u.swapins = uvmexp.swapins;
	u.swapouts = uvmexp.swapouts;
	u.pgswapin = uvmexp.pgswapin;
	u.pgswapout = uvmexp.pgswapout;
	u.forks = uvmexp.forks;
	u.forks_ppwait = uvmexp.forks_ppwait;
	u.forks_sharevm = uvmexp.forks_sharevm;
	u.pga_zerohit = uvmexp.pga_zerohit;
	u.pga_zeromiss = uvmexp.pga_zeromiss;
	u.zeroaborts = uvmexp.zeroaborts;
	u.fltnoram = uvmexp.fltnoram;
	u.fltnoanon = uvmexp.fltnoanon;
	u.fltpgwait = uvmexp.fltpgwait;
	u.fltpgrele = uvmexp.fltpgrele;
	u.fltrelck = uvmexp.fltrelck;
	u.fltrelckok = uvmexp.fltrelckok;
	u.fltanget = uvmexp.fltanget;
	u.fltanretry = uvmexp.fltanretry;
	u.fltamcopy = uvmexp.fltamcopy;
	u.fltnamap = uvmexp.fltnamap;
	u.fltnomap = uvmexp.fltnomap;
	u.fltlget = uvmexp.fltlget;
	u.fltget = uvmexp.fltget;
	u.flt_anon = uvmexp.flt_anon;
	u.flt_acow = uvmexp.flt_acow;
	u.flt_obj = uvmexp.flt_obj;
	u.flt_prcopy = uvmexp.flt_prcopy;
	u.flt_przero = uvmexp.flt_przero;
	u.pdwoke = uvmexp.pdwoke;
	u.pdrevs = uvmexp.pdrevs;
	u.pdswout = uvmexp.pdswout;
	u.pdfreed = uvmexp.pdfreed;
	u.pdscans = uvmexp.pdscans;
	u.pdanscan = uvmexp.pdanscan;
	u.pdobscan = uvmexp.pdobscan;
	u.pdreact = uvmexp.pdreact;
	u.pdbusy = uvmexp.pdbusy;
	u.pdpageouts = uvmexp.pdpageouts;
	u.pdpending = uvmexp.pdpending;
	u.pddeact = uvmexp.pddeact;
	u.anonpages = uvmexp.anonpages;
	u.filepages = uvmexp.filepages;
	u.execpages = uvmexp.execpages;
	u.colorhit = uvmexp.colorhit;
	u.colormiss = uvmexp.colormiss;

	return (sysctl_rdminstruct(oldp, oldlenp, NULL, &u, sizeof(u)));
}
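
/*
 * struct uvmexp_sysctl is a fixed-layout export of the kernel's uvmexp
 * counters, so the VM_UVMEXP2 node can stay stable even as struct uvmexp
 * itself changes.  sysctl_rdminstruct() copies at most the caller's
 * buffer size, so older binaries built against a shorter version of the
 * structure keep working.
 */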

/*
 * uvm_total: calculate the current state of the system.
 */
static void
uvm_total(totalp)
	struct vmtotal *totalp;
{
	struct proc *p;
#if 0
	struct vm_map_entry *entry;
	struct vm_map *map;
	int paging;
#endif

	memset(totalp, 0, sizeof *totalp);

	/*
	 * calculate process statistics
	 */
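	/*
	 * Each non-system process is classified into one of the vmtotal
	 * counters: t_rq (runnable and resident), t_dw (resident,
	 * sleeping at or below PZERO, e.g. disk wait), t_sl (resident,
	 * sleeping), t_sw (swapped out but runnable or only briefly
	 * asleep).  Sleepers idle for maxslp or longer are left out of
	 * t_sl/t_sw.
	 */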

	proclist_lock_read();
	LIST_FOREACH(p, &allproc, p_list) {
		if (p->p_flag & P_SYSTEM)
			continue;
		switch (p->p_stat) {
		case 0:
			continue;

		case SSLEEP:
		case SSTOP:
			if (p->p_flag & P_INMEM) {
				if (p->p_priority <= PZERO)
					totalp->t_dw++;
				else if (p->p_slptime < maxslp)
					totalp->t_sl++;
			} else if (p->p_slptime < maxslp)
				totalp->t_sw++;
			if (p->p_slptime >= maxslp)
				continue;
			break;

		case SRUN:
		case SONPROC:
		case SIDL:
			if (p->p_flag & P_INMEM)
				totalp->t_rq++;
			else
				totalp->t_sw++;
			if (p->p_stat == SIDL)
				continue;
			break;
		}
		/*
		 * note active objects
		 */
#if 0
		/*
		 * XXXCDC: BOGUS!  rethink this.  in the mean time
		 * don't do it.
		 */
		paging = 0;
		map = &p->p_vmspace->vm_map;	/* assign before locking */
		vm_map_lock(map);
		for (entry = map->header.next;
		     entry != &map->header; entry = entry->next) {
			if (entry->is_a_map || entry->is_sub_map ||
			    entry->object.uvm_obj == NULL)
				continue;
			/* XXX how to do this with uvm */
		}
		vm_map_unlock(map);
		if (paging)
			totalp->t_pw++;
#endif
	}
	proclist_unlock_read();

	/*
	 * Calculate object memory usage statistics.
	 */
	totalp->t_free = uvmexp.free;
	totalp->t_vm = uvmexp.npages - uvmexp.free + uvmexp.swpginuse;
	totalp->t_avm = uvmexp.active + uvmexp.swpginuse;	/* XXX */
	totalp->t_rm = uvmexp.npages - uvmexp.free;
	totalp->t_arm = uvmexp.active;
	totalp->t_vmshr = 0;		/* XXX */
	totalp->t_avmshr = 0;		/* XXX */
	totalp->t_rmshr = 0;		/* XXX */
	totalp->t_armshr = 0;		/* XXX */
}