/*	$NetBSD: uvm_meter.c,v 1.41 2006/09/15 15:51:13 yamt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *	Washington University, and the University of California, Berkeley
 *	and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)vm_meter.c	8.4 (Berkeley) 1/4/94
 * from: Id: uvm_meter.c,v 1.1.2.1 1997/08/14 19:10:35 chuck Exp
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_meter.c,v 1.41 2006/09/15 15:51:13 yamt Exp $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_pdpolicy.h>

/*
 * maxslp: ???? XXXCDC
 */

int maxslp = MAXSLP;	/* patchable ... */
struct loadavg averunnable;

/*
 * constants for averages over 1, 5, and 15 minutes when sampling at
 * 5 second intervals.
 */

static const fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};

/*
 * prototypes
 */

static void uvm_loadav(struct loadavg *);
static void uvm_total(struct vmtotal *);

/*
 * uvm_meter: calculate load average and wake up the swapper (if needed)
 */
void
uvm_meter(void)
{
	if ((time_second % 5) == 0)
		uvm_loadav(&averunnable);
	if (lwp0.l_slptime > (maxslp / 2))
		wakeup(&proc0);
}

/*
 * uvm_loadav: compute a tenex style load average of a quantity on
 * 1, 5, and 15 minute intervals.
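 *
 * Each sample folds the current number of runnable lwps into the
 * averages as an exponentially decaying mean,
 *
 *	avg = avg * exp(-t/T) + nrun * (1 - exp(-t/T))
 *
 * with t = 5 seconds and T = 60, 300 and 900 seconds, carried out in
 * fixed point scaled by FSCALE (see cexp[] above).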
 */
static void
uvm_loadav(struct loadavg *avg)
{
	int i, nrun;
	struct lwp *l;

	proclist_lock_read();
	nrun = 0;
	LIST_FOREACH(l, &alllwp, l_list) {
		switch (l->l_stat) {
		case LSSLEEP:
			if (l->l_priority > PZERO || l->l_slptime > 1)
				continue;
			/* fall through */
		case LSRUN:
		case LSONPROC:
		case LSIDL:
			nrun++;
		}
	}
	proclist_unlock_read();
	for (i = 0; i < 3; i++)
		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
}

/*
 * sysctl helper routine for the vm.vmmeter node.
 */
static int
sysctl_vm_meter(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct vmtotal vmtotals;

	node = *rnode;
	node.sysctl_data = &vmtotals;
	uvm_total(&vmtotals);

	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}

/*
 * sysctl helper routine for the vm.uvmexp node.
 */
static int
sysctl_vm_uvmexp(SYSCTLFN_ARGS)
{
	struct sysctlnode node;

	node = *rnode;
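	/*
	 * Clamp the copy-out size to the caller's buffer; struct uvmexp
	 * grows over time, and this lets userland that was built against
	 * a smaller struct still read the prefix it knows about.
	 */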
	if (oldp)
		node.sysctl_size = min(*oldlenp, node.sysctl_size);

	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}

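/*
 * sysctl helper routine for the vm.uvmexp2 node: export the counters
 * through struct uvmexp_sysctl, the machine-independent view of
 * struct uvmexp.
 */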
static int
sysctl_vm_uvmexp2(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct uvmexp_sysctl u;
	int active, inactive;

	uvm_estimatepageable(&active, &inactive);

	memset(&u, 0, sizeof(u));

	/* Entries here are in order of uvmexp_sysctl, not uvmexp */
	u.pagesize = uvmexp.pagesize;
	u.pagemask = uvmexp.pagemask;
	u.pageshift = uvmexp.pageshift;
	u.npages = uvmexp.npages;
	u.free = uvmexp.free;
	u.active = active;
	u.inactive = inactive;
	u.paging = uvmexp.paging;
	u.wired = uvmexp.wired;
	u.zeropages = uvmexp.zeropages;
	u.reserve_pagedaemon = uvmexp.reserve_pagedaemon;
	u.reserve_kernel = uvmexp.reserve_kernel;
	u.freemin = uvmexp.freemin;
	u.freetarg = uvmexp.freetarg;
	u.inactarg = 0; /* unused */
	u.wiredmax = uvmexp.wiredmax;
	u.nswapdev = uvmexp.nswapdev;
	u.swpages = uvmexp.swpages;
	u.swpginuse = uvmexp.swpginuse;
	u.swpgonly = uvmexp.swpgonly;
	u.nswget = uvmexp.nswget;
	u.faults = uvmexp.faults;
	u.traps = uvmexp.traps;
	u.intrs = uvmexp.intrs;
	u.swtch = uvmexp.swtch;
	u.softs = uvmexp.softs;
	u.syscalls = uvmexp.syscalls;
	u.pageins = uvmexp.pageins;
	u.swapins = uvmexp.swapins;
	u.swapouts = uvmexp.swapouts;
	u.pgswapin = uvmexp.pgswapin;
	u.pgswapout = uvmexp.pgswapout;
	u.forks = uvmexp.forks;
	u.forks_ppwait = uvmexp.forks_ppwait;
	u.forks_sharevm = uvmexp.forks_sharevm;
	u.pga_zerohit = uvmexp.pga_zerohit;
	u.pga_zeromiss = uvmexp.pga_zeromiss;
	u.zeroaborts = uvmexp.zeroaborts;
	u.fltnoram = uvmexp.fltnoram;
	u.fltnoanon = uvmexp.fltnoanon;
	u.fltpgwait = uvmexp.fltpgwait;
	u.fltpgrele = uvmexp.fltpgrele;
	u.fltrelck = uvmexp.fltrelck;
	u.fltrelckok = uvmexp.fltrelckok;
	u.fltanget = uvmexp.fltanget;
	u.fltanretry = uvmexp.fltanretry;
	u.fltamcopy = uvmexp.fltamcopy;
	u.fltnamap = uvmexp.fltnamap;
	u.fltnomap = uvmexp.fltnomap;
	u.fltlget = uvmexp.fltlget;
	u.fltget = uvmexp.fltget;
	u.flt_anon = uvmexp.flt_anon;
	u.flt_acow = uvmexp.flt_acow;
	u.flt_obj = uvmexp.flt_obj;
	u.flt_prcopy = uvmexp.flt_prcopy;
	u.flt_przero = uvmexp.flt_przero;
	u.pdwoke = uvmexp.pdwoke;
	u.pdrevs = uvmexp.pdrevs;
	u.pdswout = uvmexp.pdswout;
	u.pdfreed = uvmexp.pdfreed;
	u.pdscans = uvmexp.pdscans;
	u.pdanscan = uvmexp.pdanscan;
	u.pdobscan = uvmexp.pdobscan;
	u.pdreact = uvmexp.pdreact;
	u.pdbusy = uvmexp.pdbusy;
	u.pdpageouts = uvmexp.pdpageouts;
	u.pdpending = uvmexp.pdpending;
	u.pddeact = uvmexp.pddeact;
	u.anonpages = uvmexp.anonpages;
	u.filepages = uvmexp.filepages;
	u.execpages = uvmexp.execpages;
	u.colorhit = uvmexp.colorhit;
	u.colormiss = uvmexp.colormiss;

	node = *rnode;
	node.sysctl_data = &u;
	node.sysctl_size = sizeof(u);
	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}

/*
 * sysctl helper routine for uvm_pctparam.
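 * A new value must be a percentage in the range 0..100 and must also
 * pass the parameter's optional pct_check hook before it is applied
 * with uvm_pctparam_set().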
 */
static int
uvm_sysctlpctparam(SYSCTLFN_ARGS)
{
	int t, error;
	struct sysctlnode node;
	struct uvm_pctparam *pct;

	pct = rnode->sysctl_data;
	t = pct->pct_pct;

	node = *rnode;
	node.sysctl_data = &t;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (t < 0 || t > 100)
		return EINVAL;

	error = uvm_pctparam_check(pct, t);
	if (error) {
		return error;
	}
	uvm_pctparam_set(pct, t);

	return (0);
}

/*
 * uvm_sysctl: sysctl hook into UVM system.
 */
SYSCTL_SETUP(sysctl_vm_setup, "sysctl vm subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "vm", NULL,
	    NULL, 0, NULL, 0,
	    CTL_VM, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRUCT, "vmmeter",
	    SYSCTL_DESCR("Simple system-wide virtual memory "
		"statistics"),
	    sysctl_vm_meter, 0, NULL, sizeof(struct vmtotal),
	    CTL_VM, VM_METER, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRUCT, "loadavg",
	    SYSCTL_DESCR("System load average history"),
	    NULL, 0, &averunnable, sizeof(averunnable),
	    CTL_VM, VM_LOADAVG, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRUCT, "uvmexp",
	    SYSCTL_DESCR("Detailed system-wide virtual memory "
		"statistics"),
	    sysctl_vm_uvmexp, 0, &uvmexp, sizeof(uvmexp),
	    CTL_VM, VM_UVMEXP, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_INT, "nkmempages",
	    SYSCTL_DESCR("Default number of pages in kmem_map"),
	    NULL, 0, &nkmempages, 0,
	    CTL_VM, VM_NKMEMPAGES, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRUCT, "uvmexp2",
	    SYSCTL_DESCR("Detailed system-wide virtual memory "
		"statistics (MI)"),
	    sysctl_vm_uvmexp2, 0, NULL, 0,
	    CTL_VM, VM_UVMEXP2, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT, CTLTYPE_INT, "maxslp",
	    SYSCTL_DESCR("Maximum process sleep time before being "
		"swapped"),
	    NULL, 0, &maxslp, 0,
	    CTL_VM, VM_MAXSLP, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
	    CTLTYPE_INT, "uspace",
	    SYSCTL_DESCR("Number of bytes allocated for a kernel "
		"stack"),
	    NULL, USPACE, NULL, 0,
	    CTL_VM, VM_USPACE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "idlezero",
336 SYSCTL_DESCR("Whether try to zero pages in idle loop"),
337 NULL, 0, &vm_page_zero_enable, 0,
338 CTL_VM, CTL_CREATE, CTL_EOL);
339
340 uvmpdpol_sysctlsetup();
341 }
342
343 /*
344 * uvm_total: calculate the current state of the system.
345 */
346 static void
347 uvm_total(struct vmtotal *totalp)
348 {
349 struct lwp *l;
350 #if 0
351 struct vm_map_entry * entry;
352 struct vm_map *map;
353 int paging;
354 #endif
355 int active;
356
357 memset(totalp, 0, sizeof *totalp);
358
359 /*
360 * calculate process statistics
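	 *
	 * t_rq counts lwps that are runnable and resident, t_dw resident
	 * lwps in short-term ("disk") wait (priority <= PZERO), t_sl other
	 * resident sleepers, and t_sw lwps that are swapped out.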
	 */

	proclist_lock_read();
	LIST_FOREACH(l, &alllwp, l_list) {
		if (l->l_proc->p_flag & P_SYSTEM)
			continue;
		switch (l->l_stat) {
		case 0:
			continue;

		case LSSLEEP:
		case LSSTOP:
			if (l->l_flag & L_INMEM) {
				if (l->l_priority <= PZERO)
					totalp->t_dw++;
				else if (l->l_slptime < maxslp)
					totalp->t_sl++;
			} else if (l->l_slptime < maxslp)
				totalp->t_sw++;
			if (l->l_slptime >= maxslp)
				continue;
			break;

		case LSRUN:
		case LSONPROC:
		case LSIDL:
			if (l->l_flag & L_INMEM)
				totalp->t_rq++;
			else
				totalp->t_sw++;
			if (l->l_stat == LSIDL)
				continue;
			break;
		}
		/*
		 * note active objects
		 */
#if 0
		/*
		 * XXXCDC: BOGUS! rethink this. in the mean time
		 * don't do it.
		 */
		paging = 0;
		vm_map_lock(map);
		for (map = &p->p_vmspace->vm_map, entry = map->header.next;
		    entry != &map->header; entry = entry->next) {
			if (entry->is_a_map || entry->is_sub_map ||
			    entry->object.uvm_obj == NULL)
				continue;
			/* XXX how to do this with uvm */
		}
		vm_map_unlock(map);
		if (paging)
			totalp->t_pw++;
#endif
	}
	proclist_unlock_read();
	/*
	 * Calculate object memory usage statistics.
	 */
	uvm_estimatepageable(&active, NULL);
	totalp->t_free = uvmexp.free;
	totalp->t_vm = uvmexp.npages - uvmexp.free + uvmexp.swpginuse;
	totalp->t_avm = active + uvmexp.swpginuse;	/* XXX */
	totalp->t_rm = uvmexp.npages - uvmexp.free;
	totalp->t_arm = active;
	totalp->t_vmshr = 0;		/* XXX */
	totalp->t_avmshr = 0;		/* XXX */
	totalp->t_rmshr = 0;		/* XXX */
	totalp->t_armshr = 0;		/* XXX */
}

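/*
 * uvm_pctparam: helpers for tunables expressed as a percentage.
 * The raw percentage is kept in pct_pct and a value pre-scaled by
 * UVM_PCTPARAM_SCALE / 100 is cached in pct_scaled.
 */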
void
uvm_pctparam_set(struct uvm_pctparam *pct, int val)
{

	pct->pct_pct = val;
	pct->pct_scaled = val * UVM_PCTPARAM_SCALE / 100;
}

int
uvm_pctparam_get(struct uvm_pctparam *pct)
{

	return pct->pct_pct;
}

int
uvm_pctparam_check(struct uvm_pctparam *pct, int val)
{

	if (pct->pct_check == NULL) {
		return 0;
	}
	return (*pct->pct_check)(pct, val);
}

void
uvm_pctparam_init(struct uvm_pctparam *pct, int val,
    int (*fn)(struct uvm_pctparam *, int))
{

	pct->pct_check = fn;
	uvm_pctparam_set(pct, val);
}

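/*
 * uvm_pctparam_createsysctlnode: register a read-write integer node
 * under CTL_VM for the parameter, handled by uvm_sysctlpctparam().
 */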
int
uvm_pctparam_createsysctlnode(struct uvm_pctparam *pct, const char *name,
    const char *desc)
{

	return sysctl_createv(NULL, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, name, SYSCTL_DESCR(desc),
	    uvm_sysctlpctparam, 0, pct, 0, CTL_VM, CTL_CREATE, CTL_EOL);
}