/*	$NetBSD: uvm_meter.c,v 1.55 2010/12/20 00:25:48 matt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1982, 1986, 1989, 1993
 *      The Regents of the University of California.
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, and the University of California, Berkeley
 *      and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vm_meter.c  8.4 (Berkeley) 1/4/94
 * from: Id: uvm_meter.c,v 1.1.2.1 1997/08/14 19:10:35 chuck Exp
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_meter.c,v 1.55 2010/12/20 00:25:48 matt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpu.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>

/*
 * maxslp: ???? XXXCDC
 */

int maxslp = MAXSLP;    /* patchable ... */
struct loadavg averunnable;

static void uvm_total(struct vmtotal *);

/*
 * sysctl helper routine for the vm.vmmeter node.
 */
static int
sysctl_vm_meter(SYSCTLFN_ARGS)
{
        struct sysctlnode node;
        struct vmtotal vmtotals;

        node = *rnode;
        node.sysctl_data = &vmtotals;
        uvm_total(&vmtotals);

        return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}
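
/*
 * Example (illustrative only, not part of this file): userland can fetch
 * the vmtotal snapshot exported above with sysctl(3) and the static mib
 * {CTL_VM, VM_METER}, roughly as follows:
 *
 *      #include <sys/param.h>
 *      #include <sys/sysctl.h>
 *      #include <sys/vmmeter.h>
 *      #include <err.h>
 *
 *      struct vmtotal vt;
 *      size_t len = sizeof(vt);
 *      int mib[2] = { CTL_VM, VM_METER };
 *
 *      if (sysctl(mib, 2, &vt, &len, NULL, 0) == -1)
 *              err(1, "sysctl vm.vmmeter");
 */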

/*
 * sysctl helper routine for the vm.uvmexp node.
 */
static int
sysctl_vm_uvmexp(SYSCTLFN_ARGS)
{
        struct sysctlnode node;

        node = *rnode;
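        /*
         * Clamp the copy-out to the caller's buffer: struct uvmexp has
         * grown over time, so older binaries may supply a shorter
         * structure than the kernel's.
         */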
        if (oldp)
                node.sysctl_size = min(*oldlenp, node.sysctl_size);

        return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}

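/*
 * sysctl helper routine for the vm.uvmexp2 node: export the MI
 * uvmexp_sysctl view of the counters.
 */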
static int
sysctl_vm_uvmexp2(SYSCTLFN_ARGS)
{
        struct sysctlnode node;
        struct uvmexp_sysctl u;
        int active, inactive;
        CPU_INFO_ITERATOR cii;
        struct cpu_info *ci;

        uvm_estimatepageable(&active, &inactive);

        memset(&u, 0, sizeof(u));

        /* Entries here are in order of uvmexp_sysctl, not uvmexp */
        u.pagesize = uvmexp.pagesize;
        u.pagemask = uvmexp.pagemask;
        u.pageshift = uvmexp.pageshift;
        u.npages = uvmexp.npages;
        u.free = uvmexp.free;
        u.active = active;
        u.inactive = inactive;
        u.paging = uvmexp.paging;
        u.wired = uvmexp.wired;
        u.zeropages = uvmexp.zeropages;
        u.reserve_pagedaemon = uvmexp.reserve_pagedaemon;
        u.reserve_kernel = uvmexp.reserve_kernel;
        u.freemin = uvmexp.freemin;
        u.freetarg = uvmexp.freetarg;
        u.inactarg = 0; /* unused */
        u.wiredmax = uvmexp.wiredmax;
        u.nswapdev = uvmexp.nswapdev;
        u.swpages = uvmexp.swpages;
        u.swpginuse = uvmexp.swpginuse;
        u.swpgonly = uvmexp.swpgonly;
        u.nswget = uvmexp.nswget;
        for (CPU_INFO_FOREACH(cii, ci)) {
                u.faults += ci->ci_data.cpu_nfault;
                u.traps += ci->ci_data.cpu_ntrap;
                u.intrs += ci->ci_data.cpu_nintr;
                u.swtch += ci->ci_data.cpu_nswtch;
                u.softs += ci->ci_data.cpu_nsoft;
                u.syscalls += ci->ci_data.cpu_nsyscall;
        }
        u.pageins = uvmexp.pageins;
        u.pgswapin = uvmexp.pgswapin;
        u.pgswapout = uvmexp.pgswapout;
        u.forks = uvmexp.forks;
        u.forks_ppwait = uvmexp.forks_ppwait;
        u.forks_sharevm = uvmexp.forks_sharevm;
        u.pga_zerohit = uvmexp.pga_zerohit;
        u.pga_zeromiss = uvmexp.pga_zeromiss;
        u.zeroaborts = uvmexp.zeroaborts;
        u.fltnoram = uvmexp.fltnoram;
        u.fltnoanon = uvmexp.fltnoanon;
        u.fltpgwait = uvmexp.fltpgwait;
        u.fltpgrele = uvmexp.fltpgrele;
        u.fltrelck = uvmexp.fltrelck;
        u.fltrelckok = uvmexp.fltrelckok;
        u.fltanget = uvmexp.fltanget;
        u.fltanretry = uvmexp.fltanretry;
        u.fltamcopy = uvmexp.fltamcopy;
        u.fltnamap = uvmexp.fltnamap;
        u.fltnomap = uvmexp.fltnomap;
        u.fltlget = uvmexp.fltlget;
        u.fltget = uvmexp.fltget;
        u.flt_anon = uvmexp.flt_anon;
        u.flt_acow = uvmexp.flt_acow;
        u.flt_obj = uvmexp.flt_obj;
        u.flt_prcopy = uvmexp.flt_prcopy;
        u.flt_przero = uvmexp.flt_przero;
        u.pdwoke = uvmexp.pdwoke;
        u.pdrevs = uvmexp.pdrevs;
        u.pdfreed = uvmexp.pdfreed;
        u.pdscans = uvmexp.pdscans;
        u.pdanscan = uvmexp.pdanscan;
        u.pdobscan = uvmexp.pdobscan;
        u.pdreact = uvmexp.pdreact;
        u.pdbusy = uvmexp.pdbusy;
        u.pdpageouts = uvmexp.pdpageouts;
        u.pdpending = uvmexp.pdpending;
        u.pddeact = uvmexp.pddeact;
        u.anonpages = uvmexp.anonpages;
        u.filepages = uvmexp.filepages;
        u.execpages = uvmexp.execpages;
        u.colorhit = uvmexp.colorhit;
        u.colormiss = uvmexp.colormiss;
        u.cpuhit = uvmexp.cpuhit;
        u.cpumiss = uvmexp.cpumiss;

        node = *rnode;
        node.sysctl_data = &u;
        node.sysctl_size = sizeof(u);
        return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}
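
/*
 * Example (illustrative only): userland reads this node with the mib
 * {CTL_VM, VM_UVMEXP2} and the struct uvmexp_sysctl declared in the
 * uvm headers, roughly as follows:
 *
 *      struct uvmexp_sysctl u;
 *      size_t len = sizeof(u);
 *      int mib[2] = { CTL_VM, VM_UVMEXP2 };
 *
 *      if (sysctl(mib, 2, &u, &len, NULL, 0) == -1)
 *              err(1, "sysctl vm.uvmexp2");
 */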

/*
 * sysctl helper routine for uvm_pctparam.
 */
static int
uvm_sysctlpctparam(SYSCTLFN_ARGS)
{
        int t, error;
        struct sysctlnode node;
        struct uvm_pctparam *pct;

        pct = rnode->sysctl_data;
        t = pct->pct_pct;

        node = *rnode;
        node.sysctl_data = &t;
        error = sysctl_lookup(SYSCTLFN_CALL(&node));
        if (error || newp == NULL)
                return error;

        if (t < 0 || t > 100)
                return EINVAL;

        error = uvm_pctparam_check(pct, t);
        if (error) {
                return error;
        }
        uvm_pctparam_set(pct, t);

        return (0);
}

/*
 * uvm_sysctl: sysctl hook into UVM system.
 */
SYSCTL_SETUP(sysctl_vm_setup, "sysctl vm subtree setup")
{

        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT,
            CTLTYPE_NODE, "vm", NULL,
            NULL, 0, NULL, 0,
            CTL_VM, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT,
            CTLTYPE_STRUCT, "vmmeter",
            SYSCTL_DESCR("Simple system-wide virtual memory "
                "statistics"),
            sysctl_vm_meter, 0, NULL, sizeof(struct vmtotal),
            CTL_VM, VM_METER, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT,
            CTLTYPE_STRUCT, "loadavg",
            SYSCTL_DESCR("System load average history"),
            NULL, 0, &averunnable, sizeof(averunnable),
            CTL_VM, VM_LOADAVG, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT,
            CTLTYPE_STRUCT, "uvmexp",
            SYSCTL_DESCR("Detailed system-wide virtual memory "
                "statistics"),
            sysctl_vm_uvmexp, 0, &uvmexp, sizeof(uvmexp),
            CTL_VM, VM_UVMEXP, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT,
            CTLTYPE_INT, "nkmempages",
            SYSCTL_DESCR("Default number of pages in kmem_map"),
            NULL, 0, &nkmempages, 0,
            CTL_VM, VM_NKMEMPAGES, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT,
            CTLTYPE_STRUCT, "uvmexp2",
            SYSCTL_DESCR("Detailed system-wide virtual memory "
                "statistics (MI)"),
            sysctl_vm_uvmexp2, 0, NULL, 0,
            CTL_VM, VM_UVMEXP2, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT, CTLTYPE_INT, "maxslp",
            SYSCTL_DESCR("Maximum process sleep time before being "
                "swapped"),
            NULL, 0, &maxslp, 0,
            CTL_VM, VM_MAXSLP, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
            CTLTYPE_INT, "uspace",
            SYSCTL_DESCR("Number of bytes allocated for a kernel "
                "stack"),
            NULL, USPACE, NULL, 0,
            CTL_VM, VM_USPACE, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
            CTLTYPE_BOOL, "idlezero",
            SYSCTL_DESCR("Whether to try to zero pages in the idle loop"),
            NULL, 0, &vm_page_zero_enable, 0,
            CTL_VM, CTL_CREATE, CTL_EOL);

        uvmpdpol_sysctlsetup();
}
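
/*
 * Example (illustrative only): nodes created with CTL_CREATE, such as
 * "idlezero" above, have no fixed mib number, so userland addresses
 * them by name, e.g.:
 *
 *      bool enable = true;
 *
 *      if (sysctlbyname("vm.idlezero", NULL, NULL,
 *          &enable, sizeof(enable)) == -1)
 *              err(1, "sysctlbyname vm.idlezero");
 */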

/*
 * uvm_total: calculate the current state of the system.
 */
static void
uvm_total(struct vmtotal *totalp)
{
        struct lwp *l;
#if 0
        struct vm_map_entry *entry;
        struct vm_map *map;
        int paging;
#endif
        int active;

        memset(totalp, 0, sizeof *totalp);

        /*
         * calculate process statistics
         */
        mutex_enter(proc_lock);
        LIST_FOREACH(l, &alllwp, l_list) {
                if (l->l_proc->p_flag & PK_SYSTEM)
                        continue;
                switch (l->l_stat) {
                case 0:
                        continue;

                case LSSLEEP:
                case LSSTOP:
                        if ((l->l_flag & LW_SINTR) == 0) {
                                totalp->t_dw++;
                        } else if (l->l_slptime < maxslp) {
                                totalp->t_sl++;
                        }
                        if (l->l_slptime >= maxslp)
                                continue;
                        break;

                case LSRUN:
                case LSONPROC:
                case LSIDL:
                        totalp->t_rq++;
                        if (l->l_stat == LSIDL)
                                continue;
                        break;
                }
                /*
                 * note active objects
                 */
#if 0
                /*
                 * XXXCDC: BOGUS!  rethink this.  in the mean time
                 * don't do it.
                 */
                paging = 0;
                map = &l->l_proc->p_vmspace->vm_map;
                vm_map_lock(map);
                for (entry = map->header.next;
                    entry != &map->header; entry = entry->next) {
                        if (entry->is_a_map || entry->is_sub_map ||
                            entry->object.uvm_obj == NULL)
                                continue;
                        /* XXX how to do this with uvm */
                }
                vm_map_unlock(map);
                if (paging)
                        totalp->t_pw++;
#endif
        }
        mutex_exit(proc_lock);

        /*
         * Calculate object memory usage statistics.
         */
        uvm_estimatepageable(&active, NULL);
        totalp->t_free = uvmexp.free;
        totalp->t_vm = uvmexp.npages - uvmexp.free + uvmexp.swpginuse;
        totalp->t_avm = active + uvmexp.swpginuse;      /* XXX */
        totalp->t_rm = uvmexp.npages - uvmexp.free;
        totalp->t_arm = active;
        totalp->t_vmshr = 0;            /* XXX */
        totalp->t_avmshr = 0;           /* XXX */
        totalp->t_rmshr = 0;            /* XXX */
        totalp->t_armshr = 0;           /* XXX */
}

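/*
 * uvm_pctparam_set: set a percentage parameter and precompute the
 * value scaled by UVM_PCTPARAM_SCALE for use at apply time.
 */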
void
uvm_pctparam_set(struct uvm_pctparam *pct, int val)
{

        pct->pct_pct = val;
        pct->pct_scaled = val * UVM_PCTPARAM_SCALE / 100;
}

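/*
 * uvm_pctparam_get: return the current (unscaled) percentage.
 */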
int
uvm_pctparam_get(struct uvm_pctparam *pct)
{

        return pct->pct_pct;
}

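/*
 * uvm_pctparam_check: run the parameter's optional check hook on a
 * proposed value; accept the value if no hook is installed.
 */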
int
uvm_pctparam_check(struct uvm_pctparam *pct, int val)
{

        if (pct->pct_check == NULL) {
                return 0;
        }
        return (*pct->pct_check)(pct, val);
}

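/*
 * uvm_pctparam_init: install the check hook and set the initial value.
 */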
void
uvm_pctparam_init(struct uvm_pctparam *pct, int val,
    int (*fn)(struct uvm_pctparam *, int))
{

        pct->pct_check = fn;
        uvm_pctparam_set(pct, val);
}

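/*
 * uvm_pctparam_createsysctlnode: attach a read/write integer node for
 * the parameter under CTL_VM, serviced by uvm_sysctlpctparam above.
 */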
int
uvm_pctparam_createsysctlnode(struct uvm_pctparam *pct, const char *name,
    const char *desc)
{

        return sysctl_createv(NULL, 0, NULL, NULL,
            CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
            CTLTYPE_INT, name, SYSCTL_DESCR(desc),
            uvm_sysctlpctparam, 0, pct, 0, CTL_VM, CTL_CREATE, CTL_EOL);
}
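
/*
 * Example (illustrative only; "foo_pct" and its 50% default are made up
 * for this sketch): a subsystem would typically use the routines above
 * as follows:
 *
 *      static struct uvm_pctparam foo_pct;
 *
 *      uvm_pctparam_init(&foo_pct, 50, NULL);
 *      uvm_pctparam_createsysctlnode(&foo_pct, "foopct",
 *          "percentage of memory reserved for foo");
 *
 * after which "vm.foopct" can be read and written through sysctl, with
 * writes validated by uvm_sysctlpctparam().
 */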