/*	$NetBSD: uvm_meter.c,v 1.73 2019/12/31 13:07:14 ad Exp $	*/
2
3 /*
4 * Copyright (c) 1997 Charles D. Cranor and Washington University.
5 * Copyright (c) 1982, 1986, 1989, 1993
6 * The Regents of the University of California.
7 *
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)vm_meter.c 8.4 (Berkeley) 1/4/94
35 * from: Id: uvm_meter.c,v 1.1.2.1 1997/08/14 19:10:35 chuck Exp
36 */
37
38 #include <sys/cdefs.h>
39 __KERNEL_RCSID(0, "$NetBSD: uvm_meter.c,v 1.73 2019/12/31 13:07:14 ad Exp $");
40
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/cpu.h>
44 #include <sys/proc.h>
45 #include <sys/kernel.h>
46 #include <sys/sysctl.h>
47
48 #include <uvm/uvm.h>
49 #include <uvm/uvm_pdpolicy.h>
50
51 /*
52 * maxslp: ???? XXXCDC
53 */
54
55 int maxslp = MAXSLP; /* patchable ... */
56 struct loadavg averunnable;
57
58 static void uvm_total(struct vmtotal *);
59
60 /*
61 * sysctl helper routine for the vm.vmmeter node.
62 */
63 static int
64 sysctl_vm_meter(SYSCTLFN_ARGS)
65 {
66 struct sysctlnode node;
67 struct vmtotal vmtotals;
68
69 node = *rnode;
70 node.sysctl_data = &vmtotals;
71 uvm_total(&vmtotals);
72
73 return (sysctl_lookup(SYSCTLFN_CALL(&node)));
74 }
75
76 /*
77 * sysctl helper routine for the vm.uvmexp node.
78 */
79 static int
80 sysctl_vm_uvmexp(SYSCTLFN_ARGS)
81 {
82 struct sysctlnode node;
83
84 uvm_update_uvmexp();
85
86 node = *rnode;
87 if (oldlenp)
88 node.sysctl_size = uimin(*oldlenp, node.sysctl_size);
89
90 return (sysctl_lookup(SYSCTLFN_CALL(&node)));
91 }
92
/*
 * sysctl helper routine for the vm.uvmexp2 node: build a
 * machine-independent struct uvmexp_sysctl snapshot from uvmexp,
 * the per-CPU counters and the page daemon, and copy it out.
 */
static int
sysctl_vm_uvmexp2(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct uvmexp_sysctl u;
	int active, inactive;

	/* Aggregate the per-CPU counters before reading them below. */
	cpu_count_sync_all();
	uvm_estimatepageable(&active, &inactive);

	/* Zero first: any field not explicitly set reads back as 0. */
	memset(&u, 0, sizeof(u));

	/* Entries here are in order of uvmexp_sysctl, not uvmexp */
	u.pagesize = uvmexp.pagesize;
	u.pagemask = uvmexp.pagemask;
	u.pageshift = uvmexp.pageshift;
	u.npages = uvmexp.npages;
	u.free = uvm_availmem();
	u.active = active;
	u.inactive = inactive;
	u.paging = uvmexp.paging;
	u.wired = uvmexp.wired;
	u.zeropages = cpu_count_get(CPU_COUNT_ZEROPAGES);
	u.reserve_pagedaemon = uvmexp.reserve_pagedaemon;
	u.reserve_kernel = uvmexp.reserve_kernel;
	u.freemin = uvmexp.freemin;
	u.freetarg = uvmexp.freetarg;
	u.inactarg = 0; /* unused */
	u.wiredmax = uvmexp.wiredmax;
	u.nswapdev = uvmexp.nswapdev;
	u.swpages = uvmexp.swpages;
	u.swpginuse = uvmexp.swpginuse;
	u.swpgonly = uvmexp.swpgonly;
	u.nswget = uvmexp.nswget;
	u.cpuhit = cpu_count_get(CPU_COUNT_CPUHIT);
	u.cpumiss = cpu_count_get(CPU_COUNT_CPUMISS);
	u.faults = cpu_count_get(CPU_COUNT_NFAULT);
	u.traps = cpu_count_get(CPU_COUNT_NTRAP);
	u.intrs = cpu_count_get(CPU_COUNT_NINTR);
	u.swtch = cpu_count_get(CPU_COUNT_NSWTCH);
	u.softs = cpu_count_get(CPU_COUNT_NSOFT);
	u.syscalls = cpu_count_get(CPU_COUNT_NSYSCALL);
	u.pageins = cpu_count_get(CPU_COUNT_PAGEINS);
	u.pgswapin = 0; /* unused */
	u.pgswapout = uvmexp.pgswapout;
	u.forks = cpu_count_get(CPU_COUNT_FORKS);
	u.forks_ppwait = cpu_count_get(CPU_COUNT_FORKS_PPWAIT);
	u.forks_sharevm = cpu_count_get(CPU_COUNT_FORKS_SHAREVM);
	u.pga_zerohit = cpu_count_get(CPU_COUNT_PGA_ZEROHIT);
	u.pga_zeromiss = cpu_count_get(CPU_COUNT_PGA_ZEROMISS);
	u.zeroaborts = uvmexp.zeroaborts;
	u.fltnoram = cpu_count_get(CPU_COUNT_FLTNORAM);
	u.fltnoanon = cpu_count_get(CPU_COUNT_FLTNOANON);
	u.fltpgwait = cpu_count_get(CPU_COUNT_FLTPGWAIT);
	u.fltpgrele = cpu_count_get(CPU_COUNT_FLTPGRELE);
	u.fltrelck = cpu_count_get(CPU_COUNT_FLTRELCK);
	u.fltrelckok = cpu_count_get(CPU_COUNT_FLTRELCKOK);
	u.fltanget = cpu_count_get(CPU_COUNT_FLTANGET);
	u.fltanretry = cpu_count_get(CPU_COUNT_FLTANRETRY);
	u.fltamcopy = cpu_count_get(CPU_COUNT_FLTAMCOPY);
	u.fltnamap = cpu_count_get(CPU_COUNT_FLTNAMAP);
	u.fltnomap = cpu_count_get(CPU_COUNT_FLTNOMAP);
	u.fltlget = cpu_count_get(CPU_COUNT_FLTLGET);
	u.fltget = cpu_count_get(CPU_COUNT_FLTGET);
	u.flt_anon = cpu_count_get(CPU_COUNT_FLT_ANON);
	u.flt_acow = cpu_count_get(CPU_COUNT_FLT_ACOW);
	u.flt_obj = cpu_count_get(CPU_COUNT_FLT_OBJ);
	u.flt_prcopy = cpu_count_get(CPU_COUNT_FLT_PRCOPY);
	u.flt_przero = cpu_count_get(CPU_COUNT_FLT_PRZERO);
	u.pdwoke = uvmexp.pdwoke;
	u.pdrevs = uvmexp.pdrevs;
	u.pdfreed = uvmexp.pdfreed;
	u.pdscans = uvmexp.pdscans;
	u.pdanscan = uvmexp.pdanscan;
	u.pdobscan = uvmexp.pdobscan;
	u.pdreact = uvmexp.pdreact;
	u.pdbusy = uvmexp.pdbusy;
	u.pdpageouts = uvmexp.pdpageouts;
	u.pdpending = uvmexp.pdpending;
	u.pddeact = uvmexp.pddeact;
	u.anonpages = cpu_count_get(CPU_COUNT_ANONPAGES);
	u.filepages = cpu_count_get(CPU_COUNT_FILEPAGES);
	u.execpages = cpu_count_get(CPU_COUNT_EXECPAGES);
	u.colorhit = cpu_count_get(CPU_COUNT_COLORHIT);
	u.colormiss = cpu_count_get(CPU_COUNT_COLORMISS);
	u.ncolors = uvmexp.ncolors;
	u.bootpages = uvmexp.bootpages;
	u.poolpages = pool_totalpages();
	u.countsyncone = cpu_count_get(CPU_COUNT_SYNC_ONE);
	u.countsyncall = cpu_count_get(CPU_COUNT_SYNC_ALL);

	node = *rnode;
	node.sysctl_data = &u;
	node.sysctl_size = sizeof(u);
	/* Clamp the copy-out size to the caller's buffer length. */
	if (oldlenp)
		node.sysctl_size = uimin(*oldlenp, node.sysctl_size);
	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}
191
192 /*
193 * sysctl helper routine for uvm_pctparam.
194 */
195 static int
196 uvm_sysctlpctparam(SYSCTLFN_ARGS)
197 {
198 int t, error;
199 struct sysctlnode node;
200 struct uvm_pctparam *pct;
201
202 pct = rnode->sysctl_data;
203 t = pct->pct_pct;
204
205 node = *rnode;
206 node.sysctl_data = &t;
207 error = sysctl_lookup(SYSCTLFN_CALL(&node));
208 if (error || newp == NULL)
209 return error;
210
211 if (t < 0 || t > 100)
212 return EINVAL;
213
214 error = uvm_pctparam_check(pct, t);
215 if (error) {
216 return error;
217 }
218 uvm_pctparam_set(pct, t);
219
220 return (0);
221 }
222
223 /*
224 * uvm_sysctl: sysctl hook into UVM system.
225 */
226 SYSCTL_SETUP(sysctl_vm_setup, "sysctl vm subtree setup")
227 {
228
229 sysctl_createv(clog, 0, NULL, NULL,
230 CTLFLAG_PERMANENT,
231 CTLTYPE_STRUCT, "vmmeter",
232 SYSCTL_DESCR("Simple system-wide virtual memory "
233 "statistics"),
234 sysctl_vm_meter, 0, NULL, sizeof(struct vmtotal),
235 CTL_VM, VM_METER, CTL_EOL);
236 sysctl_createv(clog, 0, NULL, NULL,
237 CTLFLAG_PERMANENT,
238 CTLTYPE_STRUCT, "loadavg",
239 SYSCTL_DESCR("System load average history"),
240 NULL, 0, &averunnable, sizeof(averunnable),
241 CTL_VM, VM_LOADAVG, CTL_EOL);
242 sysctl_createv(clog, 0, NULL, NULL,
243 CTLFLAG_PERMANENT,
244 CTLTYPE_STRUCT, "uvmexp",
245 SYSCTL_DESCR("Detailed system-wide virtual memory "
246 "statistics"),
247 sysctl_vm_uvmexp, 0, &uvmexp, sizeof(uvmexp),
248 CTL_VM, VM_UVMEXP, CTL_EOL);
249 sysctl_createv(clog, 0, NULL, NULL,
250 CTLFLAG_PERMANENT,
251 CTLTYPE_STRUCT, "uvmexp2",
252 SYSCTL_DESCR("Detailed system-wide virtual memory "
253 "statistics (MI)"),
254 sysctl_vm_uvmexp2, 0, NULL, 0,
255 CTL_VM, VM_UVMEXP2, CTL_EOL);
256 sysctl_createv(clog, 0, NULL, NULL,
257 CTLFLAG_PERMANENT, CTLTYPE_INT, "maxslp",
258 SYSCTL_DESCR("Maximum process sleep time before being "
259 "swapped"),
260 NULL, 0, &maxslp, 0,
261 CTL_VM, VM_MAXSLP, CTL_EOL);
262 sysctl_createv(clog, 0, NULL, NULL,
263 CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
264 CTLTYPE_INT, "uspace",
265 SYSCTL_DESCR("Number of bytes allocated for a kernel "
266 "stack"),
267 NULL, USPACE, NULL, 0,
268 CTL_VM, VM_USPACE, CTL_EOL);
269 sysctl_createv(clog, 0, NULL, NULL,
270 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
271 CTLTYPE_BOOL, "idlezero",
272 SYSCTL_DESCR("Whether try to zero pages in idle loop"),
273 NULL, 0, &vm_page_zero_enable, 0,
274 CTL_VM, CTL_CREATE, CTL_EOL);
275 sysctl_createv(clog, 0, NULL, NULL,
276 CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
277 CTLTYPE_LONG, "minaddress",
278 SYSCTL_DESCR("Minimum user address"),
279 NULL, VM_MIN_ADDRESS, NULL, 0,
280 CTL_VM, VM_MINADDRESS, CTL_EOL);
281 sysctl_createv(clog, 0, NULL, NULL,
282 CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
283 CTLTYPE_LONG, "maxaddress",
284 SYSCTL_DESCR("Maximum user address"),
285 NULL, VM_MAX_ADDRESS, NULL, 0,
286 CTL_VM, VM_MAXADDRESS, CTL_EOL);
287 sysctl_createv(clog, 0, NULL, NULL,
288 CTLFLAG_PERMANENT|CTLFLAG_UNSIGNED,
289 CTLTYPE_INT, "guard_size",
290 SYSCTL_DESCR("Guard size of main thread"),
291 NULL, 0, &user_stack_guard_size, 0,
292 CTL_VM, VM_GUARD_SIZE, CTL_EOL);
293 sysctl_createv(clog, 0, NULL, NULL,
294 CTLFLAG_PERMANENT|CTLFLAG_UNSIGNED|CTLFLAG_READWRITE,
295 CTLTYPE_INT, "thread_guard_size",
296 SYSCTL_DESCR("Guard size of other threads"),
297 NULL, 0, &user_thread_stack_guard_size, 0,
298 CTL_VM, VM_THREAD_GUARD_SIZE, CTL_EOL);
299 #ifdef PMAP_DIRECT
300 sysctl_createv(clog, 0, NULL, NULL,
301 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
302 CTLTYPE_BOOL, "ubc_direct",
303 SYSCTL_DESCR("Use direct map for UBC I/O"),
304 NULL, 0, &ubc_direct, 0,
305 CTL_VM, CTL_CREATE, CTL_EOL);
306 #endif
307
308 uvmpdpol_sysctlsetup();
309 }
310
311 /*
312 * uvm_total: calculate the current state of the system.
313 */
314 static void
315 uvm_total(struct vmtotal *totalp)
316 {
317 struct lwp *l;
318 #if 0
319 struct vm_map_entry * entry;
320 struct vm_map *map;
321 int paging;
322 #endif
323 int freepg;
324 int active;
325
326 memset(totalp, 0, sizeof *totalp);
327
328 /*
329 * calculate process statistics
330 */
331 mutex_enter(proc_lock);
332 LIST_FOREACH(l, &alllwp, l_list) {
333 if (l->l_proc->p_flag & PK_SYSTEM)
334 continue;
335 switch (l->l_stat) {
336 case 0:
337 continue;
338
339 case LSSLEEP:
340 case LSSTOP:
341 if ((l->l_flag & LW_SINTR) == 0) {
342 totalp->t_dw++;
343 } else if (l->l_slptime < maxslp) {
344 totalp->t_sl++;
345 }
346 if (l->l_slptime >= maxslp)
347 continue;
348 break;
349
350 case LSRUN:
351 case LSONPROC:
352 case LSIDL:
353 totalp->t_rq++;
354 if (l->l_stat == LSIDL)
355 continue;
356 break;
357 }
358 /*
359 * note active objects
360 */
361 #if 0
362 /*
363 * XXXCDC: BOGUS! rethink this. in the mean time
364 * don't do it.
365 */
366 paging = 0;
367 vm_map_lock(map);
368 for (map = &p->p_vmspace->vm_map, entry = map->header.next;
369 entry != &map->header; entry = entry->next) {
370 if (entry->is_a_map || entry->is_sub_map ||
371 entry->object.uvm_obj == NULL)
372 continue;
373 /* XXX how to do this with uvm */
374 }
375 vm_map_unlock(map);
376 if (paging)
377 totalp->t_pw++;
378 #endif
379 }
380 mutex_exit(proc_lock);
381
382 /*
383 * Calculate object memory usage statistics.
384 */
385 freepg = uvm_availmem();
386 uvm_estimatepageable(&active, NULL);
387 totalp->t_free = freepg;
388 totalp->t_vm = uvmexp.npages - freepg + uvmexp.swpginuse;
389 totalp->t_avm = active + uvmexp.swpginuse; /* XXX */
390 totalp->t_rm = uvmexp.npages - freepg;
391 totalp->t_arm = active;
392 totalp->t_vmshr = 0; /* XXX */
393 totalp->t_avmshr = 0; /* XXX */
394 totalp->t_rmshr = 0; /* XXX */
395 totalp->t_armshr = 0; /* XXX */
396 }
397
398 void
399 uvm_pctparam_set(struct uvm_pctparam *pct, int val)
400 {
401
402 pct->pct_pct = val;
403 pct->pct_scaled = val * UVM_PCTPARAM_SCALE / 100;
404 }
405
/* uvm_pctparam_get: return the currently configured percentage (0..100). */
int
uvm_pctparam_get(struct uvm_pctparam *pct)
{

	return pct->pct_pct;
}
412
413 int
414 uvm_pctparam_check(struct uvm_pctparam *pct, int val)
415 {
416
417 if (pct->pct_check == NULL) {
418 return 0;
419 }
420 return (*pct->pct_check)(pct, val);
421 }
422
423 void
424 uvm_pctparam_init(struct uvm_pctparam *pct, int val,
425 int (*fn)(struct uvm_pctparam *, int))
426 {
427
428 pct->pct_check = fn;
429 uvm_pctparam_set(pct, val);
430 }
431
432 int
433 uvm_pctparam_createsysctlnode(struct uvm_pctparam *pct, const char *name,
434 const char *desc)
435 {
436
437 return sysctl_createv(NULL, 0, NULL, NULL,
438 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
439 CTLTYPE_INT, name, SYSCTL_DESCR(desc),
440 uvm_sysctlpctparam, 0, (void *)pct, 0, CTL_VM, CTL_CREATE, CTL_EOL);
441 }
442
443 /*
444 * Update uvmexp with aggregate values from the per-CPU counters.
445 */
446 void
447 uvm_update_uvmexp(void)
448 {
449
450 cpu_count_sync_all();
451
452 uvmexp.free = (int)uvm_availmem();
453 uvmexp.zeropages = (int)cpu_count_get(CPU_COUNT_ZEROPAGES);
454 uvmexp.cpuhit = (int)cpu_count_get(CPU_COUNT_CPUHIT);
455 uvmexp.cpumiss = (int)cpu_count_get(CPU_COUNT_CPUMISS);
456 uvmexp.faults = (int)cpu_count_get(CPU_COUNT_NFAULT);
457 uvmexp.traps = (int)cpu_count_get(CPU_COUNT_NTRAP);
458 uvmexp.intrs = (int)cpu_count_get(CPU_COUNT_NINTR);
459 uvmexp.swtch = (int)cpu_count_get(CPU_COUNT_NSWTCH);
460 uvmexp.softs = (int)cpu_count_get(CPU_COUNT_NSOFT);
461 uvmexp.syscalls = (int)cpu_count_get(CPU_COUNT_NSYSCALL);
462 uvmexp.pageins = (int)cpu_count_get(CPU_COUNT_PAGEINS);
463 uvmexp.forks = (int)cpu_count_get(CPU_COUNT_FORKS);
464 uvmexp.forks_ppwait = (int)cpu_count_get(CPU_COUNT_FORKS_PPWAIT);
465 uvmexp.forks_sharevm = (int)cpu_count_get(CPU_COUNT_FORKS_SHAREVM);
466 uvmexp.pga_zerohit = (int)cpu_count_get(CPU_COUNT_PGA_ZEROHIT);
467 uvmexp.pga_zeromiss = (int)cpu_count_get(CPU_COUNT_PGA_ZEROMISS);
468 uvmexp.fltnoram = (int)cpu_count_get(CPU_COUNT_FLTNORAM);
469 uvmexp.fltnoanon = (int)cpu_count_get(CPU_COUNT_FLTNOANON);
470 uvmexp.fltpgwait = (int)cpu_count_get(CPU_COUNT_FLTPGWAIT);
471 uvmexp.fltpgrele = (int)cpu_count_get(CPU_COUNT_FLTPGRELE);
472 uvmexp.fltrelck = (int)cpu_count_get(CPU_COUNT_FLTRELCK);
473 uvmexp.fltrelckok = (int)cpu_count_get(CPU_COUNT_FLTRELCKOK);
474 uvmexp.fltanget = (int)cpu_count_get(CPU_COUNT_FLTANGET);
475 uvmexp.fltanretry = (int)cpu_count_get(CPU_COUNT_FLTANRETRY);
476 uvmexp.fltamcopy = (int)cpu_count_get(CPU_COUNT_FLTAMCOPY);
477 uvmexp.fltnamap = (int)cpu_count_get(CPU_COUNT_FLTNAMAP);
478 uvmexp.fltnomap = (int)cpu_count_get(CPU_COUNT_FLTNOMAP);
479 uvmexp.fltlget = (int)cpu_count_get(CPU_COUNT_FLTLGET);
480 uvmexp.fltget = (int)cpu_count_get(CPU_COUNT_FLTGET);
481 uvmexp.flt_anon = (int)cpu_count_get(CPU_COUNT_FLT_ANON);
482 uvmexp.flt_acow = (int)cpu_count_get(CPU_COUNT_FLT_ACOW);
483 uvmexp.flt_obj = (int)cpu_count_get(CPU_COUNT_FLT_OBJ);
484 uvmexp.flt_prcopy = (int)cpu_count_get(CPU_COUNT_FLT_PRCOPY);
485 uvmexp.flt_przero = (int)cpu_count_get(CPU_COUNT_FLT_PRZERO);
486 uvmexp.anonpages = (int)cpu_count_get(CPU_COUNT_ANONPAGES);
487 uvmexp.filepages = (int)cpu_count_get(CPU_COUNT_FILEPAGES);
488 uvmexp.execpages = (int)cpu_count_get(CPU_COUNT_EXECPAGES);
489 uvmexp.colorhit = (int)cpu_count_get(CPU_COUNT_COLORHIT);
490 uvmexp.colormiss = (int)cpu_count_get(CPU_COUNT_COLORMISS);
491 }
492