/*	$NetBSD: subr_prof.c,v 1.49 2019/04/06 03:06:28 thorpej Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)subr_prof.c	8.4 (Berkeley) 2/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_prof.c,v 1.49 2019/04/06 03:06:28 thorpej Exp $");

#ifdef _KERNEL_OPT
#include "opt_gprof.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>

#include <sys/cpu.h>

#ifdef GPROF
#include <sys/malloc.h>
#include <sys/gmon.h>

MALLOC_DEFINE(M_GPROF, "gprof", "kernel profiling buffer");

/*
 * froms is an array of unsigned shorts, indexed by call-from program
 * counter; each entry indexes into the tos array of arc records.
 */
struct gmonparam _gmonparam = { .state = GMON_PROF_OFF };

/* Actual start of the kernel text segment. */
extern char kernel_text[];

extern char etext[];


void
kmstartup(void)
{
	char *cp;
	struct gmonparam *p = &_gmonparam;
	/*
	 * Round lowpc and highpc to multiples of the density we're using
	 * so the rest of the scaling (here and in gprof) stays in ints.
	 */
	p->lowpc = rounddown(((u_long)kernel_text),
	    HISTFRACTION * sizeof(HISTCOUNTER));
	p->highpc = roundup((u_long)etext,
	    HISTFRACTION * sizeof(HISTCOUNTER));
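	/*
	 * (With the usual HISTFRACTION of 2 and 16-bit HISTCOUNTERs this
	 * rounds both bounds to a multiple of 4 bytes, giving one
	 * histogram counter per 4 bytes of kernel text.)
	 */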
	p->textsize = p->highpc - p->lowpc;
	printf("Profiling kernel, textsize=%ld [%lx..%lx]\n",
	    p->textsize, p->lowpc, p->highpc);
	p->kcountsize = p->textsize / HISTFRACTION;
	p->hashfraction = HASHFRACTION;
	p->fromssize = p->textsize / HASHFRACTION;
	p->tolimit = p->textsize * ARCDENSITY / 100;
	if (p->tolimit < MINARCS)
		p->tolimit = MINARCS;
	else if (p->tolimit > MAXARCS)
		p->tolimit = MAXARCS;
	p->tossize = p->tolimit * sizeof(struct tostruct);
	cp = malloc(p->kcountsize + p->fromssize + p->tossize,
	    M_GPROF, M_NOWAIT | M_ZERO);
	if (cp == NULL) {
		printf("No memory for profiling.\n");
		return;
	}
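	/*
	 * Carve the single allocation into the three gprof arrays: the
	 * arc records (tos), then the pc histogram (kcount), then the
	 * call-from indices into tos (froms).
	 */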
	p->tos = (struct tostruct *)cp;
	cp += p->tossize;
	p->kcount = (u_short *)cp;
	cp += p->kcountsize;
	p->froms = (u_short *)cp;
}

/*
 * sysctl helper routine for the kern.profiling subtree: returns kernel
 * profiling information, enables/disables kernel profiling, and gives
 * out copies of the profiling data.
 */
static int
sysctl_kern_profiling(SYSCTLFN_ARGS)
{
	struct gmonparam *gp = &_gmonparam;
	int error;
	struct sysctlnode node;

	node = *rnode;

	switch (node.sysctl_num) {
	case GPROF_STATE:
		node.sysctl_data = &gp->state;
		break;
	case GPROF_COUNT:
		node.sysctl_data = gp->kcount;
		node.sysctl_size = gp->kcountsize;
		break;
	case GPROF_FROMS:
		node.sysctl_data = gp->froms;
		node.sysctl_size = gp->fromssize;
		break;
	case GPROF_TOS:
		node.sysctl_data = gp->tos;
		node.sysctl_size = gp->tossize;
		break;
	case GPROF_GMONPARAM:
		node.sysctl_data = gp;
		node.sysctl_size = sizeof(*gp);
		break;
	default:
		return (EOPNOTSUPP);
	}

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

	if (node.sysctl_num == GPROF_STATE) {
		mutex_spin_enter(&proc0.p_stmutex);
		if (gp->state == GMON_PROF_OFF)
			stopprofclock(&proc0);
		else
			startprofclock(&proc0);
		mutex_spin_exit(&proc0.p_stmutex);
	}

	return (0);
}

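/*
 * The nodes created below appear to userland as kern.profiling.*; for
 * example, kernel profiling can be switched on with sysctl(3) roughly
 * as follows (an illustrative sketch, error handling omitted):
 *
 *	int mib[3] = { CTL_KERN, KERN_PROF, GPROF_STATE };
 *	int state = GMON_PROF_ON;
 *	sysctl(mib, 3, NULL, NULL, &state, sizeof(state));
 */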
SYSCTL_SETUP(sysctl_kern_gprof_setup, "sysctl kern.profiling subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "profiling",
	    SYSCTL_DESCR("Profiling information (available)"),
	    NULL, 0, NULL, 0,
	    CTL_KERN, KERN_PROF, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "state",
	    SYSCTL_DESCR("Profiling state"),
	    sysctl_kern_profiling, 0, NULL, 0,
	    CTL_KERN, KERN_PROF, GPROF_STATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_STRUCT, "count",
	    SYSCTL_DESCR("Array of statistical program counters"),
	    sysctl_kern_profiling, 0, NULL, 0,
	    CTL_KERN, KERN_PROF, GPROF_COUNT, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_STRUCT, "froms",
	    SYSCTL_DESCR("Array indexed by program counter of "
		"call-from points"),
	    sysctl_kern_profiling, 0, NULL, 0,
	    CTL_KERN, KERN_PROF, GPROF_FROMS, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_STRUCT, "tos",
	    SYSCTL_DESCR("Array of structures describing "
		"destination of calls and their counts"),
	    sysctl_kern_profiling, 0, NULL, 0,
	    CTL_KERN, KERN_PROF, GPROF_TOS, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRUCT, "gmonparam",
	    SYSCTL_DESCR("Structure giving the sizes of the above "
		"arrays"),
	    sysctl_kern_profiling, 0, NULL, 0,
	    CTL_KERN, KERN_PROF, GPROF_GMONPARAM, CTL_EOL);
}
#endif /* GPROF */

/*
 * Profiling system call.
 *
 * The scale factor is a fixed point number with 16 bits of fraction, so that
 * 1.0 is represented as 0x10000.  A scale factor of 0 turns off profiling.
 */
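/*
 * For example, profil(buf, bufsize, offset, 0x10000) asks for one 16-bit
 * counter per two bytes of text starting at "offset", while a scale of
 * 0x8000 (0.5) spreads each counter over four bytes; see PC_TO_INDEX()
 * below for the exact mapping.
 */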
/* ARGSUSED */
int
sys_profil(struct lwp *l, const struct sys_profil_args *uap, register_t *retval)
{
	/* {
		syscallarg(char *) samples;
		syscallarg(size_t) size;
		syscallarg(u_long) offset;
		syscallarg(u_int) scale;
	} */
	struct proc *p = l->l_proc;
	struct uprof *upp;

	if (SCARG(uap, scale) > (1 << 16))
		return (EINVAL);
	if (SCARG(uap, scale) == 0) {
		mutex_spin_enter(&p->p_stmutex);
		stopprofclock(p);
		mutex_spin_exit(&p->p_stmutex);
		return (0);
	}
	upp = &p->p_stats->p_prof;

	/* Block profile interrupts while changing state. */
	mutex_spin_enter(&p->p_stmutex);
	upp->pr_off = SCARG(uap, offset);
	upp->pr_scale = SCARG(uap, scale);
	upp->pr_base = SCARG(uap, samples);
	upp->pr_size = SCARG(uap, size);
	startprofclock(p);
	mutex_spin_exit(&p->p_stmutex);

	return (0);
}

/*
 * Scale is a fixed-point number with the binary point 16 bits
 * into the value, and is <= 1.0.  pc is at most 32 bits, so the
 * intermediate result is at most 48 bits.
 */
#define	PC_TO_INDEX(pc, prof) \
	((int)(((u_quad_t)((pc) - (prof)->pr_off) * \
	    (u_quad_t)((prof)->pr_scale)) >> 16) & ~1)
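/*
 * Example: with pr_scale == 0x8000 (0.5) and pr_off == 0x1000, a pc of
 * 0x1009 maps to ((0x9 * 0x8000) >> 16) & ~1 == 4, i.e. the third 16-bit
 * counter in the sample buffer.
 */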

/*
 * Collect user-level profiling statistics; called on a profiling tick,
 * when a process is running in user-mode.  This routine may be called
 * from an interrupt context.  We schedule an AST that will vector us
 * to trap() with a context in which copyin and copyout will work.
 * Trap will then call addupc_task().
 *
 * XXX We could use ufetch/ustore here if the profile buffers were
 * wired.
 *
 * Note that we may (rarely) not get around to the AST soon enough, and
 * lose profile ticks when the next tick overwrites this one, but in this
 * case the system is overloaded and the profile is probably already
 * inaccurate.
 */
void
addupc_intr(struct lwp *l, u_long pc)
{
	struct uprof *prof;
	struct proc *p;
	u_int i;

	p = l->l_proc;

	KASSERT(mutex_owned(&p->p_stmutex));

	prof = &p->p_stats->p_prof;
	if (pc < prof->pr_off ||
	    (i = PC_TO_INDEX(pc, prof)) >= prof->pr_size)
		return;			/* out of range; ignore */

	mutex_spin_exit(&p->p_stmutex);

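	/*
	 * Stash the pending sample; trap() will apply it via
	 * addupc_task() once the AST posted below is serviced.
	 */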
	/* XXXSMP */
	prof->pr_addr = pc;
	prof->pr_ticks++;
	cpu_need_proftick(l);

	mutex_spin_enter(&p->p_stmutex);
}

/*
 * Much like before, but we can afford to take faults here.  If the
 * update fails, we simply turn off profiling.
 */
void
addupc_task(struct lwp *l, u_long pc, u_int ticks)
{
	struct uprof *prof;
	struct proc *p;
	void *addr;
	int error;
	u_int i;
	u_short v;

	p = l->l_proc;

	if (ticks == 0)
		return;

	mutex_spin_enter(&p->p_stmutex);
	prof = &p->p_stats->p_prof;

	/* Testing PST_PROFIL may be unnecessary, but is certainly safe. */
	if ((p->p_stflag & PST_PROFIL) == 0 || pc < prof->pr_off ||
	    (i = PC_TO_INDEX(pc, prof)) >= prof->pr_size) {
		mutex_spin_exit(&p->p_stmutex);
		return;
	}

	addr = prof->pr_base + i;
	mutex_spin_exit(&p->p_stmutex);
	if ((error = copyin(addr, (void *)&v, sizeof(v))) == 0) {
		v += ticks;
		error = copyout((void *)&v, addr, sizeof(v));
	}
	if (error != 0) {
		mutex_spin_enter(&p->p_stmutex);
		stopprofclock(p);
		mutex_spin_exit(&p->p_stmutex);
	}
}