/*	$NetBSD: tprof_top.c,v 1.7.2.2 2023/06/21 22:34:51 martin Exp $	*/

/*-
 * Copyright (c) 2022 Ryo Shimizu <ryo@nerv.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#ifndef lint
__RCSID("$NetBSD: tprof_top.c,v 1.7.2.2 2023/06/21 22:34:51 martin Exp $");
#endif /* not lint */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/rbtree.h>
#include <sys/select.h>
#include <sys/time.h>

#include <assert.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <math.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <term.h>
#include <termios.h>
#include <unistd.h>
#include <util.h>

#include <dev/tprof/tprof_ioctl.h>
#include "tprof.h"
#include "ksyms.h"

#define SAMPLE_MODE_ACCUMULATIVE	0
#define SAMPLE_MODE_INSTANTANEOUS	1
#define SAMPLE_MODE_NUM			2

#define LINESTR	"-------------------------------------------------------------"
#define SYMBOL_LEN	32	/* symbol and event name */

struct sample_elm {
	struct rb_node node;
	uint64_t addr;
	const char *name;
	uint32_t flags;
#define SAMPLE_ELM_FLAGS_USER	0x00000001
	uint32_t num[SAMPLE_MODE_NUM];
	uint32_t num_cpu[];	/* [SAMPLE_MODE_NUM][ncpu] */
#define SAMPLE_ELM_NUM_CPU(e, k)	\
	((e)->num_cpu + (k) * ncpu)
};

struct ptrarray {
	void **pa_ptrs;
	size_t pa_allocnum;
	size_t pa_inuse;
};

static int opt_mode = SAMPLE_MODE_INSTANTANEOUS;
static int opt_userland = 0;
static int opt_showcounter = 0;

/* for display */
static char *term;
static struct winsize win;
static int nontty;
static struct termios termios_save;
static bool termios_saved;
static long top_interval = 1;
static bool do_redraw;
static u_int nshow;

/* for profiling and counting samples */
static sig_atomic_t sigalrm;
static struct sym **ksyms;
static size_t nksyms;
static u_int nevent;
static const char *eventname[TPROF_MAXCOUNTERS];
static size_t sizeof_sample_elm;
static rb_tree_t rb_tree_sample;
struct ptrarray sample_list[SAMPLE_MODE_NUM];
static u_int sample_n_kern[SAMPLE_MODE_NUM];
static u_int sample_n_user[SAMPLE_MODE_NUM];
static u_int sample_event_width = 7;
static u_int *sample_cpu_width;				/* [ncpu] */
static uint32_t *sample_n_kern_per_cpu[SAMPLE_MODE_NUM];	/* [ncpu] */
static uint32_t *sample_n_user_per_cpu[SAMPLE_MODE_NUM];	/* [ncpu] */
static uint64_t *sample_n_per_event[SAMPLE_MODE_NUM];	/* [nevent] */
static uint64_t *sample_n_per_event_cpu[SAMPLE_MODE_NUM];	/* [ncpu] */

/* raw event counter */
static uint64_t *counters;	/* counters[2][ncpu][nevent] */
static u_int counters_i;

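/*
 * move the cursor back to the top-left corner of the screen so the
 * next redraw overwrites the previous one in place.
 */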
static void
reset_cursor_pos(void)
{
	int i;
	char *p;

	if (nontty || term == NULL)
		return;

	printf("\r");

	/* cursor_up * n */
	if ((p = tigetstr("cuu")) != NULL) {
		putp(tparm(p, win.ws_row - 1, 0, 0, 0, 0, 0, 0, 0, 0));
	} else if ((p = tigetstr("cuu1")) != NULL) {
		for (i = win.ws_row - 1; i > 0; i--)
			putp(p);
	}
}

static void
clr_to_eol(void)
{
	char *p;

	if (nontty || term == NULL)
		return;

	if ((p = tigetstr("el")) != NULL)
		putp(p);
}

/* newline, and clearing to end of line if needed */
static void
lim_newline(int *lim)
{
	if (*lim >= 1)
		clr_to_eol();

	printf("\n");
	*lim = win.ws_col;
}

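/*
 * printf() variant that never emits more than *lim characters and
 * decrements *lim by the number of characters actually written.
 */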
static int
lim_printf(int *lim, const char *fmt, ...)
{
	va_list ap;
	size_t written;
	char *p;

	if (*lim <= 0)
		return 0;

	p = malloc(*lim + 1);
	if (p == NULL)
		return -1;

	va_start(ap, fmt);
	vsnprintf(p, *lim + 1, fmt, ap);
	va_end(ap);

	written = strlen(p);
	if (written == 0) {
		free(p);
		*lim = 0;
		return 0;
	}

	fwrite(p, written, 1, stdout);
	*lim -= written;

	free(p);
	return written;
}

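/*
 * determine the terminal size.  called once at startup and again on
 * SIGWINCH; LINES/COLUMNS in the environment override the ioctl result.
 */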
static void
sigwinch_handler(int signo)
{
	char *p;

	win.ws_row = tigetnum("lines");
	win.ws_col = tigetnum("cols");

	nontty = ioctl(STDOUT_FILENO, TIOCGWINSZ, &win);
	if (nontty != 0) {
		nontty = !isatty(STDOUT_FILENO);
		win.ws_col = 65535;
		win.ws_row = 65535;
	}

	if ((p = getenv("LINES")) != NULL)
		win.ws_row = strtoul(p, NULL, 0);
	if ((p = getenv("COLUMNS")) != NULL)
		win.ws_col = strtoul(p, NULL, 0);

	do_redraw = true;
}

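/*
 * put the terminal into cbreak mode (no canonical input, no echo) and
 * remember the original settings so they can be restored on exit.
 */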
static void
tty_setup(void)
{
	struct termios termios;

	term = getenv("TERM");
	if (term != NULL)
		setupterm(term, 0, NULL);

	sigwinch_handler(0);

	if (tcgetattr(STDOUT_FILENO, &termios_save) == 0) {
		termios_saved = true;

		/* stty cbreak */
		termios = termios_save;
		termios.c_iflag |= BRKINT|IXON|IMAXBEL;
		termios.c_oflag |= OPOST;
		termios.c_lflag |= ISIG|IEXTEN;
		termios.c_lflag &= ~(ICANON|ECHO);
		tcsetattr(STDOUT_FILENO, TCSADRAIN, &termios);
	}
}

static void
tty_restore(void)
{
	if (termios_saved) {
		tcsetattr(STDOUT_FILENO, TCSADRAIN, &termios_save);
		termios_saved = false;
	}
}

static void
sigtstp_handler(int signo)
{
	tty_restore();

	signal(SIGWINCH, SIG_DFL);
	signal(SIGINT, SIG_DFL);
	signal(SIGQUIT, SIG_DFL);
	signal(SIGTERM, SIG_DFL);
	signal(SIGTSTP, SIG_DFL);
	kill(0, SIGTSTP);
	nshow = 0;
}

static void
sigalrm_handler(int signo)
{
	sigalrm = 1;
}

__dead static void
die(int signo)
{
	tty_restore();
	printf("\n");

	exit(EXIT_SUCCESS);
}

__dead static void
die_errc(int status, int code, const char *fmt, ...)
{
	va_list ap;

	tty_restore();

	va_start(ap, fmt);
	if (code == 0)
		verrx(status, fmt, ap);
	else
		verrc(status, code, fmt, ap);
	va_end(ap);
}

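/*
 * simple growable pointer array.  ptrarray_push() extends the backing
 * store in chunks of 1024 entries as needed.
 */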
static void
ptrarray_push(struct ptrarray *ptrarray, void *ptr)
{
	int error;

	if (ptrarray->pa_inuse >= ptrarray->pa_allocnum) {
		/* increase buffer */
		ptrarray->pa_allocnum += 1024;
		error = reallocarr(&ptrarray->pa_ptrs, ptrarray->pa_allocnum,
		    sizeof(*ptrarray->pa_ptrs));
		if (error != 0)
			die_errc(EXIT_FAILURE, error, "reallocarr failed");
	}
	ptrarray->pa_ptrs[ptrarray->pa_inuse++] = ptr;
}

static void
ptrarray_iterate(struct ptrarray *ptrarray, void (*ifunc)(void *))
{
	size_t i;

	for (i = 0; i < ptrarray->pa_inuse; i++) {
		(*ifunc)(ptrarray->pa_ptrs[i]);
	}
}

static void
ptrarray_clear(struct ptrarray *ptrarray)
{
	ptrarray->pa_inuse = 0;
}

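/*
 * rb_tree(3) comparators: samples are keyed by symbol address
 * (or by PID for userland samples).
 */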
static int
sample_compare_key(void *ctx, const void *n1, const void *keyp)
{
	const struct sample_elm *a1 = n1;
	const struct sample_elm *a2 = (const struct sample_elm *)keyp;
	return a1->addr - a2->addr;
}

static signed int
sample_compare_nodes(void *ctx, const void *n1, const void *n2)
{
	const struct sample_elm *a2 = n2;
	return sample_compare_key(ctx, n1, a2);
}

static const rb_tree_ops_t sample_ops = {
	.rbto_compare_nodes = sample_compare_nodes,
	.rbto_compare_key = sample_compare_key
};

static u_int
n_align(u_int n, u_int align)
{
	return (n + align - 1) / align * align;
}

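/*
 * allocate the per-CPU and per-event sample counters.  the size of a
 * sample_elm is computed here because it ends with a flexible array
 * of SAMPLE_MODE_NUM * ncpu counters.
 */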
static void
sample_init(void)
{
	const struct sample_elm *e;
	int l, mode, n;
	u_int size;
	char buf[16];

	size = sizeof(struct sample_elm) +
	    sizeof(e->num_cpu[0]) * SAMPLE_MODE_NUM * ncpu;
	sizeof_sample_elm = n_align(size, __alignof(struct sample_elm));

	sample_cpu_width = ecalloc(1, sizeof(*sample_cpu_width) * ncpu);
	for (n = 0; n < ncpu; n++) {
		sample_cpu_width[n] = 5;
		l = snprintf(buf, sizeof(buf), "CPU%d", n);
		if (sample_cpu_width[n] < (u_int)l)
			sample_cpu_width[n] = l;
	}

	for (mode = 0; mode < SAMPLE_MODE_NUM; mode++) {
		sample_n_kern_per_cpu[mode] = ecalloc(1,
		    sizeof(typeof(*sample_n_kern_per_cpu[mode])) * ncpu);
		sample_n_user_per_cpu[mode] = ecalloc(1,
		    sizeof(typeof(*sample_n_user_per_cpu[mode])) * ncpu);
		sample_n_per_event[mode] = ecalloc(1,
		    sizeof(typeof(*sample_n_per_event[mode])) * nevent);
		sample_n_per_event_cpu[mode] = ecalloc(1,
		    sizeof(typeof(*sample_n_per_event_cpu[mode])) *
		    nevent * ncpu);
	}
}

static void
sample_clear_instantaneous(void *arg)
{
	struct sample_elm *e = (void *)arg;

	e->num[SAMPLE_MODE_INSTANTANEOUS] = 0;
	memset(SAMPLE_ELM_NUM_CPU(e, SAMPLE_MODE_INSTANTANEOUS),
	    0, sizeof(e->num_cpu[0]) * ncpu);
}

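/*
 * clear the interval (instantaneous) statistics; if reset_accumulative
 * is true, also drop the accumulated samples and the symbol tree.
 */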
static void
sample_reset(bool reset_accumulative)
{
	int mode;

	for (mode = 0; mode < SAMPLE_MODE_NUM; mode++) {
		if (mode == SAMPLE_MODE_ACCUMULATIVE && !reset_accumulative)
			continue;

		sample_n_kern[mode] = 0;
		sample_n_user[mode] = 0;
		memset(sample_n_kern_per_cpu[mode], 0,
		    sizeof(typeof(*sample_n_kern_per_cpu[mode])) * ncpu);
		memset(sample_n_user_per_cpu[mode], 0,
		    sizeof(typeof(*sample_n_user_per_cpu[mode])) * ncpu);
		memset(sample_n_per_event[mode], 0,
		    sizeof(typeof(*sample_n_per_event[mode])) * nevent);
		memset(sample_n_per_event_cpu[mode], 0,
		    sizeof(typeof(*sample_n_per_event_cpu[mode])) *
		    nevent * ncpu);
	}

	if (reset_accumulative) {
		rb_tree_init(&rb_tree_sample, &sample_ops);
		ptrarray_iterate(&sample_list[SAMPLE_MODE_ACCUMULATIVE], free);
		ptrarray_clear(&sample_list[SAMPLE_MODE_ACCUMULATIVE]);
		ptrarray_clear(&sample_list[SAMPLE_MODE_INSTANTANEOUS]);
	} else {
		ptrarray_iterate(&sample_list[SAMPLE_MODE_INSTANTANEOUS],
		    sample_clear_instantaneous);
		ptrarray_clear(&sample_list[SAMPLE_MODE_INSTANTANEOUS]);
	}
}

static int __unused
sample_sortfunc_accumulative(const void *a, const void *b)
{
	struct sample_elm * const *ea = a;
	struct sample_elm * const *eb = b;
	return (*eb)->num[SAMPLE_MODE_ACCUMULATIVE] -
	    (*ea)->num[SAMPLE_MODE_ACCUMULATIVE];
}

static int
sample_sortfunc_instantaneous(const void *a, const void *b)
{
	struct sample_elm * const *ea = a;
	struct sample_elm * const *eb = b;
	return (*eb)->num[SAMPLE_MODE_INSTANTANEOUS] -
	    (*ea)->num[SAMPLE_MODE_INSTANTANEOUS];
}

static void
sample_sort_accumulative(void)
{
	qsort(sample_list[SAMPLE_MODE_ACCUMULATIVE].pa_ptrs,
	    sample_list[SAMPLE_MODE_ACCUMULATIVE].pa_inuse,
	    sizeof(struct sample_elm *), sample_sortfunc_accumulative);
}

static void
sample_sort_instantaneous(void)
{
	qsort(sample_list[SAMPLE_MODE_INSTANTANEOUS].pa_ptrs,
	    sample_list[SAMPLE_MODE_INSTANTANEOUS].pa_inuse,
	    sizeof(struct sample_elm *), sample_sortfunc_instantaneous);
}

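/*
 * account one sample from the tprof buffer: update the per-event and
 * per-CPU totals and the per-symbol counters, creating a new
 * sample_elm in the red-black tree for a symbol seen for the first time.
 */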
static void
sample_collect(tprof_sample_t *s)
{
	struct sample_elm *e, *o;
	const char *name;
	size_t symid;
	uint64_t addr, offset;
	uint32_t flags = 0;
	uint32_t eventid, cpuid;
	int mode;

	eventid = __SHIFTOUT(s->s_flags, TPROF_SAMPLE_COUNTER_MASK);
	cpuid = s->s_cpuid;

	if (eventid >= nevent)	/* unknown event from tprof? */
		return;

	for (mode = 0; mode < SAMPLE_MODE_NUM; mode++) {
		sample_n_per_event[mode][eventid]++;
		sample_n_per_event_cpu[mode][nevent * cpuid + eventid]++;
	}

	if ((s->s_flags & TPROF_SAMPLE_INKERNEL) == 0) {
		sample_n_user[SAMPLE_MODE_ACCUMULATIVE]++;
		sample_n_user[SAMPLE_MODE_INSTANTANEOUS]++;
		sample_n_user_per_cpu[SAMPLE_MODE_ACCUMULATIVE][cpuid]++;
		sample_n_user_per_cpu[SAMPLE_MODE_INSTANTANEOUS][cpuid]++;

		name = NULL;
		addr = s->s_pid;	/* XXX */
		flags |= SAMPLE_ELM_FLAGS_USER;

		if (!opt_userland)
			return;
	} else {
		sample_n_kern[SAMPLE_MODE_ACCUMULATIVE]++;
		sample_n_kern[SAMPLE_MODE_INSTANTANEOUS]++;
		sample_n_kern_per_cpu[SAMPLE_MODE_ACCUMULATIVE][cpuid]++;
		sample_n_kern_per_cpu[SAMPLE_MODE_INSTANTANEOUS][cpuid]++;

		name = ksymlookup(s->s_pc, &offset, &symid);
		if (name != NULL) {
			addr = ksyms[symid]->value;
		} else {
			addr = s->s_pc;
		}
	}

	e = ecalloc(1, sizeof_sample_elm);
	e->addr = addr;
	e->name = name;
	e->flags = flags;
	e->num[SAMPLE_MODE_ACCUMULATIVE] = 1;
	e->num[SAMPLE_MODE_INSTANTANEOUS] = 1;
	SAMPLE_ELM_NUM_CPU(e, SAMPLE_MODE_ACCUMULATIVE)[cpuid] = 1;
	SAMPLE_ELM_NUM_CPU(e, SAMPLE_MODE_INSTANTANEOUS)[cpuid] = 1;
	o = rb_tree_insert_node(&rb_tree_sample, e);
	if (o == e) {
		/* new symbol. add to list for sort */
		ptrarray_push(&sample_list[SAMPLE_MODE_ACCUMULATIVE], o);
		ptrarray_push(&sample_list[SAMPLE_MODE_INSTANTANEOUS], o);
	} else {
		/* already exists */
		free(e);

		o->num[SAMPLE_MODE_ACCUMULATIVE]++;
		if (o->num[SAMPLE_MODE_INSTANTANEOUS]++ == 0) {
			/* new instantaneous symbol. add to list for sort */
			ptrarray_push(&sample_list[SAMPLE_MODE_INSTANTANEOUS],
			    o);
		}
		SAMPLE_ELM_NUM_CPU(o, SAMPLE_MODE_ACCUMULATIVE)[cpuid]++;
		SAMPLE_ELM_NUM_CPU(o, SAMPLE_MODE_INSTANTANEOUS)[cpuid]++;
	}
}

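/*
 * print the tprof driver statistics (TPROF_IOC_GETSTAT), showing the
 * delta from the previous interval in parentheses.
 */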
static void
show_tprof_stat(int *lim)
{
	static struct tprof_stat tsbuf[2], *ts0, *ts;
	static u_int ts_i = 0;
	static int tprofstat_width[6];
	int ret, l;
	char tmpbuf[128];

	ts0 = &tsbuf[ts_i++ & 1];
	ts = &tsbuf[ts_i & 1];
	ret = ioctl(devfd, TPROF_IOC_GETSTAT, ts);
	if (ret == -1)
		die_errc(EXIT_FAILURE, errno, "TPROF_IOC_GETSTAT");

#define TS_PRINT(idx, label, _m)					\
	do {								\
		__CTASSERT(idx < __arraycount(tprofstat_width));	\
		lim_printf(lim, "%s", label);				\
		l = snprintf(tmpbuf, sizeof(tmpbuf), "%"PRIu64, ts->_m);\
		if (ts->_m != ts0->_m)					\
			l += snprintf(tmpbuf + l, sizeof(tmpbuf) - l,	\
			    "(+%"PRIu64")", ts->_m - ts0->_m);		\
		assert(l < (int)sizeof(tmpbuf));			\
		if (tprofstat_width[idx] < l)				\
			tprofstat_width[idx] = l;			\
		lim_printf(lim, "%-*.*s ", tprofstat_width[idx],	\
		    tprofstat_width[idx], tmpbuf);			\
	} while (0)
	lim_printf(lim, "tprof ");
	TS_PRINT(0, "sample:", ts_sample);
	TS_PRINT(1, "overflow:", ts_overflow);
	TS_PRINT(2, "buf:", ts_buf);
	TS_PRINT(3, "emptybuf:", ts_emptybuf);
	TS_PRINT(4, "dropbuf:", ts_dropbuf);
	TS_PRINT(5, "dropbuf_sample:", ts_dropbuf_sample);
}

static void
show_timestamp(void)
{
	struct timeval tv;
	gettimeofday(&tv, NULL);
	printf("%-8.8s", &(ctime((time_t *)&tv.tv_sec)[11]));
}

static void
show_counters_alloc(void)
{
	size_t sz = 2 * ncpu * nevent * sizeof(*counters);
	counters = ecalloc(1, sz);
}

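/*
 * show the raw event counters (TPROF_IOC_GETCOUNTS) per CPU, as a
 * delta from the previous interval.  enabled by -c or the 'c' key.
 */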
static void
show_counters(int *lim)
{
	tprof_counts_t countsbuf;
	uint64_t *cn[2], *c0, *c;
	u_int i;
	int n, ret;

	cn[0] = counters;
	cn[1] = counters + ncpu * nevent;
	c0 = cn[counters_i++ & 1];
	c = cn[counters_i & 1];

	for (n = 0; n < ncpu; n++) {
		countsbuf.c_cpu = n;
		ret = ioctl(devfd, TPROF_IOC_GETCOUNTS, &countsbuf);
		if (ret == -1)
			die_errc(EXIT_FAILURE, errno, "TPROF_IOC_GETCOUNTS");

		for (i = 0; i < nevent; i++)
			c[n * nevent + i] = countsbuf.c_count[i];
	}

	if (do_redraw) {
		lim_printf(lim, "%-22s", "Event counter (delta)");
		for (n = 0; n < ncpu; n++) {
			char cpuname[16];
			snprintf(cpuname, sizeof(cpuname), "CPU%u", n);
			lim_printf(lim, "%11s", cpuname);
		}
		lim_newline(lim);
	} else {
		printf("\n");
	}

	for (i = 0; i < nevent; i++) {
		lim_printf(lim, "%-22.22s", eventname[i]);
		for (n = 0; n < ncpu; n++) {
			lim_printf(lim, "%11"PRIu64,
			    c[n * nevent + i] - c0[n * nevent + i]);
		}
		lim_newline(lim);
	}
	lim_newline(lim);
}

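/*
 * per-event sample summary: percentage of all samples, total sample
 * count and per-CPU sample counts for each configured event.
 */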
static void
show_count_per_event(int *lim)
{
	u_int i, nsample_total;
	int n, l;
	char buf[32];

	nsample_total = sample_n_kern[opt_mode] + sample_n_user[opt_mode];
	if (nsample_total == 0)
		nsample_total = 1;

	/* calc width in advance */
	for (i = 0; i < nevent; i++) {
		l = snprintf(buf, sizeof(buf), "%"PRIu64,
		    sample_n_per_event[opt_mode][i]);
		if (sample_event_width < (u_int)l) {
			sample_event_width = l;
			do_redraw = true;
		}
	}
	for (n = 0; n < ncpu; n++) {
		uint64_t sum = 0;
		for (i = 0; i < nevent; i++)
			sum += sample_n_per_event_cpu[opt_mode][nevent * n + i];
		l = snprintf(buf, sizeof(buf), "%"PRIu64, sum);
		if (sample_cpu_width[n] < (u_int)l) {
			sample_cpu_width[n] = l;
			do_redraw = true;
		}
	}

	if (do_redraw) {
		lim_printf(lim, "  Rate %*s %-*s",
		    sample_event_width, "Sample#",
		    SYMBOL_LEN, "Eventname");
		for (n = 0; n < ncpu; n++) {
			snprintf(buf, sizeof(buf), "CPU%d", n);
			lim_printf(lim, " %*s", sample_cpu_width[n], buf);
		}
		lim_newline(lim);

		lim_printf(lim, "------ %*.*s %*.*s",
		    sample_event_width, sample_event_width, LINESTR,
		    SYMBOL_LEN, SYMBOL_LEN, LINESTR);
		for (n = 0; n < ncpu; n++) {
			lim_printf(lim, " %*.*s",
			    sample_cpu_width[n], sample_cpu_width[n], LINESTR);
		}
		lim_newline(lim);
	} else {
		printf("\n\n");
	}

	for (i = 0; i < nevent; i++) {
		if (sample_n_per_event[opt_mode][i] >= nsample_total) {
			lim_printf(lim, "%5.1f%%", 100.0 *
			    sample_n_per_event[opt_mode][i] / nsample_total);
		} else {
			lim_printf(lim, "%5.2f%%", 100.0 *
			    sample_n_per_event[opt_mode][i] / nsample_total);
		}
		lim_printf(lim, " %*"PRIu64" ", sample_event_width,
		    sample_n_per_event[opt_mode][i]);

		lim_printf(lim, "%-32.32s", eventname[i]);
		for (n = 0; n < ncpu; n++) {
			lim_printf(lim, " %*"PRIu64, sample_cpu_width[n],
			    sample_n_per_event_cpu[opt_mode][nevent * n + i]);
		}
		lim_newline(lim);
	}
}

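/*
 * redraw the whole screen: status line, optional raw counters,
 * per-event summary, and the per-symbol sample table sorted by the
 * current mode, followed by the kernel/userland totals.
 */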
static void
sample_show(void)
{
	struct sample_elm *e;
	struct ptrarray *samples;
	u_int nsample_total;
	int i, l, lim, n, ndisp;
	char namebuf[32];
	const char *name;

	if (nshow++ == 0) {
		printf("\n");
		if (!nontty) {
			signal(SIGWINCH, sigwinch_handler);
			signal(SIGINT, die);
			signal(SIGQUIT, die);
			signal(SIGTERM, die);
			signal(SIGTSTP, sigtstp_handler);

			tty_setup();
		}
	} else {
		reset_cursor_pos();
	}

	int margin_lines = 7;

	margin_lines += 3 + nevent;	/* show_count_per_event() */

	if (opt_mode == SAMPLE_MODE_INSTANTANEOUS)
		sample_sort_instantaneous();
	else
		sample_sort_accumulative();
	samples = &sample_list[opt_mode];

	if (opt_showcounter)
		margin_lines += 2 + nevent;
	if (opt_userland)
		margin_lines += 1;

	ndisp = samples->pa_inuse;
	if (!nontty && ndisp > (win.ws_row - margin_lines))
		ndisp = win.ws_row - margin_lines;

	lim = win.ws_col;
	if (opt_mode == SAMPLE_MODE_ACCUMULATIVE)
		lim_printf(&lim, "[Accumulative mode] ");
	show_tprof_stat(&lim);

	if (lim >= 16) {
		l = win.ws_col - lim;
		if (!nontty) {
			clr_to_eol();
			for (; l <= win.ws_col - 17; l = ((l + 8) & -8))
				printf("\t");
		}
		show_timestamp();
	}
	lim_newline(&lim);
	lim_newline(&lim);

	if (opt_showcounter)
		show_counters(&lim);

	show_count_per_event(&lim);
	lim_newline(&lim);

	if (do_redraw) {
		lim_printf(&lim, "  Rate %*s %-*s",
		    sample_event_width, "Sample#",
		    SYMBOL_LEN, "Symbol");
		for (n = 0; n < ncpu; n++) {
			snprintf(namebuf, sizeof(namebuf), "CPU%d", n);
			lim_printf(&lim, " %*s", sample_cpu_width[n], namebuf);
		}
		lim_newline(&lim);

		lim_printf(&lim, "------ %*.*s %*.*s",
		    sample_event_width, sample_event_width, LINESTR,
		    SYMBOL_LEN, SYMBOL_LEN, LINESTR);
		for (n = 0; n < ncpu; n++) {
			lim_printf(&lim, " %*.*s", sample_cpu_width[n],
			    sample_cpu_width[n], LINESTR);
		}
		lim_newline(&lim);
	} else {
		printf("\n\n");
	}

	for (i = 0; i < ndisp; i++) {
		e = (struct sample_elm *)samples->pa_ptrs[i];
		name = e->name;
		if (name == NULL) {
			if (e->flags & SAMPLE_ELM_FLAGS_USER) {
				snprintf(namebuf, sizeof(namebuf),
				    "<PID:%"PRIu64">", e->addr);
			} else {
				snprintf(namebuf, sizeof(namebuf),
				    "0x%016"PRIx64, e->addr);
			}
			name = namebuf;
		}

		nsample_total = sample_n_kern[opt_mode];
		if (opt_userland)
			nsample_total += sample_n_user[opt_mode];
		/*
		 * even when only kernel mode events are configured,
		 * interrupts may still occur in the user mode state.
		 */
		if (nsample_total == 0)
			nsample_total = 1;

		if (e->num[opt_mode] >= nsample_total) {
			lim_printf(&lim, "%5.1f%%", 100.0 *
			    e->num[opt_mode] / nsample_total);
		} else {
			lim_printf(&lim, "%5.2f%%", 100.0 *
			    e->num[opt_mode] / nsample_total);
		}
		lim_printf(&lim, " %*u %-32.32s", sample_event_width,
		    e->num[opt_mode], name);

		for (n = 0; n < ncpu; n++) {
			if (SAMPLE_ELM_NUM_CPU(e, opt_mode)[n] == 0) {
				lim_printf(&lim, " %*s", sample_cpu_width[n],
				    ".");
			} else {
				lim_printf(&lim, " %*u", sample_cpu_width[n],
				    SAMPLE_ELM_NUM_CPU(e, opt_mode)[n]);
			}
		}
		lim_newline(&lim);
	}

	if ((u_int)ndisp != samples->pa_inuse) {
		lim_printf(&lim, "     : %*s (more %zu symbols omitted)",
		    sample_event_width, ":", samples->pa_inuse - ndisp);
		lim_newline(&lim);
	} else if (!nontty) {
		for (i = ndisp; i <= win.ws_row - margin_lines; i++) {
			printf("~");
			lim_newline(&lim);
		}
	}

	if (do_redraw) {
		lim_printf(&lim, "------ %*.*s %*.*s",
		    sample_event_width, sample_event_width, LINESTR,
		    SYMBOL_LEN, SYMBOL_LEN, LINESTR);
		for (n = 0; n < ncpu; n++) {
			lim_printf(&lim, " %*.*s",
			    sample_cpu_width[n], sample_cpu_width[n], LINESTR);
		}
		lim_newline(&lim);
	} else {
		printf("\n");
	}

	lim_printf(&lim, "Total %*u %-32.32s",
	    sample_event_width, sample_n_kern[opt_mode], "in-kernel");
	for (n = 0; n < ncpu; n++) {
		lim_printf(&lim, " %*u", sample_cpu_width[n],
		    sample_n_kern_per_cpu[opt_mode][n]);
	}

	if (opt_userland) {
		lim_newline(&lim);
		lim_printf(&lim, "      %*u %-32.32s",
		    sample_event_width, sample_n_user[opt_mode], "userland");
		for (n = 0; n < ncpu; n++) {
			lim_printf(&lim, " %*u", sample_cpu_width[n],
			    sample_n_user_per_cpu[opt_mode][n]);
		}
	}

	if (nontty)
		printf("\n");
	else
		clr_to_eol();
}

__dead static void
tprof_top_usage(void)
{
	fprintf(stderr, "%s top [-acu] [-e name[,scale] [-e ...]]"
	    " [-i interval]\n", getprogname());
	exit(EXIT_FAILURE);
}

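/*
 * entry point of "tprof top": parse options, configure and start the
 * counters, then loop reading samples from the tprof device and
 * keyboard commands (a, c, z, q, ^L), redrawing once per interval,
 * until 'q' is pressed or a terminating signal arrives.
 */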
__dead void
tprof_top(int argc, char **argv)
{
	tprof_param_t params[TPROF_MAXCOUNTERS];
	struct itimerval it;
	ssize_t tprof_bufsize, len;
	u_int i;
	int ch, ret;
	char *tprof_buf, *p, *errmsg;
	bool noinput = false;

	memset(params, 0, sizeof(params));
	nevent = 0;

	while ((ch = getopt(argc, argv, "ace:i:L:u")) != -1) {
		switch (ch) {
		case 'a':
			opt_mode = SAMPLE_MODE_ACCUMULATIVE;
			break;
		case 'c':
			opt_showcounter = 1;
			break;
		case 'e':
			if (tprof_parse_event(&params[nevent], optarg,
			    TPROF_PARSE_EVENT_F_ALLOWSCALE,
			    &eventname[nevent], &errmsg) != 0) {
				die_errc(EXIT_FAILURE, 0, "%s", errmsg);
			}
			nevent++;
			if (nevent > __arraycount(params) ||
			    nevent > ncounters)
				die_errc(EXIT_FAILURE, 0,
				    "Too many events. Only a maximum of %d "
				    "counters can be used.", ncounters);
			break;
		case 'i':
			top_interval = strtol(optarg, &p, 10);
			if (*p != '\0' || top_interval <= 0)
				die_errc(EXIT_FAILURE, 0,
				    "Bad/invalid interval: %s", optarg);
			break;
		case 'u':
			opt_userland = 1;
			break;
		default:
			tprof_top_usage();
		}
	}
	argc -= optind;
	argv += optind;

	if (argc != 0)
		tprof_top_usage();

	if (nevent == 0) {
		const char *defaultevent = tprof_cycle_event_name();
		if (defaultevent == NULL)
			die_errc(EXIT_FAILURE, 0, "cpu not supported");

		tprof_event_lookup(defaultevent, &params[nevent]);
		eventname[nevent] = defaultevent;
		nevent++;
	}

	sample_init();
	show_counters_alloc();

	for (i = 0; i < nevent; i++) {
		params[i].p_counter = i;
		params[i].p_flags |= TPROF_PARAM_KERN | TPROF_PARAM_PROFILE;
		if (opt_userland)
			params[i].p_flags |= TPROF_PARAM_USER;
		ret = ioctl(devfd, TPROF_IOC_CONFIGURE_EVENT, &params[i]);
		if (ret == -1)
			die_errc(EXIT_FAILURE, errno,
			    "TPROF_IOC_CONFIGURE_EVENT: %s", eventname[i]);
	}

	tprof_countermask_t mask = TPROF_COUNTERMASK_ALL;
	ret = ioctl(devfd, TPROF_IOC_START, &mask);
	if (ret == -1)
		die_errc(EXIT_FAILURE, errno, "TPROF_IOC_START");

	ksyms = ksymload(&nksyms);

	signal(SIGALRM, sigalrm_handler);

	it.it_interval.tv_sec = it.it_value.tv_sec = top_interval;
	it.it_interval.tv_usec = it.it_value.tv_usec = 0;
	setitimer(ITIMER_REAL, &it, NULL);

	sample_reset(true);
	printf("collecting samples...");
	fflush(stdout);

	tprof_bufsize = sizeof(tprof_sample_t) * 1024 * 32;
	tprof_buf = emalloc(tprof_bufsize);
	do {
		bool force_update = false;

		while (sigalrm == 0 && !force_update) {
			fd_set r;
			int nfound;
			char c;

			FD_ZERO(&r);
			if (!noinput)
				FD_SET(STDIN_FILENO, &r);
			FD_SET(devfd, &r);
			nfound = select(devfd + 1, &r, NULL, NULL, NULL);
			if (nfound == -1) {
				if (errno == EINTR)
					break;
				die_errc(EXIT_FAILURE, errno, "select");
			}

			if (FD_ISSET(STDIN_FILENO, &r)) {
				len = read(STDIN_FILENO, &c, 1);
				if (len <= 0) {
					noinput = true;
					continue;
				}
				switch (c) {
				case 0x0c:	/* ^L */
					do_redraw = true;
					break;
				case 'a':
					/* toggle mode */
					opt_mode = (opt_mode + 1) %
					    SAMPLE_MODE_NUM;
					do_redraw = true;
					break;
				case 'c':
					/* toggle mode */
					opt_showcounter ^= 1;
					do_redraw = true;
					break;
				case 'q':
					goto done;
				case 'z':
					sample_reset(true);
					break;
				default:
					continue;
				}
				force_update = true;
			}

			if (FD_ISSET(devfd, &r)) {
				len = read(devfd, tprof_buf, tprof_bufsize);
				if (len == -1 && errno != EINTR)
					die_errc(EXIT_FAILURE, errno, "read");
				if (len > 0) {
					tprof_sample_t *s =
					    (tprof_sample_t *)tprof_buf;
					while (s <
					    (tprof_sample_t *)(tprof_buf + len))
						sample_collect(s++);
				}
			}
		}
		sigalrm = 0;

		/* update screen */
		sample_show();
		fflush(stdout);
		do_redraw = false;
		if (force_update)
			continue;

		sample_reset(false);

	} while (!nontty);

 done:
	die(0);
}