/*	$NetBSD: lockstat.c,v 1.25.10.1 2020/04/08 14:08:02 martin Exp $	*/

/*-
 * Copyright (c) 2006, 2007, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Lock statistics driver, providing kernel support for the lockstat(8)
 * command.
 *
 * We use a global lock word (lockstat_lock) to track device opens.
 * Only one thread can hold the device at a time, providing a global lock.
 *
 * XXX Timings for contention on sleep locks are currently incorrect.
 * XXX Convert this to use timecounters!
 */
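
/*
 * Rough sketch of the user-space sequence this driver expects (a
 * hypothetical example built from the names used in this file and in
 * <dev/lockstat.h>; error handling omitted -- lockstat(8) is the real
 * consumer):
 *
 *	int fd = open("/dev/lockstat", O_RDONLY);
 *	int version;
 *	ioctl(fd, IOC_LOCKSTAT_GVERSION, &version);	// expect LS_VERSION
 *	lsenable_t le = { .le_nbufs = 0 };		// 0 => default count
 *	le.le_mask = ...;				// LB_* event selection
 *	ioctl(fd, IOC_LOCKSTAT_ENABLE, &le);
 *	// ... run the workload to be measured ...
 *	lsdisable_t ld;
 *	ioctl(fd, IOC_LOCKSTAT_DISABLE, &ld);
 *	read(fd, buf, ld.ld_size);			// fetch lsbuf_t records
 *	close(fd);
 */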

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lockstat.c,v 1.25.10.1 2020/04/08 14:08:02 martin Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/cpu.h>
#include <sys/syslog.h>
#include <sys/atomic.h>

#include <dev/lockstat.h>

#include <machine/lock.h>

#include "ioconf.h"

#ifndef __HAVE_CPU_COUNTER
#error CPU counters not available
#endif

#if LONG_BIT == 64
#define	LOCKSTAT_HASH_SHIFT	3
#elif LONG_BIT == 32
#define	LOCKSTAT_HASH_SHIFT	2
#endif

#define	LOCKSTAT_MINBUFS	1000
#define	LOCKSTAT_DEFBUFS	20000
#define	LOCKSTAT_MAXBUFS	1000000

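/*
 * Active buffers are hashed per-CPU on (lock ^ callsite).  The shift
 * above drops the low-order address bits that are always zero due to
 * alignment (8-byte objects on 64-bit, 4-byte on 32-bit), so they do
 * not rob the hash of entropy.
 */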
#define	LOCKSTAT_HASH_SIZE	128
#define	LOCKSTAT_HASH_MASK	(LOCKSTAT_HASH_SIZE - 1)
#define	LOCKSTAT_HASH(key)	\
	((key >> LOCKSTAT_HASH_SHIFT) & LOCKSTAT_HASH_MASK)

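/*
 * Per-CPU state: a free list of unclaimed buffers, a count of events
 * dropped once the free list emptied, and hash chains of the buffers
 * currently accumulating counts on this CPU.
 */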
typedef struct lscpu {
	SLIST_HEAD(, lsbuf) lc_free;
	u_int lc_overflow;
	LIST_HEAD(lslist, lsbuf) lc_hash[LOCKSTAT_HASH_SIZE];
} lscpu_t;

typedef struct lslist lslist_t;

void	lockstat_start(lsenable_t *);
int	lockstat_alloc(lsenable_t *);
void	lockstat_init_tables(lsenable_t *);
int	lockstat_stop(lsdisable_t *);
void	lockstat_free(void);

dev_type_open(lockstat_open);
dev_type_close(lockstat_close);
dev_type_read(lockstat_read);
dev_type_ioctl(lockstat_ioctl);

volatile u_int	lockstat_enabled;
volatile u_int	lockstat_dev_enabled;
uintptr_t	lockstat_csstart;
uintptr_t	lockstat_csend;
uintptr_t	lockstat_csmask;
uintptr_t	lockstat_lamask;
uintptr_t	lockstat_lockstart;
uintptr_t	lockstat_lockend;
__cpu_simple_lock_t lockstat_lock;
lwp_t		*lockstat_lwp;
lsbuf_t		*lockstat_baseb;
size_t		lockstat_sizeb;
int		lockstat_busy;
struct timespec	lockstat_stime;

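/*
 * DTrace glue.  lockstat_probemap[] maps compressed LB_* flag values to
 * DTrace probe IDs, and lockstat_probe_func points at a no-op stub until
 * (presumably) the dtrace lockstat provider hooks itself in.  The
 * CTASSERTs check that the LB_* numbering still fits the LS_COMPRESS()
 * encoding used to index the map.
 */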
#ifdef KDTRACE_HOOKS
volatile u_int lockstat_dtrace_enabled;
CTASSERT(LB_NEVENT <= 3);
CTASSERT(LB_NLOCK <= (7 << LB_LOCK_SHIFT));
void
lockstat_probe_stub(uint32_t id, uintptr_t lock, uintptr_t callsite,
    uintptr_t flags, uintptr_t count, uintptr_t cycles)
{
}

uint32_t	lockstat_probemap[LS_NPROBES];
void		(*lockstat_probe_func)(uint32_t, uintptr_t, uintptr_t,
		    uintptr_t, uintptr_t, uintptr_t) = &lockstat_probe_stub;
#endif

const struct cdevsw lockstat_cdevsw = {
	.d_open = lockstat_open,
	.d_close = lockstat_close,
	.d_read = lockstat_read,
	.d_write = nowrite,
	.d_ioctl = lockstat_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};

/*
 * Called when the pseudo-driver is attached.
 */
void
lockstatattach(int nunits)
{

	(void)nunits;

	__cpu_simple_lock_init(&lockstat_lock);
}

/*
 * Prepare the per-CPU tables for use, or clear down tables when tracing is
 * stopped.
 */
void
lockstat_init_tables(lsenable_t *le)
{
	int i, per, slop, cpuno;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	lscpu_t *lc;
	lsbuf_t *lb;

	/* coverity[assert_side_effect] */
	KASSERT(!lockstat_dev_enabled);

	for (CPU_INFO_FOREACH(cii, ci)) {
		if (ci->ci_lockstat != NULL) {
			kmem_free(ci->ci_lockstat, sizeof(lscpu_t));
			ci->ci_lockstat = NULL;
		}
	}

	if (le == NULL)
		return;

	lb = lockstat_baseb;
	per = le->le_nbufs / ncpu;
	slop = le->le_nbufs - (per * ncpu);
	cpuno = 0;
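	/*
	 * Carve the preallocated arena into per-CPU free lists: an even
	 * share per CPU, with the remaining "slop" buffers handed out one
	 * extra at a time until they run out.
	 */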
	for (CPU_INFO_FOREACH(cii, ci)) {
		lc = kmem_alloc(sizeof(*lc), KM_SLEEP);
		lc->lc_overflow = 0;
		ci->ci_lockstat = lc;

		SLIST_INIT(&lc->lc_free);
		for (i = 0; i < LOCKSTAT_HASH_SIZE; i++)
			LIST_INIT(&lc->lc_hash[i]);

		for (i = per; i != 0; i--, lb++) {
			lb->lb_cpu = (uint16_t)cpuno;
			SLIST_INSERT_HEAD(&lc->lc_free, lb, lb_chain.slist);
		}
		if (--slop > 0) {
			lb->lb_cpu = (uint16_t)cpuno;
			SLIST_INSERT_HEAD(&lc->lc_free, lb, lb_chain.slist);
			lb++;
		}
		cpuno++;
	}
}

/*
 * Start collecting lock statistics.
 */
void
lockstat_start(lsenable_t *le)
{

	/* coverity[assert_side_effect] */
	KASSERT(!lockstat_dev_enabled);

	lockstat_init_tables(le);

	if ((le->le_flags & LE_CALLSITE) != 0)
		lockstat_csmask = (uintptr_t)-1LL;
	else
		lockstat_csmask = 0;

	if ((le->le_flags & LE_LOCK) != 0)
		lockstat_lamask = (uintptr_t)-1LL;
	else
		lockstat_lamask = 0;

	lockstat_csstart = le->le_csstart;
	lockstat_csend = le->le_csend;
	lockstat_lockstart = le->le_lockstart;
	lockstat_lockend = le->le_lockend;
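	/*
	 * Publish the filter settings before any CPU can observe a
	 * non-zero lockstat_dev_enabled and start logging events.
	 */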
	membar_sync();
	getnanotime(&lockstat_stime);
	lockstat_dev_enabled = le->le_mask;
	LOCKSTAT_ENABLED_UPDATE();
}

/*
 * Stop collecting lock statistics.
 */
int
lockstat_stop(lsdisable_t *ld)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	u_int cpuno, overflow;
	struct timespec ts;
	int error;
	lwp_t *l;

	/* coverity[assert_side_effect] */
	KASSERT(lockstat_dev_enabled);

	/*
	 * Set enabled false, force a write barrier, and wait for other CPUs
	 * to exit lockstat_event().
	 */
	lockstat_dev_enabled = 0;
	LOCKSTAT_ENABLED_UPDATE();
	getnanotime(&ts);
	tsleep(&lockstat_stop, PPAUSE, "lockstat", mstohz(10));

	/*
	 * Did we run out of buffers while tracing?
	 */
	overflow = 0;
	for (CPU_INFO_FOREACH(cii, ci))
		overflow += ((lscpu_t *)ci->ci_lockstat)->lc_overflow;

	if (overflow != 0) {
		error = EOVERFLOW;
		log(LOG_NOTICE, "lockstat: %d buffer allocations failed\n",
		    overflow);
	} else
		error = 0;

	lockstat_init_tables(NULL);

	/* Run through all LWPs and clear the slate for the next run. */
	mutex_enter(proc_lock);
	LIST_FOREACH(l, &alllwp, l_list) {
		l->l_pfailaddr = 0;
		l->l_pfailtime = 0;
		l->l_pfaillock = 0;
	}
	mutex_exit(proc_lock);

	if (ld == NULL)
		return error;

	/*
	 * Fill out the disable struct for the caller.
	 */
	timespecsub(&ts, &lockstat_stime, &ld->ld_time);
	ld->ld_size = lockstat_sizeb;

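	/*
	 * Record each CPU's cycle counter frequency so that user space
	 * can convert the accumulated cycle counts back into time.
	 */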
	cpuno = 0;
	for (CPU_INFO_FOREACH(cii, ci)) {
		if (cpuno >= sizeof(ld->ld_freq) / sizeof(ld->ld_freq[0])) {
			log(LOG_WARNING, "lockstat: too many CPUs\n");
			break;
		}
		ld->ld_freq[cpuno++] = cpu_frequency(ci);
	}

	return error;
}

/*
 * Allocate buffers for lockstat_start().
 */
int
lockstat_alloc(lsenable_t *le)
{
	lsbuf_t *lb;
	size_t sz;

	/* coverity[assert_side_effect] */
	KASSERT(!lockstat_dev_enabled);
	lockstat_free();

	sz = sizeof(*lb) * le->le_nbufs;

	lb = kmem_zalloc(sz, KM_SLEEP);

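	/*
	 * kmem_zalloc() may have slept; re-check that tracing was not
	 * enabled in the meantime before installing the new arena.
	 */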
	/* coverity[assert_side_effect] */
	KASSERT(!lockstat_dev_enabled);
	KASSERT(lockstat_baseb == NULL);
	lockstat_sizeb = sz;
	lockstat_baseb = lb;

	return (0);
}

/*
 * Free allocated buffers after tracing has stopped.
 */
void
lockstat_free(void)
{

	/* coverity[assert_side_effect] */
	KASSERT(!lockstat_dev_enabled);

	if (lockstat_baseb != NULL) {
		kmem_free(lockstat_baseb, lockstat_sizeb);
		lockstat_baseb = NULL;
	}
}

/*
 * Main entry point from lock primitives.
 */
void
lockstat_event(uintptr_t lock, uintptr_t callsite, u_int flags, u_int count,
    uint64_t cycles)
{
	lslist_t *ll;
	lscpu_t *lc;
	lsbuf_t *lb;
	u_int event;
	int s;

#ifdef KDTRACE_HOOKS
	uint32_t id;
	CTASSERT((LS_NPROBES & (LS_NPROBES - 1)) == 0);
	if ((id = lockstat_probemap[LS_COMPRESS(flags)]) != 0)
		(*lockstat_probe_func)(id, lock, callsite, flags, count,
		    cycles);
#endif

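	/*
	 * Discard events that were not selected by the enable mask, or
	 * whose lock or call site falls outside the configured windows.
	 */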
	if ((flags & lockstat_dev_enabled) != flags || count == 0)
		return;
	if (lock < lockstat_lockstart || lock > lockstat_lockend)
		return;
	if (callsite < lockstat_csstart || callsite > lockstat_csend)
		return;

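	/*
	 * The masks are all-ones or all-zero: if the user did not ask for
	 * a per-callsite or per-lock breakdown, the corresponding key is
	 * zeroed and events aggregate into a single bucket.
	 */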
	callsite &= lockstat_csmask;
	lock &= lockstat_lamask;

	/*
	 * Find the table for this lock+callsite pair, and try to locate a
	 * buffer with the same key.
	 */
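	/* Block interrupts: lock events can fire from interrupt context. */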
	s = splhigh();
	lc = curcpu()->ci_lockstat;
	ll = &lc->lc_hash[LOCKSTAT_HASH(lock ^ callsite)];
	event = (flags & LB_EVENT_MASK) - 1;

	LIST_FOREACH(lb, ll, lb_chain.list) {
		if (lb->lb_lock == lock && lb->lb_callsite == callsite)
			break;
	}

	if (lb != NULL) {
		/*
		 * We found a record.  Move it to the front of the list, as
		 * we're likely to hit it again soon.
		 */
		if (lb != LIST_FIRST(ll)) {
			LIST_REMOVE(lb, lb_chain.list);
			LIST_INSERT_HEAD(ll, lb, lb_chain.list);
		}
		lb->lb_counts[event] += count;
		lb->lb_times[event] += cycles;
	} else if ((lb = SLIST_FIRST(&lc->lc_free)) != NULL) {
		/*
		 * Pinch a new buffer and fill it out.
		 */
		SLIST_REMOVE_HEAD(&lc->lc_free, lb_chain.slist);
		LIST_INSERT_HEAD(ll, lb, lb_chain.list);
		lb->lb_flags = (uint16_t)flags;
		lb->lb_lock = lock;
		lb->lb_callsite = callsite;
		lb->lb_counts[event] = count;
		lb->lb_times[event] = cycles;
	} else {
		/*
		 * We didn't find a buffer and there were none free.
		 * lockstat_stop() will notice later on and report the
		 * error.
		 */
		lc->lc_overflow++;
	}

	splx(s);
}

/*
 * Accept an open() on /dev/lockstat.
 */
int
lockstat_open(dev_t dev, int flag, int mode, lwp_t *l)
{

	if (!__cpu_simple_lock_try(&lockstat_lock))
		return EBUSY;
	lockstat_lwp = curlwp;
	return 0;
}

/*
 * Accept the last close() on /dev/lockstat.
 */
int
lockstat_close(dev_t dev, int flag, int mode, lwp_t *l)
{

	lockstat_lwp = NULL;
	if (lockstat_dev_enabled) {
		lockstat_stop(NULL);
		lockstat_free();
	}
	__cpu_simple_unlock(&lockstat_lock);
	return 0;
}

/*
 * Handle control operations.
 */
int
lockstat_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	lsenable_t *le;
	int error;

	if (lockstat_lwp != curlwp)
		return EBUSY;

	switch (cmd) {
	case IOC_LOCKSTAT_GVERSION:
		*(int *)data = LS_VERSION;
		error = 0;
		break;

	case IOC_LOCKSTAT_ENABLE:
		le = (lsenable_t *)data;

		if (!cpu_hascounter()) {
			error = ENODEV;
			break;
		}
		if (lockstat_dev_enabled) {
			error = EBUSY;
			break;
		}

		/*
		 * Sanitize the arguments passed in and set up filtering.
		 */
		if (le->le_nbufs == 0) {
			le->le_nbufs = MIN(LOCKSTAT_DEFBUFS * ncpu,
			    LOCKSTAT_MAXBUFS);
		} else if (le->le_nbufs > LOCKSTAT_MAXBUFS ||
		    le->le_nbufs < LOCKSTAT_MINBUFS) {
			error = EINVAL;
			break;
		}
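		/*
		 * With no single-callsite or single-lock filter requested,
		 * open the window to the whole address space: the unsigned
		 * wraparound of start - 1 yields the maximum end address.
		 */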
		if ((le->le_flags & LE_ONE_CALLSITE) == 0) {
			le->le_csstart = 0;
			le->le_csend = le->le_csstart - 1;
		}
		if ((le->le_flags & LE_ONE_LOCK) == 0) {
			le->le_lockstart = 0;
			le->le_lockend = le->le_lockstart - 1;
		}
		if ((le->le_mask & LB_EVENT_MASK) == 0)
			return EINVAL;
		if ((le->le_mask & LB_LOCK_MASK) == 0)
			return EINVAL;

		/*
		 * Start tracing.
		 */
		if ((error = lockstat_alloc(le)) == 0)
			lockstat_start(le);
		break;

	case IOC_LOCKSTAT_DISABLE:
		if (!lockstat_dev_enabled)
			error = EINVAL;
		else
			error = lockstat_stop((lsdisable_t *)data);
		break;

	default:
		error = ENOTTY;
		break;
	}

	return error;
}

/*
 * Copy buffers out to user-space.
 */
int
lockstat_read(dev_t dev, struct uio *uio, int flag)
{

	if (curlwp != lockstat_lwp || lockstat_dev_enabled)
		return EBUSY;
	return uiomove(lockstat_baseb, lockstat_sizeb, uio);
}