/*	$NetBSD: lockstat.c,v 1.30 2022/04/08 10:17:54 andvar Exp $	*/

/*-
 * Copyright (c) 2006, 2007, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Lock statistics driver, providing kernel support for the lockstat(8)
 * command.
 *
 * We use a global lock word (lockstat_lock) to track device opens.
 * Only one thread can hold the device at a time, providing a global lock.
 *
 * XXX Timings for contention on sleep locks are currently incorrect.
 * XXX Convert this to use timecounters!
 */
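
/*
 * Overview of the moving parts: lockstat_ioctl() drives everything on
 * behalf of userland; lockstat_alloc() and lockstat_init_tables() set up
 * the per-CPU buffers, lockstat_start() and lockstat_stop() switch tracing
 * on and off, and lockstat_event() is called from the lock primitives to
 * record each sample.
 */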

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lockstat.c,v 1.30 2022/04/08 10:17:54 andvar Exp $");

#include <sys/types.h>
#include <sys/param.h>

#include <sys/atomic.h>
#include <sys/conf.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/xcall.h>

#include <dev/lockstat.h>

#include "ioconf.h"

#ifndef __HAVE_CPU_COUNTER
#error CPU counters not available
#endif

#if LONG_BIT == 64
#define	LOCKSTAT_HASH_SHIFT	3
#elif LONG_BIT == 32
#define	LOCKSTAT_HASH_SHIFT	2
#endif

#define	LOCKSTAT_MINBUFS	1000
#define	LOCKSTAT_DEFBUFS	20000
#define	LOCKSTAT_MAXBUFS	1000000

#define	LOCKSTAT_HASH_SIZE	128
#define	LOCKSTAT_HASH_MASK	(LOCKSTAT_HASH_SIZE - 1)
#define	LOCKSTAT_HASH(key)	\
	((key >> LOCKSTAT_HASH_SHIFT) & LOCKSTAT_HASH_MASK)
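
/*
 * Per-CPU bookkeeping: each CPU gets a private lscpu_t holding a free
 * list of lsbuf_t records (lc_free), hash buckets keyed on the
 * lock/call-site pair (lc_hash), and a count of events dropped because
 * no free buffer was available (lc_overflow).
 */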

typedef struct lscpu {
	SLIST_HEAD(, lsbuf)	lc_free;
	u_int			lc_overflow;
	LIST_HEAD(lslist, lsbuf) lc_hash[LOCKSTAT_HASH_SIZE];
} lscpu_t;

typedef struct lslist lslist_t;

void	lockstat_start(lsenable_t *);
int	lockstat_alloc(lsenable_t *);
void	lockstat_init_tables(lsenable_t *);
int	lockstat_stop(lsdisable_t *);
void	lockstat_free(void);

dev_type_open(lockstat_open);
dev_type_close(lockstat_close);
dev_type_read(lockstat_read);
dev_type_ioctl(lockstat_ioctl);

volatile u_int	lockstat_enabled;
volatile u_int	lockstat_dev_enabled;
__cpu_simple_lock_t lockstat_enabled_lock;
uintptr_t	lockstat_csstart;
uintptr_t	lockstat_csend;
uintptr_t	lockstat_csmask;
uintptr_t	lockstat_lamask;
uintptr_t	lockstat_lockstart;
uintptr_t	lockstat_lockend;
__cpu_simple_lock_t lockstat_lock;
lwp_t		*lockstat_lwp;
lsbuf_t		*lockstat_baseb;
size_t		lockstat_sizeb;
int		lockstat_busy;
struct timespec	lockstat_stime;

#ifdef KDTRACE_HOOKS
volatile u_int lockstat_dtrace_enabled;
CTASSERT(LB_NEVENT <= 3);
CTASSERT(LB_NLOCK <= (7 << LB_LOCK_SHIFT));
void
lockstat_probe_stub(uint32_t id, uintptr_t lock, uintptr_t callsite,
    uintptr_t flags, uintptr_t count, uintptr_t cycles)
{
}

uint32_t	lockstat_probemap[LS_NPROBES];
void		(*lockstat_probe_func)(uint32_t, uintptr_t, uintptr_t,
		    uintptr_t, uintptr_t, uintptr_t) = &lockstat_probe_stub;
#endif

const struct cdevsw lockstat_cdevsw = {
	.d_open = lockstat_open,
	.d_close = lockstat_close,
	.d_read = lockstat_read,
	.d_write = nowrite,
	.d_ioctl = lockstat_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};

/*
 * Called when the pseudo-driver is attached.
 */
void
lockstatattach(int nunits)
{

	(void)nunits;

	__cpu_simple_lock_init(&lockstat_lock);
	__cpu_simple_lock_init(&lockstat_enabled_lock);
}

/*
 * Prepare the per-CPU tables for use, or clear down tables when tracing is
 * stopped.
 */
void
lockstat_init_tables(lsenable_t *le)
{
	int i, per, slop, cpuno;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	lscpu_t *lc;
	lsbuf_t *lb;

	/* coverity[assert_side_effect] */
	KASSERT(!lockstat_dev_enabled);

	for (CPU_INFO_FOREACH(cii, ci)) {
		if (ci->ci_lockstat != NULL) {
			kmem_free(ci->ci_lockstat, sizeof(lscpu_t));
			ci->ci_lockstat = NULL;
		}
	}

	if (le == NULL)
		return;

	lb = lockstat_baseb;
	per = le->le_nbufs / ncpu;
	slop = le->le_nbufs - (per * ncpu);
	cpuno = 0;
	for (CPU_INFO_FOREACH(cii, ci)) {
		lc = kmem_alloc(sizeof(*lc), KM_SLEEP);
		lc->lc_overflow = 0;
		ci->ci_lockstat = lc;

		SLIST_INIT(&lc->lc_free);
		for (i = 0; i < LOCKSTAT_HASH_SIZE; i++)
			LIST_INIT(&lc->lc_hash[i]);

		for (i = per; i != 0; i--, lb++) {
			lb->lb_cpu = (uint16_t)cpuno;
			SLIST_INSERT_HEAD(&lc->lc_free, lb, lb_chain.slist);
		}
		if (--slop > 0) {
			lb->lb_cpu = (uint16_t)cpuno;
			SLIST_INSERT_HEAD(&lc->lc_free, lb, lb_chain.slist);
			lb++;
		}
		cpuno++;
	}
}
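
/*
 * Note on the enable word: lockstat_dev_enabled holds the LB_* mask
 * passed in the enable request, and lockstat_event() only records an
 * event when every bit in its flags argument is present in that mask.
 */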

/*
 * Start collecting lock statistics.
 */
void
lockstat_start(lsenable_t *le)
{

	/* coverity[assert_side_effect] */
	KASSERT(!lockstat_dev_enabled);

	lockstat_init_tables(le);

	if ((le->le_flags & LE_CALLSITE) != 0)
		lockstat_csmask = (uintptr_t)-1LL;
	else
		lockstat_csmask = 0;

	if ((le->le_flags & LE_LOCK) != 0)
		lockstat_lamask = (uintptr_t)-1LL;
	else
		lockstat_lamask = 0;

	lockstat_csstart = le->le_csstart;
	lockstat_csend = le->le_csend;
	lockstat_lockstart = le->le_lockstart;
	lockstat_lockend = le->le_lockend;

	/*
	 * Ensure everything is initialized on all CPUs, by issuing a
	 * null xcall with the side effect of a release barrier on this
	 * CPU and an acquire barrier on all other CPUs, before they
	 * can witness any flags set in lockstat_dev_enabled -- this
	 * way we don't need to add any barriers in lockstat_event.
	 */
	xc_barrier(0);

	/*
	 * Start timing after the xcall, so we don't spuriously count
	 * xcall communication time, but before flipping the switch, so
	 * we don't dirty sample with locks taken in the timecounter.
	 */
	getnanotime(&lockstat_stime);

	LOCKSTAT_ENABLED_UPDATE_BEGIN();
	atomic_store_relaxed(&lockstat_dev_enabled, le->le_mask);
	LOCKSTAT_ENABLED_UPDATE_END();
}

/*
 * Stop collecting lock statistics.
 */
int
lockstat_stop(lsdisable_t *ld)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	u_int cpuno, overflow;
	struct timespec ts;
	int error;
	lwp_t *l;

	/* coverity[assert_side_effect] */
	KASSERT(lockstat_dev_enabled);

	/*
	 * Disable and wait for other CPUs to exit lockstat_event().
	 */
	LOCKSTAT_ENABLED_UPDATE_BEGIN();
	atomic_store_relaxed(&lockstat_dev_enabled, 0);
	LOCKSTAT_ENABLED_UPDATE_END();
	getnanotime(&ts);
	xc_barrier(0);

	/*
	 * Did we run out of buffers while tracing?
	 */
	overflow = 0;
	for (CPU_INFO_FOREACH(cii, ci))
		overflow += ((lscpu_t *)ci->ci_lockstat)->lc_overflow;

	if (overflow != 0) {
		error = EOVERFLOW;
		log(LOG_NOTICE, "lockstat: %u buffer allocations failed\n",
		    overflow);
	} else
		error = 0;

	lockstat_init_tables(NULL);

	/* Run through all LWPs and clear the slate for the next run. */
	mutex_enter(&proc_lock);
	LIST_FOREACH(l, &alllwp, l_list) {
		l->l_pfailaddr = 0;
		l->l_pfailtime = 0;
		l->l_pfaillock = 0;
	}
	mutex_exit(&proc_lock);

	if (ld == NULL)
		return error;

	/*
	 * Fill out the disable struct for the caller.
	 */
	timespecsub(&ts, &lockstat_stime, &ld->ld_time);
	ld->ld_size = lockstat_sizeb;

	cpuno = 0;
	for (CPU_INFO_FOREACH(cii, ci)) {
		if (cpuno >= sizeof(ld->ld_freq) / sizeof(ld->ld_freq[0])) {
			log(LOG_WARNING, "lockstat: too many CPUs\n");
			break;
		}
		ld->ld_freq[cpuno++] = cpu_frequency(ci);
	}

	return error;
}
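
/*
 * Timing note: lb_times accumulates raw CPU cycle counts (hence the
 * __HAVE_CPU_COUNTER requirement above); the per-CPU frequencies
 * returned in ld_freq are what allow a consumer such as lockstat(8)
 * to convert those counts into real time.
 */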

/*
 * Allocate buffers for lockstat_start().
 */
int
lockstat_alloc(lsenable_t *le)
{
	lsbuf_t *lb;
	size_t sz;

	/* coverity[assert_side_effect] */
	KASSERT(!lockstat_dev_enabled);
	lockstat_free();

	sz = sizeof(*lb) * le->le_nbufs;

	lb = kmem_zalloc(sz, KM_SLEEP);

	/* coverity[assert_side_effect] */
	KASSERT(!lockstat_dev_enabled);
	KASSERT(lockstat_baseb == NULL);
	lockstat_sizeb = sz;
	lockstat_baseb = lb;

	return (0);
}

/*
 * Free allocated buffers after tracing has stopped.
 */
void
lockstat_free(void)
{

	/* coverity[assert_side_effect] */
	KASSERT(!lockstat_dev_enabled);

	if (lockstat_baseb != NULL) {
		kmem_free(lockstat_baseb, lockstat_sizeb);
		lockstat_baseb = NULL;
	}
}
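
/*
 * lockstat_event() below is called directly from the lock primitives,
 * so it touches only the current CPU's tables at splhigh(); no further
 * locking is needed, and the xc_barrier() calls in lockstat_start() and
 * lockstat_stop() provide the ordering against the enable flag.
 */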

/*
 * Main entry point from lock primitives.
 */
void
lockstat_event(uintptr_t lock, uintptr_t callsite, u_int flags, u_int count,
    uint64_t cycles)
{
	lslist_t *ll;
	lscpu_t *lc;
	lsbuf_t *lb;
	u_int event;
	int s;

#ifdef KDTRACE_HOOKS
	uint32_t id;
	CTASSERT((LS_NPROBES & (LS_NPROBES - 1)) == 0);
	if ((id = atomic_load_relaxed(&lockstat_probemap[LS_COMPRESS(flags)]))
	    != 0)
		(*lockstat_probe_func)(id, lock, callsite, flags, count,
		    cycles);
#endif

	if ((flags & atomic_load_relaxed(&lockstat_dev_enabled)) != flags ||
	    count == 0)
		return;
	if (lock < lockstat_lockstart || lock > lockstat_lockend)
		return;
	if (callsite < lockstat_csstart || callsite > lockstat_csend)
		return;

	callsite &= lockstat_csmask;
	lock &= lockstat_lamask;

	/*
	 * Find the table for this lock+callsite pair, and try to locate a
	 * buffer with the same key.
	 */
	s = splhigh();
	lc = curcpu()->ci_lockstat;
	ll = &lc->lc_hash[LOCKSTAT_HASH(lock ^ callsite)];
	event = (flags & LB_EVENT_MASK) - 1;

	LIST_FOREACH(lb, ll, lb_chain.list) {
		if (lb->lb_lock == lock && lb->lb_callsite == callsite)
			break;
	}

	if (lb != NULL) {
		/*
		 * We found a record.  Move it to the front of the list, as
		 * we're likely to hit it again soon.
		 */
		if (lb != LIST_FIRST(ll)) {
			LIST_REMOVE(lb, lb_chain.list);
			LIST_INSERT_HEAD(ll, lb, lb_chain.list);
		}
		lb->lb_counts[event] += count;
		lb->lb_times[event] += cycles;
	} else if ((lb = SLIST_FIRST(&lc->lc_free)) != NULL) {
		/*
		 * Pinch a new buffer and fill it out.
		 */
		SLIST_REMOVE_HEAD(&lc->lc_free, lb_chain.slist);
		LIST_INSERT_HEAD(ll, lb, lb_chain.list);
		lb->lb_flags = (uint16_t)flags;
		lb->lb_lock = lock;
		lb->lb_callsite = callsite;
		lb->lb_counts[event] = count;
		lb->lb_times[event] = cycles;
	} else {
		/*
		 * We didn't find a buffer and there were none free.
		 * lockstat_stop() will notice later on and report the
		 * error.
		 */
		lc->lc_overflow++;
	}

	splx(s);
}

/*
 * Accept an open() on /dev/lockstat.
 */
int
lockstat_open(dev_t dev, int flag, int mode, lwp_t *l)
{

	if (!__cpu_simple_lock_try(&lockstat_lock))
		return EBUSY;
	lockstat_lwp = curlwp;
	return 0;
}

/*
 * Accept the last close() on /dev/lockstat.
 */
int
lockstat_close(dev_t dev, int flag, int mode, lwp_t *l)
{

	lockstat_lwp = NULL;
	if (lockstat_dev_enabled) {
		lockstat_stop(NULL);
		lockstat_free();
	}
	__cpu_simple_unlock(&lockstat_lock);
	return 0;
}
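
/*
 * Illustrative userland sketch of the control sequence handled below
 * (assumptions: error handling is omitted and the mask is left as a
 * placeholder; it must contain at least one LB_ event bit and one LB_
 * lock-class bit, per the checks in lockstat_ioctl()):
 *
 *	int fd = open("/dev/lockstat", O_RDONLY);
 *	lsenable_t le = { .le_nbufs = 0 };	(0 selects the default size)
 *	le.le_mask = ...;			(event bits | lock-class bits)
 *	ioctl(fd, IOC_LOCKSTAT_ENABLE, &le);
 *	... run the workload of interest ...
 *	lsdisable_t ld;
 *	ioctl(fd, IOC_LOCKSTAT_DISABLE, &ld);
 *	read(fd, buf, ld.ld_size);		(returns the raw lsbuf_t records)
 *	close(fd);
 */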

/*
 * Handle control operations.
 */
int
lockstat_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	lsenable_t *le;
	int error;

	if (lockstat_lwp != curlwp)
		return EBUSY;

	switch (cmd) {
	case IOC_LOCKSTAT_GVERSION:
		*(int *)data = LS_VERSION;
		error = 0;
		break;

	case IOC_LOCKSTAT_ENABLE:
		le = (lsenable_t *)data;

		if (!cpu_hascounter()) {
			error = ENODEV;
			break;
		}
		if (atomic_load_relaxed(&lockstat_dev_enabled)) {
			error = EBUSY;
			break;
		}

		/*
		 * Sanitize the arguments passed in and set up filtering.
		 */
		if (le->le_nbufs == 0) {
			le->le_nbufs = MIN(LOCKSTAT_DEFBUFS * ncpu,
			    LOCKSTAT_MAXBUFS);
		} else if (le->le_nbufs > LOCKSTAT_MAXBUFS ||
		    le->le_nbufs < LOCKSTAT_MINBUFS) {
			error = EINVAL;
			break;
		}
		if ((le->le_flags & LE_ONE_CALLSITE) == 0) {
			le->le_csstart = 0;
			le->le_csend = le->le_csstart - 1;
		}
		if ((le->le_flags & LE_ONE_LOCK) == 0) {
			le->le_lockstart = 0;
			le->le_lockend = le->le_lockstart - 1;
		}
		if ((le->le_mask & LB_EVENT_MASK) == 0)
			return EINVAL;
		if ((le->le_mask & LB_LOCK_MASK) == 0)
			return EINVAL;

		/*
		 * Start tracing.
		 */
		if ((error = lockstat_alloc(le)) == 0)
			lockstat_start(le);
		break;

	case IOC_LOCKSTAT_DISABLE:
		if (!atomic_load_relaxed(&lockstat_dev_enabled))
			error = EINVAL;
		else
			error = lockstat_stop((lsdisable_t *)data);
		break;

	default:
		error = ENOTTY;
		break;
	}

	return error;
}

/*
 * Copy buffers out to user-space.
 */
int
lockstat_read(dev_t dev, struct uio *uio, int flag)
{

	if (curlwp != lockstat_lwp || lockstat_dev_enabled)
		return EBUSY;
	return uiomove(lockstat_baseb, lockstat_sizeb, uio);
}