1 1.83 nakayama /* $NetBSD: subr_lockdebug.c,v 1.83 2022/09/02 06:01:38 nakayama Exp $ */ 2 1.2 ad 3 1.2 ad /*- 4 1.73 ad * Copyright (c) 2006, 2007, 2008, 2020 The NetBSD Foundation, Inc. 5 1.2 ad * All rights reserved. 6 1.2 ad * 7 1.2 ad * This code is derived from software contributed to The NetBSD Foundation 8 1.2 ad * by Andrew Doran. 9 1.2 ad * 10 1.2 ad * Redistribution and use in source and binary forms, with or without 11 1.2 ad * modification, are permitted provided that the following conditions 12 1.2 ad * are met: 13 1.2 ad * 1. Redistributions of source code must retain the above copyright 14 1.2 ad * notice, this list of conditions and the following disclaimer. 15 1.2 ad * 2. Redistributions in binary form must reproduce the above copyright 16 1.2 ad * notice, this list of conditions and the following disclaimer in the 17 1.2 ad * documentation and/or other materials provided with the distribution. 18 1.2 ad * 19 1.2 ad * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 1.2 ad * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 1.2 ad * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 1.2 ad * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 1.2 ad * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 1.2 ad * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 1.2 ad * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 1.2 ad * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 1.2 ad * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 1.2 ad * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 1.2 ad * POSSIBILITY OF SUCH DAMAGE. 30 1.2 ad */ 31 1.2 ad 32 1.2 ad /* 33 1.11 ad * Basic lock debugging code shared among lock primitives. 34 1.2 ad */ 35 1.2 ad 36 1.9 dsl #include <sys/cdefs.h> 37 1.83 nakayama __KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.83 2022/09/02 06:01:38 nakayama Exp $"); 38 1.9 dsl 39 1.54 ozaki #ifdef _KERNEL_OPT 40 1.2 ad #include "opt_ddb.h" 41 1.54 ozaki #endif 42 1.2 ad 43 1.2 ad #include <sys/param.h> 44 1.2 ad #include <sys/proc.h> 45 1.2 ad #include <sys/systm.h> 46 1.10 ad #include <sys/kernel.h> 47 1.2 ad #include <sys/kmem.h> 48 1.2 ad #include <sys/lockdebug.h> 49 1.2 ad #include <sys/sleepq.h> 50 1.10 ad #include <sys/cpu.h> 51 1.22 ad #include <sys/atomic.h> 52 1.26 ad #include <sys/lock.h> 53 1.43 matt #include <sys/rbtree.h> 54 1.62 ozaki #include <sys/ksyms.h> 55 1.77 maxv #include <sys/kcov.h> 56 1.16 yamt 57 1.25 ad #include <machine/lock.h> 58 1.25 ad 59 1.82 msaitoh #ifdef DDB 60 1.82 msaitoh #include <machine/db_machdep.h> 61 1.82 msaitoh #include <ddb/db_interface.h> 62 1.82 msaitoh #include <ddb/db_access.h> 63 1.82 msaitoh #include <ddb/db_sym.h> 64 1.82 msaitoh #endif 65 1.82 msaitoh 66 1.28 ad unsigned int ld_panic; 67 1.28 ad 68 1.2 ad #ifdef LOCKDEBUG 69 1.2 ad 70 1.71 scole #ifdef __ia64__ 71 1.71 scole #define LD_BATCH_SHIFT 16 72 1.71 scole #else 73 1.2 ad #define LD_BATCH_SHIFT 9 74 1.71 scole #endif 75 1.2 ad #define LD_BATCH (1 << LD_BATCH_SHIFT) 76 1.2 ad #define LD_BATCH_MASK (LD_BATCH - 1) 77 1.2 ad #define LD_MAX_LOCKS 1048576 78 1.2 ad #define LD_SLOP 16 79 1.2 ad 80 1.2 ad #define LD_LOCKED 0x01 81 1.2 ad #define LD_SLEEPER 0x02 82 1.2 ad 83 1.23 ad #define LD_WRITE_LOCK 0x80000000 84 1.23 ad 85 1.2 ad typedef struct lockdebug { 86 1.42 rmind struct rb_node ld_rb_node; 87 1.34 ad __cpu_simple_lock_t ld_spinlock; 88 1.2 ad _TAILQ_ENTRY(struct lockdebug, volatile) ld_chain; 89 1.2 ad _TAILQ_ENTRY(struct lockdebug, volatile) ld_achain; 90 1.2 ad volatile void *ld_lock; 91 1.2 ad lockops_t *ld_lockops; 92 1.2 ad struct lwp *ld_lwp; 93 1.2 ad uintptr_t ld_locked; 94 1.2 ad uintptr_t ld_unlocked; 95 1.10 ad uintptr_t ld_initaddr; 96 1.2 ad uint16_t ld_shares; 97 1.2 ad uint16_t ld_cpu; 98 1.2 ad uint8_t ld_flags; 99 1.2 ad uint8_t ld_shwant; /* advisory */ 100 1.2 ad uint8_t ld_exwant; /* advisory */ 101 1.2 ad uint8_t ld_unused; 102 1.2 ad } volatile lockdebug_t; 103 1.2 ad 104 1.2 ad typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t; 105 1.2 ad 106 1.34 ad __cpu_simple_lock_t ld_mod_lk; 107 1.13 matt lockdebuglist_t ld_free = TAILQ_HEAD_INITIALIZER(ld_free); 108 1.75 christos #ifdef _KERNEL 109 1.13 matt lockdebuglist_t ld_all = TAILQ_HEAD_INITIALIZER(ld_all); 110 1.75 christos #else 111 1.75 christos extern lockdebuglist_t ld_all; 112 1.75 christos #define cpu_name(a) "?" 113 1.75 christos #define cpu_index(a) -1 114 1.75 christos #define curlwp NULL 115 1.75 christos #endif /* _KERNEL */ 116 1.2 ad int ld_nfree; 117 1.2 ad int ld_freeptr; 118 1.2 ad int ld_recurse; 119 1.5 ad bool ld_nomore; 120 1.2 ad lockdebug_t ld_prime[LD_BATCH]; 121 1.2 ad 122 1.75 christos #ifdef _KERNEL 123 1.55 christos static void lockdebug_abort1(const char *, size_t, lockdebug_t *, int, 124 1.55 christos const char *, bool); 125 1.34 ad static int lockdebug_more(int); 126 1.5 ad static void lockdebug_init(void); 127 1.74 ad static void lockdebug_dump(lwp_t *, lockdebug_t *, 128 1.74 ad void (*)(const char *, ...) 129 1.52 christos __printflike(1, 2)); 130 1.2 ad 131 1.16 yamt static signed int 132 1.42 rmind ld_rbto_compare_nodes(void *ctx, const void *n1, const void *n2) 133 1.16 yamt { 134 1.42 rmind const lockdebug_t *ld1 = n1; 135 1.42 rmind const lockdebug_t *ld2 = n2; 136 1.20 yamt const uintptr_t a = (uintptr_t)ld1->ld_lock; 137 1.20 yamt const uintptr_t b = (uintptr_t)ld2->ld_lock; 138 1.20 yamt 139 1.20 yamt if (a < b) 140 1.42 rmind return -1; 141 1.42 rmind if (a > b) 142 1.20 yamt return 1; 143 1.16 yamt return 0; 144 1.16 yamt } 145 1.16 yamt 146 1.16 yamt static signed int 147 1.42 rmind ld_rbto_compare_key(void *ctx, const void *n, const void *key) 148 1.16 yamt { 149 1.42 rmind const lockdebug_t *ld = n; 150 1.20 yamt const uintptr_t a = (uintptr_t)ld->ld_lock; 151 1.20 yamt const uintptr_t b = (uintptr_t)key; 152 1.20 yamt 153 1.20 yamt if (a < b) 154 1.42 rmind return -1; 155 1.42 rmind if (a > b) 156 1.20 yamt return 1; 157 1.16 yamt return 0; 158 1.16 yamt } 159 1.16 yamt 160 1.42 rmind static rb_tree_t ld_rb_tree; 161 1.16 yamt 162 1.42 rmind static const rb_tree_ops_t ld_rb_tree_ops = { 163 1.37 matt .rbto_compare_nodes = ld_rbto_compare_nodes, 164 1.37 matt .rbto_compare_key = ld_rbto_compare_key, 165 1.42 rmind .rbto_node_offset = offsetof(lockdebug_t, ld_rb_node), 166 1.42 rmind .rbto_context = NULL 167 1.16 yamt }; 168 1.16 yamt 169 1.34 ad static inline lockdebug_t * 170 1.58 christos lockdebug_lookup1(const volatile void *lock) 171 1.23 ad { 172 1.34 ad lockdebug_t *ld; 173 1.34 ad struct cpu_info *ci; 174 1.23 ad 175 1.34 ad ci = curcpu(); 176 1.34 ad __cpu_simple_lock(&ci->ci_data.cpu_ld_lock); 177 1.58 christos ld = rb_tree_find_node(&ld_rb_tree, (void *)(intptr_t)lock); 178 1.34 ad __cpu_simple_unlock(&ci->ci_data.cpu_ld_lock); 179 1.34 ad if (ld == NULL) { 180 1.34 ad return NULL; 181 1.34 ad } 182 1.34 ad __cpu_simple_lock(&ld->ld_spinlock); 183 1.23 ad 184 1.34 ad return ld; 185 1.2 ad } 186 1.2 ad 187 1.23 ad static void 188 1.34 ad lockdebug_lock_cpus(void) 189 1.2 ad { 190 1.34 ad CPU_INFO_ITERATOR cii; 191 1.34 ad struct cpu_info *ci; 192 1.2 ad 193 1.34 ad for (CPU_INFO_FOREACH(cii, ci)) { 194 1.34 ad __cpu_simple_lock(&ci->ci_data.cpu_ld_lock); 195 1.34 ad } 196 1.23 ad } 197 1.23 ad 198 1.23 ad static void 199 1.34 ad lockdebug_unlock_cpus(void) 200 1.23 ad { 201 1.34 ad CPU_INFO_ITERATOR cii; 202 1.34 ad struct cpu_info *ci; 203 1.23 ad 204 1.34 ad for (CPU_INFO_FOREACH(cii, ci)) { 205 1.34 ad __cpu_simple_unlock(&ci->ci_data.cpu_ld_lock); 206 1.34 ad } 207 1.2 ad } 208 1.2 ad 209 1.2 ad /* 210 1.19 yamt * lockdebug_lookup: 211 1.19 yamt * 212 1.19 yamt * Find a lockdebug structure by a pointer to a lock and return it locked. 213 1.19 yamt */ 214 1.19 yamt static inline lockdebug_t * 215 1.58 christos lockdebug_lookup(const char *func, size_t line, const volatile void *lock, 216 1.55 christos uintptr_t where) 217 1.19 yamt { 218 1.19 yamt lockdebug_t *ld; 219 1.19 yamt 220 1.77 maxv kcov_silence_enter(); 221 1.34 ad ld = lockdebug_lookup1(lock); 222 1.77 maxv kcov_silence_leave(); 223 1.77 maxv 224 1.60 ozaki if (__predict_false(ld == NULL)) { 225 1.55 christos panic("%s,%zu: uninitialized lock (lock=%p, from=%08" 226 1.55 christos PRIxPTR ")", func, line, lock, where); 227 1.42 rmind } 228 1.19 yamt return ld; 229 1.19 yamt } 230 1.19 yamt 231 1.19 yamt /* 232 1.2 ad * lockdebug_init: 233 1.2 ad * 234 1.2 ad * Initialize the lockdebug system. Allocate an initial pool of 235 1.2 ad * lockdebug structures before the VM system is up and running. 236 1.2 ad */ 237 1.5 ad static void 238 1.2 ad lockdebug_init(void) 239 1.2 ad { 240 1.2 ad lockdebug_t *ld; 241 1.2 ad int i; 242 1.2 ad 243 1.34 ad TAILQ_INIT(&curcpu()->ci_data.cpu_ld_locks); 244 1.34 ad TAILQ_INIT(&curlwp->l_ld_locks); 245 1.34 ad __cpu_simple_lock_init(&curcpu()->ci_data.cpu_ld_lock); 246 1.34 ad __cpu_simple_lock_init(&ld_mod_lk); 247 1.15 matt 248 1.16 yamt rb_tree_init(&ld_rb_tree, &ld_rb_tree_ops); 249 1.16 yamt 250 1.2 ad ld = ld_prime; 251 1.2 ad for (i = 1, ld++; i < LD_BATCH; i++, ld++) { 252 1.34 ad __cpu_simple_lock_init(&ld->ld_spinlock); 253 1.2 ad TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain); 254 1.2 ad TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain); 255 1.2 ad } 256 1.2 ad ld_freeptr = 1; 257 1.2 ad ld_nfree = LD_BATCH - 1; 258 1.2 ad } 259 1.2 ad 260 1.2 ad /* 261 1.2 ad * lockdebug_alloc: 262 1.2 ad * 263 1.2 ad * A lock is being initialized, so allocate an associated debug 264 1.2 ad * structure. 265 1.2 ad */ 266 1.16 yamt bool 267 1.55 christos lockdebug_alloc(const char *func, size_t line, volatile void *lock, 268 1.55 christos lockops_t *lo, uintptr_t initaddr) 269 1.2 ad { 270 1.2 ad struct cpu_info *ci; 271 1.2 ad lockdebug_t *ld; 272 1.34 ad int s; 273 1.2 ad 274 1.60 ozaki if (__predict_false(lo == NULL || panicstr != NULL || ld_panic)) 275 1.16 yamt return false; 276 1.60 ozaki if (__predict_false(ld_freeptr == 0)) 277 1.5 ad lockdebug_init(); 278 1.2 ad 279 1.34 ad s = splhigh(); 280 1.34 ad __cpu_simple_lock(&ld_mod_lk); 281 1.60 ozaki if (__predict_false((ld = lockdebug_lookup1(lock)) != NULL)) { 282 1.34 ad __cpu_simple_unlock(&ld_mod_lk); 283 1.55 christos lockdebug_abort1(func, line, ld, s, "already initialized", 284 1.55 christos true); 285 1.27 ad return false; 286 1.19 yamt } 287 1.19 yamt 288 1.2 ad /* 289 1.2 ad * Pinch a new debug structure. We may recurse because we call 290 1.2 ad * kmem_alloc(), which may need to initialize new locks somewhere 291 1.7 skrll * down the path. If not recursing, we try to maintain at least 292 1.2 ad * LD_SLOP structures free, which should hopefully be enough to 293 1.2 ad * satisfy kmem_alloc(). If we can't provide a structure, not to 294 1.2 ad * worry: we'll just mark the lock as not having an ID. 295 1.2 ad */ 296 1.23 ad ci = curcpu(); 297 1.2 ad ci->ci_lkdebug_recurse++; 298 1.2 ad if (TAILQ_EMPTY(&ld_free)) { 299 1.5 ad if (ci->ci_lkdebug_recurse > 1 || ld_nomore) { 300 1.2 ad ci->ci_lkdebug_recurse--; 301 1.34 ad __cpu_simple_unlock(&ld_mod_lk); 302 1.34 ad splx(s); 303 1.16 yamt return false; 304 1.2 ad } 305 1.34 ad s = lockdebug_more(s); 306 1.34 ad } else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP) { 307 1.34 ad s = lockdebug_more(s); 308 1.34 ad } 309 1.60 ozaki if (__predict_false((ld = TAILQ_FIRST(&ld_free)) == NULL)) { 310 1.34 ad __cpu_simple_unlock(&ld_mod_lk); 311 1.34 ad splx(s); 312 1.16 yamt return false; 313 1.2 ad } 314 1.2 ad TAILQ_REMOVE(&ld_free, ld, ld_chain); 315 1.2 ad ld_nfree--; 316 1.2 ad ci->ci_lkdebug_recurse--; 317 1.2 ad 318 1.60 ozaki if (__predict_false(ld->ld_lock != NULL)) { 319 1.55 christos panic("%s,%zu: corrupt table ld %p", func, line, ld); 320 1.34 ad } 321 1.2 ad 322 1.2 ad /* Initialise the structure. */ 323 1.2 ad ld->ld_lock = lock; 324 1.2 ad ld->ld_lockops = lo; 325 1.2 ad ld->ld_locked = 0; 326 1.2 ad ld->ld_unlocked = 0; 327 1.2 ad ld->ld_lwp = NULL; 328 1.10 ad ld->ld_initaddr = initaddr; 329 1.35 ad ld->ld_flags = (lo->lo_type == LOCKOPS_SLEEP ? LD_SLEEPER : 0); 330 1.34 ad lockdebug_lock_cpus(); 331 1.42 rmind (void)rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(ld)); 332 1.34 ad lockdebug_unlock_cpus(); 333 1.34 ad __cpu_simple_unlock(&ld_mod_lk); 334 1.2 ad 335 1.34 ad splx(s); 336 1.16 yamt return true; 337 1.2 ad } 338 1.2 ad 339 1.2 ad /* 340 1.2 ad * lockdebug_free: 341 1.2 ad * 342 1.2 ad * A lock is being destroyed, so release debugging resources. 343 1.2 ad */ 344 1.2 ad void 345 1.55 christos lockdebug_free(const char *func, size_t line, volatile void *lock) 346 1.2 ad { 347 1.2 ad lockdebug_t *ld; 348 1.34 ad int s; 349 1.2 ad 350 1.60 ozaki if (__predict_false(panicstr != NULL || ld_panic)) 351 1.2 ad return; 352 1.2 ad 353 1.34 ad s = splhigh(); 354 1.34 ad __cpu_simple_lock(&ld_mod_lk); 355 1.55 christos ld = lockdebug_lookup(func, line, lock, 356 1.55 christos (uintptr_t) __builtin_return_address(0)); 357 1.60 ozaki if (__predict_false(ld == NULL)) { 358 1.34 ad __cpu_simple_unlock(&ld_mod_lk); 359 1.55 christos panic("%s,%zu: destroying uninitialized object %p" 360 1.55 christos "(ld_lock=%p)", func, line, lock, ld->ld_lock); 361 1.27 ad return; 362 1.2 ad } 363 1.60 ozaki if (__predict_false((ld->ld_flags & LD_LOCKED) != 0 || 364 1.60 ozaki ld->ld_shares != 0)) { 365 1.34 ad __cpu_simple_unlock(&ld_mod_lk); 366 1.55 christos lockdebug_abort1(func, line, ld, s, "is locked or in use", 367 1.55 christos true); 368 1.27 ad return; 369 1.27 ad } 370 1.34 ad lockdebug_lock_cpus(); 371 1.42 rmind rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(ld)); 372 1.34 ad lockdebug_unlock_cpus(); 373 1.2 ad ld->ld_lock = NULL; 374 1.2 ad TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain); 375 1.2 ad ld_nfree++; 376 1.34 ad __cpu_simple_unlock(&ld->ld_spinlock); 377 1.34 ad __cpu_simple_unlock(&ld_mod_lk); 378 1.34 ad splx(s); 379 1.2 ad } 380 1.2 ad 381 1.2 ad /* 382 1.2 ad * lockdebug_more: 383 1.2 ad * 384 1.2 ad * Allocate a batch of debug structures and add to the free list. 385 1.34 ad * Must be called with ld_mod_lk held. 386 1.2 ad */ 387 1.34 ad static int 388 1.34 ad lockdebug_more(int s) 389 1.2 ad { 390 1.2 ad lockdebug_t *ld; 391 1.2 ad void *block; 392 1.5 ad int i, base, m; 393 1.2 ad 394 1.35 ad /* 395 1.35 ad * Can't call kmem_alloc() if in interrupt context. XXX We could 396 1.35 ad * deadlock, because we don't know which locks the caller holds. 397 1.35 ad */ 398 1.59 ozaki if (cpu_intr_p() || cpu_softintr_p()) { 399 1.35 ad return s; 400 1.35 ad } 401 1.35 ad 402 1.2 ad while (ld_nfree < LD_SLOP) { 403 1.34 ad __cpu_simple_unlock(&ld_mod_lk); 404 1.34 ad splx(s); 405 1.2 ad block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP); 406 1.34 ad s = splhigh(); 407 1.34 ad __cpu_simple_lock(&ld_mod_lk); 408 1.2 ad 409 1.2 ad if (ld_nfree > LD_SLOP) { 410 1.2 ad /* Somebody beat us to it. */ 411 1.34 ad __cpu_simple_unlock(&ld_mod_lk); 412 1.34 ad splx(s); 413 1.2 ad kmem_free(block, LD_BATCH * sizeof(lockdebug_t)); 414 1.34 ad s = splhigh(); 415 1.34 ad __cpu_simple_lock(&ld_mod_lk); 416 1.2 ad continue; 417 1.2 ad } 418 1.2 ad 419 1.2 ad base = ld_freeptr; 420 1.2 ad ld_nfree += LD_BATCH; 421 1.2 ad ld = block; 422 1.2 ad base <<= LD_BATCH_SHIFT; 423 1.66 riastrad m = uimin(LD_MAX_LOCKS, base + LD_BATCH); 424 1.5 ad 425 1.5 ad if (m == LD_MAX_LOCKS) 426 1.5 ad ld_nomore = true; 427 1.2 ad 428 1.5 ad for (i = base; i < m; i++, ld++) { 429 1.34 ad __cpu_simple_lock_init(&ld->ld_spinlock); 430 1.2 ad TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain); 431 1.2 ad TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain); 432 1.2 ad } 433 1.2 ad 434 1.22 ad membar_producer(); 435 1.2 ad } 436 1.34 ad 437 1.34 ad return s; 438 1.2 ad } 439 1.2 ad 440 1.2 ad /* 441 1.2 ad * lockdebug_wantlock: 442 1.2 ad * 443 1.56 pgoyette * Process the preamble to a lock acquire. The "shared" 444 1.56 pgoyette * parameter controls which ld_{ex,sh}want counter is 445 1.56 pgoyette * updated; a negative value of shared updates neither. 446 1.2 ad */ 447 1.2 ad void 448 1.55 christos lockdebug_wantlock(const char *func, size_t line, 449 1.58 christos const volatile void *lock, uintptr_t where, int shared) 450 1.2 ad { 451 1.2 ad struct lwp *l = curlwp; 452 1.2 ad lockdebug_t *ld; 453 1.3 thorpej bool recurse; 454 1.34 ad int s; 455 1.2 ad 456 1.2 ad (void)shared; 457 1.4 thorpej recurse = false; 458 1.2 ad 459 1.60 ozaki if (__predict_false(panicstr != NULL || ld_panic)) 460 1.2 ad return; 461 1.2 ad 462 1.34 ad s = splhigh(); 463 1.55 christos if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) { 464 1.34 ad splx(s); 465 1.2 ad return; 466 1.34 ad } 467 1.32 yamt if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) { 468 1.2 ad if ((ld->ld_flags & LD_SLEEPER) != 0) { 469 1.49 mlelstv if (ld->ld_lwp == l) 470 1.4 thorpej recurse = true; 471 1.40 rmind } else if (ld->ld_cpu == (uint16_t)cpu_index(curcpu())) 472 1.4 thorpej recurse = true; 473 1.2 ad } 474 1.10 ad if (cpu_intr_p()) { 475 1.60 ozaki if (__predict_false((ld->ld_flags & LD_SLEEPER) != 0)) { 476 1.55 christos lockdebug_abort1(func, line, ld, s, 477 1.10 ad "acquiring sleep lock from interrupt context", 478 1.10 ad true); 479 1.27 ad return; 480 1.27 ad } 481 1.10 ad } 482 1.56 pgoyette if (shared > 0) 483 1.2 ad ld->ld_shwant++; 484 1.56 pgoyette else if (shared == 0) 485 1.2 ad ld->ld_exwant++; 486 1.60 ozaki if (__predict_false(recurse)) { 487 1.55 christos lockdebug_abort1(func, line, ld, s, "locking against myself", 488 1.10 ad true); 489 1.27 ad return; 490 1.27 ad } 491 1.74 ad if (l->l_ld_wanted == NULL) { 492 1.74 ad l->l_ld_wanted = ld; 493 1.74 ad } 494 1.34 ad __cpu_simple_unlock(&ld->ld_spinlock); 495 1.34 ad splx(s); 496 1.2 ad } 497 1.2 ad 498 1.2 ad /* 499 1.2 ad * lockdebug_locked: 500 1.2 ad * 501 1.2 ad * Process a lock acquire operation. 502 1.2 ad */ 503 1.2 ad void 504 1.55 christos lockdebug_locked(const char *func, size_t line, 505 1.55 christos volatile void *lock, void *cvlock, uintptr_t where, int shared) 506 1.2 ad { 507 1.2 ad struct lwp *l = curlwp; 508 1.2 ad lockdebug_t *ld; 509 1.34 ad int s; 510 1.2 ad 511 1.60 ozaki if (__predict_false(panicstr != NULL || ld_panic)) 512 1.2 ad return; 513 1.2 ad 514 1.34 ad s = splhigh(); 515 1.55 christos if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) { 516 1.34 ad splx(s); 517 1.2 ad return; 518 1.34 ad } 519 1.76 ad if (shared) { 520 1.2 ad l->l_shlocks++; 521 1.45 yamt ld->ld_locked = where; 522 1.2 ad ld->ld_shares++; 523 1.2 ad ld->ld_shwant--; 524 1.2 ad } else { 525 1.60 ozaki if (__predict_false((ld->ld_flags & LD_LOCKED) != 0)) { 526 1.55 christos lockdebug_abort1(func, line, ld, s, "already locked", 527 1.34 ad true); 528 1.27 ad return; 529 1.27 ad } 530 1.2 ad ld->ld_flags |= LD_LOCKED; 531 1.2 ad ld->ld_locked = where; 532 1.2 ad ld->ld_exwant--; 533 1.2 ad if ((ld->ld_flags & LD_SLEEPER) != 0) { 534 1.34 ad TAILQ_INSERT_TAIL(&l->l_ld_locks, ld, ld_chain); 535 1.2 ad } else { 536 1.34 ad TAILQ_INSERT_TAIL(&curcpu()->ci_data.cpu_ld_locks, 537 1.34 ad ld, ld_chain); 538 1.2 ad } 539 1.2 ad } 540 1.40 rmind ld->ld_cpu = (uint16_t)cpu_index(curcpu()); 541 1.32 yamt ld->ld_lwp = l; 542 1.34 ad __cpu_simple_unlock(&ld->ld_spinlock); 543 1.74 ad if (l->l_ld_wanted == ld) { 544 1.74 ad l->l_ld_wanted = NULL; 545 1.74 ad } 546 1.34 ad splx(s); 547 1.2 ad } 548 1.2 ad 549 1.2 ad /* 550 1.2 ad * lockdebug_unlocked: 551 1.2 ad * 552 1.2 ad * Process a lock release operation. 553 1.2 ad */ 554 1.2 ad void 555 1.55 christos lockdebug_unlocked(const char *func, size_t line, 556 1.55 christos volatile void *lock, uintptr_t where, int shared) 557 1.2 ad { 558 1.2 ad struct lwp *l = curlwp; 559 1.2 ad lockdebug_t *ld; 560 1.34 ad int s; 561 1.2 ad 562 1.60 ozaki if (__predict_false(panicstr != NULL || ld_panic)) 563 1.2 ad return; 564 1.2 ad 565 1.34 ad s = splhigh(); 566 1.55 christos if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) { 567 1.34 ad splx(s); 568 1.2 ad return; 569 1.34 ad } 570 1.76 ad if (shared) { 571 1.60 ozaki if (__predict_false(l->l_shlocks == 0)) { 572 1.55 christos lockdebug_abort1(func, line, ld, s, 573 1.10 ad "no shared locks held by LWP", true); 574 1.27 ad return; 575 1.27 ad } 576 1.60 ozaki if (__predict_false(ld->ld_shares == 0)) { 577 1.55 christos lockdebug_abort1(func, line, ld, s, 578 1.10 ad "no shared holds on this lock", true); 579 1.27 ad return; 580 1.27 ad } 581 1.2 ad l->l_shlocks--; 582 1.2 ad ld->ld_shares--; 583 1.45 yamt if (ld->ld_lwp == l) { 584 1.45 yamt ld->ld_unlocked = where; 585 1.32 yamt ld->ld_lwp = NULL; 586 1.45 yamt } 587 1.40 rmind if (ld->ld_cpu == (uint16_t)cpu_index(curcpu())) 588 1.32 yamt ld->ld_cpu = (uint16_t)-1; 589 1.2 ad } else { 590 1.60 ozaki if (__predict_false((ld->ld_flags & LD_LOCKED) == 0)) { 591 1.55 christos lockdebug_abort1(func, line, ld, s, "not locked", true); 592 1.27 ad return; 593 1.27 ad } 594 1.2 ad 595 1.2 ad if ((ld->ld_flags & LD_SLEEPER) != 0) { 596 1.60 ozaki if (__predict_false(ld->ld_lwp != curlwp)) { 597 1.55 christos lockdebug_abort1(func, line, ld, s, 598 1.10 ad "not held by current LWP", true); 599 1.27 ad return; 600 1.27 ad } 601 1.34 ad TAILQ_REMOVE(&l->l_ld_locks, ld, ld_chain); 602 1.2 ad } else { 603 1.60 ozaki uint16_t idx = (uint16_t)cpu_index(curcpu()); 604 1.60 ozaki if (__predict_false(ld->ld_cpu != idx)) { 605 1.55 christos lockdebug_abort1(func, line, ld, s, 606 1.10 ad "not held by current CPU", true); 607 1.27 ad return; 608 1.27 ad } 609 1.34 ad TAILQ_REMOVE(&curcpu()->ci_data.cpu_ld_locks, ld, 610 1.34 ad ld_chain); 611 1.2 ad } 612 1.44 matt ld->ld_flags &= ~LD_LOCKED; 613 1.78 riastrad ld->ld_unlocked = where; 614 1.44 matt ld->ld_lwp = NULL; 615 1.2 ad } 616 1.34 ad __cpu_simple_unlock(&ld->ld_spinlock); 617 1.34 ad splx(s); 618 1.2 ad } 619 1.2 ad 620 1.2 ad /* 621 1.2 ad * lockdebug_barrier: 622 1.78 riastrad * 623 1.73 ad * Panic if we hold more than one specified lock, and optionally, if we 624 1.73 ad * hold any sleep locks. 625 1.2 ad */ 626 1.2 ad void 627 1.73 ad lockdebug_barrier(const char *func, size_t line, volatile void *onelock, 628 1.55 christos int slplocks) 629 1.2 ad { 630 1.2 ad struct lwp *l = curlwp; 631 1.2 ad lockdebug_t *ld; 632 1.34 ad int s; 633 1.2 ad 634 1.60 ozaki if (__predict_false(panicstr != NULL || ld_panic)) 635 1.2 ad return; 636 1.2 ad 637 1.34 ad s = splhigh(); 638 1.34 ad if ((l->l_pflag & LP_INTR) == 0) { 639 1.34 ad TAILQ_FOREACH(ld, &curcpu()->ci_data.cpu_ld_locks, ld_chain) { 640 1.73 ad if (ld->ld_lock == onelock) { 641 1.2 ad continue; 642 1.2 ad } 643 1.34 ad __cpu_simple_lock(&ld->ld_spinlock); 644 1.55 christos lockdebug_abort1(func, line, ld, s, 645 1.34 ad "spin lock held", true); 646 1.34 ad return; 647 1.2 ad } 648 1.2 ad } 649 1.34 ad if (slplocks) { 650 1.34 ad splx(s); 651 1.34 ad return; 652 1.34 ad } 653 1.60 ozaki ld = TAILQ_FIRST(&l->l_ld_locks); 654 1.73 ad if (__predict_false(ld != NULL && ld->ld_lock != onelock)) { 655 1.34 ad __cpu_simple_lock(&ld->ld_spinlock); 656 1.55 christos lockdebug_abort1(func, line, ld, s, "sleep lock held", true); 657 1.34 ad return; 658 1.34 ad } 659 1.34 ad splx(s); 660 1.34 ad if (l->l_shlocks != 0) { 661 1.52 christos TAILQ_FOREACH(ld, &ld_all, ld_achain) { 662 1.73 ad if (ld->ld_lock == onelock) { 663 1.73 ad continue; 664 1.73 ad } 665 1.52 christos if (ld->ld_lwp == l) 666 1.74 ad lockdebug_dump(l, ld, printf); 667 1.52 christos } 668 1.55 christos panic("%s,%zu: holding %d shared locks", func, line, 669 1.55 christos l->l_shlocks); 670 1.2 ad } 671 1.2 ad } 672 1.2 ad 673 1.2 ad /* 674 1.10 ad * lockdebug_mem_check: 675 1.10 ad * 676 1.10 ad * Check for in-use locks within a memory region that is 677 1.16 yamt * being freed. 678 1.10 ad */ 679 1.10 ad void 680 1.55 christos lockdebug_mem_check(const char *func, size_t line, void *base, size_t sz) 681 1.10 ad { 682 1.16 yamt lockdebug_t *ld; 683 1.34 ad struct cpu_info *ci; 684 1.23 ad int s; 685 1.10 ad 686 1.60 ozaki if (__predict_false(panicstr != NULL || ld_panic)) 687 1.24 ad return; 688 1.24 ad 689 1.77 maxv kcov_silence_enter(); 690 1.77 maxv 691 1.34 ad s = splhigh(); 692 1.34 ad ci = curcpu(); 693 1.34 ad __cpu_simple_lock(&ci->ci_data.cpu_ld_lock); 694 1.16 yamt ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base); 695 1.23 ad if (ld != NULL) { 696 1.23 ad const uintptr_t lock = (uintptr_t)ld->ld_lock; 697 1.23 ad 698 1.60 ozaki if (__predict_false((uintptr_t)base > lock)) 699 1.55 christos panic("%s,%zu: corrupt tree ld=%p, base=%p, sz=%zu", 700 1.55 christos func, line, ld, base, sz); 701 1.23 ad if (lock >= (uintptr_t)base + sz) 702 1.23 ad ld = NULL; 703 1.23 ad } 704 1.34 ad __cpu_simple_unlock(&ci->ci_data.cpu_ld_lock); 705 1.60 ozaki if (__predict_false(ld != NULL)) { 706 1.34 ad __cpu_simple_lock(&ld->ld_spinlock); 707 1.55 christos lockdebug_abort1(func, line, ld, s, 708 1.34 ad "allocation contains active lock", !cold); 709 1.77 maxv kcov_silence_leave(); 710 1.16 yamt return; 711 1.34 ad } 712 1.34 ad splx(s); 713 1.77 maxv 714 1.77 maxv kcov_silence_leave(); 715 1.10 ad } 716 1.75 christos #endif /* _KERNEL */ 717 1.75 christos 718 1.10 ad /* 719 1.2 ad * lockdebug_dump: 720 1.2 ad * 721 1.2 ad * Dump information about a lock on panic, or for DDB. 722 1.2 ad */ 723 1.2 ad static void 724 1.74 ad lockdebug_dump(lwp_t *l, lockdebug_t *ld, void (*pr)(const char *, ...) 725 1.47 christos __printflike(1, 2)) 726 1.2 ad { 727 1.2 ad int sleeper = (ld->ld_flags & LD_SLEEPER); 728 1.75 christos lockops_t *lo = ld->ld_lockops; 729 1.81 riastrad char locksym[128], initsym[128], lockedsym[128], unlockedsym[128]; 730 1.81 riastrad 731 1.81 riastrad #ifdef DDB 732 1.83 nakayama db_symstr(locksym, sizeof(locksym), (db_expr_t)(intptr_t)ld->ld_lock, 733 1.81 riastrad DB_STGY_ANY); 734 1.81 riastrad db_symstr(initsym, sizeof(initsym), (db_expr_t)ld->ld_initaddr, 735 1.81 riastrad DB_STGY_PROC); 736 1.81 riastrad db_symstr(lockedsym, sizeof(lockedsym), (db_expr_t)ld->ld_locked, 737 1.81 riastrad DB_STGY_PROC); 738 1.81 riastrad db_symstr(unlockedsym, sizeof(unlockedsym), (db_expr_t)ld->ld_unlocked, 739 1.81 riastrad DB_STGY_PROC); 740 1.81 riastrad #else 741 1.81 riastrad snprintf(locksym, sizeof(locksym), "%#018lx", 742 1.81 riastrad (unsigned long)ld->ld_lock); 743 1.81 riastrad snprintf(initsym, sizeof(initsym), "%#018lx", 744 1.81 riastrad (unsigned long)ld->ld_initaddr); 745 1.81 riastrad snprintf(lockedsym, sizeof(lockedsym), "%#018lx", 746 1.81 riastrad (unsigned long)ld->ld_locked); 747 1.81 riastrad snprintf(unlockedsym, sizeof(unlockedsym), "%#018lx", 748 1.81 riastrad (unsigned long)ld->ld_unlocked); 749 1.81 riastrad #endif 750 1.2 ad 751 1.2 ad (*pr)( 752 1.81 riastrad "lock address : %s\n" 753 1.81 riastrad "type : %s\n" 754 1.81 riastrad "initialized : %s", 755 1.81 riastrad locksym, (sleeper ? "sleep/adaptive" : "spin"), 756 1.81 riastrad initsym); 757 1.2 ad 758 1.75 christos #ifndef _KERNEL 759 1.75 christos lockops_t los; 760 1.75 christos lo = &los; 761 1.75 christos db_read_bytes((db_addr_t)ld->ld_lockops, sizeof(los), (char *)lo); 762 1.75 christos #endif 763 1.76 ad (*pr)("\n" 764 1.76 ad "shared holds : %18u exclusive: %18u\n" 765 1.76 ad "shares wanted: %18u exclusive: %18u\n" 766 1.76 ad "relevant cpu : %18u last held: %18u\n" 767 1.76 ad "relevant lwp : %#018lx last held: %#018lx\n" 768 1.81 riastrad "last locked%c : %s\n" 769 1.81 riastrad "unlocked%c : %s\n", 770 1.76 ad (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0), 771 1.76 ad (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant, 772 1.76 ad (unsigned)cpu_index(l->l_cpu), (unsigned)ld->ld_cpu, 773 1.76 ad (long)l, (long)ld->ld_lwp, 774 1.76 ad ((ld->ld_flags & LD_LOCKED) ? '*' : ' '), 775 1.81 riastrad lockedsym, 776 1.76 ad ((ld->ld_flags & LD_LOCKED) ? ' ' : '*'), 777 1.81 riastrad unlockedsym); 778 1.35 ad 779 1.75 christos #ifdef _KERNEL 780 1.75 christos if (lo->lo_dump != NULL) 781 1.75 christos (*lo->lo_dump)(ld->ld_lock, pr); 782 1.2 ad 783 1.2 ad if (sleeper) { 784 1.2 ad turnstile_print(ld->ld_lock, pr); 785 1.2 ad } 786 1.75 christos #endif 787 1.2 ad } 788 1.2 ad 789 1.75 christos #ifdef _KERNEL 790 1.2 ad /* 791 1.27 ad * lockdebug_abort1: 792 1.2 ad * 793 1.27 ad * An error has been trapped - dump lock info and panic. 794 1.2 ad */ 795 1.5 ad static void 796 1.55 christos lockdebug_abort1(const char *func, size_t line, lockdebug_t *ld, int s, 797 1.10 ad const char *msg, bool dopanic) 798 1.2 ad { 799 1.2 ad 800 1.27 ad /* 801 1.46 christos * Don't make the situation worse if the system is already going 802 1.27 ad * down in flames. Once a panic is triggered, lockdebug state 803 1.27 ad * becomes stale and cannot be trusted. 804 1.27 ad */ 805 1.27 ad if (atomic_inc_uint_nv(&ld_panic) != 1) { 806 1.34 ad __cpu_simple_unlock(&ld->ld_spinlock); 807 1.34 ad splx(s); 808 1.27 ad return; 809 1.27 ad } 810 1.27 ad 811 1.79 riastrad printf("%s error: %s,%zu: %s\n\n", ld->ld_lockops->lo_name, 812 1.55 christos func, line, msg); 813 1.79 riastrad lockdebug_dump(curlwp, ld, printf); 814 1.34 ad __cpu_simple_unlock(&ld->ld_spinlock); 815 1.34 ad splx(s); 816 1.79 riastrad printf("\n"); 817 1.10 ad if (dopanic) 818 1.55 christos panic("LOCKDEBUG: %s error: %s,%zu: %s", 819 1.55 christos ld->ld_lockops->lo_name, func, line, msg); 820 1.2 ad } 821 1.2 ad 822 1.75 christos #endif /* _KERNEL */ 823 1.2 ad #endif /* LOCKDEBUG */ 824 1.2 ad 825 1.2 ad /* 826 1.2 ad * lockdebug_lock_print: 827 1.2 ad * 828 1.2 ad * Handle the DDB 'show lock' command. 829 1.2 ad */ 830 1.2 ad #ifdef DDB 831 1.2 ad void 832 1.69 christos lockdebug_lock_print(void *addr, 833 1.69 christos void (*pr)(const char *, ...) __printflike(1, 2)) 834 1.2 ad { 835 1.2 ad #ifdef LOCKDEBUG 836 1.75 christos lockdebug_t *ld, lds; 837 1.2 ad 838 1.2 ad TAILQ_FOREACH(ld, &ld_all, ld_achain) { 839 1.75 christos db_read_bytes((db_addr_t)ld, sizeof(lds), __UNVOLATILE(&lds)); 840 1.75 christos ld = &lds; 841 1.41 dyoung if (ld->ld_lock == NULL) 842 1.41 dyoung continue; 843 1.41 dyoung if (addr == NULL || ld->ld_lock == addr) { 844 1.74 ad lockdebug_dump(curlwp, ld, pr); 845 1.41 dyoung if (addr != NULL) 846 1.41 dyoung return; 847 1.2 ad } 848 1.2 ad } 849 1.41 dyoung if (addr != NULL) { 850 1.41 dyoung (*pr)("Sorry, no record of a lock with address %p found.\n", 851 1.41 dyoung addr); 852 1.41 dyoung } 853 1.2 ad #else 854 1.81 riastrad char sym[128]; 855 1.81 riastrad uintptr_t word; 856 1.81 riastrad 857 1.81 riastrad (*pr)("WARNING: lock print is unreliable without LOCKDEBUG\n"); 858 1.83 nakayama db_symstr(sym, sizeof(sym), (db_expr_t)(intptr_t)addr, DB_STGY_ANY); 859 1.82 msaitoh db_read_bytes((db_addr_t)addr, sizeof(word), (char *)&word); 860 1.82 msaitoh (*pr)("%s: possible owner: %p, bits: 0x%" PRIxPTR "\n", sym, 861 1.81 riastrad (void *)(word & ~(uintptr_t)ALIGNBYTES), word & ALIGNBYTES); 862 1.2 ad #endif /* LOCKDEBUG */ 863 1.2 ad } 864 1.61 ozaki 865 1.75 christos #ifdef _KERNEL 866 1.62 ozaki #ifdef LOCKDEBUG 867 1.62 ozaki static void 868 1.74 ad lockdebug_show_one(lwp_t *l, lockdebug_t *ld, int i, 869 1.69 christos void (*pr)(const char *, ...) __printflike(1, 2)) 870 1.69 christos { 871 1.81 riastrad char sym[128]; 872 1.69 christos 873 1.81 riastrad #ifdef DDB 874 1.81 riastrad db_symstr(sym, sizeof(sym), (db_expr_t)ld->ld_initaddr, DB_STGY_PROC); 875 1.81 riastrad #else 876 1.81 riastrad snprintf(sym, sizeof(sym), "%p", (void *)ld->ld_initaddr); 877 1.75 christos #endif 878 1.74 ad (*pr)("* Lock %d (initialized at %s)\n", i++, sym); 879 1.74 ad lockdebug_dump(l, ld, pr); 880 1.69 christos } 881 1.69 christos 882 1.69 christos static void 883 1.69 christos lockdebug_show_trace(const void *ptr, 884 1.69 christos void (*pr)(const char *, ...) __printflike(1, 2)) 885 1.69 christos { 886 1.80 rin 887 1.80 rin db_stack_trace_print((db_expr_t)(intptr_t)ptr, true, 32, "a", pr); 888 1.69 christos } 889 1.69 christos 890 1.69 christos static void 891 1.69 christos lockdebug_show_all_locks_lwp(void (*pr)(const char *, ...) __printflike(1, 2), 892 1.69 christos bool show_trace) 893 1.62 ozaki { 894 1.62 ozaki struct proc *p; 895 1.62 ozaki 896 1.62 ozaki LIST_FOREACH(p, &allproc, p_list) { 897 1.62 ozaki struct lwp *l; 898 1.62 ozaki LIST_FOREACH(l, &p->p_lwps, l_sibling) { 899 1.62 ozaki lockdebug_t *ld; 900 1.62 ozaki int i = 0; 901 1.74 ad if (TAILQ_EMPTY(&l->l_ld_locks) && 902 1.74 ad l->l_ld_wanted == NULL) { 903 1.74 ad continue; 904 1.74 ad } 905 1.74 ad (*pr)("\n****** LWP %d.%d (%s) @ %p, l_stat=%d\n", 906 1.74 ad p->p_pid, l->l_lid, 907 1.74 ad l->l_name ? l->l_name : p->p_comm, l, l->l_stat); 908 1.74 ad if (!TAILQ_EMPTY(&l->l_ld_locks)) { 909 1.74 ad (*pr)("\n*** Locks held: \n"); 910 1.74 ad TAILQ_FOREACH(ld, &l->l_ld_locks, ld_chain) { 911 1.74 ad (*pr)("\n"); 912 1.74 ad lockdebug_show_one(l, ld, i++, pr); 913 1.74 ad } 914 1.74 ad } else { 915 1.74 ad (*pr)("\n*** Locks held: none\n"); 916 1.74 ad } 917 1.74 ad 918 1.74 ad if (l->l_ld_wanted != NULL) { 919 1.74 ad (*pr)("\n*** Locks wanted: \n\n"); 920 1.74 ad lockdebug_show_one(l, l->l_ld_wanted, 0, pr); 921 1.74 ad } else { 922 1.74 ad (*pr)("\n*** Locks wanted: none\n"); 923 1.62 ozaki } 924 1.74 ad if (show_trace) { 925 1.74 ad (*pr)("\n*** Traceback: \n\n"); 926 1.69 christos lockdebug_show_trace(l, pr); 927 1.74 ad (*pr)("\n"); 928 1.74 ad } 929 1.62 ozaki } 930 1.62 ozaki } 931 1.62 ozaki } 932 1.62 ozaki 933 1.62 ozaki static void 934 1.69 christos lockdebug_show_all_locks_cpu(void (*pr)(const char *, ...) __printflike(1, 2), 935 1.69 christos bool show_trace) 936 1.62 ozaki { 937 1.62 ozaki lockdebug_t *ld; 938 1.62 ozaki CPU_INFO_ITERATOR cii; 939 1.62 ozaki struct cpu_info *ci; 940 1.62 ozaki 941 1.62 ozaki for (CPU_INFO_FOREACH(cii, ci)) { 942 1.62 ozaki int i = 0; 943 1.62 ozaki if (TAILQ_EMPTY(&ci->ci_data.cpu_ld_locks)) 944 1.62 ozaki continue; 945 1.74 ad (*pr)("\n******* Locks held on %s:\n", cpu_name(ci)); 946 1.62 ozaki TAILQ_FOREACH(ld, &ci->ci_data.cpu_ld_locks, ld_chain) { 947 1.74 ad (*pr)("\n"); 948 1.74 ad #ifdef MULTIPROCESSOR 949 1.74 ad lockdebug_show_one(ci->ci_curlwp, ld, i++, pr); 950 1.69 christos if (show_trace) 951 1.69 christos lockdebug_show_trace(ci->ci_curlwp, pr); 952 1.68 mrg #else 953 1.74 ad lockdebug_show_one(curlwp, ld, i++, pr); 954 1.74 ad if (show_trace) 955 1.72 ryo lockdebug_show_trace(curlwp, pr); 956 1.68 mrg #endif 957 1.62 ozaki } 958 1.62 ozaki } 959 1.62 ozaki } 960 1.75 christos #endif /* _KERNEL */ 961 1.62 ozaki #endif /* LOCKDEBUG */ 962 1.62 ozaki 963 1.75 christos #ifdef _KERNEL 964 1.62 ozaki void 965 1.69 christos lockdebug_show_all_locks(void (*pr)(const char *, ...) __printflike(1, 2), 966 1.69 christos const char *modif) 967 1.62 ozaki { 968 1.62 ozaki #ifdef LOCKDEBUG 969 1.62 ozaki bool show_trace = false; 970 1.62 ozaki if (modif[0] == 't') 971 1.62 ozaki show_trace = true; 972 1.62 ozaki 973 1.62 ozaki (*pr)("[Locks tracked through LWPs]\n"); 974 1.62 ozaki lockdebug_show_all_locks_lwp(pr, show_trace); 975 1.62 ozaki (*pr)("\n"); 976 1.62 ozaki 977 1.62 ozaki (*pr)("[Locks tracked through CPUs]\n"); 978 1.62 ozaki lockdebug_show_all_locks_cpu(pr, show_trace); 979 1.62 ozaki (*pr)("\n"); 980 1.62 ozaki #else 981 1.62 ozaki (*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n"); 982 1.62 ozaki #endif /* LOCKDEBUG */ 983 1.62 ozaki } 984 1.62 ozaki 985 1.61 ozaki void 986 1.69 christos lockdebug_show_lockstats(void (*pr)(const char *, ...) __printflike(1, 2)) 987 1.61 ozaki { 988 1.61 ozaki #ifdef LOCKDEBUG 989 1.61 ozaki lockdebug_t *ld; 990 1.61 ozaki void *_ld; 991 1.61 ozaki uint32_t n_null = 0; 992 1.61 ozaki uint32_t n_spin_mutex = 0; 993 1.61 ozaki uint32_t n_adaptive_mutex = 0; 994 1.61 ozaki uint32_t n_rwlock = 0; 995 1.61 ozaki uint32_t n_others = 0; 996 1.61 ozaki 997 1.61 ozaki RB_TREE_FOREACH(_ld, &ld_rb_tree) { 998 1.61 ozaki ld = _ld; 999 1.61 ozaki if (ld->ld_lock == NULL) { 1000 1.61 ozaki n_null++; 1001 1.61 ozaki continue; 1002 1.61 ozaki } 1003 1.61 ozaki if (ld->ld_lockops->lo_name[0] == 'M') { 1004 1.61 ozaki if (ld->ld_lockops->lo_type == LOCKOPS_SLEEP) 1005 1.61 ozaki n_adaptive_mutex++; 1006 1.61 ozaki else 1007 1.61 ozaki n_spin_mutex++; 1008 1.61 ozaki continue; 1009 1.61 ozaki } 1010 1.61 ozaki if (ld->ld_lockops->lo_name[0] == 'R') { 1011 1.61 ozaki n_rwlock++; 1012 1.61 ozaki continue; 1013 1.61 ozaki } 1014 1.61 ozaki n_others++; 1015 1.61 ozaki } 1016 1.61 ozaki (*pr)( 1017 1.61 ozaki "spin mutex: %u\n" 1018 1.61 ozaki "adaptive mutex: %u\n" 1019 1.61 ozaki "rwlock: %u\n" 1020 1.61 ozaki "null locks: %u\n" 1021 1.61 ozaki "others: %u\n", 1022 1.76 ad n_spin_mutex, n_adaptive_mutex, n_rwlock, 1023 1.61 ozaki n_null, n_others); 1024 1.61 ozaki #else 1025 1.61 ozaki (*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n"); 1026 1.61 ozaki #endif /* LOCKDEBUG */ 1027 1.61 ozaki } 1028 1.75 christos #endif /* _KERNEL */ 1029 1.2 ad #endif /* DDB */ 1030 1.2 ad 1031 1.75 christos #ifdef _KERNEL 1032 1.2 ad /* 1033 1.65 mrg * lockdebug_dismiss: 1034 1.65 mrg * 1035 1.65 mrg * The system is rebooting, and potentially from an unsafe 1036 1.65 mrg * place so avoid any future aborts. 1037 1.65 mrg */ 1038 1.65 mrg void 1039 1.65 mrg lockdebug_dismiss(void) 1040 1.65 mrg { 1041 1.65 mrg 1042 1.65 mrg atomic_inc_uint_nv(&ld_panic); 1043 1.65 mrg } 1044 1.65 mrg 1045 1.65 mrg /* 1046 1.2 ad * lockdebug_abort: 1047 1.2 ad * 1048 1.2 ad * An error has been trapped - dump lock info and call panic(). 1049 1.2 ad */ 1050 1.2 ad void 1051 1.58 christos lockdebug_abort(const char *func, size_t line, const volatile void *lock, 1052 1.55 christos lockops_t *ops, const char *msg) 1053 1.2 ad { 1054 1.2 ad #ifdef LOCKDEBUG 1055 1.2 ad lockdebug_t *ld; 1056 1.34 ad int s; 1057 1.2 ad 1058 1.34 ad s = splhigh(); 1059 1.78 riastrad if ((ld = lockdebug_lookup(func, line, lock, 1060 1.38 rafal (uintptr_t) __builtin_return_address(0))) != NULL) { 1061 1.55 christos lockdebug_abort1(func, line, ld, s, msg, true); 1062 1.34 ad return; 1063 1.2 ad } 1064 1.34 ad splx(s); 1065 1.2 ad #endif /* LOCKDEBUG */ 1066 1.2 ad 1067 1.27 ad /* 1068 1.67 mrg * Don't make the situation worse if the system is already going 1069 1.67 mrg * down in flames. Once a panic is triggered, lockdebug state 1070 1.67 mrg * becomes stale and cannot be trusted. 1071 1.27 ad */ 1072 1.67 mrg if (atomic_inc_uint_nv(&ld_panic) > 1) 1073 1.67 mrg return; 1074 1.67 mrg 1075 1.81 riastrad char locksym[128]; 1076 1.81 riastrad 1077 1.81 riastrad #ifdef DDB 1078 1.83 nakayama db_symstr(locksym, sizeof(locksym), (db_expr_t)(intptr_t)lock, 1079 1.83 nakayama DB_STGY_ANY); 1080 1.81 riastrad #else 1081 1.81 riastrad snprintf(locksym, sizeof(locksym), "%#018lx", (unsigned long)lock); 1082 1.81 riastrad #endif 1083 1.81 riastrad 1084 1.79 riastrad printf("%s error: %s,%zu: %s\n\n" 1085 1.81 riastrad "lock address : %s\n" 1086 1.67 mrg "current cpu : %18d\n" 1087 1.67 mrg "current lwp : %#018lx\n", 1088 1.81 riastrad ops->lo_name, func, line, msg, locksym, 1089 1.67 mrg (int)cpu_index(curcpu()), (long)curlwp); 1090 1.79 riastrad (*ops->lo_dump)(lock, printf); 1091 1.79 riastrad printf("\n"); 1092 1.2 ad 1093 1.55 christos panic("lock error: %s: %s,%zu: %s: lock %p cpu %d lwp %p", 1094 1.55 christos ops->lo_name, func, line, msg, lock, cpu_index(curcpu()), curlwp); 1095 1.2 ad } 1096 1.75 christos #endif /* _KERNEL */ 1097