      1 /*	$NetBSD: subr_lockdebug.c,v 1.46.2.1 2013/02/25 00:29:53 tls Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Andrew Doran.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 /*
     33  * Basic lock debugging code shared among lock primitives.
     34  */
     35 
     36 #include <sys/cdefs.h>
     37 __KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.46.2.1 2013/02/25 00:29:53 tls Exp $");
     38 
     39 #include "opt_ddb.h"
     40 
     41 #include <sys/param.h>
     42 #include <sys/proc.h>
     43 #include <sys/systm.h>
     44 #include <sys/kernel.h>
     45 #include <sys/kmem.h>
     46 #include <sys/lockdebug.h>
     47 #include <sys/sleepq.h>
     48 #include <sys/cpu.h>
     49 #include <sys/atomic.h>
     50 #include <sys/lock.h>
     51 #include <sys/rbtree.h>
     52 
     53 #include <machine/lock.h>
     54 
     55 unsigned int		ld_panic;
     56 
     57 #ifdef LOCKDEBUG
     58 
     59 #define	LD_BATCH_SHIFT	9
     60 #define	LD_BATCH	(1 << LD_BATCH_SHIFT)
     61 #define	LD_BATCH_MASK	(LD_BATCH - 1)
     62 #define	LD_MAX_LOCKS	1048576
     63 #define	LD_SLOP		16
     64 
     65 #define	LD_LOCKED	0x01
     66 #define	LD_SLEEPER	0x02
     67 
     68 #define	LD_WRITE_LOCK	0x80000000
     69 
     70 typedef struct lockdebug {
     71 	struct rb_node	ld_rb_node;
     72 	__cpu_simple_lock_t ld_spinlock;
     73 	_TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
     74 	_TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
     75 	volatile void	*ld_lock;
     76 	lockops_t	*ld_lockops;
     77 	struct lwp	*ld_lwp;
     78 	uintptr_t	ld_locked;
     79 	uintptr_t	ld_unlocked;
     80 	uintptr_t	ld_initaddr;
     81 	uint16_t	ld_shares;
     82 	uint16_t	ld_cpu;
     83 	uint8_t		ld_flags;
     84 	uint8_t		ld_shwant;	/* advisory */
     85 	uint8_t		ld_exwant;	/* advisory */
     86 	uint8_t		ld_unused;
     87 } volatile lockdebug_t;
     88 
     89 typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;
     90 
     91 __cpu_simple_lock_t	ld_mod_lk;
     92 lockdebuglist_t		ld_free = TAILQ_HEAD_INITIALIZER(ld_free);
     93 lockdebuglist_t		ld_all = TAILQ_HEAD_INITIALIZER(ld_all);
     94 int			ld_nfree;
     95 int			ld_freeptr;
     96 int			ld_recurse;
     97 bool			ld_nomore;
     98 lockdebug_t		ld_prime[LD_BATCH];
     99 
    100 static void	lockdebug_abort1(lockdebug_t *, int, const char *,
    101 				 const char *, bool);
    102 static int	lockdebug_more(int);
    103 static void	lockdebug_init(void);
    104 
    105 static signed int
    106 ld_rbto_compare_nodes(void *ctx, const void *n1, const void *n2)
    107 {
    108 	const lockdebug_t *ld1 = n1;
    109 	const lockdebug_t *ld2 = n2;
    110 	const uintptr_t a = (uintptr_t)ld1->ld_lock;
    111 	const uintptr_t b = (uintptr_t)ld2->ld_lock;
    112 
    113 	if (a < b)
    114 		return -1;
    115 	if (a > b)
    116 		return 1;
    117 	return 0;
    118 }
    119 
    120 static signed int
    121 ld_rbto_compare_key(void *ctx, const void *n, const void *key)
    122 {
    123 	const lockdebug_t *ld = n;
    124 	const uintptr_t a = (uintptr_t)ld->ld_lock;
    125 	const uintptr_t b = (uintptr_t)key;
    126 
    127 	if (a < b)
    128 		return -1;
    129 	if (a > b)
    130 		return 1;
    131 	return 0;
    132 }
    133 
    134 static rb_tree_t ld_rb_tree;
    135 
    136 static const rb_tree_ops_t ld_rb_tree_ops = {
    137 	.rbto_compare_nodes = ld_rbto_compare_nodes,
    138 	.rbto_compare_key = ld_rbto_compare_key,
    139 	.rbto_node_offset = offsetof(lockdebug_t, ld_rb_node),
    140 	.rbto_context = NULL
    141 };
    142 
    143 static inline lockdebug_t *
    144 lockdebug_lookup1(volatile void *lock)
    145 {
    146 	lockdebug_t *ld;
    147 	struct cpu_info *ci;
    148 
    149 	ci = curcpu();
    150 	__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
    151 	ld = (lockdebug_t *)rb_tree_find_node(&ld_rb_tree, __UNVOLATILE(lock));
    152 	__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
    153 	if (ld == NULL) {
    154 		return NULL;
    155 	}
    156 	__cpu_simple_lock(&ld->ld_spinlock);
    157 
    158 	return ld;
    159 }
    160 
    161 static void
    162 lockdebug_lock_cpus(void)
    163 {
    164 	CPU_INFO_ITERATOR cii;
    165 	struct cpu_info *ci;
    166 
    167 	for (CPU_INFO_FOREACH(cii, ci)) {
    168 		__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
    169 	}
    170 }
    171 
    172 static void
    173 lockdebug_unlock_cpus(void)
    174 {
    175 	CPU_INFO_ITERATOR cii;
    176 	struct cpu_info *ci;
    177 
    178 	for (CPU_INFO_FOREACH(cii, ci)) {
    179 		__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
    180 	}
    181 }
    182 
    183 /*
    184  * lockdebug_lookup:
    185  *
    186  *	Find a lockdebug structure by a pointer to a lock and return it locked.
    187  */
    188 static inline lockdebug_t *
    189 lockdebug_lookup(volatile void *lock, uintptr_t where)
    190 {
    191 	lockdebug_t *ld;
    192 
    193 	ld = lockdebug_lookup1(lock);
    194 	if (ld == NULL) {
    195 		panic("lockdebug_lookup: uninitialized lock "
    196 		    "(lock=%p, from=%08"PRIxPTR")", lock, where);
    197 	}
    198 	return ld;
    199 }
    200 
    201 /*
    202  * lockdebug_init:
    203  *
    204  *	Initialize the lockdebug system.  Allocate an initial pool of
    205  *	lockdebug structures before the VM system is up and running.
    206  */
    207 static void
    208 lockdebug_init(void)
    209 {
    210 	lockdebug_t *ld;
    211 	int i;
    212 
    213 	TAILQ_INIT(&curcpu()->ci_data.cpu_ld_locks);
    214 	TAILQ_INIT(&curlwp->l_ld_locks);
    215 	__cpu_simple_lock_init(&curcpu()->ci_data.cpu_ld_lock);
    216 	__cpu_simple_lock_init(&ld_mod_lk);
    217 
    218 	rb_tree_init(&ld_rb_tree, &ld_rb_tree_ops);
    219 
    220 	ld = ld_prime;
    221 	for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
    222 		__cpu_simple_lock_init(&ld->ld_spinlock);
    223 		TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
    224 		TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
    225 	}
    226 	ld_freeptr = 1;
    227 	ld_nfree = LD_BATCH - 1;
    228 }
    229 
    230 /*
    231  * lockdebug_alloc:
    232  *
    233  *	A lock is being initialized, so allocate an associated debug
    234  *	structure.
    235  */
    236 bool
    237 lockdebug_alloc(volatile void *lock, lockops_t *lo, uintptr_t initaddr)
    238 {
    239 	struct cpu_info *ci;
    240 	lockdebug_t *ld;
    241 	int s;
    242 
    243 	if (lo == NULL || panicstr != NULL || ld_panic)
    244 		return false;
    245 	if (ld_freeptr == 0)
    246 		lockdebug_init();
    247 
    248 	s = splhigh();
    249 	__cpu_simple_lock(&ld_mod_lk);
    250 	if ((ld = lockdebug_lookup1(lock)) != NULL) {
    251 		__cpu_simple_unlock(&ld_mod_lk);
    252 		lockdebug_abort1(ld, s, __func__, "already initialized", true);
    253 		return false;
    254 	}
    255 
    256 	/*
    257 	 * Pinch a new debug structure.  We may recurse because we call
    258 	 * kmem_alloc(), which may need to initialize new locks somewhere
    259 	 * down the path.  If not recursing, we try to maintain at least
    260 	 * LD_SLOP structures free, which should hopefully be enough to
    261 	 * satisfy kmem_alloc().  If we can't provide a structure, not to
    262 	 * worry: we'll just mark the lock as not having an ID.
    263 	 */
    264 	ci = curcpu();
    265 	ci->ci_lkdebug_recurse++;
    266 	if (TAILQ_EMPTY(&ld_free)) {
    267 		if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
    268 			ci->ci_lkdebug_recurse--;
    269 			__cpu_simple_unlock(&ld_mod_lk);
    270 			splx(s);
    271 			return false;
    272 		}
    273 		s = lockdebug_more(s);
    274 	} else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP) {
    275 		s = lockdebug_more(s);
    276 	}
    277 	if ((ld = TAILQ_FIRST(&ld_free)) == NULL) {
    278 		__cpu_simple_unlock(&ld_mod_lk);
    279 		splx(s);
    280 		return false;
    281 	}
    282 	TAILQ_REMOVE(&ld_free, ld, ld_chain);
    283 	ld_nfree--;
    284 	ci->ci_lkdebug_recurse--;
    285 
    286 	if (ld->ld_lock != NULL) {
    287 		panic("lockdebug_alloc: corrupt table");
    288 	}
    289 
    290 	/* Initialise the structure. */
    291 	ld->ld_lock = lock;
    292 	ld->ld_lockops = lo;
    293 	ld->ld_locked = 0;
    294 	ld->ld_unlocked = 0;
    295 	ld->ld_lwp = NULL;
    296 	ld->ld_initaddr = initaddr;
    297 	ld->ld_flags = (lo->lo_type == LOCKOPS_SLEEP ? LD_SLEEPER : 0);
    298 	lockdebug_lock_cpus();
    299 	(void)rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(ld));
    300 	lockdebug_unlock_cpus();
    301 	__cpu_simple_unlock(&ld_mod_lk);
    302 
    303 	splx(s);
    304 	return true;
    305 }
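
/*
 * Usage sketch (illustrative only): how a hypothetical lock primitive
 * might register with lockdebug_alloc() when it is initialized and
 * release the record again with lockdebug_free().  The "mylock" names
 * are made up; the lockops_t layout is inferred from the fields this
 * file uses (lo_name, lo_type, lo_dump).
 *
 *	static void
 *	mylock_dump(volatile void *cookie)
 *	{
 *		// print primitive-specific state for lockdebug_dump()
 *	}
 *
 *	static lockops_t mylock_lockops = {
 *		.lo_name = "mylock",
 *		.lo_type = LOCKOPS_SLEEP,
 *		.lo_dump = mylock_dump,
 *	};
 *
 *	void
 *	mylock_init(struct mylock *ml)
 *	{
 *		// remember whether a debug record was allocated; when
 *		// lockdebug_alloc() returns false the other hooks are
 *		// simply skipped for this lock
 *		ml->ml_dodebug = lockdebug_alloc(ml, &mylock_lockops,
 *		    (uintptr_t)__builtin_return_address(0));
 *	}
 *
 *	void
 *	mylock_destroy(struct mylock *ml)
 *	{
 *		if (ml->ml_dodebug)
 *			lockdebug_free(ml);
 *	}
 */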
    306 
    307 /*
    308  * lockdebug_free:
    309  *
    310  *	A lock is being destroyed, so release debugging resources.
    311  */
    312 void
    313 lockdebug_free(volatile void *lock)
    314 {
    315 	lockdebug_t *ld;
    316 	int s;
    317 
    318 	if (panicstr != NULL || ld_panic)
    319 		return;
    320 
    321 	s = splhigh();
    322 	__cpu_simple_lock(&ld_mod_lk);
    323 	ld = lockdebug_lookup(lock, (uintptr_t) __builtin_return_address(0));
     324 	if (ld == NULL) {
     325 		__cpu_simple_unlock(&ld_mod_lk);
     326 		panic("lockdebug_free: destroying uninitialized object %p",
     327 		    lock);
     328 		return;
     329 	}
    330 	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
    331 		__cpu_simple_unlock(&ld_mod_lk);
    332 		lockdebug_abort1(ld, s, __func__, "is locked or in use", true);
    333 		return;
    334 	}
    335 	lockdebug_lock_cpus();
    336 	rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(ld));
    337 	lockdebug_unlock_cpus();
    338 	ld->ld_lock = NULL;
    339 	TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
    340 	ld_nfree++;
    341 	__cpu_simple_unlock(&ld->ld_spinlock);
    342 	__cpu_simple_unlock(&ld_mod_lk);
    343 	splx(s);
    344 }
    345 
    346 /*
    347  * lockdebug_more:
    348  *
    349  *	Allocate a batch of debug structures and add to the free list.
    350  *	Must be called with ld_mod_lk held.
    351  */
    352 static int
    353 lockdebug_more(int s)
    354 {
    355 	lockdebug_t *ld;
    356 	void *block;
    357 	int i, base, m;
    358 
    359 	/*
    360 	 * Can't call kmem_alloc() if in interrupt context.  XXX We could
    361 	 * deadlock, because we don't know which locks the caller holds.
    362 	 */
    363 	if (cpu_intr_p() || (curlwp->l_pflag & LP_INTR) != 0) {
    364 		return s;
    365 	}
    366 
    367 	while (ld_nfree < LD_SLOP) {
    368 		__cpu_simple_unlock(&ld_mod_lk);
    369 		splx(s);
    370 		block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
    371 		s = splhigh();
    372 		__cpu_simple_lock(&ld_mod_lk);
    373 
    374 		if (block == NULL)
    375 			return s;
    376 
    377 		if (ld_nfree > LD_SLOP) {
    378 			/* Somebody beat us to it. */
    379 			__cpu_simple_unlock(&ld_mod_lk);
    380 			splx(s);
    381 			kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
    382 			s = splhigh();
    383 			__cpu_simple_lock(&ld_mod_lk);
    384 			continue;
    385 		}
    386 
    387 		base = ld_freeptr;
    388 		ld_nfree += LD_BATCH;
    389 		ld = block;
    390 		base <<= LD_BATCH_SHIFT;
    391 		m = min(LD_MAX_LOCKS, base + LD_BATCH);
    392 
    393 		if (m == LD_MAX_LOCKS)
    394 			ld_nomore = true;
    395 
    396 		for (i = base; i < m; i++, ld++) {
    397 			__cpu_simple_lock_init(&ld->ld_spinlock);
    398 			TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
    399 			TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
    400 		}
    401 
    402 		membar_producer();
    403 	}
    404 
    405 	return s;
    406 }
    407 
    408 /*
    409  * lockdebug_wantlock:
    410  *
    411  *	Process the preamble to a lock acquire.
    412  */
    413 void
    414 lockdebug_wantlock(volatile void *lock, uintptr_t where, bool shared,
    415 		   bool trylock)
    416 {
    417 	struct lwp *l = curlwp;
    418 	lockdebug_t *ld;
    419 	bool recurse;
    420 	int s;
    421 
    422 	(void)shared;
    423 	recurse = false;
    424 
    425 	if (panicstr != NULL || ld_panic)
    426 		return;
    427 
    428 	s = splhigh();
    429 	if ((ld = lockdebug_lookup(lock, where)) == NULL) {
    430 		splx(s);
    431 		return;
    432 	}
    433 	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
    434 		if ((ld->ld_flags & LD_SLEEPER) != 0) {
    435 			if (ld->ld_lwp == l && !(shared && trylock))
    436 				recurse = true;
    437 		} else if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
    438 			recurse = true;
    439 	}
    440 	if (cpu_intr_p()) {
    441 		if ((ld->ld_flags & LD_SLEEPER) != 0) {
    442 			lockdebug_abort1(ld, s, __func__,
    443 			    "acquiring sleep lock from interrupt context",
    444 			    true);
    445 			return;
    446 		}
    447 	}
    448 	if (shared)
    449 		ld->ld_shwant++;
    450 	else
    451 		ld->ld_exwant++;
    452 	if (recurse) {
    453 		lockdebug_abort1(ld, s, __func__, "locking against myself",
    454 		    true);
    455 		return;
    456 	}
    457 	__cpu_simple_unlock(&ld->ld_spinlock);
    458 	splx(s);
    459 }
    460 
    461 /*
    462  * lockdebug_locked:
    463  *
    464  *	Process a lock acquire operation.
    465  */
    466 void
    467 lockdebug_locked(volatile void *lock, void *cvlock, uintptr_t where,
    468 		 int shared)
    469 {
    470 	struct lwp *l = curlwp;
    471 	lockdebug_t *ld;
    472 	int s;
    473 
    474 	if (panicstr != NULL || ld_panic)
    475 		return;
    476 
    477 	s = splhigh();
    478 	if ((ld = lockdebug_lookup(lock, where)) == NULL) {
    479 		splx(s);
    480 		return;
    481 	}
    482 	if (cvlock) {
    483 		KASSERT(ld->ld_lockops->lo_type == LOCKOPS_CV);
    484 		if (lock == (void *)&lbolt) {
    485 			/* nothing */
    486 		} else if (ld->ld_shares++ == 0) {
    487 			ld->ld_locked = (uintptr_t)cvlock;
    488 		} else if (cvlock != (void *)ld->ld_locked) {
    489 			lockdebug_abort1(ld, s, __func__, "multiple locks used"
    490 			    " with condition variable", true);
    491 			return;
    492 		}
    493 	} else if (shared) {
    494 		l->l_shlocks++;
    495 		ld->ld_locked = where;
    496 		ld->ld_shares++;
    497 		ld->ld_shwant--;
    498 	} else {
    499 		if ((ld->ld_flags & LD_LOCKED) != 0) {
    500 			lockdebug_abort1(ld, s, __func__, "already locked",
    501 			    true);
    502 			return;
    503 		}
    504 		ld->ld_flags |= LD_LOCKED;
    505 		ld->ld_locked = where;
    506 		ld->ld_exwant--;
    507 		if ((ld->ld_flags & LD_SLEEPER) != 0) {
    508 			TAILQ_INSERT_TAIL(&l->l_ld_locks, ld, ld_chain);
    509 		} else {
    510 			TAILQ_INSERT_TAIL(&curcpu()->ci_data.cpu_ld_locks,
    511 			    ld, ld_chain);
    512 		}
    513 	}
    514 	ld->ld_cpu = (uint16_t)cpu_index(curcpu());
    515 	ld->ld_lwp = l;
    516 	__cpu_simple_unlock(&ld->ld_spinlock);
    517 	splx(s);
    518 }
    519 
    520 /*
    521  * lockdebug_unlocked:
    522  *
    523  *	Process a lock release operation.
    524  */
    525 void
    526 lockdebug_unlocked(volatile void *lock, uintptr_t where, int shared)
    527 {
    528 	struct lwp *l = curlwp;
    529 	lockdebug_t *ld;
    530 	int s;
    531 
    532 	if (panicstr != NULL || ld_panic)
    533 		return;
    534 
    535 	s = splhigh();
    536 	if ((ld = lockdebug_lookup(lock, where)) == NULL) {
    537 		splx(s);
    538 		return;
    539 	}
    540 	if (ld->ld_lockops->lo_type == LOCKOPS_CV) {
    541 		if (lock == (void *)&lbolt) {
    542 			/* nothing */
    543 		} else {
    544 			ld->ld_shares--;
    545 		}
    546 	} else if (shared) {
    547 		if (l->l_shlocks == 0) {
    548 			lockdebug_abort1(ld, s, __func__,
    549 			    "no shared locks held by LWP", true);
    550 			return;
    551 		}
    552 		if (ld->ld_shares == 0) {
    553 			lockdebug_abort1(ld, s, __func__,
    554 			    "no shared holds on this lock", true);
    555 			return;
    556 		}
    557 		l->l_shlocks--;
    558 		ld->ld_shares--;
    559 		if (ld->ld_lwp == l) {
    560 			ld->ld_unlocked = where;
    561 			ld->ld_lwp = NULL;
    562 		}
    563 		if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
    564 			ld->ld_cpu = (uint16_t)-1;
    565 	} else {
    566 		if ((ld->ld_flags & LD_LOCKED) == 0) {
    567 			lockdebug_abort1(ld, s, __func__, "not locked", true);
    568 			return;
    569 		}
    570 
    571 		if ((ld->ld_flags & LD_SLEEPER) != 0) {
    572 			if (ld->ld_lwp != curlwp) {
    573 				lockdebug_abort1(ld, s, __func__,
    574 				    "not held by current LWP", true);
    575 				return;
    576 			}
    577 			TAILQ_REMOVE(&l->l_ld_locks, ld, ld_chain);
    578 		} else {
    579 			if (ld->ld_cpu != (uint16_t)cpu_index(curcpu())) {
    580 				lockdebug_abort1(ld, s, __func__,
    581 				    "not held by current CPU", true);
    582 				return;
    583 			}
    584 			TAILQ_REMOVE(&curcpu()->ci_data.cpu_ld_locks, ld,
    585 			    ld_chain);
    586 		}
    587 		ld->ld_flags &= ~LD_LOCKED;
    588 		ld->ld_unlocked = where;
    589 		ld->ld_lwp = NULL;
    590 	}
    591 	__cpu_simple_unlock(&ld->ld_spinlock);
    592 	splx(s);
    593 }
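
/*
 * Usage sketch (illustrative only): the wantlock/locked/unlocked hooks
 * above are meant to bracket a primitive's acquire and release paths.
 * A hypothetical exclusive-lock wrapper (real locking machinery and
 * error handling omitted) might call them like this:
 *
 *	void
 *	mylock_enter(struct mylock *ml)
 *	{
 *		uintptr_t where = (uintptr_t)__builtin_return_address(0);
 *
 *		if (ml->ml_dodebug)
 *			lockdebug_wantlock(ml, where, false, false);
 *		// ... acquire the lock for real ...
 *		if (ml->ml_dodebug)
 *			lockdebug_locked(ml, NULL, where, 0);
 *	}
 *
 *	void
 *	mylock_exit(struct mylock *ml)
 *	{
 *		if (ml->ml_dodebug)
 *			lockdebug_unlocked(ml,
 *			    (uintptr_t)__builtin_return_address(0), 0);
 *		// ... release the lock for real ...
 *	}
 *
 * For a shared (reader) acquisition the "shared" arguments would be
 * true/1 instead, and lockdebug_wantlock()'s "trylock" argument would
 * be true for a non-blocking attempt.
 */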
    594 
    595 /*
    596  * lockdebug_wakeup:
    597  *
    598  *	Process a wakeup on a condition variable.
    599  */
    600 void
    601 lockdebug_wakeup(volatile void *lock, uintptr_t where)
    602 {
    603 	lockdebug_t *ld;
    604 	int s;
    605 
    606 	if (panicstr != NULL || ld_panic || lock == (void *)&lbolt)
    607 		return;
    608 
    609 	s = splhigh();
    610 	/* Find the CV... */
    611 	if ((ld = lockdebug_lookup(lock, where)) == NULL) {
    612 		splx(s);
    613 		return;
    614 	}
    615 	/*
    616 	 * If it has any waiters, ensure that they are using the
    617 	 * same interlock.
    618 	 */
    619 	if (ld->ld_shares != 0 && !mutex_owned((kmutex_t *)ld->ld_locked)) {
    620 		lockdebug_abort1(ld, s, __func__, "interlocking mutex not "
    621 		    "held during wakeup", true);
    622 		return;
    623 	}
    624 	__cpu_simple_unlock(&ld->ld_spinlock);
    625 	splx(s);
    626 }
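
/*
 * Usage sketch (illustrative only): for LOCKOPS_CV locks the "cvlock"
 * argument to lockdebug_locked() records the interlocking mutex, and
 * lockdebug_wakeup() verifies that the same mutex is held when the
 * condition variable is signalled.  A hypothetical wait/broadcast pair
 * might look like:
 *
 *	void
 *	mycv_wait(struct mycv *cv, kmutex_t *mtx)
 *	{
 *		uintptr_t where = (uintptr_t)__builtin_return_address(0);
 *
 *		// record this waiter and the interlock it is using
 *		lockdebug_locked(cv, mtx, where, 0);
 *		// ... sleep, dropping and reacquiring mtx ...
 *		lockdebug_unlocked(cv, where, 0);
 *	}
 *
 *	void
 *	mycv_broadcast(struct mycv *cv)
 *	{
 *		// aborts if there are waiters and their interlock
 *		// is not held by the caller
 *		lockdebug_wakeup(cv,
 *		    (uintptr_t)__builtin_return_address(0));
 *	}
 */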
    627 
    628 /*
    629  * lockdebug_barrier:
    630  *
     631  *	Panic if we hold any spin lock other than the one specified
     632  *	and, unless slplocks is set, if we hold any sleep locks.
    633  */
    634 void
    635 lockdebug_barrier(volatile void *spinlock, int slplocks)
    636 {
    637 	struct lwp *l = curlwp;
    638 	lockdebug_t *ld;
    639 	int s;
    640 
    641 	if (panicstr != NULL || ld_panic)
    642 		return;
    643 
    644 	s = splhigh();
    645 	if ((l->l_pflag & LP_INTR) == 0) {
    646 		TAILQ_FOREACH(ld, &curcpu()->ci_data.cpu_ld_locks, ld_chain) {
    647 			if (ld->ld_lock == spinlock) {
    648 				continue;
    649 			}
    650 			__cpu_simple_lock(&ld->ld_spinlock);
    651 			lockdebug_abort1(ld, s, __func__,
    652 			    "spin lock held", true);
    653 			return;
    654 		}
    655 	}
    656 	if (slplocks) {
    657 		splx(s);
    658 		return;
    659 	}
    660 	if ((ld = TAILQ_FIRST(&l->l_ld_locks)) != NULL) {
    661 		__cpu_simple_lock(&ld->ld_spinlock);
    662 		lockdebug_abort1(ld, s, __func__, "sleep lock held", true);
    663 		return;
    664 	}
    665 	splx(s);
    666 	if (l->l_shlocks != 0) {
    667 		panic("lockdebug_barrier: holding %d shared locks",
    668 		    l->l_shlocks);
    669 	}
    670 }
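
/*
 * Usage sketch (illustrative only), following the logic above: a code
 * path that must hold no locks at all can assert that with
 *
 *	lockdebug_barrier(NULL, 0);
 *
 * while a path that may legitimately hold sleep locks, but no spin
 * locks, passes a non-zero "slplocks":
 *
 *	lockdebug_barrier(NULL, 1);
 *
 * Passing a specific spin lock as the first argument exempts exactly
 * that lock from the "spin lock held" check.
 */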
    671 
    672 /*
    673  * lockdebug_mem_check:
    674  *
    675  *	Check for in-use locks within a memory region that is
    676  *	being freed.
    677  */
    678 void
    679 lockdebug_mem_check(const char *func, void *base, size_t sz)
    680 {
    681 	lockdebug_t *ld;
    682 	struct cpu_info *ci;
    683 	int s;
    684 
    685 	if (panicstr != NULL || ld_panic)
    686 		return;
    687 
    688 	s = splhigh();
    689 	ci = curcpu();
    690 	__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
    691 	ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base);
    692 	if (ld != NULL) {
    693 		const uintptr_t lock = (uintptr_t)ld->ld_lock;
    694 
    695 		if ((uintptr_t)base > lock)
    696 			panic("%s: corrupt tree ld=%p, base=%p, sz=%zu",
    697 			    __func__, ld, base, sz);
    698 		if (lock >= (uintptr_t)base + sz)
    699 			ld = NULL;
    700 	}
    701 	__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
    702 	if (ld != NULL) {
    703 		__cpu_simple_lock(&ld->ld_spinlock);
    704 		lockdebug_abort1(ld, s, func,
    705 		    "allocation contains active lock", !cold);
    706 		return;
    707 	}
    708 	splx(s);
    709 }
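
/*
 * Usage sketch (illustrative only): lockdebug_mem_check() is intended
 * for memory allocators' free paths, so that returning a region which
 * still contains an initialized lock is caught.  A hypothetical free
 * routine might use it like this:
 *
 *	void
 *	myalloc_free(void *ptr, size_t size)
 *	{
 *		// aborts if [ptr, ptr + size) still contains an
 *		// active lockdebug record
 *		lockdebug_mem_check(__func__, ptr, size);
 *		// ... hand the memory back to the allocator ...
 *	}
 */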
    710 
    711 /*
    712  * lockdebug_dump:
    713  *
    714  *	Dump information about a lock on panic, or for DDB.
    715  */
    716 static void
    717 lockdebug_dump(lockdebug_t *ld, void (*pr)(const char *, ...)
    718     __printflike(1, 2))
    719 {
    720 	int sleeper = (ld->ld_flags & LD_SLEEPER);
    721 
    722 	(*pr)(
    723 	    "lock address : %#018lx type     : %18s\n"
    724 	    "initialized  : %#018lx",
    725 	    (long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"),
    726 	    (long)ld->ld_initaddr);
    727 
    728 	if (ld->ld_lockops->lo_type == LOCKOPS_CV) {
    729 		(*pr)(" interlock: %#018lx\n", (long)ld->ld_locked);
    730 	} else {
    731 		(*pr)("\n"
    732 		    "shared holds : %18u exclusive: %18u\n"
    733 		    "shares wanted: %18u exclusive: %18u\n"
    734 		    "current cpu  : %18u last held: %18u\n"
    735 		    "current lwp  : %#018lx last held: %#018lx\n"
    736 		    "last locked%c : %#018lx unlocked%c: %#018lx\n",
    737 		    (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
    738 		    (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
    739 		    (unsigned)cpu_index(curcpu()), (unsigned)ld->ld_cpu,
    740 		    (long)curlwp, (long)ld->ld_lwp,
    741 		    ((ld->ld_flags & LD_LOCKED) ? '*' : ' '),
    742 		    (long)ld->ld_locked,
    743 		    ((ld->ld_flags & LD_LOCKED) ? ' ' : '*'),
    744 		    (long)ld->ld_unlocked);
    745 	}
    746 
    747 	if (ld->ld_lockops->lo_dump != NULL)
    748 		(*ld->ld_lockops->lo_dump)(ld->ld_lock);
    749 
    750 	if (sleeper) {
    751 		(*pr)("\n");
    752 		turnstile_print(ld->ld_lock, pr);
    753 	}
    754 }
    755 
    756 /*
    757  * lockdebug_abort1:
    758  *
    759  *	An error has been trapped - dump lock info and panic.
    760  */
    761 static void
    762 lockdebug_abort1(lockdebug_t *ld, int s, const char *func,
    763 		 const char *msg, bool dopanic)
    764 {
    765 
    766 	/*
    767 	 * Don't make the situation worse if the system is already going
    768 	 * down in flames.  Once a panic is triggered, lockdebug state
    769 	 * becomes stale and cannot be trusted.
    770 	 */
    771 	if (atomic_inc_uint_nv(&ld_panic) != 1) {
    772 		__cpu_simple_unlock(&ld->ld_spinlock);
    773 		splx(s);
    774 		return;
    775 	}
    776 
    777 	printf_nolog("%s error: %s: %s\n\n", ld->ld_lockops->lo_name,
    778 	    func, msg);
    779 	lockdebug_dump(ld, printf_nolog);
    780 	__cpu_simple_unlock(&ld->ld_spinlock);
    781 	splx(s);
    782 	printf_nolog("\n");
    783 	if (dopanic)
    784 		panic("LOCKDEBUG");
    785 }
    786 
    787 #endif	/* LOCKDEBUG */
    788 
    789 /*
    790  * lockdebug_lock_print:
    791  *
    792  *	Handle the DDB 'show lock' command.
    793  */
    794 #ifdef DDB
    795 void
    796 lockdebug_lock_print(void *addr, void (*pr)(const char *, ...))
    797 {
    798 #ifdef LOCKDEBUG
    799 	lockdebug_t *ld;
    800 
    801 	TAILQ_FOREACH(ld, &ld_all, ld_achain) {
    802 		if (ld->ld_lock == NULL)
    803 			continue;
    804 		if (addr == NULL || ld->ld_lock == addr) {
    805 			lockdebug_dump(ld, pr);
    806 			if (addr != NULL)
    807 				return;
    808 		}
    809 	}
    810 	if (addr != NULL) {
    811 		(*pr)("Sorry, no record of a lock with address %p found.\n",
    812 		    addr);
    813 	}
    814 #else
    815 	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
    816 #endif	/* LOCKDEBUG */
    817 }
    818 #endif	/* DDB */
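
/*
 * Usage sketch (illustrative only): lockdebug_lock_print() backs DDB's
 * "show lock" command, so from the in-kernel debugger the record for a
 * particular lock can be inspected with something like (the address is
 * made up):
 *
 *	db{0}> show lock 0xffffffff80e3c000
 *
 * As the code above shows, a NULL address dumps every known lock
 * instead.
 */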
    819 
    820 /*
    821  * lockdebug_abort:
    822  *
    823  *	An error has been trapped - dump lock info and call panic().
    824  */
    825 void
    826 lockdebug_abort(volatile void *lock, lockops_t *ops, const char *func,
    827 		const char *msg)
    828 {
    829 #ifdef LOCKDEBUG
    830 	lockdebug_t *ld;
    831 	int s;
    832 
    833 	s = splhigh();
    834 	if ((ld = lockdebug_lookup(lock,
    835 			(uintptr_t) __builtin_return_address(0))) != NULL) {
    836 		lockdebug_abort1(ld, s, func, msg, true);
    837 		return;
    838 	}
    839 	splx(s);
    840 #endif	/* LOCKDEBUG */
    841 
    842 	/*
     843 	 * Complain only on the first occurrence.  Otherwise proceed to
     844 	 * panic, where we will `rendezvous' with other CPUs if the machine
    845 	 * is going down in flames.
    846 	 */
    847 	if (atomic_inc_uint_nv(&ld_panic) == 1) {
    848 		printf_nolog("%s error: %s: %s\n\n"
    849 		    "lock address : %#018lx\n"
    850 		    "current cpu  : %18d\n"
    851 		    "current lwp  : %#018lx\n",
    852 		    ops->lo_name, func, msg, (long)lock,
    853 		    (int)cpu_index(curcpu()), (long)curlwp);
    854 		(*ops->lo_dump)(lock);
    855 		printf_nolog("\n");
    856 	}
    857 
    858 	panic("lock error");
    859 }
    860