      1 /*	$NetBSD: subr_lockdebug.c,v 1.5.2.5 2007/07/29 11:34:47 ad Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2006, 2007 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Andrew Doran.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  * 3. All advertising materials mentioning features or use of this software
     19  *    must display the following acknowledgement:
     20  *	This product includes software developed by the NetBSD
     21  *	Foundation, Inc. and its contributors.
     22  * 4. Neither the name of The NetBSD Foundation nor the names of its
     23  *    contributors may be used to endorse or promote products derived
     24  *    from this software without specific prior written permission.
     25  *
     26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     36  * POSSIBILITY OF SUCH DAMAGE.
     37  */
     38 
     39 /*
      40  * Basic lock debugging code shared among lock primitives.
     41  */
     42 
     43 #include "opt_multiprocessor.h"
     44 #include "opt_ddb.h"
     45 
     46 #include <sys/cdefs.h>
     47 __KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.5.2.5 2007/07/29 11:34:47 ad Exp $");
     48 
     49 #include <sys/param.h>
     50 #include <sys/proc.h>
     51 #include <sys/systm.h>
     52 #include <sys/kernel.h>
     53 #include <sys/kmem.h>
     54 #include <sys/lock.h>
     55 #include <sys/lockdebug.h>
     56 #include <sys/sleepq.h>
     57 #include <sys/cpu.h>
     58 
     59 #ifdef LOCKDEBUG
     60 
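        /*
         * Debug structures are allocated in batches of LD_BATCH (1 << 9 ==
         * 512).  LD_MAX_LOCKS caps the total number of IDs, LD_SLOP is the
         * low-water mark of free structures that lockdebug_alloc() tries to
         * maintain, and LD_NOID is handed back for locks that are not
         * tracked.
         */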
     61 #define	LD_BATCH_SHIFT	9
     62 #define	LD_BATCH	(1 << LD_BATCH_SHIFT)
     63 #define	LD_BATCH_MASK	(LD_BATCH - 1)
     64 #define	LD_MAX_LOCKS	1048576
     65 #define	LD_SLOP		16
     66 
     67 #define	LD_LOCKED	0x01
     68 #define	LD_SLEEPER	0x02
     69 #define	LD_MLOCKS	8
     70 #define	LD_MLISTS	8192
     71 
     72 #define	LD_NOID		(LD_MAX_LOCKS + 1)
     73 
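        /*
         * Internal lock, padded and aligned to 64 bytes (a cache line on
         * many machines), presumably to avoid false sharing between the
         * heavily used lockdebug locks.
         */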
     74 typedef union lockdebuglk {
     75 	struct {
     76 		__cpu_simple_lock_t	lku_lock;
     77 		int			lku_oldspl;
     78 	} ul;
     79 	uint8_t	lk_pad[64];
     80 } volatile __aligned(64) lockdebuglk_t;
     81 
     82 #define	lk_lock		ul.lku_lock
     83 #define	lk_oldspl	ul.lku_oldspl
     84 
     85 typedef struct lockdebug {
     86 	_TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
     87 	_TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
     88 	_TAILQ_ENTRY(struct lockdebug, volatile) ld_mchain;
     89 	volatile void	*ld_lock;
     90 	lockops_t	*ld_lockops;
     91 	struct lwp	*ld_lwp;
     92 	uintptr_t	ld_locked;
     93 	uintptr_t	ld_unlocked;
     94 	u_int		ld_id;
     95 	uint16_t	ld_shares;
     96 	uint16_t	ld_cpu;
     97 	uint8_t		ld_flags;
     98 	uint8_t		ld_shwant;	/* advisory */
     99 	uint8_t		ld_exwant;	/* advisory */
    100 	uint8_t		ld_unused;
    101 } volatile lockdebug_t;
    102 
    103 typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;
    104 
    105 lockdebuglk_t		ld_sleeper_lk;
    106 lockdebuglk_t		ld_spinner_lk;
    107 lockdebuglk_t		ld_free_lk;
    108 lockdebuglk_t		ld_mem_lk[LD_MLOCKS];
    109 
    110 lockdebuglist_t		ld_mem_list[LD_MLISTS];
    111 lockdebuglist_t		ld_sleepers;
    112 lockdebuglist_t		ld_spinners;
    113 lockdebuglist_t		ld_free;
    114 lockdebuglist_t		ld_all;
    115 int			ld_nfree;
    116 int			ld_freeptr;
    117 int			ld_recurse;
    118 bool			ld_nomore;
    119 lockdebug_t		*ld_table[LD_MAX_LOCKS / LD_BATCH];
    120 
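        /*
         * First batch of structures, statically allocated so that lockdebug
         * can be used before the VM system (kmem_alloc()) is up.
         */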
    121 lockdebug_t		ld_prime[LD_BATCH];
    122 
    123 static void	lockdebug_abort1(lockdebug_t *, lockdebuglk_t *lk,
    124 				 const char *, const char *, bool);
    125 static void	lockdebug_more(void);
    126 static void	lockdebug_init(void);
    127 
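        /*
         * lockdebug_lock / lockdebug_unlock:
         *
         *	Acquire or release one of the internal lockdebug locks.
         *	Interrupts are blocked (splhigh) while the lock is held; the
         *	previous SPL is stashed in the lock and restored on release.
         */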
    128 static inline void
    129 lockdebug_lock(lockdebuglk_t *lk)
    130 {
    131 	int s;
    132 
    133 	s = splhigh();
    134 	__cpu_simple_lock(&lk->lk_lock);
    135 	lk->lk_oldspl = s;
    136 }
    137 
    138 static inline void
    139 lockdebug_unlock(lockdebuglk_t *lk)
    140 {
    141 	int s;
    142 
    143 	s = lk->lk_oldspl;
    144 	__cpu_simple_unlock(&(lk->lk_lock));
    145 	splx(s);
    146 }
    147 
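        /*
         * lockdebug_mhash:
         *
         *	Hash a lock address (by page number) to a memory-hash list
         *	head and its covering lock, and acquire that lock.  LD_MLOCKS
         *	and LD_MLISTS are powers of two, so masking the hash always
         *	selects a valid slot.
         */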
    148 static inline void
    149 lockdebug_mhash(volatile void *addr, lockdebuglk_t **lk, lockdebuglist_t **head)
    150 {
    151 	u_int hash;
    152 
    153 	hash = (uintptr_t)addr >> PGSHIFT;
    154 	*lk = &ld_mem_lk[hash & (LD_MLOCKS - 1)];
    155 	*head = &ld_mem_list[hash & (LD_MLISTS - 1)];
    156 	lockdebug_lock(*lk);
    157 }
    158 
    159 /*
    160  * lockdebug_lookup:
    161  *
    162  *	Find a lockdebug structure by ID and return it locked.
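         *	IDs index a two-level table: the upper bits of the ID select
         *	a batch in ld_table[], the low LD_BATCH_SHIFT bits a slot
         *	within that batch.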
    163  */
    164 static inline lockdebug_t *
    165 lockdebug_lookup(u_int id, lockdebuglk_t **lk)
    166 {
    167 	lockdebug_t *base, *ld;
    168 
    169 	if (id == LD_NOID)
    170 		return NULL;
    171 
    172 	if (id == 0 || id >= LD_MAX_LOCKS)
    173 		panic("lockdebug_lookup: uninitialized lock (1, id=%d)", id);
    174 
    175 	base = ld_table[id >> LD_BATCH_SHIFT];
    176 	ld = base + (id & LD_BATCH_MASK);
    177 
    178 	if (base == NULL || ld->ld_lock == NULL || ld->ld_id != id)
    179 		panic("lockdebug_lookup: uninitialized lock (2, id=%d)", id);
    180 
    181 	if ((ld->ld_flags & LD_SLEEPER) != 0)
    182 		*lk = &ld_sleeper_lk;
    183 	else
    184 		*lk = &ld_spinner_lk;
    185 
    186 	lockdebug_lock(*lk);
    187 	return ld;
    188 }
    189 
    190 /*
    191  * lockdebug_init:
    192  *
    193  *	Initialize the lockdebug system.  Allocate an initial pool of
    194  *	lockdebug structures before the VM system is up and running.
    195  */
    196 static void
    197 lockdebug_init(void)
    198 {
    199 	lockdebug_t *ld;
    200 	int i;
    201 
    202 	__cpu_simple_lock_init(&ld_sleeper_lk.lk_lock);
    203 	__cpu_simple_lock_init(&ld_spinner_lk.lk_lock);
    204 	__cpu_simple_lock_init(&ld_free_lk.lk_lock);
    205 
    206 	TAILQ_INIT(&ld_free);
    207 	TAILQ_INIT(&ld_all);
    208 	TAILQ_INIT(&ld_sleepers);
    209 	TAILQ_INIT(&ld_spinners);
    210 
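        	/*
        	 * Slot 0 of the prime batch is never handed out; an ID of
        	 * zero is treated as "uninitialized" by lockdebug_lookup().
        	 */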
    211 	ld = ld_prime;
    212 	ld_table[0] = ld;
    213 	for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
    214 		ld->ld_id = i;
    215 		TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
    216 		TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
    217 	}
    218 	ld_freeptr = 1;
    219 	ld_nfree = LD_BATCH - 1;
    220 
    221 	for (i = 0; i < LD_MLOCKS; i++)
    222 		__cpu_simple_lock_init(&ld_mem_lk[i].lk_lock);
    223 	for (i = 0; i < LD_MLISTS; i++)
    224 		TAILQ_INIT(&ld_mem_list[i]);
    225 }
    226 
    227 /*
    228  * lockdebug_alloc:
    229  *
    230  *	A lock is being initialized, so allocate an associated debug
    231  *	structure.
    232  */
    233 u_int
    234 lockdebug_alloc(volatile void *lock, lockops_t *lo)
    235 {
    236 	lockdebuglist_t *head;
    237 	struct cpu_info *ci;
    238 	lockdebuglk_t *lk;
    239 	lockdebug_t *ld;
    240 
    241 	if (lo == NULL || panicstr != NULL)
    242 		return LD_NOID;
    243 	if (ld_freeptr == 0)
    244 		lockdebug_init();
    245 
    246 	ci = curcpu();
    247 
    248 	/*
    249 	 * Pinch a new debug structure.  We may recurse because we call
    250 	 * kmem_alloc(), which may need to initialize new locks somewhere
    251 	 * down the path.  If not recursing, we try to maintain at least
    252 	 * LD_SLOP structures free, which should hopefully be enough to
    253 	 * satisfy kmem_alloc().  If we can't provide a structure, not to
    254 	 * worry: we'll just mark the lock as not having an ID.
    255 	 */
    256 	lockdebug_lock(&ld_free_lk);
    257 	ci->ci_lkdebug_recurse++;
    258 
    259 	if (TAILQ_EMPTY(&ld_free)) {
    260 		if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
    261 			ci->ci_lkdebug_recurse--;
    262 			lockdebug_unlock(&ld_free_lk);
    263 			return LD_NOID;
    264 		}
    265 		lockdebug_more();
    266 	} else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP)
    267 		lockdebug_more();
    268 
    269 	if ((ld = TAILQ_FIRST(&ld_free)) == NULL) {
    270 		lockdebug_unlock(&ld_free_lk);
    271 		return LD_NOID;
    272 	}
    273 
    274 	TAILQ_REMOVE(&ld_free, ld, ld_chain);
    275 	ld_nfree--;
    276 
    277 	ci->ci_lkdebug_recurse--;
    278 	lockdebug_unlock(&ld_free_lk);
    279 
    280 	if (ld->ld_lock != NULL)
    281 		panic("lockdebug_alloc: corrupt table");
    282 
    283 	if (lo->lo_sleeplock)
    284 		lockdebug_lock(&ld_sleeper_lk);
    285 	else
    286 		lockdebug_lock(&ld_spinner_lk);
    287 
    288 	/* Initialise the structure. */
    289 	ld->ld_lock = lock;
    290 	ld->ld_lockops = lo;
    291 	ld->ld_locked = 0;
    292 	ld->ld_unlocked = 0;
    293 	ld->ld_lwp = NULL;
    294 
    295 	if (lo->lo_sleeplock) {
    296 		ld->ld_flags = LD_SLEEPER;
    297 		lockdebug_unlock(&ld_sleeper_lk);
    298 	} else {
    299 		ld->ld_flags = 0;
    300 		lockdebug_unlock(&ld_spinner_lk);
    301 	}
    302 
    303 	/* Insert into address hash. */
    304 	lockdebug_mhash(lock, &lk, &head);
    305 	TAILQ_INSERT_HEAD(head, ld, ld_mchain);
    306 	lockdebug_unlock(lk);
    307 
    308 	return ld->ld_id;
    309 }
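        /*
         * Usage sketch (illustrative only; the names are hypothetical): a
         * lock primitive allocates its debug record when the lock is first
         * initialized, stores the returned ID, and releases the record when
         * the lock is destroyed:
         *
         *	foo->foo_debug_id = lockdebug_alloc(foo, &foo_lockops);
         *	...
         *	lockdebug_free(foo, foo->foo_debug_id);
         *
         * Here foo_lockops would be a lockops_t describing the primitive
         * (its lo_name, lo_sleeplock flag and lo_dump routine, as used in
         * this file).  An LD_NOID return simply means the lock will not be
         * tracked.
         */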
    310 
    311 /*
    312  * lockdebug_free:
    313  *
    314  *	A lock is being destroyed, so release debugging resources.
    315  */
    316 void
    317 lockdebug_free(volatile void *lock, u_int id)
    318 {
    319 	lockdebuglist_t *head;
    320 	lockdebug_t *ld;
    321 	lockdebuglk_t *lk;
    322 
    323 	if (panicstr != NULL)
    324 		return;
    325 
    326 	if ((ld = lockdebug_lookup(id, &lk)) == NULL)
    327 		return;
    328 
    329 	if (ld->ld_lock != lock) {
    330 		printf_nolog("lockdebug_free: destroying uninitialized lock %p"
    331 		    " (ld_id=%d ld_lock=%p)\n", lock, id, ld->ld_lock);
    332 		lockdebug_abort1(ld, lk, __func__, "lock record follows",
    333 		    true);
    334 	}
    335 	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0)
    336 		lockdebug_abort1(ld, lk, __func__, "is locked", true);
    337 
    338 	ld->ld_lock = NULL;
    339 
    340 	lockdebug_unlock(lk);
    341 
    342 	lockdebug_lock(&ld_free_lk);
    343 	TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
    344 	ld_nfree++;
    345 	lockdebug_unlock(&ld_free_lk);
    346 
    347 	/* Remove from address hash. */
    348 	lockdebug_mhash(lock, &lk, &head);
    349 	TAILQ_REMOVE(head, ld, ld_mchain);
    350 	lockdebug_unlock(lk);
    351 }
    352 
    353 /*
    354  * lockdebug_more:
    355  *
    356  *	Allocate a batch of debug structures and add to the free list.
    357  *	Must be called with ld_free_lk held.
    358  */
    359 static void
    360 lockdebug_more(void)
    361 {
    362 	lockdebug_t *ld;
    363 	void *block;
    364 	int i, base, m;
    365 
    366 	while (ld_nfree < LD_SLOP) {
    367 		lockdebug_unlock(&ld_free_lk);
    368 		block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
    369 		lockdebug_lock(&ld_free_lk);
    370 
    371 		if (block == NULL)
    372 			return;
    373 
    374 		if (ld_nfree > LD_SLOP) {
    375 			/* Somebody beat us to it. */
    376 			lockdebug_unlock(&ld_free_lk);
    377 			kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
    378 			lockdebug_lock(&ld_free_lk);
    379 			continue;
    380 		}
    381 
    382 		base = ld_freeptr;
    383 		ld_nfree += LD_BATCH;
    384 		ld = block;
    385 		base <<= LD_BATCH_SHIFT;
    386 		m = min(LD_MAX_LOCKS, base + LD_BATCH);
    387 
    388 		if (m == LD_MAX_LOCKS)
    389 			ld_nomore = true;
    390 
    391 		for (i = base; i < m; i++, ld++) {
    392 			ld->ld_id = i;
    393 			TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
    394 			TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
    395 		}
    396 
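        		/*
        		 * Make the newly initialised batch visible before it is
        		 * published: lockdebug_lookup() reads ld_table[] without
        		 * holding ld_free_lk.
        		 */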
    397 		mb_write();
    398 		ld_table[ld_freeptr++] = block;
    399 	}
    400 }
    401 
    402 /*
    403  * lockdebug_wantlock:
    404  *
    405  *	Process the preamble to a lock acquire.
    406  */
    407 void
    408 lockdebug_wantlock(u_int id, uintptr_t where, int shared)
    409 {
    410 	struct lwp *l = curlwp;
    411 	lockdebuglk_t *lk;
    412 	lockdebug_t *ld;
    413 	bool recurse;
    414 
    415 	(void)where;
    416 	recurse = false;
    417 
    418 	if (panicstr != NULL)
    419 		return;
    420 
    421 	if ((ld = lockdebug_lookup(id, &lk)) == NULL)
    422 		return;
    423 
    424 	if ((ld->ld_flags & LD_LOCKED) != 0) {
    425 		if ((ld->ld_flags & LD_SLEEPER) != 0) {
    426 			if (ld->ld_lwp == l)
    427 				recurse = true;
    428 		} else if (ld->ld_cpu == (uint16_t)cpu_number())
    429 			recurse = true;
    430 	}
    431 
    432 	if (cpu_intr_p()) {
    433 		if ((ld->ld_flags & LD_SLEEPER) != 0)
    434 			lockdebug_abort1(ld, lk, __func__,
    435 			    "acquiring sleep lock from interrupt context",
    436 			    true);
    437 	}
    438 
    439 	if (shared)
    440 		ld->ld_shwant++;
    441 	else
    442 		ld->ld_exwant++;
    443 
    444 	if (recurse)
    445 		lockdebug_abort1(ld, lk, __func__, "locking against myself",
    446 		    true);
    447 
    448 	lockdebug_unlock(lk);
    449 }
    450 
    451 /*
    452  * lockdebug_locked:
    453  *
    454  *	Process a lock acquire operation.
    455  */
    456 void
    457 lockdebug_locked(u_int id, uintptr_t where, int shared)
    458 {
    459 	struct lwp *l = curlwp;
    460 	lockdebuglk_t *lk;
    461 	lockdebug_t *ld;
    462 
    463 	if (panicstr != NULL)
    464 		return;
    465 
    466 	if ((ld = lockdebug_lookup(id, &lk)) == NULL)
    467 		return;
    468 
    469 	if (shared) {
    470 		l->l_shlocks++;
    471 		ld->ld_shares++;
    472 		ld->ld_shwant--;
    473 	} else {
    474 		if ((ld->ld_flags & LD_LOCKED) != 0)
    475 			lockdebug_abort1(ld, lk, __func__,
    476 			    "already locked", true);
    477 
    478 		ld->ld_flags |= LD_LOCKED;
    479 		ld->ld_locked = where;
    480 		ld->ld_cpu = (uint16_t)cpu_number();
    481 		ld->ld_lwp = l;
    482 		ld->ld_exwant--;
    483 
    484 		if ((ld->ld_flags & LD_SLEEPER) != 0) {
    485 			l->l_exlocks++;
    486 			TAILQ_INSERT_TAIL(&ld_sleepers, ld, ld_chain);
    487 		} else {
    488 			curcpu()->ci_spin_locks2++;
    489 			TAILQ_INSERT_TAIL(&ld_spinners, ld, ld_chain);
    490 		}
    491 	}
    492 
    493 	lockdebug_unlock(lk);
    494 }
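        /*
         * Usage sketch (illustrative only): the expected sequence from a
         * lock primitive is lockdebug_wantlock() before spinning or sleeping
         * for the lock, lockdebug_locked() once it has been acquired, and
         * lockdebug_unlocked() just before it is released, e.g.
         *
         *	lockdebug_wantlock(id, (uintptr_t)__builtin_return_address(0), 0);
         *	... acquire the lock ...
         *	lockdebug_locked(id, (uintptr_t)__builtin_return_address(0), 0);
         *
         * 'where' is assumed here to be the caller's return address; it is
         * only recorded for the "last locked"/"unlocked" addresses shown by
         * lockdebug_dump().
         */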
    495 
    496 /*
    497  * lockdebug_unlocked:
    498  *
    499  *	Process a lock release operation.
    500  */
    501 void
    502 lockdebug_unlocked(u_int id, uintptr_t where, int shared)
    503 {
    504 	struct lwp *l = curlwp;
    505 	lockdebuglk_t *lk;
    506 	lockdebug_t *ld;
    507 
    508 	if (panicstr != NULL)
    509 		return;
    510 
    511 	if ((ld = lockdebug_lookup(id, &lk)) == NULL)
    512 		return;
    513 
    514 	if (shared) {
    515 		if (l->l_shlocks == 0)
    516 			lockdebug_abort1(ld, lk, __func__,
    517 			    "no shared locks held by LWP", true);
    518 		if (ld->ld_shares == 0)
    519 			lockdebug_abort1(ld, lk, __func__,
    520 			    "no shared holds on this lock", true);
    521 		l->l_shlocks--;
    522 		ld->ld_shares--;
    523 	} else {
    524 		if ((ld->ld_flags & LD_LOCKED) == 0)
    525 			lockdebug_abort1(ld, lk, __func__, "not locked",
    526 			    true);
    527 
    528 		if ((ld->ld_flags & LD_SLEEPER) != 0) {
    529 			if (ld->ld_lwp != curlwp)
    530 				lockdebug_abort1(ld, lk, __func__,
    531 				    "not held by current LWP", true);
    532 			ld->ld_flags &= ~LD_LOCKED;
    533 			ld->ld_unlocked = where;
    534 			ld->ld_lwp = NULL;
    535 			curlwp->l_exlocks--;
    536 			TAILQ_REMOVE(&ld_sleepers, ld, ld_chain);
    537 		} else {
    538 			if (ld->ld_cpu != (uint16_t)cpu_number())
    539 				lockdebug_abort1(ld, lk, __func__,
    540 				    "not held by current CPU", true);
    541 			ld->ld_flags &= ~LD_LOCKED;
    542 			ld->ld_unlocked = where;
    543 			ld->ld_lwp = NULL;
    544 			curcpu()->ci_spin_locks2--;
    545 			TAILQ_REMOVE(&ld_spinners, ld, ld_chain);
    546 		}
    547 	}
    548 
    549 	lockdebug_unlock(lk);
    550 }
    551 
    552 /*
    553  * lockdebug_barrier:
    554  *
    555  *	Panic if we hold any spin lock other than the specified one, and
    556  *	optionally if we hold any sleep locks or shared locks.
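         *	'spinlock', if non-NULL, names the one spin lock the caller is
         *	allowed to hold; a non-zero 'slplocks' suppresses the sleep
         *	and shared lock checks.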
    557  */
    558 void
    559 lockdebug_barrier(volatile void *spinlock, int slplocks)
    560 {
    561 	struct lwp *l = curlwp;
    562 	lockdebug_t *ld;
    563 	uint16_t cpuno;
    564 
    565 	if (panicstr != NULL)
    566 		return;
    567 
    568 	if (curcpu()->ci_spin_locks2 != 0) {
    569 		cpuno = (uint16_t)cpu_number();
    570 
    571 		lockdebug_lock(&ld_spinner_lk);
    572 		TAILQ_FOREACH(ld, &ld_spinners, ld_chain) {
    573 			if (ld->ld_lock == spinlock) {
    574 				if (ld->ld_cpu != cpuno)
    575 					lockdebug_abort1(ld, &ld_spinner_lk,
    576 					    __func__,
    577 					    "not held by current CPU", true);
    578 				continue;
    579 			}
    580 			if (ld->ld_cpu == cpuno && (l->l_flag & LW_INTR) == 0)
    581 				lockdebug_abort1(ld, &ld_spinner_lk,
    582 				    __func__, "spin lock held", true);
    583 		}
    584 		lockdebug_unlock(&ld_spinner_lk);
    585 	}
    586 
    587 	if (!slplocks) {
    588 		if (l->l_exlocks != 0) {
    589 			lockdebug_lock(&ld_sleeper_lk);
    590 			TAILQ_FOREACH(ld, &ld_sleepers, ld_chain) {
    591 				if (ld->ld_lwp == l)
    592 					lockdebug_abort1(ld, &ld_sleeper_lk,
    593 					    __func__, "sleep lock held", true);
    594 			}
    595 			lockdebug_unlock(&ld_sleeper_lk);
    596 		}
    597 		if (l->l_shlocks != 0)
    598 			panic("lockdebug_barrier: holding %d shared locks",
    599 			    l->l_shlocks);
    600 	}
    601 }
    602 
    603 /*
    604  * lockdebug_mem_check:
    605  *
    606  *	Check for in-use locks within a memory region that is
    607  *	being freed.  We only check for active locks within the
    608  *	first page of the allocation.
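         *	(the address hash used by lockdebug_mhash() is keyed on the
         *	page number, so only the chain covering base's page is
         *	scanned here).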
    609  */
    610 void
    611 lockdebug_mem_check(const char *func, void *base, size_t sz)
    612 {
    613 	lockdebuglist_t *head;
    614 	lockdebuglk_t *lk;
    615 	lockdebug_t *ld;
    616 	uintptr_t sa, ea, la;
    617 
    618 	sa = (uintptr_t)base;
    619 	ea = sa + sz;
    620 
    621 	lockdebug_mhash(base, &lk, &head);
    622 	TAILQ_FOREACH(ld, head, ld_mchain) {
    623 		la = (uintptr_t)ld->ld_lock;
    624 		if (la >= sa && la < ea) {
    625 			lockdebug_abort1(ld, lk, func,
    626 			    "allocation contains active lock", !cold);
    627 			return;
    628 		}
    629 	}
    630 	lockdebug_unlock(lk);
    631 }
    632 
    633 /*
    634  * lockdebug_dump:
    635  *
    636  *	Dump information about a lock on panic, or for DDB.
    637  */
    638 static void
    639 lockdebug_dump(lockdebug_t *ld, void (*pr)(const char *, ...))
    640 {
    641 	int sleeper = (ld->ld_flags & LD_SLEEPER);
    642 
    643 	(*pr)(
    644 	    "lock address : %#018lx type     : %18s\n"
    645 	    "shared holds : %18u exclusive: %18u\n"
    646 	    "shares wanted: %18u exclusive: %18u\n"
    647 	    "current cpu  : %18u last held: %18u\n"
    648 	    "current lwp  : %#018lx last held: %#018lx\n"
    649 	    "last locked  : %#018lx unlocked : %#018lx\n",
    650 	    (long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"),
    651 	    (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
    652 	    (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
    653 	    (unsigned)cpu_number(), (unsigned)ld->ld_cpu,
    654 	    (long)curlwp, (long)ld->ld_lwp,
    655 	    (long)ld->ld_locked, (long)ld->ld_unlocked);
    656 
    657 	if (ld->ld_lockops->lo_dump != NULL)
    658 		(*ld->ld_lockops->lo_dump)(ld->ld_lock);
    659 
    660 	if (sleeper) {
    661 		(*pr)("\n");
    662 		turnstile_print(ld->ld_lock, pr);
    663 	}
    664 }
    665 
    666 /*
    667  * lockdebug_abort1:
    668  *
    669  *	An error has been detected: dump the lock record and optionally panic.
    670  */
    671 static void
    672 lockdebug_abort1(lockdebug_t *ld, lockdebuglk_t *lk, const char *func,
    673 		 const char *msg, bool dopanic)
    674 {
    675 
    676 	printf_nolog("%s error: %s: %s\n\n", ld->ld_lockops->lo_name,
    677 	    func, msg);
    678 	lockdebug_dump(ld, printf_nolog);
    679 	lockdebug_unlock(lk);
    680 	printf_nolog("\n");
    681 	if (dopanic)
    682 		panic("LOCKDEBUG");
    683 }
    684 
    685 #endif	/* LOCKDEBUG */
    686 
    687 /*
    688  * lockdebug_lock_print:
    689  *
    690  *	Handle the DDB 'show lock' command.
    691  */
    692 #ifdef DDB
    693 void
    694 lockdebug_lock_print(void *addr, void (*pr)(const char *, ...))
    695 {
    696 #ifdef LOCKDEBUG
    697 	lockdebug_t *ld;
    698 
    699 	TAILQ_FOREACH(ld, &ld_all, ld_achain) {
    700 		if (ld->ld_lock == addr) {
    701 			lockdebug_dump(ld, pr);
    702 			return;
    703 		}
    704 	}
    705 	(*pr)("Sorry, no record of a lock with address %p found.\n", addr);
    706 #else
    707 	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
    708 #endif	/* LOCKDEBUG */
    709 }
    710 #endif	/* DDB */
    711 
    712 /*
    713  * lockdebug_abort:
    714  *
    715  *	An error has been trapped - dump lock info and call panic().
    716  */
    717 void
    718 lockdebug_abort(u_int id, volatile void *lock, lockops_t *ops,
    719 		const char *func, const char *msg)
    720 {
    721 #ifdef LOCKDEBUG
    722 	lockdebug_t *ld;
    723 	lockdebuglk_t *lk;
    724 
    725 	if ((ld = lockdebug_lookup(id, &lk)) != NULL) {
    726 		lockdebug_abort1(ld, lk, func, msg, true);
    727 		/* NOTREACHED */
    728 	}
    729 #endif	/* LOCKDEBUG */
    730 
    731 	printf_nolog("%s error: %s: %s\n\n"
    732 	    "lock address : %#018lx\n"
    733 	    "current cpu  : %18d\n"
    734 	    "current lwp  : %#018lx\n",
    735 	    ops->lo_name, func, msg, (long)lock, (int)cpu_number(),
    736 	    (long)curlwp);
    737 
    738 	(*ops->lo_dump)(lock);
    739 
    740 	printf_nolog("\n");
    741 	panic("lock error");
    742 }
    743