/*	$NetBSD: subr_lockdebug.c,v 1.4.2.7 2008/01/21 09:46:19 yamt Exp $	*/

/*-
 * Copyright (c) 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Basic lock debugging code shared among lock primitives.
 */
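
/*
 * Expected call pattern from a lock primitive (an illustrative sketch
 * only; the mutex/rwlock implementations are the authoritative callers):
 *
 *	lockdebug_alloc(lock, ops, initaddr)	lock is being initialized
 *	lockdebug_wantlock(lock, pc, shared)	preamble to an acquire
 *	lockdebug_locked(lock, pc, shared)	lock has been acquired
 *	lockdebug_unlocked(lock, pc, shared)	lock has been released
 *	lockdebug_free(lock)			lock is being destroyed
 */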
     42  1.4.2.2  yamt 
     43  1.4.2.4  yamt #include <sys/cdefs.h>
     44  1.4.2.7  yamt __KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.4.2.7 2008/01/21 09:46:19 yamt Exp $");
     45  1.4.2.4  yamt 
     46  1.4.2.2  yamt #include "opt_ddb.h"
     47  1.4.2.2  yamt 
     48  1.4.2.2  yamt #include <sys/param.h>
     49  1.4.2.2  yamt #include <sys/proc.h>
     50  1.4.2.2  yamt #include <sys/systm.h>
     51  1.4.2.4  yamt #include <sys/kernel.h>
     52  1.4.2.2  yamt #include <sys/kmem.h>
     53  1.4.2.2  yamt #include <sys/lockdebug.h>
     54  1.4.2.2  yamt #include <sys/sleepq.h>
     55  1.4.2.4  yamt #include <sys/cpu.h>
     56  1.4.2.6  yamt #include <sys/atomic.h>
     57  1.4.2.7  yamt #include <sys/lock.h>
     58  1.4.2.6  yamt 
     59  1.4.2.6  yamt #include <lib/libkern/rb.h>
     60  1.4.2.2  yamt 
     61  1.4.2.7  yamt #include <machine/lock.h>
     62  1.4.2.7  yamt 
     63  1.4.2.2  yamt #ifdef LOCKDEBUG
     64  1.4.2.2  yamt 
     65  1.4.2.2  yamt #define	LD_BATCH_SHIFT	9
     66  1.4.2.2  yamt #define	LD_BATCH	(1 << LD_BATCH_SHIFT)
     67  1.4.2.2  yamt #define	LD_BATCH_MASK	(LD_BATCH - 1)
     68  1.4.2.2  yamt #define	LD_MAX_LOCKS	1048576
     69  1.4.2.2  yamt #define	LD_SLOP		16
     70  1.4.2.2  yamt 
     71  1.4.2.2  yamt #define	LD_LOCKED	0x01
     72  1.4.2.2  yamt #define	LD_SLEEPER	0x02
     73  1.4.2.2  yamt 
     74  1.4.2.7  yamt #define	LD_WRITE_LOCK	0x80000000
     75  1.4.2.7  yamt 
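/*
 * A lockdebuglk_t is a simple reader/writer spin lock: lku_lock holds
 * the count of active readers, or LD_WRITE_LOCK while a writer owns
 * it.  The union pads each lock out to a full cache line to avoid
 * false sharing between CPUs.
 */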
typedef union lockdebuglk {
	struct {
		u_int	lku_lock;
		int	lku_oldspl;
	} ul;
	uint8_t	lk_pad[CACHE_LINE_SIZE];
} volatile __aligned(CACHE_LINE_SIZE) lockdebuglk_t;

#define	lk_lock		ul.lku_lock
#define	lk_oldspl	ul.lku_oldspl

typedef struct lockdebug {
	struct rb_node	ld_rb_node;	/* must be the first member */
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
	volatile void	*ld_lock;
	lockops_t	*ld_lockops;
	struct lwp	*ld_lwp;
	uintptr_t	ld_locked;
	uintptr_t	ld_unlocked;
	uintptr_t	ld_initaddr;
	uint16_t	ld_shares;
	uint16_t	ld_cpu;
	uint8_t		ld_flags;
	uint8_t		ld_shwant;	/* advisory */
	uint8_t		ld_exwant;	/* advisory */
	uint8_t		ld_unused;
} volatile lockdebug_t;

typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;

lockdebuglk_t		ld_tree_lk;
lockdebuglk_t		ld_sleeper_lk;
lockdebuglk_t		ld_spinner_lk;
lockdebuglk_t		ld_free_lk;

lockdebuglist_t		ld_sleepers = TAILQ_HEAD_INITIALIZER(ld_sleepers);
lockdebuglist_t		ld_spinners = TAILQ_HEAD_INITIALIZER(ld_spinners);
lockdebuglist_t		ld_free = TAILQ_HEAD_INITIALIZER(ld_free);
lockdebuglist_t		ld_all = TAILQ_HEAD_INITIALIZER(ld_all);
int			ld_nfree;
int			ld_freeptr;
int			ld_recurse;
bool			ld_nomore;
lockdebug_t		*ld_table[LD_MAX_LOCKS / LD_BATCH];

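/*
 * Statically allocated pool of lockdebug structures, usable before the
 * VM system is up and kmem_alloc() works (see lockdebug_init()).
 */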
lockdebug_t		ld_prime[LD_BATCH];

static void	lockdebug_abort1(lockdebug_t *, lockdebuglk_t *,
				 const char *, const char *, bool);
static void	lockdebug_more(void);
static void	lockdebug_init(void);

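/*
 * The tree below is keyed on lock address.  Note that both comparators
 * deliberately return 1 when a < b and -1 when a > b; the sign is
 * inverted relative to the usual memcmp() convention, which appears to
 * be what this rb.c implementation expects (lockdebug_mem_check()
 * relies on rb_tree_find_node_geq() honouring it).  In any case the
 * two routines must agree with each other.
 */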
static signed int
ld_rb_compare_nodes(const struct rb_node *n1, const struct rb_node *n2)
{
	const lockdebug_t *ld1 = (const void *)n1;
	const lockdebug_t *ld2 = (const void *)n2;
	const uintptr_t a = (uintptr_t)ld1->ld_lock;
	const uintptr_t b = (uintptr_t)ld2->ld_lock;

	if (a < b)
		return 1;
	if (a > b)
		return -1;
	return 0;
}

static signed int
ld_rb_compare_key(const struct rb_node *n, const void *key)
{
	const lockdebug_t *ld = (const void *)n;
	const uintptr_t a = (uintptr_t)ld->ld_lock;
	const uintptr_t b = (uintptr_t)key;

	if (a < b)
		return 1;
	if (a > b)
		return -1;
	return 0;
}

static struct rb_tree ld_rb_tree;

static const struct rb_tree_ops ld_rb_tree_ops = {
	.rb_compare_nodes = ld_rb_compare_nodes,
	.rb_compare_key = ld_rb_compare_key,
};

static void
lockdebug_lock_init(lockdebuglk_t *lk)
{

	lk->lk_lock = 0;
}

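/*
 * lockdebug_lock:
 *
 *	Acquire lk for writing: raise the SPL, then spin until the lock
 *	word can be moved from 0 to LD_WRITE_LOCK.  The saved SPL is
 *	restored by lockdebug_unlock().
 */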
static void
lockdebug_lock(lockdebuglk_t *lk)
{
	int s;

	s = splhigh();
	do {
		while (lk->lk_lock != 0) {
			SPINLOCK_SPIN_HOOK;
		}
	} while (atomic_cas_uint(&lk->lk_lock, 0, LD_WRITE_LOCK) != 0);
	lk->lk_oldspl = s;
	membar_enter();
}

static void
lockdebug_unlock(lockdebuglk_t *lk)
{
	int s;

	s = lk->lk_oldspl;
	membar_exit();
	lk->lk_lock = 0;
	splx(s);
}

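/*
 * lockdebug_lock_rd:
 *
 *	Acquire lk for reading: spin while a writer holds the lock,
 *	then increment the reader count.  Returns the previous SPL,
 *	which the caller must pass back to lockdebug_unlock_rd().
 */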
static int
lockdebug_lock_rd(lockdebuglk_t *lk)
{
	u_int val;
	int s;

	s = splhigh();
	do {
		while ((val = lk->lk_lock) == LD_WRITE_LOCK) {
			SPINLOCK_SPIN_HOOK;
		}
	} while (atomic_cas_uint(&lk->lk_lock, val, val + 1) != val);
	membar_enter();
	return s;
}

static void
lockdebug_unlock_rd(lockdebuglk_t *lk, int s)
{

	membar_exit();
	atomic_dec_uint(&lk->lk_lock);
	splx(s);
}

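/*
 * lockdebug_lookup1:
 *
 *	Find a lockdebug structure by the lock it tracks.  On success,
 *	return it with the matching sleeper/spinner lock held; on
 *	failure, return NULL with no lock held.
 */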
static inline lockdebug_t *
lockdebug_lookup1(volatile void *lock, lockdebuglk_t **lk)
{
	lockdebug_t *ld;
	int s;

	s = lockdebug_lock_rd(&ld_tree_lk);
	ld = (lockdebug_t *)rb_tree_find_node(&ld_rb_tree, __UNVOLATILE(lock));
	lockdebug_unlock_rd(&ld_tree_lk, s);
	if (ld == NULL)
		return NULL;

	if ((ld->ld_flags & LD_SLEEPER) != 0)
		*lk = &ld_sleeper_lk;
	else
		*lk = &ld_spinner_lk;

	lockdebug_lock(*lk);
	return ld;
}

/*
 * lockdebug_lookup:
 *
 *	Find a lockdebug structure by a pointer to a lock and return it
 *	locked.  Panics if the lock has not been initialized.
 */
static inline lockdebug_t *
lockdebug_lookup(volatile void *lock, lockdebuglk_t **lk)
{
	lockdebug_t *ld;

	ld = lockdebug_lookup1(lock, lk);
	if (ld == NULL)
		panic("lockdebug_lookup: uninitialized lock (lock=%p)", lock);
	return ld;
}

/*
 * lockdebug_init:
 *
 *	Initialize the lockdebug system.  Allocate an initial pool of
 *	lockdebug structures before the VM system is up and running.
 */
static void
lockdebug_init(void)
{
	lockdebug_t *ld;
	int i;

	lockdebug_lock_init(&ld_tree_lk);
	lockdebug_lock_init(&ld_sleeper_lk);
	lockdebug_lock_init(&ld_spinner_lk);
	lockdebug_lock_init(&ld_free_lk);

	rb_tree_init(&ld_rb_tree, &ld_rb_tree_ops);

	ld = ld_prime;
	ld_table[0] = ld;
	for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
		TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
		TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
	}
	ld_freeptr = 1;
	ld_nfree = LD_BATCH - 1;
}

/*
 * lockdebug_alloc:
 *
 *	A lock is being initialized, so allocate an associated debug
 *	structure.
 */
bool
lockdebug_alloc(volatile void *lock, lockops_t *lo, uintptr_t initaddr)
{
	struct cpu_info *ci;
	lockdebug_t *ld;
	lockdebuglk_t *lk;

	if (lo == NULL || panicstr != NULL)
		return false;
	if (ld_freeptr == 0)
		lockdebug_init();

	if ((ld = lockdebug_lookup1(lock, &lk)) != NULL) {
		lockdebug_abort1(ld, lk, __func__, "already initialized", true);
		/* NOTREACHED */
	}

	/*
	 * Pinch a new debug structure.  We may recurse because we call
	 * kmem_alloc(), which may need to initialize new locks somewhere
	 * down the path.  If not recursing, we try to maintain at least
	 * LD_SLOP structures free, which should hopefully be enough to
	 * satisfy kmem_alloc().  If we can't provide a structure, not to
	 * worry: we'll just return false and the lock will go untracked.
	 */
	lockdebug_lock(&ld_free_lk);
	ci = curcpu();
	ci->ci_lkdebug_recurse++;

	if (TAILQ_EMPTY(&ld_free)) {
		if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
			ci->ci_lkdebug_recurse--;
			lockdebug_unlock(&ld_free_lk);
			return false;
		}
		lockdebug_more();
	} else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP)
		lockdebug_more();

	if ((ld = TAILQ_FIRST(&ld_free)) == NULL) {
		lockdebug_unlock(&ld_free_lk);
		return false;
	}

	TAILQ_REMOVE(&ld_free, ld, ld_chain);
	ld_nfree--;

	ci->ci_lkdebug_recurse--;
	lockdebug_unlock(&ld_free_lk);

	if (ld->ld_lock != NULL)
		panic("lockdebug_alloc: corrupt table");

	if (lo->lo_sleeplock)
		lockdebug_lock(&ld_sleeper_lk);
	else
		lockdebug_lock(&ld_spinner_lk);

	/* Initialise the structure. */
	ld->ld_lock = lock;
	ld->ld_lockops = lo;
	ld->ld_locked = 0;
	ld->ld_unlocked = 0;
	ld->ld_lwp = NULL;
	ld->ld_initaddr = initaddr;

	lockdebug_lock(&ld_tree_lk);
	rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(&ld->ld_rb_node));
	lockdebug_unlock(&ld_tree_lk);

	if (lo->lo_sleeplock) {
		ld->ld_flags = LD_SLEEPER;
		lockdebug_unlock(&ld_sleeper_lk);
	} else {
		ld->ld_flags = 0;
		lockdebug_unlock(&ld_spinner_lk);
	}

	return true;
}

/*
 * lockdebug_free:
 *
 *	A lock is being destroyed, so release debugging resources.
 */
void
lockdebug_free(volatile void *lock)
{
	lockdebug_t *ld;
	lockdebuglk_t *lk;

	if (panicstr != NULL)
		return;

	ld = lockdebug_lookup(lock, &lk);
	if (ld == NULL) {
		/*
		 * Defensive only: lockdebug_lookup() panics if the lock
		 * is unknown, so ld cannot be NULL (and in particular
		 * must not be dereferenced) here.
		 */
		panic("lockdebug_free: destroying uninitialized lock %p",
		    lock);
	}
	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0)
		lockdebug_abort1(ld, lk, __func__, "is locked", true);
	lockdebug_lock(&ld_tree_lk);
	rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(&ld->ld_rb_node));
	lockdebug_unlock(&ld_tree_lk);
	ld->ld_lock = NULL;
	lockdebug_unlock(lk);

	lockdebug_lock(&ld_free_lk);
	TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
	ld_nfree++;
	lockdebug_unlock(&ld_free_lk);
}

/*
 * lockdebug_more:
 *
 *	Allocate a batch of debug structures and add to the free list.
 *	Must be called with ld_free_lk held.
 */
static void
lockdebug_more(void)
{
	lockdebug_t *ld;
	void *block;
	int i, base, m;

	while (ld_nfree < LD_SLOP) {
		lockdebug_unlock(&ld_free_lk);
		block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
		lockdebug_lock(&ld_free_lk);

		if (block == NULL)
			return;

		if (ld_nfree > LD_SLOP) {
			/* Somebody beat us to it. */
			lockdebug_unlock(&ld_free_lk);
			kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
			lockdebug_lock(&ld_free_lk);
			continue;
		}

		base = ld_freeptr;
		ld_nfree += LD_BATCH;
		ld = block;
		base <<= LD_BATCH_SHIFT;
		m = min(LD_MAX_LOCKS, base + LD_BATCH);

		if (m == LD_MAX_LOCKS)
			ld_nomore = true;

		for (i = base; i < m; i++, ld++) {
			TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
			TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
		}

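		/*
		 * Make the new entries globally visible before
		 * publishing the block in ld_table.
		 */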
		membar_producer();
		ld_table[ld_freeptr++] = block;
	}
}

/*
 * lockdebug_wantlock:
 *
 *	Process the preamble to a lock acquire.
 */
void
lockdebug_wantlock(volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebuglk_t *lk;
	lockdebug_t *ld;
	bool recurse;

	recurse = false;

	if (panicstr != NULL)
		return;

	if ((ld = lockdebug_lookup(lock, &lk)) == NULL)
		return;

	if ((ld->ld_flags & LD_LOCKED) != 0) {
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp == l)
				recurse = true;
		} else if (ld->ld_cpu == (uint16_t)cpu_number())
			recurse = true;
	}

	if (cpu_intr_p()) {
		if ((ld->ld_flags & LD_SLEEPER) != 0)
			lockdebug_abort1(ld, lk, __func__,
			    "acquiring sleep lock from interrupt context",
			    true);
	}

	if (shared)
		ld->ld_shwant++;
	else
		ld->ld_exwant++;

	if (recurse)
		lockdebug_abort1(ld, lk, __func__, "locking against myself",
		    true);

	lockdebug_unlock(lk);
}

/*
 * lockdebug_locked:
 *
 *	Process a lock acquire operation.
 */
void
lockdebug_locked(volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebuglk_t *lk;
	lockdebug_t *ld;

	if (panicstr != NULL)
		return;

	if ((ld = lockdebug_lookup(lock, &lk)) == NULL)
		return;

	if (shared) {
		l->l_shlocks++;
		ld->ld_shares++;
		ld->ld_shwant--;
	} else {
		if ((ld->ld_flags & LD_LOCKED) != 0)
			lockdebug_abort1(ld, lk, __func__,
			    "already locked", true);

		ld->ld_flags |= LD_LOCKED;
		ld->ld_locked = where;
		ld->ld_cpu = (uint16_t)cpu_number();
		ld->ld_lwp = l;
		ld->ld_exwant--;

		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			l->l_exlocks++;
			TAILQ_INSERT_TAIL(&ld_sleepers, ld, ld_chain);
		} else {
			curcpu()->ci_spin_locks2++;
			TAILQ_INSERT_TAIL(&ld_spinners, ld, ld_chain);
		}
	}

	lockdebug_unlock(lk);
}

/*
 * lockdebug_unlocked:
 *
 *	Process a lock release operation.
 */
void
lockdebug_unlocked(volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebuglk_t *lk;
	lockdebug_t *ld;

	if (panicstr != NULL)
		return;

	if ((ld = lockdebug_lookup(lock, &lk)) == NULL)
		return;

	if (shared) {
		if (l->l_shlocks == 0)
			lockdebug_abort1(ld, lk, __func__,
			    "no shared locks held by LWP", true);
		if (ld->ld_shares == 0)
			lockdebug_abort1(ld, lk, __func__,
			    "no shared holds on this lock", true);
		l->l_shlocks--;
		ld->ld_shares--;
	} else {
		if ((ld->ld_flags & LD_LOCKED) == 0)
			lockdebug_abort1(ld, lk, __func__, "not locked",
			    true);

		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp != curlwp)
				lockdebug_abort1(ld, lk, __func__,
				    "not held by current LWP", true);
			ld->ld_flags &= ~LD_LOCKED;
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
			curlwp->l_exlocks--;
			TAILQ_REMOVE(&ld_sleepers, ld, ld_chain);
		} else {
			if (ld->ld_cpu != (uint16_t)cpu_number())
				lockdebug_abort1(ld, lk, __func__,
				    "not held by current CPU", true);
			ld->ld_flags &= ~LD_LOCKED;
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
			curcpu()->ci_spin_locks2--;
			TAILQ_REMOVE(&ld_spinners, ld, ld_chain);
		}
	}

	lockdebug_unlock(lk);
}

/*
 * lockdebug_barrier:
 *
 *	Panic if we hold more than one specified spin lock, and optionally,
 *	if we hold sleep locks.
 */
void
lockdebug_barrier(volatile void *spinlock, int slplocks)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	uint16_t cpuno;
	int s;

	if (panicstr != NULL)
		return;

	crit_enter();

	if (curcpu()->ci_spin_locks2 != 0) {
		cpuno = (uint16_t)cpu_number();

		s = lockdebug_lock_rd(&ld_spinner_lk);
		TAILQ_FOREACH(ld, &ld_spinners, ld_chain) {
			if (ld->ld_lock == spinlock) {
				if (ld->ld_cpu != cpuno)
					lockdebug_abort1(ld, &ld_spinner_lk,
					    __func__,
					    "not held by current CPU", true);
				continue;
			}
			if (ld->ld_cpu == cpuno && (l->l_pflag & LP_INTR) == 0)
				lockdebug_abort1(ld, &ld_spinner_lk,
				    __func__, "spin lock held", true);
		}
		lockdebug_unlock_rd(&ld_spinner_lk, s);
	}

	if (!slplocks) {
		if (l->l_exlocks != 0) {
			s = lockdebug_lock_rd(&ld_sleeper_lk);
			TAILQ_FOREACH(ld, &ld_sleepers, ld_chain) {
				if (ld->ld_lwp == l)
					lockdebug_abort1(ld, &ld_sleeper_lk,
					    __func__, "sleep lock held", true);
			}
			lockdebug_unlock_rd(&ld_sleeper_lk, s);
		}
		if (l->l_shlocks != 0)
			panic("lockdebug_barrier: holding %d shared locks",
			    l->l_shlocks);
	}

	crit_exit();
}

/*
 * lockdebug_mem_check:
 *
 *	Check for in-use locks within a memory region that is
 *	being freed.
 */
void
lockdebug_mem_check(const char *func, void *base, size_t sz)
{
	lockdebug_t *ld;
	lockdebuglk_t *lk;
	int s;

	if (panicstr != NULL)
		return;

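	/*
	 * Find the lock with the lowest address >= base; the region is
	 * clean if there is no such lock or it lies at or beyond
	 * base + sz.
	 */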
	s = lockdebug_lock_rd(&ld_tree_lk);
	ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base);
	if (ld != NULL) {
		const uintptr_t lock = (uintptr_t)ld->ld_lock;

		if ((uintptr_t)base > lock)
			panic("%s: corrupt tree ld=%p, base=%p, sz=%zu",
			    __func__, ld, base, sz);
		if (lock >= (uintptr_t)base + sz)
			ld = NULL;
	}
	lockdebug_unlock_rd(&ld_tree_lk, s);
	if (ld == NULL)
		return;

	if ((ld->ld_flags & LD_SLEEPER) != 0)
		lk = &ld_sleeper_lk;
	else
		lk = &ld_spinner_lk;

	lockdebug_lock(lk);
	lockdebug_abort1(ld, lk, func,
	    "allocation contains active lock", !cold);
}

/*
 * lockdebug_dump:
 *
 *	Dump information about a lock on panic, or for DDB.
 */
static void
lockdebug_dump(lockdebug_t *ld, void (*pr)(const char *, ...))
{
	int sleeper = (ld->ld_flags & LD_SLEEPER);

	(*pr)(
	    "lock address : %#018lx type     : %18s\n"
	    "shared holds : %18u exclusive: %18u\n"
	    "shares wanted: %18u exclusive: %18u\n"
	    "current cpu  : %18u last held: %18u\n"
	    "current lwp  : %#018lx last held: %#018lx\n"
	    "last locked  : %#018lx unlocked : %#018lx\n"
	    "initialized  : %#018lx\n",
	    (long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"),
	    (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
	    (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
	    (unsigned)cpu_number(), (unsigned)ld->ld_cpu,
	    (long)curlwp, (long)ld->ld_lwp,
	    (long)ld->ld_locked, (long)ld->ld_unlocked,
	    (long)ld->ld_initaddr);

	if (ld->ld_lockops->lo_dump != NULL)
		(*ld->ld_lockops->lo_dump)(ld->ld_lock);

	if (sleeper) {
		(*pr)("\n");
		turnstile_print(ld->ld_lock, pr);
	}
}

/*
 * lockdebug_abort1:
 *
 *	An error has been trapped: dump information about a known lock
 *	and panic if requested.
 */
static void
lockdebug_abort1(lockdebug_t *ld, lockdebuglk_t *lk, const char *func,
		 const char *msg, bool dopanic)
{

	printf_nolog("%s error: %s: %s\n\n", ld->ld_lockops->lo_name,
	    func, msg);
	lockdebug_dump(ld, printf_nolog);
	lockdebug_unlock(lk);
	printf_nolog("\n");
	if (dopanic)
		panic("LOCKDEBUG");
}

#endif	/* LOCKDEBUG */

/*
 * lockdebug_lock_print:
 *
 *	Handle the DDB 'show lock' command.
 */
#ifdef DDB
void
lockdebug_lock_print(void *addr, void (*pr)(const char *, ...))
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;

	TAILQ_FOREACH(ld, &ld_all, ld_achain) {
		if (ld->ld_lock == addr) {
			lockdebug_dump(ld, pr);
			return;
		}
	}
	(*pr)("Sorry, no record of a lock with address %p found.\n", addr);
#else
	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
#endif	/* LOCKDEBUG */
}
#endif	/* DDB */

/*
 * lockdebug_abort:
 *
 *	An error has been trapped - dump lock info and call panic().
 */
void
lockdebug_abort(volatile void *lock, lockops_t *ops, const char *func,
		const char *msg)
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;
	lockdebuglk_t *lk;

	if ((ld = lockdebug_lookup(lock, &lk)) != NULL) {
		lockdebug_abort1(ld, lk, func, msg, true);
		/* NOTREACHED */
	}
#endif	/* LOCKDEBUG */

	printf_nolog("%s error: %s: %s\n\n"
	    "lock address : %#018lx\n"
	    "current cpu  : %18d\n"
	    "current lwp  : %#018lx\n",
	    ops->lo_name, func, msg, (long)lock, (int)cpu_number(),
	    (long)curlwp);

	(*ops->lo_dump)(lock);

	printf_nolog("\n");
	panic("lock error");
}