/*	$NetBSD: subr_lockdebug.c,v 1.25 2008/01/04 21:18:12 ad Exp $	*/

/*-
 * Copyright (c) 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Basic lock debugging code shared among lock primitives.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.25 2008/01/04 21:18:12 ad Exp $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lockdebug.h>
#include <sys/sleepq.h>
#include <sys/cpu.h>
#include <sys/atomic.h>

#include <lib/libkern/rb.h>

#include <machine/lock.h>

#ifdef LOCKDEBUG

#define	LD_BATCH_SHIFT	9
#define	LD_BATCH	(1 << LD_BATCH_SHIFT)
#define	LD_BATCH_MASK	(LD_BATCH - 1)
#define	LD_MAX_LOCKS	1048576
#define	LD_SLOP		16
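
/*
 * Worked example of the sizing above: with LD_BATCH_SHIFT of 9, each
 * batch holds 512 lockdebug structures, so ld_table below has
 * 1048576 / 512 = 2048 slots, and batch i covers structures
 * [i << 9 .. (i << 9) + 511].
 */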

#define	LD_LOCKED	0x01
#define	LD_SLEEPER	0x02

#define	LD_WRITE_LOCK	0x80000000

typedef union lockdebuglk {
	struct {
		u_int	lku_lock;
		int	lku_oldspl;
	} ul;
	uint8_t	lk_pad[CACHE_LINE_SIZE];
} volatile __aligned(CACHE_LINE_SIZE) lockdebuglk_t;

#define	lk_lock		ul.lku_lock
#define	lk_oldspl	ul.lku_oldspl

typedef struct lockdebug {
	struct rb_node	ld_rb_node;	/* must be the first member */
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
	volatile void	*ld_lock;
	lockops_t	*ld_lockops;
	struct lwp	*ld_lwp;
	uintptr_t	ld_locked;
	uintptr_t	ld_unlocked;
	uintptr_t	ld_initaddr;
	uint16_t	ld_shares;
	uint16_t	ld_cpu;
	uint8_t		ld_flags;
	uint8_t		ld_shwant;	/* advisory */
	uint8_t		ld_exwant;	/* advisory */
	uint8_t		ld_unused;
} volatile lockdebug_t;

typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;

lockdebuglk_t		ld_tree_lk;
lockdebuglk_t		ld_sleeper_lk;
lockdebuglk_t		ld_spinner_lk;
lockdebuglk_t		ld_free_lk;

lockdebuglist_t		ld_sleepers = TAILQ_HEAD_INITIALIZER(ld_sleepers);
lockdebuglist_t		ld_spinners = TAILQ_HEAD_INITIALIZER(ld_spinners);
lockdebuglist_t		ld_free = TAILQ_HEAD_INITIALIZER(ld_free);
lockdebuglist_t		ld_all = TAILQ_HEAD_INITIALIZER(ld_all);
int			ld_nfree;
int			ld_freeptr;
int			ld_recurse;
bool			ld_nomore;
lockdebug_t		*ld_table[LD_MAX_LOCKS / LD_BATCH];

lockdebug_t		ld_prime[LD_BATCH];

static void	lockdebug_abort1(lockdebug_t *, lockdebuglk_t *lk,
				 const char *, const char *, bool);
static void	lockdebug_more(void);
static void	lockdebug_init(void);

/*
 * Order tree nodes and keys by lock address.  Note the sign convention:
 * a lower address compares greater.  This is the reverse of a
 * memcmp()-style comparator and is the convention the rb_tree code
 * used here expects from these hooks.
 */
static signed int
ld_rb_compare_nodes(const struct rb_node *n1, const struct rb_node *n2)
{
	const lockdebug_t *ld1 = (const void *)n1;
	const lockdebug_t *ld2 = (const void *)n2;
	const uintptr_t a = (uintptr_t)ld1->ld_lock;
	const uintptr_t b = (uintptr_t)ld2->ld_lock;

	if (a < b)
		return 1;
	if (a > b)
		return -1;
	return 0;
}

static signed int
ld_rb_compare_key(const struct rb_node *n, const void *key)
{
	const lockdebug_t *ld = (const void *)n;
	const uintptr_t a = (uintptr_t)ld->ld_lock;
	const uintptr_t b = (uintptr_t)key;

	if (a < b)
		return 1;
	if (a > b)
		return -1;
	return 0;
}

static struct rb_tree ld_rb_tree;

static const struct rb_tree_ops ld_rb_tree_ops = {
	.rb_compare_nodes = ld_rb_compare_nodes,
	.rb_compare_key = ld_rb_compare_key,
};

static void
lockdebug_lock_init(lockdebuglk_t *lk)
{

	lk->lk_lock = 0;
}

static void
lockdebug_lock(lockdebuglk_t *lk)
{
	int s;

	s = splhigh();
	do {
		while (lk->lk_lock != 0) {
			SPINLOCK_SPIN_HOOK;
		}
	} while (atomic_cas_uint(&lk->lk_lock, 0, LD_WRITE_LOCK) != 0);
	lk->lk_oldspl = s;
	membar_enter();
}

static void
lockdebug_unlock(lockdebuglk_t *lk)
{
	int s;

	s = lk->lk_oldspl;
	membar_exit();
	lk->lk_lock = 0;
	splx(s);
}

static int
lockdebug_lock_rd(lockdebuglk_t *lk)
{
	u_int val;
	int s;

	s = splhigh();
	do {
		while ((val = lk->lk_lock) == LD_WRITE_LOCK) {
			SPINLOCK_SPIN_HOOK;
		}
	} while (atomic_cas_uint(&lk->lk_lock, val, val + 1) != val);
	membar_enter();
	return s;
}

static void
lockdebug_unlock_rd(lockdebuglk_t *lk, int s)
{

	membar_exit();
	atomic_dec_uint(&lk->lk_lock);
	splx(s);
}
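
/*
 * Usage sketch (illustrative, not from the original source): readers of
 * the structures protected by these primitive reader/writer spin locks
 * bracket their access as below, while updaters take the exclusive
 * lockdebug_lock()/lockdebug_unlock() path:
 *
 *	int s = lockdebug_lock_rd(&ld_tree_lk);
 *	... read-only access to the tree ...
 *	lockdebug_unlock_rd(&ld_tree_lk, s);
 */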

static inline lockdebug_t *
lockdebug_lookup1(volatile void *lock, lockdebuglk_t **lk)
{
	lockdebug_t *ld;
	int s;

	s = lockdebug_lock_rd(&ld_tree_lk);
	ld = (lockdebug_t *)rb_tree_find_node(&ld_rb_tree, __UNVOLATILE(lock));
	lockdebug_unlock_rd(&ld_tree_lk, s);
	if (ld == NULL)
		return NULL;

	if ((ld->ld_flags & LD_SLEEPER) != 0)
		*lk = &ld_sleeper_lk;
	else
		*lk = &ld_spinner_lk;

	lockdebug_lock(*lk);
	return ld;
}

/*
 * lockdebug_lookup:
 *
 *	Find a lockdebug structure by a pointer to a lock and return it locked.
 */
static inline lockdebug_t *
lockdebug_lookup(volatile void *lock, lockdebuglk_t **lk)
{
	lockdebug_t *ld;

	ld = lockdebug_lookup1(lock, lk);
	if (ld == NULL)
		panic("lockdebug_lookup: uninitialized lock (lock=%p)", lock);
	return ld;
}
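
/*
 * Caller pattern (sketch): on success the structure comes back with
 * *lk held at splhigh(), so every lookup must be paired with an unlock
 * of that same lock:
 *
 *	ld = lockdebug_lookup(lock, &lk);
 *	... inspect or update ld ...
 *	lockdebug_unlock(lk);
 */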

/*
 * lockdebug_init:
 *
 *	Initialize the lockdebug system.  Allocate an initial pool of
 *	lockdebug structures before the VM system is up and running.
 */
static void
lockdebug_init(void)
{
	lockdebug_t *ld;
	int i;

	lockdebug_lock_init(&ld_tree_lk);
	lockdebug_lock_init(&ld_sleeper_lk);
	lockdebug_lock_init(&ld_spinner_lk);
	lockdebug_lock_init(&ld_free_lk);

	rb_tree_init(&ld_rb_tree, &ld_rb_tree_ops);

	ld = ld_prime;
	ld_table[0] = ld;
	for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
		TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
		TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
	}
	ld_freeptr = 1;
	ld_nfree = LD_BATCH - 1;
}

/*
 * lockdebug_alloc:
 *
 *	A lock is being initialized, so allocate an associated debug
 *	structure.
 */
bool
lockdebug_alloc(volatile void *lock, lockops_t *lo, uintptr_t initaddr)
{
	struct cpu_info *ci;
	lockdebug_t *ld;
	lockdebuglk_t *lk;

	if (lo == NULL || panicstr != NULL)
		return false;
	if (ld_freeptr == 0)
		lockdebug_init();

	if ((ld = lockdebug_lookup1(lock, &lk)) != NULL) {
		lockdebug_abort1(ld, lk, __func__, "already initialized", true);
		/* NOTREACHED */
	}

	/*
	 * Pinch a new debug structure.  We may recurse because we call
	 * kmem_alloc(), which may need to initialize new locks somewhere
	 * down the path.  If not recursing, we try to maintain at least
	 * LD_SLOP structures free, which should hopefully be enough to
	 * satisfy kmem_alloc().  If we can't provide a structure, not to
	 * worry: we'll just report failure and the lock goes untracked.
	 */
	lockdebug_lock(&ld_free_lk);
	ci = curcpu();
	ci->ci_lkdebug_recurse++;

	if (TAILQ_EMPTY(&ld_free)) {
		if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
			ci->ci_lkdebug_recurse--;
			lockdebug_unlock(&ld_free_lk);
			return false;
		}
		lockdebug_more();
	} else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP)
		lockdebug_more();

	if ((ld = TAILQ_FIRST(&ld_free)) == NULL) {
		lockdebug_unlock(&ld_free_lk);
		return false;
	}

	TAILQ_REMOVE(&ld_free, ld, ld_chain);
	ld_nfree--;

	ci->ci_lkdebug_recurse--;
	lockdebug_unlock(&ld_free_lk);

	if (ld->ld_lock != NULL)
		panic("lockdebug_alloc: corrupt table");

	if (lo->lo_sleeplock)
		lockdebug_lock(&ld_sleeper_lk);
	else
		lockdebug_lock(&ld_spinner_lk);

	/* Initialise the structure. */
	ld->ld_lock = lock;
	ld->ld_lockops = lo;
	ld->ld_locked = 0;
	ld->ld_unlocked = 0;
	ld->ld_lwp = NULL;
	ld->ld_initaddr = initaddr;

	lockdebug_lock(&ld_tree_lk);
	rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(&ld->ld_rb_node));
	lockdebug_unlock(&ld_tree_lk);

	if (lo->lo_sleeplock) {
		ld->ld_flags = LD_SLEEPER;
		lockdebug_unlock(&ld_sleeper_lk);
	} else {
		ld->ld_flags = 0;
		lockdebug_unlock(&ld_spinner_lk);
	}

	return true;
}
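
/*
 * Sketch of a typical caller (assumed, not part of this file): a lock
 * primitive's init routine registers the lock and records its caller
 * as the initialization address.  "mutex_lockops" is an illustrative
 * name only:
 *
 *	void
 *	mutex_init(kmutex_t *mtx, ...)
 *	{
 *		...
 *		lockdebug_alloc(mtx, &mutex_lockops,
 *		    (uintptr_t)__builtin_return_address(0));
 *	}
 */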

/*
 * lockdebug_free:
 *
 *	A lock is being destroyed, so release debugging resources.
 */
void
lockdebug_free(volatile void *lock)
{
	lockdebug_t *ld;
	lockdebuglk_t *lk;

	if (panicstr != NULL)
		return;

	ld = lockdebug_lookup(lock, &lk);
	if (ld == NULL) {
		panic("lockdebug_free: destroying uninitialized lock %p",
		    lock);
		/* NOTREACHED */
	}
	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0)
		lockdebug_abort1(ld, lk, __func__, "is locked", true);
	lockdebug_lock(&ld_tree_lk);
	rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(&ld->ld_rb_node));
	lockdebug_unlock(&ld_tree_lk);
	ld->ld_lock = NULL;
	lockdebug_unlock(lk);

	lockdebug_lock(&ld_free_lk);
	TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
	ld_nfree++;
	lockdebug_unlock(&ld_free_lk);
}
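
/*
 * The matching destroy-side sketch (assumed caller): a primitive's
 * destroy routine releases the debug record before tearing the lock
 * down:
 *
 *	void
 *	mutex_destroy(kmutex_t *mtx)
 *	{
 *		lockdebug_free(mtx);
 *		...
 *	}
 */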

/*
 * lockdebug_more:
 *
 *	Allocate a batch of debug structures and add to the free list.
 *	Must be called with ld_free_lk held.
 */
static void
lockdebug_more(void)
{
	lockdebug_t *ld;
	void *block;
	int i, base, m;

	while (ld_nfree < LD_SLOP) {
		lockdebug_unlock(&ld_free_lk);
		block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
		lockdebug_lock(&ld_free_lk);

		if (block == NULL)
			return;

		if (ld_nfree > LD_SLOP) {
			/* Somebody beat us to it. */
			lockdebug_unlock(&ld_free_lk);
			kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
			lockdebug_lock(&ld_free_lk);
			continue;
		}

		base = ld_freeptr;
		ld_nfree += LD_BATCH;
		ld = block;
		base <<= LD_BATCH_SHIFT;
		m = min(LD_MAX_LOCKS, base + LD_BATCH);

		if (m == LD_MAX_LOCKS)
			ld_nomore = true;

		for (i = base; i < m; i++, ld++) {
			TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
			TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
		}

		membar_producer();
		ld_table[ld_freeptr++] = block;
	}
}
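
/*
 * For example, after the first successful batch allocation above,
 * ld_freeptr advances from 1 (set by lockdebug_init() for the static
 * ld_prime batch) to 2, with ld_table[1] pointing at the new block of
 * LD_BATCH structures.
 */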

/*
 * lockdebug_wantlock:
 *
 *	Process the preamble to a lock acquire.
 */
void
lockdebug_wantlock(volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebuglk_t *lk;
	lockdebug_t *ld;
	bool recurse;

	(void)shared;
	recurse = false;

	if (panicstr != NULL)
		return;

	if ((ld = lockdebug_lookup(lock, &lk)) == NULL)
		return;

	if ((ld->ld_flags & LD_LOCKED) != 0) {
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp == l)
				recurse = true;
		} else if (ld->ld_cpu == (uint16_t)cpu_number())
			recurse = true;
	}

	if (cpu_intr_p()) {
		if ((ld->ld_flags & LD_SLEEPER) != 0)
			lockdebug_abort1(ld, lk, __func__,
			    "acquiring sleep lock from interrupt context",
			    true);
	}

	if (shared)
		ld->ld_shwant++;
	else
		ld->ld_exwant++;

	if (recurse)
		lockdebug_abort1(ld, lk, __func__, "locking against myself",
		    true);

	lockdebug_unlock(lk);
}
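
/*
 * Sketch of the expected call order in a lock primitive (assumed
 * caller, illustrative only): announce intent, acquire, then record
 * the acquisition, passing the caller's return address both times:
 *
 *	lockdebug_wantlock(mtx, (uintptr_t)__builtin_return_address(0), 0);
 *	... spin or sleep until the lock is taken ...
 *	lockdebug_locked(mtx, (uintptr_t)__builtin_return_address(0), 0);
 */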

/*
 * lockdebug_locked:
 *
 *	Process a lock acquire operation.
 */
void
lockdebug_locked(volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebuglk_t *lk;
	lockdebug_t *ld;

	if (panicstr != NULL)
		return;

	if ((ld = lockdebug_lookup(lock, &lk)) == NULL)
		return;

	if (shared) {
		l->l_shlocks++;
		ld->ld_shares++;
		ld->ld_shwant--;
	} else {
		if ((ld->ld_flags & LD_LOCKED) != 0)
			lockdebug_abort1(ld, lk, __func__,
			    "already locked", true);

		ld->ld_flags |= LD_LOCKED;
		ld->ld_locked = where;
		ld->ld_cpu = (uint16_t)cpu_number();
		ld->ld_lwp = l;
		ld->ld_exwant--;

		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			l->l_exlocks++;
			TAILQ_INSERT_TAIL(&ld_sleepers, ld, ld_chain);
		} else {
			curcpu()->ci_spin_locks2++;
			TAILQ_INSERT_TAIL(&ld_spinners, ld, ld_chain);
		}
	}

	lockdebug_unlock(lk);
}

/*
 * lockdebug_unlocked:
 *
 *	Process a lock release operation.
 */
void
lockdebug_unlocked(volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebuglk_t *lk;
	lockdebug_t *ld;

	if (panicstr != NULL)
		return;

	if ((ld = lockdebug_lookup(lock, &lk)) == NULL)
		return;

	if (shared) {
		if (l->l_shlocks == 0)
			lockdebug_abort1(ld, lk, __func__,
			    "no shared locks held by LWP", true);
		if (ld->ld_shares == 0)
			lockdebug_abort1(ld, lk, __func__,
			    "no shared holds on this lock", true);
		l->l_shlocks--;
		ld->ld_shares--;
	} else {
		if ((ld->ld_flags & LD_LOCKED) == 0)
			lockdebug_abort1(ld, lk, __func__, "not locked",
			    true);

		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp != curlwp)
				lockdebug_abort1(ld, lk, __func__,
				    "not held by current LWP", true);
			ld->ld_flags &= ~LD_LOCKED;
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
			curlwp->l_exlocks--;
			TAILQ_REMOVE(&ld_sleepers, ld, ld_chain);
		} else {
			if (ld->ld_cpu != (uint16_t)cpu_number())
				lockdebug_abort1(ld, lk, __func__,
				    "not held by current CPU", true);
			ld->ld_flags &= ~LD_LOCKED;
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
			curcpu()->ci_spin_locks2--;
			TAILQ_REMOVE(&ld_spinners, ld, ld_chain);
		}
	}

	lockdebug_unlock(lk);
}
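
/*
 * Release-side sketch (assumed caller, illustrative only): the
 * primitive records the release before actually dropping the
 * underlying lock:
 *
 *	lockdebug_unlocked(mtx, (uintptr_t)__builtin_return_address(0), 0);
 *	... release the underlying lock ...
 */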

/*
 * lockdebug_barrier:
 *
 *	Panic if we hold more than one specified spin lock, and optionally,
 *	if we hold sleep locks.
 */
void
lockdebug_barrier(volatile void *spinlock, int slplocks)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	uint16_t cpuno;
	int s;

	if (panicstr != NULL)
		return;

	crit_enter();

	if (curcpu()->ci_spin_locks2 != 0) {
		cpuno = (uint16_t)cpu_number();

		s = lockdebug_lock_rd(&ld_spinner_lk);
		TAILQ_FOREACH(ld, &ld_spinners, ld_chain) {
			if (ld->ld_lock == spinlock) {
				if (ld->ld_cpu != cpuno)
					lockdebug_abort1(ld, &ld_spinner_lk,
					    __func__,
					    "not held by current CPU", true);
				continue;
			}
			if (ld->ld_cpu == cpuno && (l->l_pflag & LP_INTR) == 0)
				lockdebug_abort1(ld, &ld_spinner_lk,
				    __func__, "spin lock held", true);
		}
		lockdebug_unlock_rd(&ld_spinner_lk, s);
	}

	if (!slplocks) {
		if (l->l_exlocks != 0) {
			s = lockdebug_lock_rd(&ld_sleeper_lk);
			TAILQ_FOREACH(ld, &ld_sleepers, ld_chain) {
				if (ld->ld_lwp == l)
					lockdebug_abort1(ld, &ld_sleeper_lk,
					    __func__, "sleep lock held", true);
			}
			lockdebug_unlock_rd(&ld_sleeper_lk, s);
		}
		if (l->l_shlocks != 0)
			panic("lockdebug_barrier: holding %d shared locks",
			    l->l_shlocks);
	}

	crit_exit();
}
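
/*
 * Usage sketch (assumed callers, illustrative only): code about to
 * sleep can assert that nothing but its interlock is held, and a NULL
 * spinlock argument asserts that no spin locks are held at all:
 *
 *	lockdebug_barrier(mtx, 0);	(only mtx may be held)
 *	lockdebug_barrier(NULL, 1);	(sleep locks are acceptable)
 */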

/*
 * lockdebug_mem_check:
 *
 *	Check for in-use locks within a memory region that is
 *	being freed.
 */
void
lockdebug_mem_check(const char *func, void *base, size_t sz)
{
	lockdebug_t *ld;
	lockdebuglk_t *lk;
	int s;

	if (panicstr != NULL)
		return;

	s = lockdebug_lock_rd(&ld_tree_lk);
	ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base);
	if (ld != NULL) {
		const uintptr_t lock = (uintptr_t)ld->ld_lock;

		if ((uintptr_t)base > lock)
			panic("%s: corrupt tree ld=%p, base=%p, sz=%zu",
			    __func__, ld, base, sz);
		if (lock >= (uintptr_t)base + sz)
			ld = NULL;
	}
	lockdebug_unlock_rd(&ld_tree_lk, s);
	if (ld == NULL)
		return;

	if ((ld->ld_flags & LD_SLEEPER) != 0)
		lk = &ld_sleeper_lk;
	else
		lk = &ld_spinner_lk;

	lockdebug_lock(lk);
	lockdebug_abort1(ld, lk, func,
	    "allocation contains active lock", !cold);
}
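
/*
 * Sketch of the intended caller (assumed): an allocator's free path
 * checks the region before recycling it, e.g.:
 *
 *	lockdebug_mem_check(__func__, p, size);
 *	... return p to the free pool ...
 */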

/*
 * lockdebug_dump:
 *
 *	Dump information about a lock on panic, or for DDB.
 */
static void
lockdebug_dump(lockdebug_t *ld, void (*pr)(const char *, ...))
{
	int sleeper = (ld->ld_flags & LD_SLEEPER);

	(*pr)(
	    "lock address : %#018lx type     : %18s\n"
	    "shared holds : %18u exclusive: %18u\n"
	    "shares wanted: %18u exclusive: %18u\n"
	    "current cpu  : %18u last held: %18u\n"
	    "current lwp  : %#018lx last held: %#018lx\n"
	    "last locked  : %#018lx unlocked : %#018lx\n"
	    "initialized  : %#018lx\n",
	    (long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"),
	    (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
	    (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
	    (unsigned)cpu_number(), (unsigned)ld->ld_cpu,
	    (long)curlwp, (long)ld->ld_lwp,
	    (long)ld->ld_locked, (long)ld->ld_unlocked,
	    (long)ld->ld_initaddr);

	if (ld->ld_lockops->lo_dump != NULL)
		(*ld->ld_lockops->lo_dump)(ld->ld_lock);

	if (sleeper) {
		(*pr)("\n");
		turnstile_print(ld->ld_lock, pr);
	}
}

/*
 * lockdebug_abort1:
 *
 *	An error has been trapped: dump information about the lock,
 *	then panic if requested.
 */
static void
lockdebug_abort1(lockdebug_t *ld, lockdebuglk_t *lk, const char *func,
		 const char *msg, bool dopanic)
{

	printf_nolog("%s error: %s: %s\n\n", ld->ld_lockops->lo_name,
	    func, msg);
	lockdebug_dump(ld, printf_nolog);
	lockdebug_unlock(lk);
	printf_nolog("\n");
	if (dopanic)
		panic("LOCKDEBUG");
}

#endif	/* LOCKDEBUG */

/*
 * lockdebug_lock_print:
 *
 *	Handle the DDB 'show lock' command.
 */
#ifdef DDB
void
lockdebug_lock_print(void *addr, void (*pr)(const char *, ...))
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;

	TAILQ_FOREACH(ld, &ld_all, ld_achain) {
		if (ld->ld_lock == addr) {
			lockdebug_dump(ld, pr);
			return;
		}
	}
	(*pr)("Sorry, no record of a lock with address %p found.\n", addr);
#else
	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
#endif	/* LOCKDEBUG */
}
#endif	/* DDB */

/*
 * lockdebug_abort:
 *
 *	An error has been trapped - dump lock info and call panic().
 */
void
lockdebug_abort(volatile void *lock, lockops_t *ops, const char *func,
		const char *msg)
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;
	lockdebuglk_t *lk;

	if ((ld = lockdebug_lookup(lock, &lk)) != NULL) {
		lockdebug_abort1(ld, lk, func, msg, true);
		/* NOTREACHED */
	}
#endif	/* LOCKDEBUG */

	printf_nolog("%s error: %s: %s\n\n"
	    "lock address : %#018lx\n"
	    "current cpu  : %18d\n"
	    "current lwp  : %#018lx\n",
	    ops->lo_name, func, msg, (long)lock, (int)cpu_number(),
	    (long)curlwp);

	(*ops->lo_dump)(lock);

	printf_nolog("\n");
	panic("lock error");
}
    818