/*	$NetBSD: subr_lockdebug.c,v 1.46 2012/08/04 12:38:20 christos Exp $	*/

/*-
 * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Basic lock debugging code shared among lock primitives.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.46 2012/08/04 12:38:20 christos Exp $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lockdebug.h>
#include <sys/sleepq.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/lock.h>
#include <sys/rbtree.h>

#include <machine/lock.h>

unsigned int		ld_panic;

#ifdef LOCKDEBUG

#define	LD_BATCH_SHIFT	9
#define	LD_BATCH	(1 << LD_BATCH_SHIFT)
#define	LD_BATCH_MASK	(LD_BATCH - 1)
#define	LD_MAX_LOCKS	1048576
#define	LD_SLOP		16

#define	LD_LOCKED	0x01
#define	LD_SLEEPER	0x02

#define	LD_WRITE_LOCK	0x80000000

typedef struct lockdebug {
	struct rb_node	ld_rb_node;
	__cpu_simple_lock_t ld_spinlock;
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
	volatile void	*ld_lock;
	lockops_t	*ld_lockops;
	struct lwp	*ld_lwp;
	uintptr_t	ld_locked;
	uintptr_t	ld_unlocked;
	uintptr_t	ld_initaddr;
	uint16_t	ld_shares;
	uint16_t	ld_cpu;
	uint8_t		ld_flags;
	uint8_t		ld_shwant;	/* advisory */
	uint8_t		ld_exwant;	/* advisory */
	uint8_t		ld_unused;
} volatile lockdebug_t;

typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;

__cpu_simple_lock_t	ld_mod_lk;
lockdebuglist_t		ld_free = TAILQ_HEAD_INITIALIZER(ld_free);
lockdebuglist_t		ld_all = TAILQ_HEAD_INITIALIZER(ld_all);
int			ld_nfree;
int			ld_freeptr;
int			ld_recurse;
bool			ld_nomore;
lockdebug_t		ld_prime[LD_BATCH];

static void	lockdebug_abort1(lockdebug_t *, int, const char *,
				 const char *, bool);
static int	lockdebug_more(int);
static void	lockdebug_init(void);

static signed int
ld_rbto_compare_nodes(void *ctx, const void *n1, const void *n2)
{
	const lockdebug_t *ld1 = n1;
	const lockdebug_t *ld2 = n2;
	const uintptr_t a = (uintptr_t)ld1->ld_lock;
	const uintptr_t b = (uintptr_t)ld2->ld_lock;

	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}

static signed int
ld_rbto_compare_key(void *ctx, const void *n, const void *key)
{
	const lockdebug_t *ld = n;
	const uintptr_t a = (uintptr_t)ld->ld_lock;
	const uintptr_t b = (uintptr_t)key;

	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}

static rb_tree_t ld_rb_tree;

static const rb_tree_ops_t ld_rb_tree_ops = {
	.rbto_compare_nodes = ld_rbto_compare_nodes,
	.rbto_compare_key = ld_rbto_compare_key,
	.rbto_node_offset = offsetof(lockdebug_t, ld_rb_node),
	.rbto_context = NULL
};

static inline lockdebug_t *
lockdebug_lookup1(volatile void *lock)
{
	lockdebug_t *ld;
	struct cpu_info *ci;

	ci = curcpu();
	__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
	ld = (lockdebug_t *)rb_tree_find_node(&ld_rb_tree, __UNVOLATILE(lock));
	__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
	if (ld == NULL) {
		return NULL;
	}
	__cpu_simple_lock(&ld->ld_spinlock);

	return ld;
}

static void
lockdebug_lock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
	}
}

static void
lockdebug_unlock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
	}
}

/*
 * lockdebug_lookup:
 *
 *	Find a lockdebug structure by a pointer to a lock and return it locked.
 */
static inline lockdebug_t *
lockdebug_lookup(volatile void *lock, uintptr_t where)
{
	lockdebug_t *ld;

	ld = lockdebug_lookup1(lock);
	if (ld == NULL) {
		panic("lockdebug_lookup: uninitialized lock "
		    "(lock=%p, from=%08"PRIxPTR")", lock, where);
	}
	return ld;
}
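
/*
 * Usage note (illustrative sketch only, not compiled): every consumer of
 * lockdebug_lookup() below follows the same pattern - raise the SPL, look
 * the lock up (the structure comes back with ld_spinlock held), inspect
 * or update the entry, then drop ld_spinlock and restore the SPL:
 */
#if 0
	int s;
	lockdebug_t *ld;

	s = splhigh();
	if ((ld = lockdebug_lookup(lock, where)) == NULL) {
		splx(s);
		return;
	}
	/* ... examine or modify *ld here ... */
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
#endif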

/*
 * lockdebug_init:
 *
 *	Initialize the lockdebug system.  Allocate an initial pool of
 *	lockdebug structures before the VM system is up and running.
 */
static void
lockdebug_init(void)
{
	lockdebug_t *ld;
	int i;

	TAILQ_INIT(&curcpu()->ci_data.cpu_ld_locks);
	TAILQ_INIT(&curlwp->l_ld_locks);
	__cpu_simple_lock_init(&curcpu()->ci_data.cpu_ld_lock);
	__cpu_simple_lock_init(&ld_mod_lk);

	rb_tree_init(&ld_rb_tree, &ld_rb_tree_ops);

	ld = ld_prime;
	for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
		__cpu_simple_lock_init(&ld->ld_spinlock);
		TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
		TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
	}
	ld_freeptr = 1;
	ld_nfree = LD_BATCH - 1;
}

/*
 * lockdebug_alloc:
 *
 *	A lock is being initialized, so allocate an associated debug
 *	structure.
 */
bool
lockdebug_alloc(volatile void *lock, lockops_t *lo, uintptr_t initaddr)
{
	struct cpu_info *ci;
	lockdebug_t *ld;
	int s;

	if (lo == NULL || panicstr != NULL || ld_panic)
		return false;
	if (ld_freeptr == 0)
		lockdebug_init();

	s = splhigh();
	__cpu_simple_lock(&ld_mod_lk);
	if ((ld = lockdebug_lookup1(lock)) != NULL) {
		__cpu_simple_unlock(&ld_mod_lk);
		lockdebug_abort1(ld, s, __func__, "already initialized", true);
		return false;
	}

	/*
	 * Pinch a new debug structure.  We may recurse because we call
	 * kmem_alloc(), which may need to initialize new locks somewhere
	 * down the path.  If not recursing, we try to maintain at least
	 * LD_SLOP structures free, which should hopefully be enough to
	 * satisfy kmem_alloc().  If we can't provide a structure, not to
	 * worry: we'll just mark the lock as not having an ID.
	 */
	ci = curcpu();
	ci->ci_lkdebug_recurse++;
	if (TAILQ_EMPTY(&ld_free)) {
		if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
			ci->ci_lkdebug_recurse--;
			__cpu_simple_unlock(&ld_mod_lk);
			splx(s);
			return false;
		}
		s = lockdebug_more(s);
	} else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP) {
		s = lockdebug_more(s);
	}
	if ((ld = TAILQ_FIRST(&ld_free)) == NULL) {
		__cpu_simple_unlock(&ld_mod_lk);
		splx(s);
		return false;
	}
	TAILQ_REMOVE(&ld_free, ld, ld_chain);
	ld_nfree--;
	ci->ci_lkdebug_recurse--;

	if (ld->ld_lock != NULL) {
		panic("lockdebug_alloc: corrupt table");
	}

	/* Initialise the structure. */
	ld->ld_lock = lock;
	ld->ld_lockops = lo;
	ld->ld_locked = 0;
	ld->ld_unlocked = 0;
	ld->ld_lwp = NULL;
	ld->ld_initaddr = initaddr;
	ld->ld_flags = (lo->lo_type == LOCKOPS_SLEEP ? LD_SLEEPER : 0);
	lockdebug_lock_cpus();
	(void)rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(ld));
	lockdebug_unlock_cpus();
	__cpu_simple_unlock(&ld_mod_lk);

	splx(s);
	return true;
}
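
/*
 * Example (sketch only, not compiled): a hypothetical lock primitive
 * would register each lock from its init routine and remember whether
 * registration succeeded, so the debug hooks can be skipped for locks
 * that never got a lockdebug entry.  "struct example_lock", its fields
 * and "example_lockops" are invented for illustration; LOCKOPS_SPIN is
 * assumed to exist alongside the LOCKOPS_SLEEP and LOCKOPS_CV values
 * used in this file.
 */
#if 0
static lockops_t example_lockops = {
	.lo_name = "example_lock",
	.lo_type = LOCKOPS_SPIN,	/* assumed spin variant of lo_type */
	.lo_dump = NULL,
};

void
example_lock_init(struct example_lock *el)
{

	__cpu_simple_lock_init(&el->el_spinner);
	/* Record the init site so lockdebug_dump() can report it later. */
	el->el_dodebug = lockdebug_alloc(el, &example_lockops,
	    (uintptr_t)__builtin_return_address(0));
}
#endif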

/*
 * lockdebug_free:
 *
 *	A lock is being destroyed, so release debugging resources.
 */
void
lockdebug_free(volatile void *lock)
{
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	__cpu_simple_lock(&ld_mod_lk);
	ld = lockdebug_lookup(lock, (uintptr_t) __builtin_return_address(0));
	if (ld == NULL) {
		__cpu_simple_unlock(&ld_mod_lk);
		/*
		 * ld is NULL here, so it must not be dereferenced when
		 * composing the panic message.
		 */
		panic("lockdebug_free: destroying uninitialized object %p",
		    lock);
		return;
	}
	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
		__cpu_simple_unlock(&ld_mod_lk);
		lockdebug_abort1(ld, s, __func__, "is locked or in use", true);
		return;
	}
	lockdebug_lock_cpus();
	rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(ld));
	lockdebug_unlock_cpus();
	ld->ld_lock = NULL;
	TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
	ld_nfree++;
	__cpu_simple_unlock(&ld->ld_spinlock);
	__cpu_simple_unlock(&ld_mod_lk);
	splx(s);
}
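
/*
 * Example (sketch only, continuing the hypothetical primitive above):
 * the destroy side of the pairing.  The flag saved by example_lock_init()
 * gates the call, since lockdebug_alloc() may legitimately have failed
 * to provide a structure.
 */
#if 0
void
example_lock_destroy(struct example_lock *el)
{

	if (el->el_dodebug)
		lockdebug_free(el);
	/* ... tear down the lock itself ... */
}
#endif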

/*
 * lockdebug_more:
 *
 *	Allocate a batch of debug structures and add to the free list.
 *	Must be called with ld_mod_lk held.
 */
static int
lockdebug_more(int s)
{
	lockdebug_t *ld;
	void *block;
	int i, base, m;

	/*
	 * Can't call kmem_alloc() if in interrupt context.  XXX We could
	 * deadlock, because we don't know which locks the caller holds.
	 */
	if (cpu_intr_p() || (curlwp->l_pflag & LP_INTR) != 0) {
		return s;
	}

	while (ld_nfree < LD_SLOP) {
		__cpu_simple_unlock(&ld_mod_lk);
		splx(s);
		block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
		s = splhigh();
		__cpu_simple_lock(&ld_mod_lk);

		if (block == NULL)
			return s;

		if (ld_nfree > LD_SLOP) {
			/* Somebody beat us to it. */
			__cpu_simple_unlock(&ld_mod_lk);
			splx(s);
			kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
			s = splhigh();
			__cpu_simple_lock(&ld_mod_lk);
			continue;
		}

		base = ld_freeptr;
		ld_nfree += LD_BATCH;
		ld = block;
		base <<= LD_BATCH_SHIFT;
		m = min(LD_MAX_LOCKS, base + LD_BATCH);

		if (m == LD_MAX_LOCKS)
			ld_nomore = true;

		for (i = base; i < m; i++, ld++) {
			__cpu_simple_lock_init(&ld->ld_spinlock);
			TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
			TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
		}

		membar_producer();
	}

	return s;
}

/*
 * lockdebug_wantlock:
 *
 *	Process the preamble to a lock acquire.
 */
void
lockdebug_wantlock(volatile void *lock, uintptr_t where, bool shared,
		   bool trylock)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	bool recurse;
	int s;

	(void)shared;
	recurse = false;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	if ((ld = lockdebug_lookup(lock, where)) == NULL) {
		splx(s);
		return;
	}
	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp == l && !(shared && trylock))
				recurse = true;
		} else if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
			recurse = true;
	}
	if (cpu_intr_p()) {
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			lockdebug_abort1(ld, s, __func__,
			    "acquiring sleep lock from interrupt context",
			    true);
			return;
		}
	}
	if (shared)
		ld->ld_shwant++;
	else
		ld->ld_exwant++;
	if (recurse) {
		lockdebug_abort1(ld, s, __func__, "locking against myself",
		    true);
		return;
	}
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}
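
/*
 * Example (sketch only, hypothetical primitive): the acquire path calls
 * lockdebug_wantlock() before contending for the lock, so recursion and
 * sleep-lock-from-interrupt errors are caught even if the acquire would
 * deadlock, and lockdebug_locked() once the lock is held.
 */
#if 0
void
example_lock_acquire(struct example_lock *el)
{
	uintptr_t where = (uintptr_t)__builtin_return_address(0);

	if (el->el_dodebug)
		lockdebug_wantlock(el, where, false /*shared*/,
		    false /*trylock*/);
	__cpu_simple_lock(&el->el_spinner);
	if (el->el_dodebug)
		lockdebug_locked(el, NULL /* no CV interlock */, where, 0);
}
#endif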

/*
 * lockdebug_locked:
 *
 *	Process a lock acquire operation.
 */
void
lockdebug_locked(volatile void *lock, void *cvlock, uintptr_t where,
		 int shared)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	if ((ld = lockdebug_lookup(lock, where)) == NULL) {
		splx(s);
		return;
	}
	if (cvlock) {
		KASSERT(ld->ld_lockops->lo_type == LOCKOPS_CV);
		if (lock == (void *)&lbolt) {
			/* nothing */
		} else if (ld->ld_shares++ == 0) {
			ld->ld_locked = (uintptr_t)cvlock;
		} else if (cvlock != (void *)ld->ld_locked) {
			lockdebug_abort1(ld, s, __func__, "multiple locks used"
			    " with condition variable", true);
			return;
		}
	} else if (shared) {
		l->l_shlocks++;
		ld->ld_locked = where;
		ld->ld_shares++;
		ld->ld_shwant--;
	} else {
		if ((ld->ld_flags & LD_LOCKED) != 0) {
			lockdebug_abort1(ld, s, __func__, "already locked",
			    true);
			return;
		}
		ld->ld_flags |= LD_LOCKED;
		ld->ld_locked = where;
		ld->ld_exwant--;
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			TAILQ_INSERT_TAIL(&l->l_ld_locks, ld, ld_chain);
		} else {
			TAILQ_INSERT_TAIL(&curcpu()->ci_data.cpu_ld_locks,
			    ld, ld_chain);
		}
	}
	ld->ld_cpu = (uint16_t)cpu_index(curcpu());
	ld->ld_lwp = l;
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}

/*
 * lockdebug_unlocked:
 *
 *	Process a lock release operation.
 */
void
lockdebug_unlocked(volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	if ((ld = lockdebug_lookup(lock, where)) == NULL) {
		splx(s);
		return;
	}
	if (ld->ld_lockops->lo_type == LOCKOPS_CV) {
		if (lock == (void *)&lbolt) {
			/* nothing */
		} else {
			ld->ld_shares--;
		}
	} else if (shared) {
		if (l->l_shlocks == 0) {
			lockdebug_abort1(ld, s, __func__,
			    "no shared locks held by LWP", true);
			return;
		}
		if (ld->ld_shares == 0) {
			lockdebug_abort1(ld, s, __func__,
			    "no shared holds on this lock", true);
			return;
		}
		l->l_shlocks--;
		ld->ld_shares--;
		if (ld->ld_lwp == l) {
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
		}
		if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
			ld->ld_cpu = (uint16_t)-1;
	} else {
		if ((ld->ld_flags & LD_LOCKED) == 0) {
			lockdebug_abort1(ld, s, __func__, "not locked", true);
			return;
		}

		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp != curlwp) {
				lockdebug_abort1(ld, s, __func__,
				    "not held by current LWP", true);
				return;
			}
			TAILQ_REMOVE(&l->l_ld_locks, ld, ld_chain);
		} else {
			if (ld->ld_cpu != (uint16_t)cpu_index(curcpu())) {
				lockdebug_abort1(ld, s, __func__,
				    "not held by current CPU", true);
				return;
			}
			TAILQ_REMOVE(&curcpu()->ci_data.cpu_ld_locks, ld,
			    ld_chain);
		}
		ld->ld_flags &= ~LD_LOCKED;
		ld->ld_unlocked = where;
		ld->ld_lwp = NULL;
	}
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}
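
/*
 * Example (sketch only, hypothetical primitive): the release path tells
 * lockdebug about the release while the lock is still held, so the
 * "not held by current CPU/LWP" checks above run against a stable
 * owner, and only then performs the real unlock.
 */
#if 0
void
example_lock_release(struct example_lock *el)
{

	if (el->el_dodebug)
		lockdebug_unlocked(el,
		    (uintptr_t)__builtin_return_address(0), 0 /*shared*/);
	__cpu_simple_unlock(&el->el_spinner);
}
#endif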

/*
 * lockdebug_wakeup:
 *
 *	Process a wakeup on a condition variable.
 */
void
lockdebug_wakeup(volatile void *lock, uintptr_t where)
{
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic || lock == (void *)&lbolt)
		return;

	s = splhigh();
	/* Find the CV... */
	if ((ld = lockdebug_lookup(lock, where)) == NULL) {
		splx(s);
		return;
	}
	/*
	 * If it has any waiters, ensure that they are using the
	 * same interlock.
	 */
	if (ld->ld_shares != 0 && !mutex_owned((kmutex_t *)ld->ld_locked)) {
		lockdebug_abort1(ld, s, __func__, "interlocking mutex not "
		    "held during wakeup", true);
		return;
	}
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}
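
/*
 * Example (sketch only, invented function): a condition variable
 * implementation would call lockdebug_wakeup() from its broadcast path
 * with the interlock still held, which is exactly what the
 * mutex_owned() check above verifies.
 */
#if 0
void
example_cv_broadcast(kcondvar_t *cv, kmutex_t *interlock)
{

	KASSERT(mutex_owned(interlock));
	lockdebug_wakeup(cv, (uintptr_t)__builtin_return_address(0));
	/* ... wake the sleepers ... */
}
#endif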

/*
 * lockdebug_barrier:
 *
 *	Panic if we hold any spin lock other than the one specified,
 *	and optionally, if we hold any sleep locks.
 */
void
lockdebug_barrier(volatile void *spinlock, int slplocks)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	if ((l->l_pflag & LP_INTR) == 0) {
		TAILQ_FOREACH(ld, &curcpu()->ci_data.cpu_ld_locks, ld_chain) {
			if (ld->ld_lock == spinlock) {
				continue;
			}
			__cpu_simple_lock(&ld->ld_spinlock);
			lockdebug_abort1(ld, s, __func__,
			    "spin lock held", true);
			return;
		}
	}
	if (slplocks) {
		splx(s);
		return;
	}
	if ((ld = TAILQ_FIRST(&l->l_ld_locks)) != NULL) {
		__cpu_simple_lock(&ld->ld_spinlock);
		lockdebug_abort1(ld, s, __func__, "sleep lock held", true);
		return;
	}
	splx(s);
	if (l->l_shlocks != 0) {
		panic("lockdebug_barrier: holding %d shared locks",
		    l->l_shlocks);
	}
}
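
/*
 * Example (sketch only, hypothetical call sites): code that is about to
 * block can assert that it is safe to do so.  The first argument names
 * the one spin lock that may legitimately still be held (or NULL for
 * none); a non-zero second argument additionally permits sleep locks.
 */
#if 0
	/* About to sleep: no spin locks, no sleep locks, no shared holds. */
	lockdebug_barrier(NULL, 0);

	/* Context-switch path: only the LWP's scheduler lock may be held. */
	lockdebug_barrier(l->l_mutex, 1);
#endif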

/*
 * lockdebug_mem_check:
 *
 *	Check for in-use locks within a memory region that is
 *	being freed.
 */
void
lockdebug_mem_check(const char *func, void *base, size_t sz)
{
	lockdebug_t *ld;
	struct cpu_info *ci;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	ci = curcpu();
	__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
	ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base);
	if (ld != NULL) {
		const uintptr_t lock = (uintptr_t)ld->ld_lock;

		if ((uintptr_t)base > lock)
			panic("%s: corrupt tree ld=%p, base=%p, sz=%zu",
			    __func__, ld, base, sz);
		if (lock >= (uintptr_t)base + sz)
			ld = NULL;
	}
	__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
	if (ld != NULL) {
		__cpu_simple_lock(&ld->ld_spinlock);
		lockdebug_abort1(ld, s, func,
		    "allocation contains active lock", !cold);
		return;
	}
	splx(s);
}
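
/*
 * Example (sketch only): an allocator's free path would call this just
 * before recycling memory, so that freeing a still-initialized lock is
 * caught at the free site rather than surfacing as corruption later.
 * The wrapper shown is hypothetical; it exploits the address-ordered
 * rb-tree by probing for the first lock at or above the region's base.
 */
#if 0
void
example_free(void *p, size_t size)
{

	lockdebug_mem_check("example_free", p, size);
	kmem_free(p, size);
}
#endif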

/*
 * lockdebug_dump:
 *
 *	Dump information about a lock on panic, or for DDB.
 */
static void
lockdebug_dump(lockdebug_t *ld, void (*pr)(const char *, ...))
{
	int sleeper = (ld->ld_flags & LD_SLEEPER);

	(*pr)(
	    "lock address : %#018lx type     : %18s\n"
	    "initialized  : %#018lx",
	    (long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"),
	    (long)ld->ld_initaddr);

	if (ld->ld_lockops->lo_type == LOCKOPS_CV) {
		(*pr)(" interlock: %#018lx\n", ld->ld_locked);
	} else {
		(*pr)("\n"
		    "shared holds : %18u exclusive: %18u\n"
		    "shares wanted: %18u exclusive: %18u\n"
		    "current cpu  : %18u last held: %18u\n"
		    "current lwp  : %#018lx last held: %#018lx\n"
		    "last locked%c : %#018lx unlocked%c: %#018lx\n",
		    (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
		    (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
		    (unsigned)cpu_index(curcpu()), (unsigned)ld->ld_cpu,
		    (long)curlwp, (long)ld->ld_lwp,
		    ((ld->ld_flags & LD_LOCKED) ? '*' : ' '),
		    (long)ld->ld_locked,
		    ((ld->ld_flags & LD_LOCKED) ? ' ' : '*'),
		    (long)ld->ld_unlocked);
	}

	if (ld->ld_lockops->lo_dump != NULL)
		(*ld->ld_lockops->lo_dump)(ld->ld_lock);

	if (sleeper) {
		(*pr)("\n");
		turnstile_print(ld->ld_lock, pr);
	}
}

/*
 * lockdebug_abort1:
 *
 *	An error has been trapped - dump lock info and panic.
 */
static void
lockdebug_abort1(lockdebug_t *ld, int s, const char *func,
		 const char *msg, bool dopanic)
{

	/*
	 * Don't make the situation worse if the system is already going
	 * down in flames.  Once a panic is triggered, lockdebug state
	 * becomes stale and cannot be trusted.
	 */
	if (atomic_inc_uint_nv(&ld_panic) != 1) {
		__cpu_simple_unlock(&ld->ld_spinlock);
		splx(s);
		return;
	}

	printf_nolog("%s error: %s: %s\n\n", ld->ld_lockops->lo_name,
	    func, msg);
	lockdebug_dump(ld, printf_nolog);
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
	printf_nolog("\n");
	if (dopanic)
		panic("LOCKDEBUG");
}

#endif	/* LOCKDEBUG */

/*
 * lockdebug_lock_print:
 *
 *	Handle the DDB 'show lock' command.
 */
#ifdef DDB
void
lockdebug_lock_print(void *addr, void (*pr)(const char *, ...))
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;

	TAILQ_FOREACH(ld, &ld_all, ld_achain) {
		if (ld->ld_lock == NULL)
			continue;
		if (addr == NULL || ld->ld_lock == addr) {
			lockdebug_dump(ld, pr);
			if (addr != NULL)
				return;
		}
	}
	if (addr != NULL) {
		(*pr)("Sorry, no record of a lock with address %p found.\n",
		    addr);
	}
#else
	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
#endif	/* LOCKDEBUG */
}
#endif	/* DDB */

/*
 * lockdebug_abort:
 *
 *	An error has been trapped - dump lock info and call panic().
 */
void
lockdebug_abort(volatile void *lock, lockops_t *ops, const char *func,
		const char *msg)
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;
	int s;

	s = splhigh();
	if ((ld = lockdebug_lookup(lock,
			(uintptr_t) __builtin_return_address(0))) != NULL) {
		lockdebug_abort1(ld, s, func, msg, true);
		return;
	}
	splx(s);
#endif	/* LOCKDEBUG */

	/*
	 * Complain only on the first occurrence.  Then proceed to panic,
	 * where we will `rendezvous' with other CPUs if the machine is
	 * going down in flames.
	 */
	if (atomic_inc_uint_nv(&ld_panic) == 1) {
		printf_nolog("%s error: %s: %s\n\n"
		    "lock address : %#018lx\n"
		    "current cpu  : %18d\n"
		    "current lwp  : %#018lx\n",
		    ops->lo_name, func, msg, (long)lock,
		    (int)cpu_index(curcpu()), (long)curlwp);
		(*ops->lo_dump)(lock);
		printf_nolog("\n");
	}

	panic("lock error");
}