/*	$NetBSD: subr_lockdebug.c,v 1.33 2008/05/06 17:11:45 ad Exp $	*/

/*-
 * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Basic lock debugging code shared among lock primitives.
 */
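/*
 * Illustrative call sequence (a sketch only; the real call sites live in
 * the individual lock primitives, e.g. the mutex and rwlock code):
 *
 *	lockdebug_alloc(lock, ops, initaddr)	when a lock is initialized
 *	lockdebug_wantlock(lock, where, ...)	before spinning/sleeping on it
 *	lockdebug_locked(lock, where, shared)	once the lock has been acquired
 *	lockdebug_unlocked(lock, where, shared)	when the lock is released
 *	lockdebug_free(lock)			when the lock is destroyed
 */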

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.33 2008/05/06 17:11:45 ad Exp $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lockdebug.h>
#include <sys/sleepq.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/lock.h>

#include <lib/libkern/rb.h>

#include <machine/lock.h>

unsigned int		ld_panic;

#ifdef LOCKDEBUG

#define	LD_BATCH_SHIFT	9
#define	LD_BATCH	(1 << LD_BATCH_SHIFT)
#define	LD_BATCH_MASK	(LD_BATCH - 1)
#define	LD_MAX_LOCKS	1048576
#define	LD_SLOP		16

#define	LD_LOCKED	0x01
#define	LD_SLEEPER	0x02

#define	LD_WRITE_LOCK	0x80000000

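/*
 * Internal lock protecting lockdebug state.  Each lockdebuglk_t is padded
 * to a full cache line (COHERENCY_UNIT) to avoid false sharing.  The lock
 * word holds LD_WRITE_LOCK while an exclusive holder is in; otherwise it
 * counts the active readers (see lockdebug_lock() / lockdebug_lock_rd()
 * below).
 */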
typedef union lockdebuglk {
	struct {
		u_int	lku_lock;
		int	lku_oldspl;
	} ul;
	uint8_t	lk_pad[COHERENCY_UNIT];
} volatile __aligned(COHERENCY_UNIT) lockdebuglk_t;

#define	lk_lock		ul.lku_lock
#define	lk_oldspl	ul.lku_oldspl

typedef struct lockdebug {
	struct rb_node	ld_rb_node;	/* must be the first member */
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
	volatile void	*ld_lock;
	lockops_t	*ld_lockops;
	struct lwp	*ld_lwp;
	uintptr_t	ld_locked;
	uintptr_t	ld_unlocked;
	uintptr_t	ld_initaddr;
	uint16_t	ld_shares;
	uint16_t	ld_cpu;
	uint8_t		ld_flags;
	uint8_t		ld_shwant;	/* advisory */
	uint8_t		ld_exwant;	/* advisory */
	uint8_t		ld_unused;
} volatile lockdebug_t;

typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;

lockdebuglk_t		ld_tree_lk;
lockdebuglk_t		ld_sleeper_lk;
lockdebuglk_t		ld_spinner_lk;
lockdebuglk_t		ld_free_lk;

lockdebuglist_t		ld_sleepers = TAILQ_HEAD_INITIALIZER(ld_sleepers);
lockdebuglist_t		ld_spinners = TAILQ_HEAD_INITIALIZER(ld_spinners);
lockdebuglist_t		ld_free = TAILQ_HEAD_INITIALIZER(ld_free);
lockdebuglist_t		ld_all = TAILQ_HEAD_INITIALIZER(ld_all);
int			ld_nfree;
int			ld_freeptr;
int			ld_recurse;
bool			ld_nomore;
lockdebug_t		*ld_table[LD_MAX_LOCKS / LD_BATCH];

lockdebug_t		ld_prime[LD_BATCH];

static void	lockdebug_abort1(lockdebug_t *, lockdebuglk_t *lk,
				 const char *, const char *, bool);
static void	lockdebug_more(void);
static void	lockdebug_init(void);

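/*
 * Comparison callbacks for the red-black tree: lockdebug records are
 * keyed by the address of the lock they describe.
 */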
static signed int
ld_rb_compare_nodes(const struct rb_node *n1, const struct rb_node *n2)
{
	const lockdebug_t *ld1 = (const void *)n1;
	const lockdebug_t *ld2 = (const void *)n2;
	const uintptr_t a = (uintptr_t)ld1->ld_lock;
	const uintptr_t b = (uintptr_t)ld2->ld_lock;

	if (a < b)
		return 1;
	if (a > b)
		return -1;
	return 0;
}

static signed int
ld_rb_compare_key(const struct rb_node *n, const void *key)
{
	const lockdebug_t *ld = (const void *)n;
	const uintptr_t a = (uintptr_t)ld->ld_lock;
	const uintptr_t b = (uintptr_t)key;

	if (a < b)
		return 1;
	if (a > b)
		return -1;
	return 0;
}

static struct rb_tree ld_rb_tree;

static const struct rb_tree_ops ld_rb_tree_ops = {
	.rb_compare_nodes = ld_rb_compare_nodes,
	.rb_compare_key = ld_rb_compare_key,
};

static void
lockdebug_lock_init(lockdebuglk_t *lk)
{

	lk->lk_lock = 0;
}

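/*
 * lockdebug_lock:
 *
 *	Acquire an internal lockdebug lock exclusively: raise to splhigh(),
 *	spin until the lock word can be moved from 0 to LD_WRITE_LOCK, and
 *	stash the previous SPL in the lock for lockdebug_unlock().
 */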
static void
lockdebug_lock(lockdebuglk_t *lk)
{
	int s;

	s = splhigh();
	do {
		while (lk->lk_lock != 0) {
			SPINLOCK_SPIN_HOOK;
		}
	} while (atomic_cas_uint(&lk->lk_lock, 0, LD_WRITE_LOCK) != 0);
	lk->lk_oldspl = s;
	membar_enter();
}

static void
lockdebug_unlock(lockdebuglk_t *lk)
{
	int s;

	s = lk->lk_oldspl;
	membar_exit();
	lk->lk_lock = 0;
	splx(s);
}

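/*
 * lockdebug_lock_rd:
 *
 *	Acquire an internal lockdebug lock for reading: spin while a writer
 *	holds LD_WRITE_LOCK, then atomically bump the reader count.  The
 *	previous SPL is returned so the caller can pass it back to
 *	lockdebug_unlock_rd().
 */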
static int
lockdebug_lock_rd(lockdebuglk_t *lk)
{
	u_int val;
	int s;

	s = splhigh();
	do {
		while ((val = lk->lk_lock) == LD_WRITE_LOCK) {
			SPINLOCK_SPIN_HOOK;
		}
	} while (atomic_cas_uint(&lk->lk_lock, val, val + 1) != val);
	membar_enter();
	return s;
}

static void
lockdebug_unlock_rd(lockdebuglk_t *lk, int s)
{

	membar_exit();
	atomic_dec_uint(&lk->lk_lock);
	splx(s);
}

static inline lockdebug_t *
lockdebug_lookup1(volatile void *lock, lockdebuglk_t **lk)
{
	lockdebug_t *ld;
	int s;

	s = lockdebug_lock_rd(&ld_tree_lk);
	ld = (lockdebug_t *)rb_tree_find_node(&ld_rb_tree, __UNVOLATILE(lock));
	lockdebug_unlock_rd(&ld_tree_lk, s);
	if (ld == NULL)
		return NULL;

	if ((ld->ld_flags & LD_SLEEPER) != 0)
		*lk = &ld_sleeper_lk;
	else
		*lk = &ld_spinner_lk;

	lockdebug_lock(*lk);
	return ld;
}

/*
 * lockdebug_lookup:
 *
 *	Find a lockdebug structure by a pointer to a lock and return it locked.
 */
static inline lockdebug_t *
lockdebug_lookup(volatile void *lock, lockdebuglk_t **lk)
{
	lockdebug_t *ld;

	ld = lockdebug_lookup1(lock, lk);
	if (ld == NULL)
		panic("lockdebug_lookup: uninitialized lock (lock=%p)", lock);
	return ld;
}

/*
 * lockdebug_init:
 *
 *	Initialize the lockdebug system.  Allocate an initial pool of
 *	lockdebug structures before the VM system is up and running.
 */
static void
lockdebug_init(void)
{
	lockdebug_t *ld;
	int i;

	lockdebug_lock_init(&ld_tree_lk);
	lockdebug_lock_init(&ld_sleeper_lk);
	lockdebug_lock_init(&ld_spinner_lk);
	lockdebug_lock_init(&ld_free_lk);

	rb_tree_init(&ld_rb_tree, &ld_rb_tree_ops);

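	/*
	 * Seed the allocator with the statically allocated ld_prime[]
	 * batch, since kmem_alloc() is not yet available.  Index 0 of the
	 * batch is skipped, so the first batch contributes LD_BATCH - 1
	 * free entries (matching ld_nfree below).
	 */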
	ld = ld_prime;
	ld_table[0] = ld;
	for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
		TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
		TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
	}
	ld_freeptr = 1;
	ld_nfree = LD_BATCH - 1;
}

/*
 * lockdebug_alloc:
 *
 *	A lock is being initialized, so allocate an associated debug
 *	structure.
 */
bool
lockdebug_alloc(volatile void *lock, lockops_t *lo, uintptr_t initaddr)
{
	struct cpu_info *ci;
	lockdebug_t *ld;
	lockdebuglk_t *lk;

	if (lo == NULL || panicstr != NULL || ld_panic)
		return false;
	if (ld_freeptr == 0)
		lockdebug_init();

	if ((ld = lockdebug_lookup1(lock, &lk)) != NULL) {
		lockdebug_abort1(ld, lk, __func__, "already initialized", true);
		return false;
	}

	/*
	 * Pinch a new debug structure.  We may recurse because we call
	 * kmem_alloc(), which may need to initialize new locks somewhere
	 * down the path.  If not recursing, we try to maintain at least
	 * LD_SLOP structures free, which should hopefully be enough to
	 * satisfy kmem_alloc().  If we can't provide a structure, not to
	 * worry: we'll just mark the lock as not having an ID.
	 */
	lockdebug_lock(&ld_free_lk);
	ci = curcpu();
	ci->ci_lkdebug_recurse++;

	if (TAILQ_EMPTY(&ld_free)) {
		if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
			ci->ci_lkdebug_recurse--;
			lockdebug_unlock(&ld_free_lk);
			return false;
		}
		lockdebug_more();
	} else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP)
		lockdebug_more();

	if ((ld = TAILQ_FIRST(&ld_free)) == NULL) {
		lockdebug_unlock(&ld_free_lk);
		return false;
	}

	TAILQ_REMOVE(&ld_free, ld, ld_chain);
	ld_nfree--;

	ci->ci_lkdebug_recurse--;
	lockdebug_unlock(&ld_free_lk);

	if (ld->ld_lock != NULL)
		panic("lockdebug_alloc: corrupt table");

	if (lo->lo_sleeplock)
		lockdebug_lock(&ld_sleeper_lk);
	else
		lockdebug_lock(&ld_spinner_lk);

	/* Initialise the structure. */
	ld->ld_lock = lock;
	ld->ld_lockops = lo;
	ld->ld_locked = 0;
	ld->ld_unlocked = 0;
	ld->ld_lwp = NULL;
	ld->ld_initaddr = initaddr;

	lockdebug_lock(&ld_tree_lk);
	rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(&ld->ld_rb_node));
	lockdebug_unlock(&ld_tree_lk);

	if (lo->lo_sleeplock) {
		ld->ld_flags = LD_SLEEPER;
		lockdebug_unlock(&ld_sleeper_lk);
	} else {
		ld->ld_flags = 0;
		lockdebug_unlock(&ld_spinner_lk);
	}

	return true;
}

/*
 * lockdebug_free:
 *
 *	A lock is being destroyed, so release debugging resources.
 */
void
lockdebug_free(volatile void *lock)
{
	lockdebug_t *ld;
	lockdebuglk_t *lk;

	if (panicstr != NULL || ld_panic)
		return;

	ld = lockdebug_lookup(lock, &lk);
	if (ld == NULL) {
		panic("lockdebug_free: destroying uninitialized lock %p",
		    lock);
		return;
	}
	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
		lockdebug_abort1(ld, lk, __func__, "is locked", true);
		return;
	}
	lockdebug_lock(&ld_tree_lk);
	rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(&ld->ld_rb_node));
	lockdebug_unlock(&ld_tree_lk);
	ld->ld_lock = NULL;
	lockdebug_unlock(lk);

	lockdebug_lock(&ld_free_lk);
	TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
	ld_nfree++;
	lockdebug_unlock(&ld_free_lk);
}

/*
 * lockdebug_more:
 *
 *	Allocate a batch of debug structures and add to the free list.
 *	Must be called with ld_free_lk held.
 */
static void
lockdebug_more(void)
{
	lockdebug_t *ld;
	void *block;
	int i, base, m;

	while (ld_nfree < LD_SLOP) {
		lockdebug_unlock(&ld_free_lk);
		block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
		lockdebug_lock(&ld_free_lk);

		if (block == NULL)
			return;

		if (ld_nfree > LD_SLOP) {
			/* Somebody beat us to it. */
			lockdebug_unlock(&ld_free_lk);
			kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
			lockdebug_lock(&ld_free_lk);
			continue;
		}

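		/*
		 * Publish the new batch: ld_freeptr indexes ld_table[], and
		 * base = ld_freeptr << LD_BATCH_SHIFT is the global slot
		 * number of the batch's first entry.  membar_producer()
		 * below orders the list insertions before the ld_table[]
		 * update that makes the batch visible.
		 */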
		base = ld_freeptr;
		ld_nfree += LD_BATCH;
		ld = block;
		base <<= LD_BATCH_SHIFT;
		m = min(LD_MAX_LOCKS, base + LD_BATCH);

		if (m == LD_MAX_LOCKS)
			ld_nomore = true;

		for (i = base; i < m; i++, ld++) {
			TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
			TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
		}

		membar_producer();
		ld_table[ld_freeptr++] = block;
	}
}

/*
 * lockdebug_wantlock:
 *
 *	Process the preamble to a lock acquire.
 */
void
lockdebug_wantlock(volatile void *lock, uintptr_t where, bool shared,
		   bool trylock)
{
	struct lwp *l = curlwp;
	lockdebuglk_t *lk;
	lockdebug_t *ld;
	bool recurse;

	(void)shared;
	recurse = false;

	if (panicstr != NULL || ld_panic)
		return;

	if ((ld = lockdebug_lookup(lock, &lk)) == NULL)
		return;

	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp == l && !(shared && trylock))
				recurse = true;
		} else if (ld->ld_cpu == (uint16_t)cpu_number())
			recurse = true;
	}

	if (cpu_intr_p()) {
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			lockdebug_abort1(ld, lk, __func__,
			    "acquiring sleep lock from interrupt context",
			    true);
			return;
		}
	}

	if (shared)
		ld->ld_shwant++;
	else
		ld->ld_exwant++;

	if (recurse) {
		lockdebug_abort1(ld, lk, __func__, "locking against myself",
		    true);
		return;
	}

	lockdebug_unlock(lk);
}

/*
 * lockdebug_locked:
 *
 *	Process a lock acquire operation.
 */
void
lockdebug_locked(volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebuglk_t *lk;
	lockdebug_t *ld;

	if (panicstr != NULL || ld_panic)
		return;

	if ((ld = lockdebug_lookup(lock, &lk)) == NULL)
		return;

	if (shared) {
		l->l_shlocks++;
		ld->ld_shares++;
		ld->ld_shwant--;
	} else {
		if ((ld->ld_flags & LD_LOCKED) != 0) {
			lockdebug_abort1(ld, lk, __func__,
			    "already locked", true);
			return;
		}

		ld->ld_flags |= LD_LOCKED;
		ld->ld_locked = where;
		ld->ld_exwant--;

		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			l->l_exlocks++;
			TAILQ_INSERT_TAIL(&ld_sleepers, ld, ld_chain);
		} else {
			curcpu()->ci_spin_locks2++;
			TAILQ_INSERT_TAIL(&ld_spinners, ld, ld_chain);
		}
	}
	ld->ld_cpu = (uint16_t)cpu_number();
	ld->ld_lwp = l;

	lockdebug_unlock(lk);
}

/*
 * lockdebug_unlocked:
 *
 *	Process a lock release operation.
 */
void
lockdebug_unlocked(volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebuglk_t *lk;
	lockdebug_t *ld;

	if (panicstr != NULL || ld_panic)
		return;

	if ((ld = lockdebug_lookup(lock, &lk)) == NULL)
		return;

	if (shared) {
		if (l->l_shlocks == 0) {
			lockdebug_abort1(ld, lk, __func__,
			    "no shared locks held by LWP", true);
			return;
		}
		if (ld->ld_shares == 0) {
			lockdebug_abort1(ld, lk, __func__,
			    "no shared holds on this lock", true);
			return;
		}
		l->l_shlocks--;
		ld->ld_shares--;
		if (ld->ld_lwp == l)
			ld->ld_lwp = NULL;
		if (ld->ld_cpu == (uint16_t)cpu_number())
			ld->ld_cpu = (uint16_t)-1;
	} else {
		if ((ld->ld_flags & LD_LOCKED) == 0) {
			lockdebug_abort1(ld, lk, __func__, "not locked",
			    true);
			return;
		}

		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp != curlwp) {
				lockdebug_abort1(ld, lk, __func__,
				    "not held by current LWP", true);
				return;
			}
			ld->ld_flags &= ~LD_LOCKED;
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
			curlwp->l_exlocks--;
			TAILQ_REMOVE(&ld_sleepers, ld, ld_chain);
		} else {
			if (ld->ld_cpu != (uint16_t)cpu_number()) {
				lockdebug_abort1(ld, lk, __func__,
				    "not held by current CPU", true);
				return;
			}
			ld->ld_flags &= ~LD_LOCKED;
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
			curcpu()->ci_spin_locks2--;
			TAILQ_REMOVE(&ld_spinners, ld, ld_chain);
		}
	}

	lockdebug_unlock(lk);
}

/*
 * lockdebug_barrier:
 *
 *	Panic if we hold any spin lock other than the one specified, and
 *	optionally if we hold any sleep locks.
 */
void
lockdebug_barrier(volatile void *spinlock, int slplocks)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	uint16_t cpuno;
	int s, s0;

	if (panicstr != NULL || ld_panic)
		return;

	/*
	 * Use splsoftclock() and not a critical section to block preemption.
	 * kpreempt_disable() will skew preemption statistics by firing again
	 * in mi_switch(), while we are preempting!
	 */
	s0 = splsoftclock();

	if (curcpu()->ci_spin_locks2 != 0) {
		cpuno = (uint16_t)cpu_number();

		s = lockdebug_lock_rd(&ld_spinner_lk);
		TAILQ_FOREACH(ld, &ld_spinners, ld_chain) {
			if (ld->ld_lock == spinlock) {
				if (ld->ld_cpu != cpuno) {
					lockdebug_abort1(ld, &ld_spinner_lk,
					    __func__,
					    "not held by current CPU", true);
					splx(s0);
					return;
				}
				continue;
			}
			if (ld->ld_cpu == cpuno && (l->l_pflag & LP_INTR) == 0) {
				lockdebug_abort1(ld, &ld_spinner_lk,
				    __func__, "spin lock held", true);
				splx(s0);
				return;
			}
		}
		lockdebug_unlock_rd(&ld_spinner_lk, s);
	}

	if (!slplocks) {
		if (l->l_exlocks != 0) {
			s = lockdebug_lock_rd(&ld_sleeper_lk);
			TAILQ_FOREACH(ld, &ld_sleepers, ld_chain) {
				if (ld->ld_lwp == l) {
					lockdebug_abort1(ld, &ld_sleeper_lk,
					    __func__, "sleep lock held", true);
					splx(s0);
					return;
				}
			}
			lockdebug_unlock_rd(&ld_sleeper_lk, s);
		}
		if (l->l_shlocks != 0)
			panic("lockdebug_barrier: holding %d shared locks",
			    l->l_shlocks);
	}

	splx(s0);
}

/*
 * lockdebug_mem_check:
 *
 *	Check for in-use locks within a memory region that is
 *	being freed.
 */
void
lockdebug_mem_check(const char *func, void *base, size_t sz)
{
	lockdebug_t *ld;
	lockdebuglk_t *lk;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

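	/*
	 * Find the first lockdebug record whose lock address is >= base.
	 * If that lock lies inside [base, base + sz), the region being
	 * freed still contains an initialized lock.
	 */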
	s = lockdebug_lock_rd(&ld_tree_lk);
	ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base);
	if (ld != NULL) {
		const uintptr_t lock = (uintptr_t)ld->ld_lock;

		if ((uintptr_t)base > lock)
			panic("%s: corrupt tree ld=%p, base=%p, sz=%zu",
			    __func__, ld, base, sz);
		if (lock >= (uintptr_t)base + sz)
			ld = NULL;
	}
	lockdebug_unlock_rd(&ld_tree_lk, s);
	if (ld == NULL)
		return;

	if ((ld->ld_flags & LD_SLEEPER) != 0)
		lk = &ld_sleeper_lk;
	else
		lk = &ld_spinner_lk;

	lockdebug_lock(lk);
	lockdebug_abort1(ld, lk, func,
	    "allocation contains active lock", !cold);
}

/*
 * lockdebug_dump:
 *
 *	Dump information about a lock on panic, or for DDB.
 */
static void
lockdebug_dump(lockdebug_t *ld, void (*pr)(const char *, ...))
{
	int sleeper = (ld->ld_flags & LD_SLEEPER);

	(*pr)(
	    "lock address : %#018lx type     : %18s\n"
	    "shared holds : %18u exclusive: %18u\n"
	    "shares wanted: %18u exclusive: %18u\n"
	    "current cpu  : %18u last held: %18u\n"
	    "current lwp  : %#018lx last held: %#018lx\n"
	    "last locked  : %#018lx unlocked : %#018lx\n"
	    "initialized  : %#018lx\n",
	    (long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"),
	    (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
	    (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
	    (unsigned)cpu_number(), (unsigned)ld->ld_cpu,
	    (long)curlwp, (long)ld->ld_lwp,
	    (long)ld->ld_locked, (long)ld->ld_unlocked,
	    (long)ld->ld_initaddr);

	if (ld->ld_lockops->lo_dump != NULL)
		(*ld->ld_lockops->lo_dump)(ld->ld_lock);

	if (sleeper) {
		(*pr)("\n");
		turnstile_print(ld->ld_lock, pr);
	}
}

/*
 * lockdebug_abort1:
 *
 *	An error has been trapped - dump lock info and panic.
 */
static void
lockdebug_abort1(lockdebug_t *ld, lockdebuglk_t *lk, const char *func,
		 const char *msg, bool dopanic)
{

	/*
	 * Don't make the situation worse if the system is already going
	 * down in flames.  Once a panic is triggered, lockdebug state
	 * becomes stale and cannot be trusted.
	 */
	if (atomic_inc_uint_nv(&ld_panic) != 1) {
		lockdebug_unlock(lk);
		return;
	}

	printf_nolog("%s error: %s: %s\n\n", ld->ld_lockops->lo_name,
	    func, msg);
	lockdebug_dump(ld, printf_nolog);
	lockdebug_unlock(lk);
	printf_nolog("\n");
	if (dopanic)
		panic("LOCKDEBUG");
}

#endif	/* LOCKDEBUG */

/*
 * lockdebug_lock_print:
 *
 *	Handle the DDB 'show lock' command.
 */
#ifdef DDB
void
lockdebug_lock_print(void *addr, void (*pr)(const char *, ...))
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;

	TAILQ_FOREACH(ld, &ld_all, ld_achain) {
		if (ld->ld_lock == addr) {
			lockdebug_dump(ld, pr);
			return;
		}
	}
	(*pr)("Sorry, no record of a lock with address %p found.\n", addr);
#else
	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
#endif	/* LOCKDEBUG */
}
#endif	/* DDB */

/*
 * lockdebug_abort:
 *
 *	An error has been trapped - dump lock info and call panic().
 */
void
lockdebug_abort(volatile void *lock, lockops_t *ops, const char *func,
		const char *msg)
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;
	lockdebuglk_t *lk;

	if ((ld = lockdebug_lookup(lock, &lk)) != NULL) {
		lockdebug_abort1(ld, lk, func, msg, true);
		/* NOTREACHED */
	}
#endif	/* LOCKDEBUG */

	/*
	 * Complain only on the first occurrence.  Otherwise proceed to
	 * panic, where we will `rendezvous' with other CPUs if the machine
	 * is going down in flames.
	 */
	if (atomic_inc_uint_nv(&ld_panic) == 1) {
		printf_nolog("%s error: %s: %s\n\n"
		    "lock address : %#018lx\n"
		    "current cpu  : %18d\n"
		    "current lwp  : %#018lx\n",
		    ops->lo_name, func, msg, (long)lock, (int)cpu_number(),
		    (long)curlwp);
		(*ops->lo_dump)(lock);
		printf_nolog("\n");
	}

	panic("lock error");
}