/*	$NetBSD: subr_lockdebug.c,v 1.32 2008/05/03 06:24:55 yamt Exp $	*/

/*-
 * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Basic lock debugging code shared among lock primitives.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.32 2008/05/03 06:24:55 yamt Exp $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lockdebug.h>
#include <sys/sleepq.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/lock.h>

#include <lib/libkern/rb.h>

#include <machine/lock.h>

unsigned int	ld_panic;	/* set after an error; disables further checks */

#ifdef LOCKDEBUG

#define	LD_BATCH_SHIFT	9
#define	LD_BATCH	(1 << LD_BATCH_SHIFT)
#define	LD_BATCH_MASK	(LD_BATCH - 1)
#define	LD_MAX_LOCKS	1048576
#define	LD_SLOP		16

#define	LD_LOCKED	0x01		/* lock is currently held exclusively */
#define	LD_SLEEPER	0x02		/* record describes a sleep lock */

#define	LD_WRITE_LOCK	0x80000000	/* writer value for a lockdebuglk_t */

typedef union lockdebuglk {
	struct {
		u_int	lku_lock;
		int	lku_oldspl;
	} ul;
	uint8_t	lk_pad[COHERENCY_UNIT];
} volatile __aligned(COHERENCY_UNIT) lockdebuglk_t;

#define	lk_lock		ul.lku_lock
#define	lk_oldspl	ul.lku_oldspl

typedef struct lockdebug {
	struct rb_node	ld_rb_node;	/* must be the first member */
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
	volatile void	*ld_lock;
	lockops_t	*ld_lockops;
	struct lwp	*ld_lwp;
	uintptr_t	ld_locked;
	uintptr_t	ld_unlocked;
	uintptr_t	ld_initaddr;
	uint16_t	ld_shares;
	uint16_t	ld_cpu;
	uint8_t		ld_flags;
	uint8_t		ld_shwant;	/* advisory */
	uint8_t		ld_exwant;	/* advisory */
	uint8_t		ld_unused;
} volatile lockdebug_t;

typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;

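/*
 * Spin locks protecting the lockdebug state: ld_tree_lk guards the
 * address-keyed tree, ld_sleeper_lk and ld_spinner_lk guard the per-type
 * lists and the records themselves, and ld_free_lk guards the free list
 * and allocation counters.
 */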
lockdebuglk_t	ld_tree_lk;
lockdebuglk_t	ld_sleeper_lk;
lockdebuglk_t	ld_spinner_lk;
lockdebuglk_t	ld_free_lk;

lockdebuglist_t	ld_sleepers = TAILQ_HEAD_INITIALIZER(ld_sleepers);
lockdebuglist_t	ld_spinners = TAILQ_HEAD_INITIALIZER(ld_spinners);
lockdebuglist_t	ld_free = TAILQ_HEAD_INITIALIZER(ld_free);
lockdebuglist_t	ld_all = TAILQ_HEAD_INITIALIZER(ld_all);
int		ld_nfree;
int		ld_freeptr;
int		ld_recurse;
bool		ld_nomore;
lockdebug_t	*ld_table[LD_MAX_LOCKS / LD_BATCH];

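/* Statically allocated first batch, used before kmem_alloc() is available. */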
lockdebug_t	ld_prime[LD_BATCH];

static void	lockdebug_abort1(lockdebug_t *, lockdebuglk_t *lk,
			const char *, const char *, bool);
static void	lockdebug_more(void);
static void	lockdebug_init(void);

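/*
 * ld_rb_compare_nodes, ld_rb_compare_key:
 *
 * Order lockdebug records in the red-black tree by the address of the
 * lock they describe, so that a lock pointer can be used as the lookup
 * key.
 */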
static signed int
ld_rb_compare_nodes(const struct rb_node *n1, const struct rb_node *n2)
{
	const lockdebug_t *ld1 = (const void *)n1;
	const lockdebug_t *ld2 = (const void *)n2;
	const uintptr_t a = (uintptr_t)ld1->ld_lock;
	const uintptr_t b = (uintptr_t)ld2->ld_lock;

	if (a < b)
		return 1;
	if (a > b)
		return -1;
	return 0;
}

static signed int
ld_rb_compare_key(const struct rb_node *n, const void *key)
{
	const lockdebug_t *ld = (const void *)n;
	const uintptr_t a = (uintptr_t)ld->ld_lock;
	const uintptr_t b = (uintptr_t)key;

	if (a < b)
		return 1;
	if (a > b)
		return -1;
	return 0;
}

static struct rb_tree ld_rb_tree;

static const struct rb_tree_ops ld_rb_tree_ops = {
	.rb_compare_nodes = ld_rb_compare_nodes,
	.rb_compare_key = ld_rb_compare_key,
};

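/*
 * lockdebug_lock_init, lockdebug_lock, lockdebug_unlock,
 * lockdebug_lock_rd, lockdebug_unlock_rd:
 *
 * Minimal reader/writer spin locks for the lockdebug state itself,
 * acquired at splhigh() and built directly on atomic_cas_uint() rather
 * than on the kernel's own (debugged) lock primitives.
 */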
static void
lockdebug_lock_init(lockdebuglk_t *lk)
{

	lk->lk_lock = 0;
}

static void
lockdebug_lock(lockdebuglk_t *lk)
{
	int s;

	s = splhigh();
	do {
		while (lk->lk_lock != 0) {
			SPINLOCK_SPIN_HOOK;
		}
	} while (atomic_cas_uint(&lk->lk_lock, 0, LD_WRITE_LOCK) != 0);
	lk->lk_oldspl = s;
	membar_enter();
}

static void
lockdebug_unlock(lockdebuglk_t *lk)
{
	int s;

	s = lk->lk_oldspl;
	membar_exit();
	lk->lk_lock = 0;
	splx(s);
}

static int
lockdebug_lock_rd(lockdebuglk_t *lk)
{
	u_int val;
	int s;

	s = splhigh();
	do {
		while ((val = lk->lk_lock) == LD_WRITE_LOCK) {
			SPINLOCK_SPIN_HOOK;
		}
	} while (atomic_cas_uint(&lk->lk_lock, val, val + 1) != val);
	membar_enter();
	return s;
}

static void
lockdebug_unlock_rd(lockdebuglk_t *lk, int s)
{

	membar_exit();
	atomic_dec_uint(&lk->lk_lock);
	splx(s);
}

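/*
 * lockdebug_lookup1:
 *
 * Look up a lockdebug structure by the address of the lock it describes.
 * If found, return it with the appropriate sleeper or spinner lock held;
 * return NULL if there is no record for the lock.
 */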
static inline lockdebug_t *
lockdebug_lookup1(volatile void *lock, lockdebuglk_t **lk)
{
	lockdebug_t *ld;
	int s;

	s = lockdebug_lock_rd(&ld_tree_lk);
	ld = (lockdebug_t *)rb_tree_find_node(&ld_rb_tree, __UNVOLATILE(lock));
	lockdebug_unlock_rd(&ld_tree_lk, s);
	if (ld == NULL)
		return NULL;

	if ((ld->ld_flags & LD_SLEEPER) != 0)
		*lk = &ld_sleeper_lk;
	else
		*lk = &ld_spinner_lk;

	lockdebug_lock(*lk);
	return ld;
}

/*
 * lockdebug_lookup:
 *
 * Find a lockdebug structure by a pointer to a lock and return it locked.
 */
static inline lockdebug_t *
lockdebug_lookup(volatile void *lock, lockdebuglk_t **lk)
{
	lockdebug_t *ld;

	ld = lockdebug_lookup1(lock, lk);
	if (ld == NULL)
		panic("lockdebug_lookup: uninitialized lock (lock=%p)", lock);
	return ld;
}

/*
 * lockdebug_init:
 *
 * Initialize the lockdebug system.  Allocate an initial pool of
 * lockdebug structures before the VM system is up and running.
 */
static void
lockdebug_init(void)
{
	lockdebug_t *ld;
	int i;

	lockdebug_lock_init(&ld_tree_lk);
	lockdebug_lock_init(&ld_sleeper_lk);
	lockdebug_lock_init(&ld_spinner_lk);
	lockdebug_lock_init(&ld_free_lk);

	rb_tree_init(&ld_rb_tree, &ld_rb_tree_ops);

	ld = ld_prime;
	ld_table[0] = ld;
	for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
		TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
		TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
	}
	ld_freeptr = 1;
	ld_nfree = LD_BATCH - 1;
}

/*
 * lockdebug_alloc:
 *
 * A lock is being initialized, so allocate an associated debug
 * structure.
 */
bool
lockdebug_alloc(volatile void *lock, lockops_t *lo, uintptr_t initaddr)
{
	struct cpu_info *ci;
	lockdebug_t *ld;
	lockdebuglk_t *lk;

	if (lo == NULL || panicstr != NULL || ld_panic)
		return false;
	if (ld_freeptr == 0)
		lockdebug_init();

	if ((ld = lockdebug_lookup1(lock, &lk)) != NULL) {
		lockdebug_abort1(ld, lk, __func__, "already initialized", true);
		return false;
	}

	/*
	 * Pinch a new debug structure.  We may recurse because we call
	 * kmem_alloc(), which may need to initialize new locks somewhere
	 * down the path.  If not recursing, we try to maintain at least
	 * LD_SLOP structures free, which should hopefully be enough to
	 * satisfy kmem_alloc().  If we can't provide a structure, not to
	 * worry: we'll just mark the lock as not having an ID.
	 */
	lockdebug_lock(&ld_free_lk);
	ci = curcpu();
	ci->ci_lkdebug_recurse++;

	if (TAILQ_EMPTY(&ld_free)) {
		if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
			ci->ci_lkdebug_recurse--;
			lockdebug_unlock(&ld_free_lk);
			return false;
		}
		lockdebug_more();
	} else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP)
		lockdebug_more();

	if ((ld = TAILQ_FIRST(&ld_free)) == NULL) {
		lockdebug_unlock(&ld_free_lk);
		return false;
	}

	TAILQ_REMOVE(&ld_free, ld, ld_chain);
	ld_nfree--;

	ci->ci_lkdebug_recurse--;
	lockdebug_unlock(&ld_free_lk);

	if (ld->ld_lock != NULL)
		panic("lockdebug_alloc: corrupt table");

	if (lo->lo_sleeplock)
		lockdebug_lock(&ld_sleeper_lk);
	else
		lockdebug_lock(&ld_spinner_lk);

	/* Initialise the structure. */
	ld->ld_lock = lock;
	ld->ld_lockops = lo;
	ld->ld_locked = 0;
	ld->ld_unlocked = 0;
	ld->ld_lwp = NULL;
	ld->ld_initaddr = initaddr;

	lockdebug_lock(&ld_tree_lk);
	rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(&ld->ld_rb_node));
	lockdebug_unlock(&ld_tree_lk);

	if (lo->lo_sleeplock) {
		ld->ld_flags = LD_SLEEPER;
		lockdebug_unlock(&ld_sleeper_lk);
	} else {
		ld->ld_flags = 0;
		lockdebug_unlock(&ld_spinner_lk);
	}

	return true;
}

/*
 * lockdebug_free:
 *
 * A lock is being destroyed, so release debugging resources.
 */
void
lockdebug_free(volatile void *lock)
{
	lockdebug_t *ld;
	lockdebuglk_t *lk;

	if (panicstr != NULL || ld_panic)
		return;

	ld = lockdebug_lookup(lock, &lk);
	if (ld == NULL) {
		panic("lockdebug_free: destroying uninitialized lock %p",
		    lock);
		return;
	}
	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
		lockdebug_abort1(ld, lk, __func__, "is locked", true);
		return;
	}
	lockdebug_lock(&ld_tree_lk);
	rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(&ld->ld_rb_node));
	lockdebug_unlock(&ld_tree_lk);
	ld->ld_lock = NULL;
	lockdebug_unlock(lk);

	lockdebug_lock(&ld_free_lk);
	TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
	ld_nfree++;
	lockdebug_unlock(&ld_free_lk);
}

/*
 * lockdebug_more:
 *
 * Allocate a batch of debug structures and add to the free list.
 * Must be called with ld_free_lk held.
 */
static void
lockdebug_more(void)
{
	lockdebug_t *ld;
	void *block;
	int i, base, m;

	while (ld_nfree < LD_SLOP) {
		lockdebug_unlock(&ld_free_lk);
		block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
		lockdebug_lock(&ld_free_lk);

		if (block == NULL)
			return;

		if (ld_nfree > LD_SLOP) {
			/* Somebody beat us to it. */
			lockdebug_unlock(&ld_free_lk);
			kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
			lockdebug_lock(&ld_free_lk);
			continue;
		}

		base = ld_freeptr;
		ld_nfree += LD_BATCH;
		ld = block;
		base <<= LD_BATCH_SHIFT;
		m = min(LD_MAX_LOCKS, base + LD_BATCH);

		if (m == LD_MAX_LOCKS)
			ld_nomore = true;

		for (i = base; i < m; i++, ld++) {
			TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
			TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
		}

		membar_producer();
		ld_table[ld_freeptr++] = block;
	}
}

/*
 * lockdebug_wantlock:
 *
 * Process the preamble to a lock acquire.
 */
void
lockdebug_wantlock(volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebuglk_t *lk;
	lockdebug_t *ld;
	bool recurse;

	(void)shared;
	recurse = false;

	if (panicstr != NULL || ld_panic)
		return;

	if ((ld = lockdebug_lookup(lock, &lk)) == NULL)
		return;

	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp == l)
				recurse = true;
		} else if (ld->ld_cpu == (uint16_t)cpu_number())
			recurse = true;
	}

	if (cpu_intr_p()) {
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			lockdebug_abort1(ld, lk, __func__,
			    "acquiring sleep lock from interrupt context",
			    true);
			return;
		}
	}

	if (shared)
		ld->ld_shwant++;
	else
		ld->ld_exwant++;

	if (recurse) {
		lockdebug_abort1(ld, lk, __func__, "locking against myself",
		    true);
		return;
	}

	lockdebug_unlock(lk);
}

/*
 * lockdebug_locked:
 *
 * Process a lock acquire operation.
 */
void
lockdebug_locked(volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebuglk_t *lk;
	lockdebug_t *ld;

	if (panicstr != NULL || ld_panic)
		return;

	if ((ld = lockdebug_lookup(lock, &lk)) == NULL)
		return;

	if (shared) {
		l->l_shlocks++;
		ld->ld_shares++;
		ld->ld_shwant--;
	} else {
		if ((ld->ld_flags & LD_LOCKED) != 0) {
			lockdebug_abort1(ld, lk, __func__,
			    "already locked", true);
			return;
		}

		ld->ld_flags |= LD_LOCKED;
		ld->ld_locked = where;
		ld->ld_exwant--;

		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			l->l_exlocks++;
			TAILQ_INSERT_TAIL(&ld_sleepers, ld, ld_chain);
		} else {
			curcpu()->ci_spin_locks2++;
			TAILQ_INSERT_TAIL(&ld_spinners, ld, ld_chain);
		}
	}
	ld->ld_cpu = (uint16_t)cpu_number();
	ld->ld_lwp = l;

	lockdebug_unlock(lk);
}

/*
 * lockdebug_unlocked:
 *
 * Process a lock release operation.
 */
void
lockdebug_unlocked(volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebuglk_t *lk;
	lockdebug_t *ld;

	if (panicstr != NULL || ld_panic)
		return;

	if ((ld = lockdebug_lookup(lock, &lk)) == NULL)
		return;

	if (shared) {
		if (l->l_shlocks == 0) {
			lockdebug_abort1(ld, lk, __func__,
			    "no shared locks held by LWP", true);
			return;
		}
		if (ld->ld_shares == 0) {
			lockdebug_abort1(ld, lk, __func__,
			    "no shared holds on this lock", true);
			return;
		}
		l->l_shlocks--;
		ld->ld_shares--;
		if (ld->ld_lwp == l)
			ld->ld_lwp = NULL;
		if (ld->ld_cpu == (uint16_t)cpu_number())
			ld->ld_cpu = (uint16_t)-1;
	} else {
		if ((ld->ld_flags & LD_LOCKED) == 0) {
			lockdebug_abort1(ld, lk, __func__, "not locked",
			    true);
			return;
		}

		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp != curlwp) {
				lockdebug_abort1(ld, lk, __func__,
				    "not held by current LWP", true);
				return;
			}
			ld->ld_flags &= ~LD_LOCKED;
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
			curlwp->l_exlocks--;
			TAILQ_REMOVE(&ld_sleepers, ld, ld_chain);
		} else {
			if (ld->ld_cpu != (uint16_t)cpu_number()) {
				lockdebug_abort1(ld, lk, __func__,
				    "not held by current CPU", true);
				return;
			}
			ld->ld_flags &= ~LD_LOCKED;
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
			curcpu()->ci_spin_locks2--;
			TAILQ_REMOVE(&ld_spinners, ld, ld_chain);
		}
	}

	lockdebug_unlock(lk);
}

/*
 * lockdebug_barrier:
 *
 * Panic if we hold a spin lock other than the one specified, and
 * optionally, if we hold any sleep locks.
 */
void
lockdebug_barrier(volatile void *spinlock, int slplocks)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	uint16_t cpuno;
	int s, s0;

	if (panicstr != NULL || ld_panic)
		return;

	/*
	 * Use splsoftclock() and not a critical section to block preemption.
	 * kpreempt_disable() will skew preemption statistics by firing again
	 * in mi_switch(), while we are preempting!
	 */
	s0 = splsoftclock();

	if (curcpu()->ci_spin_locks2 != 0) {
		cpuno = (uint16_t)cpu_number();

		s = lockdebug_lock_rd(&ld_spinner_lk);
		TAILQ_FOREACH(ld, &ld_spinners, ld_chain) {
			if (ld->ld_lock == spinlock) {
				if (ld->ld_cpu != cpuno) {
					lockdebug_abort1(ld, &ld_spinner_lk,
					    __func__,
					    "not held by current CPU", true);
					splx(s0);
					return;
				}
				continue;
			}
			if (ld->ld_cpu == cpuno && (l->l_pflag & LP_INTR) == 0) {
				lockdebug_abort1(ld, &ld_spinner_lk,
				    __func__, "spin lock held", true);
				splx(s0);
				return;
			}
		}
		lockdebug_unlock_rd(&ld_spinner_lk, s);
	}

	if (!slplocks) {
		if (l->l_exlocks != 0) {
			s = lockdebug_lock_rd(&ld_sleeper_lk);
			TAILQ_FOREACH(ld, &ld_sleepers, ld_chain) {
				if (ld->ld_lwp == l) {
					lockdebug_abort1(ld, &ld_sleeper_lk,
					    __func__, "sleep lock held", true);
					splx(s0);
					return;
				}
			}
			lockdebug_unlock_rd(&ld_sleeper_lk, s);
		}
		if (l->l_shlocks != 0)
			panic("lockdebug_barrier: holding %d shared locks",
			    l->l_shlocks);
	}

	splx(s0);
}

/*
 * lockdebug_mem_check:
 *
 * Check for in-use locks within a memory region that is
 * being freed.
 */
void
lockdebug_mem_check(const char *func, void *base, size_t sz)
{
	lockdebug_t *ld;
	lockdebuglk_t *lk;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = lockdebug_lock_rd(&ld_tree_lk);
	ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base);
	if (ld != NULL) {
		const uintptr_t lock = (uintptr_t)ld->ld_lock;

		if ((uintptr_t)base > lock)
			panic("%s: corrupt tree ld=%p, base=%p, sz=%zu",
			    __func__, ld, base, sz);
		if (lock >= (uintptr_t)base + sz)
			ld = NULL;
	}
	lockdebug_unlock_rd(&ld_tree_lk, s);
	if (ld == NULL)
		return;

	if ((ld->ld_flags & LD_SLEEPER) != 0)
		lk = &ld_sleeper_lk;
	else
		lk = &ld_spinner_lk;

	lockdebug_lock(lk);
	lockdebug_abort1(ld, lk, func,
	    "allocation contains active lock", !cold);
}

/*
 * lockdebug_dump:
 *
 * Dump information about a lock on panic, or for DDB.
 */
static void
lockdebug_dump(lockdebug_t *ld, void (*pr)(const char *, ...))
{
	int sleeper = (ld->ld_flags & LD_SLEEPER);

	(*pr)(
	    "lock address : %#018lx type     : %18s\n"
	    "shared holds : %18u exclusive: %18u\n"
	    "shares wanted: %18u exclusive: %18u\n"
	    "current cpu  : %18u last held: %18u\n"
	    "current lwp  : %#018lx last held: %#018lx\n"
	    "last locked  : %#018lx unlocked : %#018lx\n"
	    "initialized  : %#018lx\n",
	    (long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"),
	    (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
	    (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
	    (unsigned)cpu_number(), (unsigned)ld->ld_cpu,
	    (long)curlwp, (long)ld->ld_lwp,
	    (long)ld->ld_locked, (long)ld->ld_unlocked,
	    (long)ld->ld_initaddr);

	if (ld->ld_lockops->lo_dump != NULL)
		(*ld->ld_lockops->lo_dump)(ld->ld_lock);

	if (sleeper) {
		(*pr)("\n");
		turnstile_print(ld->ld_lock, pr);
	}
}

/*
 * lockdebug_abort1:
 *
 * An error has been trapped - dump lock info and panic.
 */
static void
lockdebug_abort1(lockdebug_t *ld, lockdebuglk_t *lk, const char *func,
		 const char *msg, bool dopanic)
{

	/*
	 * Don't make the situation worse if the system is already going
	 * down in flames.  Once a panic is triggered, lockdebug state
	 * becomes stale and cannot be trusted.
	 */
	if (atomic_inc_uint_nv(&ld_panic) != 1) {
		lockdebug_unlock(lk);
		return;
	}

	printf_nolog("%s error: %s: %s\n\n", ld->ld_lockops->lo_name,
	    func, msg);
	lockdebug_dump(ld, printf_nolog);
	lockdebug_unlock(lk);
	printf_nolog("\n");
	if (dopanic)
		panic("LOCKDEBUG");
}

#endif	/* LOCKDEBUG */

/*
 * lockdebug_lock_print:
 *
 * Handle the DDB 'show lock' command.
 */
#ifdef DDB
void
lockdebug_lock_print(void *addr, void (*pr)(const char *, ...))
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;

	TAILQ_FOREACH(ld, &ld_all, ld_achain) {
		if (ld->ld_lock == addr) {
			lockdebug_dump(ld, pr);
			return;
		}
	}
	(*pr)("Sorry, no record of a lock with address %p found.\n", addr);
#else
	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
#endif	/* LOCKDEBUG */
}
#endif	/* DDB */

/*
 * lockdebug_abort:
 *
 * An error has been trapped - dump lock info and call panic().
 */
void
lockdebug_abort(volatile void *lock, lockops_t *ops, const char *func,
		const char *msg)
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;
	lockdebuglk_t *lk;

	if ((ld = lockdebug_lookup(lock, &lk)) != NULL) {
		lockdebug_abort1(ld, lk, func, msg, true);
		/* NOTREACHED */
	}
#endif	/* LOCKDEBUG */

	/*
	 * Complain only on the first occurrence.  Otherwise proceed to
	 * panic where we will `rendezvous' with other CPUs if the machine
	 * is going down in flames.
	 */
	if (atomic_inc_uint_nv(&ld_panic) == 1) {
		printf_nolog("%s error: %s: %s\n\n"
		    "lock address : %#018lx\n"
		    "current cpu  : %18d\n"
		    "current lwp  : %#018lx\n",
		    ops->lo_name, func, msg, (long)lock, (int)cpu_number(),
		    (long)curlwp);
		(*ops->lo_dump)(lock);
		printf_nolog("\n");
	}

	panic("lock error");
}