/*	$NetBSD: subr_lockdebug.c,v 1.25 2008/01/04 21:18:12 ad Exp $	*/

/*-
 * Copyright (c) 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Basic lock debugging code shared among lock primitives.
 */
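
/*
 * Call protocol, as used by the lock primitives that hook into this code
 * (a sketch inferred from the functions below, not a normative
 * specification):
 *
 *	lockdebug_alloc()	when a lock is initialized
 *	lockdebug_wantlock()	before attempting to acquire a lock
 *	lockdebug_locked()	after a lock has been acquired
 *	lockdebug_unlocked()	when a lock is about to be released
 *	lockdebug_free()	when a lock is destroyed
 *	lockdebug_barrier()	to assert that no spin locks (other than an
 *				optional named one) and, optionally, no
 *				sleep locks are held
 *	lockdebug_mem_check()	to verify that a memory region being freed
 *				contains no active locks
 *	lockdebug_abort()	when the lock primitive itself has trapped
 *				an error
 */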

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.25 2008/01/04 21:18:12 ad Exp $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lockdebug.h>
#include <sys/sleepq.h>
#include <sys/cpu.h>
#include <sys/atomic.h>

#include <lib/libkern/rb.h>

#include <machine/lock.h>

#ifdef LOCKDEBUG

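/*
 * Tunables and flag bits:
 *
 *	LD_BATCH	number of lockdebug_t structures allocated per batch
 *	LD_MAX_LOCKS	upper bound on the number of structures
 *	LD_SLOP		low-water mark of free structures to maintain
 *	LD_LOCKED	ld_flags: the lock is currently held exclusively
 *	LD_SLEEPER	ld_flags: the lock is a sleep (adaptive) lock
 *	LD_WRITE_LOCK	lk_lock value marking a lockdebuglk_t write locked
 */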
#define	LD_BATCH_SHIFT	9
#define	LD_BATCH	(1 << LD_BATCH_SHIFT)
#define	LD_BATCH_MASK	(LD_BATCH - 1)
#define	LD_MAX_LOCKS	1048576
#define	LD_SLOP		16

#define	LD_LOCKED	0x01
#define	LD_SLEEPER	0x02

#define	LD_WRITE_LOCK	0x80000000

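/*
 * lockdebuglk_t is a simple internal lock protecting the lists and tree
 * below.  It admits either a single writer (lk_lock == LD_WRITE_LOCK) or
 * any number of readers (lk_lock counts them), and is always taken at
 * splhigh().  It is padded and aligned to a cache line to avoid false
 * sharing between the locks.
 */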
typedef union lockdebuglk {
	struct {
		u_int	lku_lock;
		int	lku_oldspl;
	} ul;
	uint8_t	lk_pad[CACHE_LINE_SIZE];
} volatile __aligned(CACHE_LINE_SIZE) lockdebuglk_t;

#define	lk_lock		ul.lku_lock
#define	lk_oldspl	ul.lku_oldspl

typedef struct lockdebug {
	struct rb_node	ld_rb_node;	/* must be the first member */
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
	volatile void	*ld_lock;
	lockops_t	*ld_lockops;
	struct lwp	*ld_lwp;
	uintptr_t	ld_locked;
	uintptr_t	ld_unlocked;
	uintptr_t	ld_initaddr;
	uint16_t	ld_shares;
	uint16_t	ld_cpu;
	uint8_t		ld_flags;
	uint8_t		ld_shwant;	/* advisory */
	uint8_t		ld_exwant;	/* advisory */
	uint8_t		ld_unused;
} volatile lockdebug_t;

typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;

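/*
 * Global state: ld_rb_tree (below) indexes every active lockdebug_t by the
 * address of the lock it describes; ld_sleepers and ld_spinners list sleep
 * and spin locks that are currently held exclusively; ld_free and ld_all
 * hold the free and all allocated records respectively.  ld_table remembers
 * the base of each allocated batch, with ld_prime providing the statically
 * allocated first batch used before the VM system is running.
 */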
lockdebuglk_t	ld_tree_lk;
lockdebuglk_t	ld_sleeper_lk;
lockdebuglk_t	ld_spinner_lk;
lockdebuglk_t	ld_free_lk;

lockdebuglist_t	ld_sleepers = TAILQ_HEAD_INITIALIZER(ld_sleepers);
lockdebuglist_t	ld_spinners = TAILQ_HEAD_INITIALIZER(ld_spinners);
lockdebuglist_t	ld_free = TAILQ_HEAD_INITIALIZER(ld_free);
lockdebuglist_t	ld_all = TAILQ_HEAD_INITIALIZER(ld_all);
int		ld_nfree;
int		ld_freeptr;
int		ld_recurse;
bool		ld_nomore;
lockdebug_t	*ld_table[LD_MAX_LOCKS / LD_BATCH];

lockdebug_t	ld_prime[LD_BATCH];

static void	lockdebug_abort1(lockdebug_t *, lockdebuglk_t *lk,
				 const char *, const char *, bool);
static void	lockdebug_more(void);
static void	lockdebug_init(void);

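/*
 * Comparison routines for the red-black tree of lockdebug_t records,
 * keyed by the address of the lock each record describes.
 */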
static signed int
ld_rb_compare_nodes(const struct rb_node *n1, const struct rb_node *n2)
{
	const lockdebug_t *ld1 = (const void *)n1;
	const lockdebug_t *ld2 = (const void *)n2;
	const uintptr_t a = (uintptr_t)ld1->ld_lock;
	const uintptr_t b = (uintptr_t)ld2->ld_lock;

	if (a < b)
		return 1;
	if (a > b)
		return -1;
	return 0;
}

static signed int
ld_rb_compare_key(const struct rb_node *n, const void *key)
{
	const lockdebug_t *ld = (const void *)n;
	const uintptr_t a = (uintptr_t)ld->ld_lock;
	const uintptr_t b = (uintptr_t)key;

	if (a < b)
		return 1;
	if (a > b)
		return -1;
	return 0;
}

static struct rb_tree ld_rb_tree;

static const struct rb_tree_ops ld_rb_tree_ops = {
	.rb_compare_nodes = ld_rb_compare_nodes,
	.rb_compare_key = ld_rb_compare_key,
};

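/*
 * lockdebug_lock_init, lockdebug_lock, lockdebug_unlock,
 * lockdebug_lock_rd, lockdebug_unlock_rd:
 *
 *	Acquire and release the internal lockdebug locks, either
 *	exclusively (write) or shared (read).  The IPL is raised to
 *	splhigh() while a lock is held; lockdebug_lock() stashes the
 *	previous IPL in the lock itself, while the read routines hand
 *	it back to the caller.
 */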
static void
lockdebug_lock_init(lockdebuglk_t *lk)
{

	lk->lk_lock = 0;
}

static void
lockdebug_lock(lockdebuglk_t *lk)
{
	int s;

	s = splhigh();
	do {
		while (lk->lk_lock != 0) {
			SPINLOCK_SPIN_HOOK;
		}
	} while (atomic_cas_uint(&lk->lk_lock, 0, LD_WRITE_LOCK) != 0);
	lk->lk_oldspl = s;
	membar_enter();
}

static void
lockdebug_unlock(lockdebuglk_t *lk)
{
	int s;

	s = lk->lk_oldspl;
	membar_exit();
	lk->lk_lock = 0;
	splx(s);
}

static int
lockdebug_lock_rd(lockdebuglk_t *lk)
{
	u_int val;
	int s;

	s = splhigh();
	do {
		while ((val = lk->lk_lock) == LD_WRITE_LOCK) {
			SPINLOCK_SPIN_HOOK;
		}
	} while (atomic_cas_uint(&lk->lk_lock, val, val + 1) != val);
	membar_enter();
	return s;
}

static void
lockdebug_unlock_rd(lockdebuglk_t *lk, int s)
{

	membar_exit();
	atomic_dec_uint(&lk->lk_lock);
	splx(s);
}

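/*
 * lockdebug_lookup1:
 *
 *	Find a lockdebug structure by a pointer to a lock.  Return NULL if
 *	not found, otherwise return the structure with the matching list
 *	lock (ld_sleeper_lk or ld_spinner_lk) held.
 */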
static inline lockdebug_t *
lockdebug_lookup1(volatile void *lock, lockdebuglk_t **lk)
{
	lockdebug_t *ld;
	int s;

	s = lockdebug_lock_rd(&ld_tree_lk);
	ld = (lockdebug_t *)rb_tree_find_node(&ld_rb_tree, __UNVOLATILE(lock));
	lockdebug_unlock_rd(&ld_tree_lk, s);
	if (ld == NULL)
		return NULL;

	if ((ld->ld_flags & LD_SLEEPER) != 0)
		*lk = &ld_sleeper_lk;
	else
		*lk = &ld_spinner_lk;

	lockdebug_lock(*lk);
	return ld;
}

/*
 * lockdebug_lookup:
 *
 *	Find a lockdebug structure by a pointer to a lock and return it
 *	locked.
 */
static inline lockdebug_t *
lockdebug_lookup(volatile void *lock, lockdebuglk_t **lk)
{
	lockdebug_t *ld;

	ld = lockdebug_lookup1(lock, lk);
	if (ld == NULL)
		panic("lockdebug_lookup: uninitialized lock (lock=%p)", lock);
	return ld;
}

/*
 * lockdebug_init:
 *
 *	Initialize the lockdebug system.  Allocate an initial pool of
 *	lockdebug structures before the VM system is up and running.
 */
static void
lockdebug_init(void)
{
	lockdebug_t *ld;
	int i;

	lockdebug_lock_init(&ld_tree_lk);
	lockdebug_lock_init(&ld_sleeper_lk);
	lockdebug_lock_init(&ld_spinner_lk);
	lockdebug_lock_init(&ld_free_lk);

	rb_tree_init(&ld_rb_tree, &ld_rb_tree_ops);

	ld = ld_prime;
	ld_table[0] = ld;
	for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
		TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
		TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
	}
	ld_freeptr = 1;
	ld_nfree = LD_BATCH - 1;
}

/*
 * lockdebug_alloc:
 *
 *	A lock is being initialized, so allocate an associated debug
 *	structure.
 */
bool
lockdebug_alloc(volatile void *lock, lockops_t *lo, uintptr_t initaddr)
{
	struct cpu_info *ci;
	lockdebug_t *ld;
	lockdebuglk_t *lk;

	if (lo == NULL || panicstr != NULL)
		return false;
	if (ld_freeptr == 0)
		lockdebug_init();

	if ((ld = lockdebug_lookup1(lock, &lk)) != NULL) {
		lockdebug_abort1(ld, lk, __func__, "already initialized", true);
		/* NOTREACHED */
	}

	/*
	 * Pinch a new debug structure.  We may recurse because we call
	 * kmem_alloc(), which may need to initialize new locks somewhere
	 * down the path.  If not recursing, we try to maintain at least
	 * LD_SLOP structures free, which should hopefully be enough to
	 * satisfy kmem_alloc().  If we can't provide a structure, not to
	 * worry: we'll just mark the lock as not having an ID.
	 */
	lockdebug_lock(&ld_free_lk);
	ci = curcpu();
	ci->ci_lkdebug_recurse++;

	if (TAILQ_EMPTY(&ld_free)) {
		if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
			ci->ci_lkdebug_recurse--;
			lockdebug_unlock(&ld_free_lk);
			return false;
		}
		lockdebug_more();
	} else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP)
		lockdebug_more();

	if ((ld = TAILQ_FIRST(&ld_free)) == NULL) {
		lockdebug_unlock(&ld_free_lk);
		return false;
	}

	TAILQ_REMOVE(&ld_free, ld, ld_chain);
	ld_nfree--;

	ci->ci_lkdebug_recurse--;
	lockdebug_unlock(&ld_free_lk);

	if (ld->ld_lock != NULL)
		panic("lockdebug_alloc: corrupt table");

	if (lo->lo_sleeplock)
		lockdebug_lock(&ld_sleeper_lk);
	else
		lockdebug_lock(&ld_spinner_lk);

	/* Initialise the structure. */
	ld->ld_lock = lock;
	ld->ld_lockops = lo;
	ld->ld_locked = 0;
	ld->ld_unlocked = 0;
	ld->ld_lwp = NULL;
	ld->ld_initaddr = initaddr;

	lockdebug_lock(&ld_tree_lk);
	rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(&ld->ld_rb_node));
	lockdebug_unlock(&ld_tree_lk);

	if (lo->lo_sleeplock) {
		ld->ld_flags = LD_SLEEPER;
		lockdebug_unlock(&ld_sleeper_lk);
	} else {
		ld->ld_flags = 0;
		lockdebug_unlock(&ld_spinner_lk);
	}

	return true;
}

/*
 * lockdebug_free:
 *
 *	A lock is being destroyed, so release debugging resources.
 */
void
lockdebug_free(volatile void *lock)
{
	lockdebug_t *ld;
	lockdebuglk_t *lk;

	if (panicstr != NULL)
		return;

	ld = lockdebug_lookup(lock, &lk);
	if (ld == NULL) {
		panic("lockdebug_free: destroying uninitialized lock %p",
		    lock);
		/* NOTREACHED */
	}
	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0)
		lockdebug_abort1(ld, lk, __func__, "is locked", true);
	lockdebug_lock(&ld_tree_lk);
	rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(&ld->ld_rb_node));
	lockdebug_unlock(&ld_tree_lk);
	ld->ld_lock = NULL;
	lockdebug_unlock(lk);

	lockdebug_lock(&ld_free_lk);
	TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
	ld_nfree++;
	lockdebug_unlock(&ld_free_lk);
}

/*
 * lockdebug_more:
 *
 *	Allocate a batch of debug structures and add to the free list.
 *	Must be called with ld_free_lk held.
 */
static void
lockdebug_more(void)
{
	lockdebug_t *ld;
	void *block;
	int i, base, m;

	while (ld_nfree < LD_SLOP) {
		lockdebug_unlock(&ld_free_lk);
		block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
		lockdebug_lock(&ld_free_lk);

		if (block == NULL)
			return;

		if (ld_nfree > LD_SLOP) {
			/* Somebody beat us to it. */
			lockdebug_unlock(&ld_free_lk);
			kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
			lockdebug_lock(&ld_free_lk);
			continue;
		}

		base = ld_freeptr;
		ld_nfree += LD_BATCH;
		ld = block;
		base <<= LD_BATCH_SHIFT;
		m = min(LD_MAX_LOCKS, base + LD_BATCH);

		if (m == LD_MAX_LOCKS)
			ld_nomore = true;

		for (i = base; i < m; i++, ld++) {
			TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
			TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
		}

		membar_producer();
		ld_table[ld_freeptr++] = block;
	}
}

/*
 * lockdebug_wantlock:
 *
 *	Process the preamble to a lock acquire.
 */
void
lockdebug_wantlock(volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebuglk_t *lk;
	lockdebug_t *ld;
	bool recurse;

	(void)shared;
	recurse = false;

	if (panicstr != NULL)
		return;

	if ((ld = lockdebug_lookup(lock, &lk)) == NULL)
		return;

	if ((ld->ld_flags & LD_LOCKED) != 0) {
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp == l)
				recurse = true;
		} else if (ld->ld_cpu == (uint16_t)cpu_number())
			recurse = true;
	}

	if (cpu_intr_p()) {
		if ((ld->ld_flags & LD_SLEEPER) != 0)
			lockdebug_abort1(ld, lk, __func__,
			    "acquiring sleep lock from interrupt context",
			    true);
	}

	if (shared)
		ld->ld_shwant++;
	else
		ld->ld_exwant++;

	if (recurse)
		lockdebug_abort1(ld, lk, __func__, "locking against myself",
		    true);

	lockdebug_unlock(lk);
}

/*
 * lockdebug_locked:
 *
 *	Process a lock acquire operation.
 */
void
lockdebug_locked(volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebuglk_t *lk;
	lockdebug_t *ld;

	if (panicstr != NULL)
		return;

	if ((ld = lockdebug_lookup(lock, &lk)) == NULL)
		return;

	if (shared) {
		l->l_shlocks++;
		ld->ld_shares++;
		ld->ld_shwant--;
	} else {
		if ((ld->ld_flags & LD_LOCKED) != 0)
			lockdebug_abort1(ld, lk, __func__,
			    "already locked", true);

		ld->ld_flags |= LD_LOCKED;
		ld->ld_locked = where;
		ld->ld_cpu = (uint16_t)cpu_number();
		ld->ld_lwp = l;
		ld->ld_exwant--;

		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			l->l_exlocks++;
			TAILQ_INSERT_TAIL(&ld_sleepers, ld, ld_chain);
		} else {
			curcpu()->ci_spin_locks2++;
			TAILQ_INSERT_TAIL(&ld_spinners, ld, ld_chain);
		}
	}

	lockdebug_unlock(lk);
}

/*
 * lockdebug_unlocked:
 *
 *	Process a lock release operation.
 */
void
lockdebug_unlocked(volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebuglk_t *lk;
	lockdebug_t *ld;

	if (panicstr != NULL)
		return;

	if ((ld = lockdebug_lookup(lock, &lk)) == NULL)
		return;

	if (shared) {
		if (l->l_shlocks == 0)
			lockdebug_abort1(ld, lk, __func__,
			    "no shared locks held by LWP", true);
		if (ld->ld_shares == 0)
			lockdebug_abort1(ld, lk, __func__,
			    "no shared holds on this lock", true);
		l->l_shlocks--;
		ld->ld_shares--;
	} else {
		if ((ld->ld_flags & LD_LOCKED) == 0)
			lockdebug_abort1(ld, lk, __func__, "not locked",
			    true);

		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp != curlwp)
				lockdebug_abort1(ld, lk, __func__,
				    "not held by current LWP", true);
			ld->ld_flags &= ~LD_LOCKED;
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
			curlwp->l_exlocks--;
			TAILQ_REMOVE(&ld_sleepers, ld, ld_chain);
		} else {
			if (ld->ld_cpu != (uint16_t)cpu_number())
				lockdebug_abort1(ld, lk, __func__,
				    "not held by current CPU", true);
			ld->ld_flags &= ~LD_LOCKED;
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
			curcpu()->ci_spin_locks2--;
			TAILQ_REMOVE(&ld_spinners, ld, ld_chain);
		}
	}

	lockdebug_unlock(lk);
}

/*
 * lockdebug_barrier:
 *
 *	Panic if we hold any spin lock other than the one specified (if
 *	any), and optionally if we hold any sleep locks.
 */
void
lockdebug_barrier(volatile void *spinlock, int slplocks)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	uint16_t cpuno;
	int s;

	if (panicstr != NULL)
		return;

	crit_enter();

	if (curcpu()->ci_spin_locks2 != 0) {
		cpuno = (uint16_t)cpu_number();

		s = lockdebug_lock_rd(&ld_spinner_lk);
		TAILQ_FOREACH(ld, &ld_spinners, ld_chain) {
			if (ld->ld_lock == spinlock) {
				if (ld->ld_cpu != cpuno)
					lockdebug_abort1(ld, &ld_spinner_lk,
					    __func__,
					    "not held by current CPU", true);
				continue;
			}
			if (ld->ld_cpu == cpuno && (l->l_pflag & LP_INTR) == 0)
				lockdebug_abort1(ld, &ld_spinner_lk,
				    __func__, "spin lock held", true);
		}
		lockdebug_unlock_rd(&ld_spinner_lk, s);
	}

	if (!slplocks) {
		if (l->l_exlocks != 0) {
			s = lockdebug_lock_rd(&ld_sleeper_lk);
			TAILQ_FOREACH(ld, &ld_sleepers, ld_chain) {
				if (ld->ld_lwp == l)
					lockdebug_abort1(ld, &ld_sleeper_lk,
					    __func__, "sleep lock held", true);
			}
			lockdebug_unlock_rd(&ld_sleeper_lk, s);
		}
		if (l->l_shlocks != 0)
			panic("lockdebug_barrier: holding %d shared locks",
			    l->l_shlocks);
	}

	crit_exit();
}

/*
 * lockdebug_mem_check:
 *
 *	Check for in-use locks within a memory region that is
 *	being freed.
 */
void
lockdebug_mem_check(const char *func, void *base, size_t sz)
{
	lockdebug_t *ld;
	lockdebuglk_t *lk;
	int s;

	if (panicstr != NULL)
		return;

	s = lockdebug_lock_rd(&ld_tree_lk);
	ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base);
	if (ld != NULL) {
		const uintptr_t lock = (uintptr_t)ld->ld_lock;

		if ((uintptr_t)base > lock)
			panic("%s: corrupt tree ld=%p, base=%p, sz=%zu",
			    __func__, ld, base, sz);
		if (lock >= (uintptr_t)base + sz)
			ld = NULL;
	}
	lockdebug_unlock_rd(&ld_tree_lk, s);
	if (ld == NULL)
		return;

	if ((ld->ld_flags & LD_SLEEPER) != 0)
		lk = &ld_sleeper_lk;
	else
		lk = &ld_spinner_lk;

	lockdebug_lock(lk);
	lockdebug_abort1(ld, lk, func,
	    "allocation contains active lock", !cold);
	return;
}

/*
 * lockdebug_dump:
 *
 *	Dump information about a lock on panic, or for DDB.
 */
static void
lockdebug_dump(lockdebug_t *ld, void (*pr)(const char *, ...))
{
	int sleeper = (ld->ld_flags & LD_SLEEPER);

	(*pr)(
	    "lock address : %#018lx type     : %18s\n"
	    "shared holds : %18u exclusive: %18u\n"
	    "shares wanted: %18u exclusive: %18u\n"
	    "current cpu  : %18u last held: %18u\n"
	    "current lwp  : %#018lx last held: %#018lx\n"
	    "last locked  : %#018lx unlocked : %#018lx\n"
	    "initialized  : %#018lx\n",
	    (long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"),
	    (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
	    (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
	    (unsigned)cpu_number(), (unsigned)ld->ld_cpu,
	    (long)curlwp, (long)ld->ld_lwp,
	    (long)ld->ld_locked, (long)ld->ld_unlocked,
	    (long)ld->ld_initaddr);

	if (ld->ld_lockops->lo_dump != NULL)
		(*ld->ld_lockops->lo_dump)(ld->ld_lock);

	if (sleeper) {
		(*pr)("\n");
		turnstile_print(ld->ld_lock, pr);
	}
}

/*
 * lockdebug_abort1:
 *
 *	An error has been trapped: dump information about the lock and
 *	panic if requested.
 */
static void
lockdebug_abort1(lockdebug_t *ld, lockdebuglk_t *lk, const char *func,
		 const char *msg, bool dopanic)
{

	printf_nolog("%s error: %s: %s\n\n", ld->ld_lockops->lo_name,
	    func, msg);
	lockdebug_dump(ld, printf_nolog);
	lockdebug_unlock(lk);
	printf_nolog("\n");
	if (dopanic)
		panic("LOCKDEBUG");
}

#endif	/* LOCKDEBUG */

/*
 * lockdebug_lock_print:
 *
 *	Handle the DDB 'show lock' command.
 */
#ifdef DDB
void
lockdebug_lock_print(void *addr, void (*pr)(const char *, ...))
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;

	TAILQ_FOREACH(ld, &ld_all, ld_achain) {
		if (ld->ld_lock == addr) {
			lockdebug_dump(ld, pr);
			return;
		}
	}
	(*pr)("Sorry, no record of a lock with address %p found.\n", addr);
#else
	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
#endif	/* LOCKDEBUG */
}
#endif	/* DDB */

/*
 * lockdebug_abort:
 *
 *	An error has been trapped - dump lock info and call panic().
 */
void
lockdebug_abort(volatile void *lock, lockops_t *ops, const char *func,
		const char *msg)
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;
	lockdebuglk_t *lk;

	if ((ld = lockdebug_lookup(lock, &lk)) != NULL) {
		lockdebug_abort1(ld, lk, func, msg, true);
		/* NOTREACHED */
	}
#endif	/* LOCKDEBUG */

	printf_nolog("%s error: %s: %s\n\n"
	    "lock address : %#018lx\n"
	    "current cpu  : %18d\n"
	    "current lwp  : %#018lx\n",
	    ops->lo_name, func, msg, (long)lock, (int)cpu_number(),
	    (long)curlwp);

	(*ops->lo_dump)(lock);

	printf_nolog("\n");
	panic("lock error");
}