/* $NetBSD: subr_lockdebug.c,v 1.18 2007/11/21 10:25:51 yamt Exp $ */

/*-
 * Copyright (c) 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Basic lock debugging code shared among lock primitives.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.18 2007/11/21 10:25:51 yamt Exp $");

#include "opt_multiprocessor.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lock.h>
#include <sys/lockdebug.h>
#include <sys/sleepq.h>
#include <sys/cpu.h>

#include <lib/libkern/rb.h>

#ifdef LOCKDEBUG

#define LD_BATCH_SHIFT  9
#define LD_BATCH        (1 << LD_BATCH_SHIFT)
#define LD_BATCH_MASK   (LD_BATCH - 1)
#define LD_MAX_LOCKS    1048576
#define LD_SLOP         16

#define LD_LOCKED       0x01
#define LD_SLEEPER      0x02
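
/*
 * Descriptive note on the constants above: LD_BATCH is the number of
 * lockdebug_t structures allocated per batch, LD_MAX_LOCKS caps the
 * total number of tracked locks, and LD_SLOP is the free-list low-water
 * mark that lockdebug_alloc() tries to maintain so that recursion into
 * kmem_alloc() can still be satisfied.  LD_LOCKED marks a record whose
 * lock is currently held exclusively; LD_SLEEPER marks a sleep
 * (adaptive) lock as opposed to a spin lock.
 */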

typedef union lockdebuglk {
        struct {
                __cpu_simple_lock_t     lku_lock;
                int                     lku_oldspl;
        } ul;
        uint8_t lk_pad[64];
} volatile __aligned(64) lockdebuglk_t;

#define lk_lock         ul.lku_lock
#define lk_oldspl       ul.lku_oldspl
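
/*
 * Each lockdebuglk_t is padded and aligned to 64 bytes so that the
 * global lockdebug locks declared below sit in separate cache lines and
 * do not falsely share a line between CPUs (64 bytes being the assumed
 * coherency unit).  The lk_lock/lk_oldspl macros simply reach through
 * the union.
 */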

typedef struct lockdebug {
        struct rb_node  ld_rb_node;     /* must be the first member */
        _TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
        _TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
        volatile void   *ld_lock;
        lockops_t       *ld_lockops;
        struct lwp      *ld_lwp;
        uintptr_t       ld_locked;
        uintptr_t       ld_unlocked;
        uintptr_t       ld_initaddr;
        uint16_t        ld_shares;
        uint16_t        ld_cpu;
        uint8_t         ld_flags;
        uint8_t         ld_shwant;      /* advisory */
        uint8_t         ld_exwant;      /* advisory */
        uint8_t         ld_unused;
} volatile lockdebug_t;

typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;

lockdebuglk_t   ld_tree_lk;
lockdebuglk_t   ld_sleeper_lk;
lockdebuglk_t   ld_spinner_lk;
lockdebuglk_t   ld_free_lk;

lockdebuglist_t ld_sleepers = TAILQ_HEAD_INITIALIZER(ld_sleepers);
lockdebuglist_t ld_spinners = TAILQ_HEAD_INITIALIZER(ld_spinners);
lockdebuglist_t ld_free = TAILQ_HEAD_INITIALIZER(ld_free);
lockdebuglist_t ld_all = TAILQ_HEAD_INITIALIZER(ld_all);
int             ld_nfree;
int             ld_freeptr;
int             ld_recurse;
bool            ld_nomore;
lockdebug_t     *ld_table[LD_MAX_LOCKS / LD_BATCH];

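/*
 * ld_prime is a statically allocated bootstrap batch handed out by
 * lockdebug_init(), so the first locks in the system can be tracked
 * before kmem_alloc() is available; later batches come from
 * lockdebug_more().
 */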
lockdebug_t     ld_prime[LD_BATCH];

static void     lockdebug_abort1(lockdebug_t *, lockdebuglk_t *lk,
                    const char *, const char *, bool);
static void     lockdebug_more(void);
static void     lockdebug_init(void);

static signed int
ld_rb_compare_nodes(const struct rb_node *n1, const struct rb_node *n2)
{
        const lockdebug_t *ld1 = (const void *)n1;
        const lockdebug_t *ld2 = (const void *)n2;
        intptr_t diff = (intptr_t)ld1->ld_lock - (intptr_t)ld2->ld_lock;
        if (diff < 0)
                return -1;
        else if (diff > 0)
                return 1;
        return 0;
}

static signed int
ld_rb_compare_key(const struct rb_node *n, const void *key)
{
        const lockdebug_t *ld = (const void *)n;
        intptr_t diff = (intptr_t)ld->ld_lock - (intptr_t)key;
        if (diff < 0)
                return -1;
        else if (diff > 0)
                return 1;
        return 0;
}

static struct rb_tree ld_rb_tree;

static const struct rb_tree_ops ld_rb_tree_ops = {
        .rb_compare_nodes = ld_rb_compare_nodes,
        .rb_compare_key = ld_rb_compare_key,
};

static inline void
lockdebug_lock(lockdebuglk_t *lk)
{
        int s;

        s = splhigh();
        __cpu_simple_lock(&lk->lk_lock);
        lk->lk_oldspl = s;
}

static inline void
lockdebug_unlock(lockdebuglk_t *lk)
{
        int s;

        s = lk->lk_oldspl;
        __cpu_simple_unlock(&(lk->lk_lock));
        splx(s);
}
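
/*
 * Note on the helpers above: the IPL is raised to splhigh() before the
 * simple lock is taken, presumably so that an interrupt handler on the
 * same CPU cannot recurse into lockdebug and deadlock on a list lock
 * held by the interrupted thread; the saved IPL is restored on unlock.
 */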

/*
 * lockdebug_lookup:
 *
 *      Find a lockdebug structure by a pointer to a lock and return it locked.
 */
static inline lockdebug_t *
lockdebug_lookup(volatile void *lock, lockdebuglk_t **lk)
{
        lockdebug_t *ld;

        lockdebug_lock(&ld_tree_lk);
        ld = (lockdebug_t *)rb_tree_find_node(&ld_rb_tree, __UNVOLATILE(lock));
        lockdebug_unlock(&ld_tree_lk);
        if (ld == NULL)
                panic("lockdebug_lookup: uninitialized lock (lock=%p)", lock);

        if ((ld->ld_flags & LD_SLEEPER) != 0)
                *lk = &ld_sleeper_lk;
        else
                *lk = &ld_spinner_lk;

        lockdebug_lock(*lk);
        return ld;
}
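
/*
 * Illustrative sketch (mirroring the callers below, not itself part of
 * the lockdebug API): a consumer looks the record up, works on it with
 * the appropriate list lock held, then drops that lock.
 *
 *      lockdebuglk_t *lk;
 *      lockdebug_t *ld;
 *
 *      if ((ld = lockdebug_lookup(lock, &lk)) != NULL) {
 *              ... examine or update *ld ...
 *              lockdebug_unlock(lk);
 *      }
 */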

/*
 * lockdebug_init:
 *
 *      Initialize the lockdebug system.  Allocate an initial pool of
 *      lockdebug structures before the VM system is up and running.
 */
static void
lockdebug_init(void)
{
        lockdebug_t *ld;
        int i;

        __cpu_simple_lock_init(&ld_tree_lk.lk_lock);
        __cpu_simple_lock_init(&ld_sleeper_lk.lk_lock);
        __cpu_simple_lock_init(&ld_spinner_lk.lk_lock);
        __cpu_simple_lock_init(&ld_free_lk.lk_lock);

        rb_tree_init(&ld_rb_tree, &ld_rb_tree_ops);

        ld = ld_prime;
        ld_table[0] = ld;
        for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
                TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
                TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
        }
        ld_freeptr = 1;
        ld_nfree = LD_BATCH - 1;
}

/*
 * lockdebug_alloc:
 *
 *      A lock is being initialized, so allocate an associated debug
 *      structure.
 */
bool
lockdebug_alloc(volatile void *lock, lockops_t *lo, uintptr_t initaddr)
{
        struct cpu_info *ci;
        lockdebug_t *ld;

        if (lo == NULL || panicstr != NULL)
                return false;
        if (ld_freeptr == 0)
                lockdebug_init();

        ci = curcpu();

        /*
         * Pinch a new debug structure.  We may recurse because we call
         * kmem_alloc(), which may need to initialize new locks somewhere
         * down the path.  If not recursing, we try to maintain at least
         * LD_SLOP structures free, which should hopefully be enough to
         * satisfy kmem_alloc().  If we can't provide a structure, not to
         * worry: we'll just run the lock without a debug record.
         */
        lockdebug_lock(&ld_free_lk);
        ci->ci_lkdebug_recurse++;

        if (TAILQ_EMPTY(&ld_free)) {
                if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
                        ci->ci_lkdebug_recurse--;
                        lockdebug_unlock(&ld_free_lk);
                        return false;
                }
                lockdebug_more();
        } else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP)
                lockdebug_more();

        if ((ld = TAILQ_FIRST(&ld_free)) == NULL) {
                lockdebug_unlock(&ld_free_lk);
                return false;
        }

        TAILQ_REMOVE(&ld_free, ld, ld_chain);
        ld_nfree--;

        ci->ci_lkdebug_recurse--;
        lockdebug_unlock(&ld_free_lk);

        if (ld->ld_lock != NULL)
                panic("lockdebug_alloc: corrupt table");

        if (lo->lo_sleeplock)
                lockdebug_lock(&ld_sleeper_lk);
        else
                lockdebug_lock(&ld_spinner_lk);

        /* Initialise the structure. */
        ld->ld_lock = lock;
        ld->ld_lockops = lo;
        ld->ld_locked = 0;
        ld->ld_unlocked = 0;
        ld->ld_lwp = NULL;
        ld->ld_initaddr = initaddr;

        lockdebug_lock(&ld_tree_lk);
        rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(&ld->ld_rb_node));
        lockdebug_unlock(&ld_tree_lk);

        if (lo->lo_sleeplock) {
                ld->ld_flags = LD_SLEEPER;
                lockdebug_unlock(&ld_sleeper_lk);
        } else {
                ld->ld_flags = 0;
                lockdebug_unlock(&ld_spinner_lk);
        }

        return true;
}
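
/*
 * Illustrative sketch of a caller (hypothetical lock primitive, not
 * taken from this file): the init path hands lockdebug_alloc() the
 * lock, its lockops and the init site's address, and simply carries on
 * if no debug structure could be provided.
 *
 *      void
 *      mylock_init(struct mylock *ml)
 *      {
 *              ...
 *              (void)lockdebug_alloc(ml, &mylock_lockops,
 *                  (uintptr_t)__builtin_return_address(0));
 *      }
 */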

/*
 * lockdebug_free:
 *
 *      A lock is being destroyed, so release debugging resources.
 */
void
lockdebug_free(volatile void *lock)
{
        lockdebug_t *ld;
        lockdebuglk_t *lk;

        if (panicstr != NULL)
                return;

        ld = lockdebug_lookup(lock, &lk);
        if (ld == NULL) {
                panic("lockdebug_free: destroying uninitialized lock %p",
                    lock);
                /* NOTREACHED */
        }
        if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0)
                lockdebug_abort1(ld, lk, __func__, "is locked", true);
        lockdebug_lock(&ld_tree_lk);
        rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(&ld->ld_rb_node));
        lockdebug_unlock(&ld_tree_lk);
        ld->ld_lock = NULL;
        lockdebug_unlock(lk);

        lockdebug_lock(&ld_free_lk);
        TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
        ld_nfree++;
        lockdebug_unlock(&ld_free_lk);
}
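
/*
 * Note that freed lockdebug_t structures are recycled onto ld_free
 * rather than returned to kmem(9): the batches allocated by
 * lockdebug_more() stay in place for the lifetime of the system.
 */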

/*
 * lockdebug_more:
 *
 *      Allocate a batch of debug structures and add to the free list.
 *      Must be called with ld_free_lk held.
 */
static void
lockdebug_more(void)
{
        lockdebug_t *ld;
        void *block;
        int i, base, m;

        while (ld_nfree < LD_SLOP) {
                lockdebug_unlock(&ld_free_lk);
                block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
                lockdebug_lock(&ld_free_lk);

                if (block == NULL)
                        return;

                if (ld_nfree > LD_SLOP) {
                        /* Somebody beat us to it. */
                        lockdebug_unlock(&ld_free_lk);
                        kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
                        lockdebug_lock(&ld_free_lk);
                        continue;
                }

                base = ld_freeptr;
                ld_nfree += LD_BATCH;
                ld = block;
                base <<= LD_BATCH_SHIFT;
                m = min(LD_MAX_LOCKS, base + LD_BATCH);

                if (m == LD_MAX_LOCKS)
                        ld_nomore = true;

                for (i = base; i < m; i++, ld++) {
                        TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
                        TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
                }

                mb_write();
                ld_table[ld_freeptr++] = block;
        }
}

/*
 * lockdebug_wantlock:
 *
 *      Process the preamble to a lock acquire.
 */
void
lockdebug_wantlock(volatile void *lock, uintptr_t where, int shared)
{
        struct lwp *l = curlwp;
        lockdebuglk_t *lk;
        lockdebug_t *ld;
        bool recurse;

        (void)shared;
        recurse = false;

        if (panicstr != NULL)
                return;

        if ((ld = lockdebug_lookup(lock, &lk)) == NULL)
                return;

        if ((ld->ld_flags & LD_LOCKED) != 0) {
                if ((ld->ld_flags & LD_SLEEPER) != 0) {
                        if (ld->ld_lwp == l)
                                recurse = true;
                } else if (ld->ld_cpu == (uint16_t)cpu_number())
                        recurse = true;
        }

#ifdef notyet
        if (cpu_intr_p()) {
                if ((ld->ld_flags & LD_SLEEPER) != 0)
                        lockdebug_abort1(ld, lk, __func__,
                            "acquiring sleep lock from interrupt context",
                            true);
        }
#endif

        if (shared)
                ld->ld_shwant++;
        else
                ld->ld_exwant++;

        if (recurse)
                lockdebug_abort1(ld, lk, __func__, "locking against myself",
                    true);

        lockdebug_unlock(lk);
}

/*
 * lockdebug_locked:
 *
 *      Process a lock acquire operation.
 */
void
lockdebug_locked(volatile void *lock, uintptr_t where, int shared)
{
        struct lwp *l = curlwp;
        lockdebuglk_t *lk;
        lockdebug_t *ld;

        if (panicstr != NULL)
                return;

        if ((ld = lockdebug_lookup(lock, &lk)) == NULL)
                return;

        if (shared) {
                l->l_shlocks++;
                ld->ld_shares++;
                ld->ld_shwant--;
        } else {
                if ((ld->ld_flags & LD_LOCKED) != 0)
                        lockdebug_abort1(ld, lk, __func__,
                            "already locked", true);

                ld->ld_flags |= LD_LOCKED;
                ld->ld_locked = where;
                ld->ld_cpu = (uint16_t)cpu_number();
                ld->ld_lwp = l;
                ld->ld_exwant--;

                if ((ld->ld_flags & LD_SLEEPER) != 0) {
                        l->l_exlocks++;
                        TAILQ_INSERT_TAIL(&ld_sleepers, ld, ld_chain);
                } else {
                        curcpu()->ci_spin_locks2++;
                        TAILQ_INSERT_TAIL(&ld_spinners, ld, ld_chain);
                }
        }

        lockdebug_unlock(lk);
}

/*
 * lockdebug_unlocked:
 *
 *      Process a lock release operation.
 */
void
lockdebug_unlocked(volatile void *lock, uintptr_t where, int shared)
{
        struct lwp *l = curlwp;
        lockdebuglk_t *lk;
        lockdebug_t *ld;

        if (panicstr != NULL)
                return;

        if ((ld = lockdebug_lookup(lock, &lk)) == NULL)
                return;

        if (shared) {
                if (l->l_shlocks == 0)
                        lockdebug_abort1(ld, lk, __func__,
                            "no shared locks held by LWP", true);
                if (ld->ld_shares == 0)
                        lockdebug_abort1(ld, lk, __func__,
                            "no shared holds on this lock", true);
                l->l_shlocks--;
                ld->ld_shares--;
        } else {
                if ((ld->ld_flags & LD_LOCKED) == 0)
                        lockdebug_abort1(ld, lk, __func__, "not locked",
                            true);

                if ((ld->ld_flags & LD_SLEEPER) != 0) {
                        if (ld->ld_lwp != curlwp)
                                lockdebug_abort1(ld, lk, __func__,
                                    "not held by current LWP", true);
                        ld->ld_flags &= ~LD_LOCKED;
                        ld->ld_unlocked = where;
                        ld->ld_lwp = NULL;
                        curlwp->l_exlocks--;
                        TAILQ_REMOVE(&ld_sleepers, ld, ld_chain);
                } else {
                        if (ld->ld_cpu != (uint16_t)cpu_number())
                                lockdebug_abort1(ld, lk, __func__,
                                    "not held by current CPU", true);
                        ld->ld_flags &= ~LD_LOCKED;
                        ld->ld_unlocked = where;
                        ld->ld_lwp = NULL;
                        curcpu()->ci_spin_locks2--;
                        TAILQ_REMOVE(&ld_spinners, ld, ld_chain);
                }
        }

        lockdebug_unlock(lk);
}
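
/*
 * Illustrative sketch (hypothetical caller; the real consumers are the
 * lock primitives, e.g. kern_mutex.c and kern_rwlock.c) of the expected
 * call sequence around an acquire/release pair, where "ra" is the
 * caller's return address recorded as the last locked/unlocked site:
 *
 *      lockdebug_wantlock(lock, ra, shared);   before waiting for the lock
 *      ... acquire the lock ...
 *      lockdebug_locked(lock, ra, shared);     once the lock is held
 *      ...
 *      lockdebug_unlocked(lock, ra, shared);   just before dropping it
 */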

/*
 * lockdebug_barrier:
 *
 *      Panic if we hold any spin lock other than the one specified
 *      (if any) and, optionally, if we hold any sleep locks.
 */
void
lockdebug_barrier(volatile void *spinlock, int slplocks)
{
        struct lwp *l = curlwp;
        lockdebug_t *ld;
        uint16_t cpuno;

        if (panicstr != NULL)
                return;

        if (curcpu()->ci_spin_locks2 != 0) {
                cpuno = (uint16_t)cpu_number();

                lockdebug_lock(&ld_spinner_lk);
                TAILQ_FOREACH(ld, &ld_spinners, ld_chain) {
                        if (ld->ld_lock == spinlock) {
                                if (ld->ld_cpu != cpuno)
                                        lockdebug_abort1(ld, &ld_spinner_lk,
                                            __func__,
                                            "not held by current CPU", true);
                                continue;
                        }
                        if (ld->ld_cpu == cpuno && (l->l_pflag & LP_INTR) == 0)
                                lockdebug_abort1(ld, &ld_spinner_lk,
                                    __func__, "spin lock held", true);
                }
                lockdebug_unlock(&ld_spinner_lk);
        }

        if (!slplocks) {
                if (l->l_exlocks != 0) {
                        lockdebug_lock(&ld_sleeper_lk);
                        TAILQ_FOREACH(ld, &ld_sleepers, ld_chain) {
                                if (ld->ld_lwp == l)
                                        lockdebug_abort1(ld, &ld_sleeper_lk,
                                            __func__, "sleep lock held", true);
                        }
                        lockdebug_unlock(&ld_sleeper_lk);
                }
                if (l->l_shlocks != 0)
                        panic("lockdebug_barrier: holding %d shared locks",
                            l->l_shlocks);
        }
}
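
/*
 * Usage note (descriptive, derived from the code above): a caller
 * passes the single spin lock it is still entitled to hold (or NULL
 * for none) and a flag indicating whether sleep locks are acceptable;
 * for example, asserting that no locks at all are held:
 *
 *      lockdebug_barrier(NULL, 0);
 */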

/*
 * lockdebug_mem_check:
 *
 *      Check for in-use locks within a memory region that is
 *      being freed.
 */
void
lockdebug_mem_check(const char *func, void *base, size_t sz)
{
        lockdebug_t *ld;
        lockdebuglk_t *lk;
        uintptr_t lock;

        lockdebug_lock(&ld_tree_lk);
        ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base);
        lockdebug_unlock(&ld_tree_lk);
        if (ld == NULL)
                return;

        if ((ld->ld_flags & LD_SLEEPER) != 0)
                lk = &ld_sleeper_lk;
        else
                lk = &ld_spinner_lk;

        lockdebug_lock(lk);
        lock = (uintptr_t)ld->ld_lock;
        if ((uintptr_t)base <= lock && lock < (uintptr_t)base + sz) {
                lockdebug_abort1(ld, lk, func,
                    "allocation contains active lock", !cold);
                return;
        }
        lockdebug_unlock(lk);
}
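
/*
 * Illustrative sketch of a caller (an allocator's free path; the exact
 * call site is an assumption, not taken from this file):
 *
 *      lockdebug_mem_check("kmem_free", p, size);
 *
 * which aborts if a still-initialized lock lies inside [p, p + size).
 */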

/*
 * lockdebug_dump:
 *
 *      Dump information about a lock on panic, or for DDB.
 */
static void
lockdebug_dump(lockdebug_t *ld, void (*pr)(const char *, ...))
{
        int sleeper = (ld->ld_flags & LD_SLEEPER);

        (*pr)(
            "lock address : %#018lx type : %18s\n"
            "shared holds : %18u exclusive: %18u\n"
            "shares wanted: %18u exclusive: %18u\n"
            "current cpu : %18u last held: %18u\n"
            "current lwp : %#018lx last held: %#018lx\n"
            "last locked : %#018lx unlocked : %#018lx\n"
            "initialized : %#018lx\n",
            (long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"),
            (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
            (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
            (unsigned)cpu_number(), (unsigned)ld->ld_cpu,
            (long)curlwp, (long)ld->ld_lwp,
            (long)ld->ld_locked, (long)ld->ld_unlocked,
            (long)ld->ld_initaddr);

        if (ld->ld_lockops->lo_dump != NULL)
                (*ld->ld_lockops->lo_dump)(ld->ld_lock);

        if (sleeper) {
                (*pr)("\n");
                turnstile_print(ld->ld_lock, pr);
        }
}

/*
 * lockdebug_abort1:
 *
 *      An error has been trapped: dump information about the lock and
 *      optionally panic.
 */
static void
lockdebug_abort1(lockdebug_t *ld, lockdebuglk_t *lk, const char *func,
    const char *msg, bool dopanic)
{

        printf_nolog("%s error: %s: %s\n\n", ld->ld_lockops->lo_name,
            func, msg);
        lockdebug_dump(ld, printf_nolog);
        lockdebug_unlock(lk);
        printf_nolog("\n");
        if (dopanic)
                panic("LOCKDEBUG");
}

#endif  /* LOCKDEBUG */

/*
 * lockdebug_lock_print:
 *
 *      Handle the DDB 'show lock' command.
 */
#ifdef DDB
void
lockdebug_lock_print(void *addr, void (*pr)(const char *, ...))
{
#ifdef LOCKDEBUG
        lockdebug_t *ld;

        TAILQ_FOREACH(ld, &ld_all, ld_achain) {
                if (ld->ld_lock == addr) {
                        lockdebug_dump(ld, pr);
                        return;
                }
        }
        (*pr)("Sorry, no record of a lock with address %p found.\n", addr);
#else
        (*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
#endif  /* LOCKDEBUG */
}
#endif  /* DDB */

/*
 * lockdebug_abort:
 *
 *      An error has been trapped - dump lock info and call panic().
 */
void
lockdebug_abort(volatile void *lock, lockops_t *ops, const char *func,
    const char *msg)
{
#ifdef LOCKDEBUG
        lockdebug_t *ld;
        lockdebuglk_t *lk;

        if ((ld = lockdebug_lookup(lock, &lk)) != NULL) {
                lockdebug_abort1(ld, lk, func, msg, true);
                /* NOTREACHED */
        }
#endif  /* LOCKDEBUG */

        printf_nolog("%s error: %s: %s\n\n"
            "lock address : %#018lx\n"
            "current cpu : %18d\n"
            "current lwp : %#018lx\n",
            ops->lo_name, func, msg, (long)lock, (int)cpu_number(),
            (long)curlwp);

        (*ops->lo_dump)(lock);

        printf_nolog("\n");
        panic("lock error");
}