/*	$NetBSD: subr_lockdebug.c,v 1.19 2007/11/21 11:33:11 yamt Exp $	*/

/*-
 * Copyright (c) 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Basic lock debugging code shared among lock primitives.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.19 2007/11/21 11:33:11 yamt Exp $");

#include "opt_multiprocessor.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lock.h>
#include <sys/lockdebug.h>
#include <sys/sleepq.h>
#include <sys/cpu.h>

#include <lib/libkern/rb.h>

#ifdef LOCKDEBUG

#define	LD_BATCH_SHIFT	9
#define	LD_BATCH	(1 << LD_BATCH_SHIFT)
#define	LD_BATCH_MASK	(LD_BATCH - 1)
#define	LD_MAX_LOCKS	1048576
#define	LD_SLOP		16

#define	LD_LOCKED	0x01
#define	LD_SLEEPER	0x02

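/*
 * Internal list lock.  Each lockdebuglk_t is padded and aligned to 64
 * bytes so that every instance occupies its own cache line, presumably to
 * avoid false sharing between CPUs; lku_oldspl records the SPL to restore
 * when the lock is released (see lockdebug_lock()/lockdebug_unlock()).
 */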
typedef union lockdebuglk {
	struct {
		__cpu_simple_lock_t	lku_lock;
		int			lku_oldspl;
	} ul;
	uint8_t	lk_pad[64];
} volatile __aligned(64) lockdebuglk_t;

#define	lk_lock		ul.lku_lock
#define	lk_oldspl	ul.lku_oldspl

typedef struct lockdebug {
	struct rb_node	ld_rb_node;	/* must be the first member */
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
	volatile void	*ld_lock;
	lockops_t	*ld_lockops;
	struct lwp	*ld_lwp;
	uintptr_t	ld_locked;
	uintptr_t	ld_unlocked;
	uintptr_t	ld_initaddr;
	uint16_t	ld_shares;
	uint16_t	ld_cpu;
	uint8_t		ld_flags;
	uint8_t		ld_shwant;	/* advisory */
	uint8_t		ld_exwant;	/* advisory */
	uint8_t		ld_unused;
} volatile lockdebug_t;

typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;

lockdebuglk_t	ld_tree_lk;
lockdebuglk_t	ld_sleeper_lk;
lockdebuglk_t	ld_spinner_lk;
lockdebuglk_t	ld_free_lk;

lockdebuglist_t	ld_sleepers = TAILQ_HEAD_INITIALIZER(ld_sleepers);
lockdebuglist_t	ld_spinners = TAILQ_HEAD_INITIALIZER(ld_spinners);
lockdebuglist_t	ld_free = TAILQ_HEAD_INITIALIZER(ld_free);
lockdebuglist_t	ld_all = TAILQ_HEAD_INITIALIZER(ld_all);
int		ld_nfree;
int		ld_freeptr;
int		ld_recurse;
bool		ld_nomore;
lockdebug_t	*ld_table[LD_MAX_LOCKS / LD_BATCH];

lockdebug_t	ld_prime[LD_BATCH];
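
/*
 * Debug structures are handed out in batches of LD_BATCH entries:
 * ld_prime is the statically allocated first batch, used before
 * kmem_alloc() is available; later batches are allocated by
 * lockdebug_more(); and ld_table[] records a pointer to each batch,
 * indexed by ld_freeptr.
 */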

static void	lockdebug_abort1(lockdebug_t *, lockdebuglk_t *lk,
			const char *, const char *, bool);
static void	lockdebug_more(void);
static void	lockdebug_init(void);

static signed int
ld_rb_compare_nodes(const struct rb_node *n1, const struct rb_node *n2)
{
	const lockdebug_t *ld1 = (const void *)n1;
	const lockdebug_t *ld2 = (const void *)n2;
	intptr_t diff = (intptr_t)ld1->ld_lock - (intptr_t)ld2->ld_lock;
	if (diff < 0)
		return -1;
	else if (diff > 0)
		return 1;
	return 0;
}

static signed int
ld_rb_compare_key(const struct rb_node *n, const void *key)
{
	const lockdebug_t *ld = (const void *)n;
	intptr_t diff = (intptr_t)ld->ld_lock - (intptr_t)key;
	if (diff < 0)
		return -1;
	else if (diff > 0)
		return 1;
	return 0;
}

static struct rb_tree ld_rb_tree;

static const struct rb_tree_ops ld_rb_tree_ops = {
	.rb_compare_nodes = ld_rb_compare_nodes,
	.rb_compare_key = ld_rb_compare_key,
};

static inline void
lockdebug_lock(lockdebuglk_t *lk)
{
	int s;

	s = splhigh();
	__cpu_simple_lock(&lk->lk_lock);
	lk->lk_oldspl = s;
}

static inline void
lockdebug_unlock(lockdebuglk_t *lk)
{
	int s;

	s = lk->lk_oldspl;
	__cpu_simple_unlock(&(lk->lk_lock));
	splx(s);
}

static inline lockdebug_t *
lockdebug_lookup1(volatile void *lock, lockdebuglk_t **lk)
{
	lockdebug_t *ld;

	lockdebug_lock(&ld_tree_lk);
	ld = (lockdebug_t *)rb_tree_find_node(&ld_rb_tree, __UNVOLATILE(lock));
	lockdebug_unlock(&ld_tree_lk);
	if (ld == NULL)
		return NULL;

	if ((ld->ld_flags & LD_SLEEPER) != 0)
		*lk = &ld_sleeper_lk;
	else
		*lk = &ld_spinner_lk;

	lockdebug_lock(*lk);
	return ld;
}

/*
 * lockdebug_lookup:
 *
 *	Find a lockdebug structure by a pointer to a lock and return it locked.
 */
static inline lockdebug_t *
lockdebug_lookup(volatile void *lock, lockdebuglk_t **lk)
{
	lockdebug_t *ld;

	ld = lockdebug_lookup1(lock, lk);
	if (ld == NULL)
		panic("lockdebug_lookup: uninitialized lock (lock=%p)", lock);
	return ld;
}
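
/*
 * A typical caller (compare the functions below) pairs the lookup with a
 * release of the returned list lock, roughly:
 *
 *	lockdebuglk_t *lk;
 *	lockdebug_t *ld;
 *
 *	if ((ld = lockdebug_lookup(lock, &lk)) != NULL) {
 *		... examine or update *ld ...
 *		lockdebug_unlock(lk);
 *	}
 */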

/*
 * lockdebug_init:
 *
 *	Initialize the lockdebug system.  Allocate an initial pool of
 *	lockdebug structures before the VM system is up and running.
 */
static void
lockdebug_init(void)
{
	lockdebug_t *ld;
	int i;

	__cpu_simple_lock_init(&ld_tree_lk.lk_lock);
	__cpu_simple_lock_init(&ld_sleeper_lk.lk_lock);
	__cpu_simple_lock_init(&ld_spinner_lk.lk_lock);
	__cpu_simple_lock_init(&ld_free_lk.lk_lock);

	rb_tree_init(&ld_rb_tree, &ld_rb_tree_ops);

	ld = ld_prime;
	ld_table[0] = ld;
	for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
		TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
		TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
	}
	ld_freeptr = 1;
	ld_nfree = LD_BATCH - 1;
}
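
/*
 * Note that ld_prime[0] is kept off the free list (the loop above starts
 * at index 1), hence ld_nfree starts at LD_BATCH - 1, and that a nonzero
 * ld_freeptr doubles as the "already initialized" test made by
 * lockdebug_alloc().
 */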

/*
 * lockdebug_alloc:
 *
 *	A lock is being initialized, so allocate an associated debug
 *	structure.
 */
bool
lockdebug_alloc(volatile void *lock, lockops_t *lo, uintptr_t initaddr)
{
	struct cpu_info *ci;
	lockdebug_t *ld;
	lockdebuglk_t *lk;

	if (lo == NULL || panicstr != NULL)
		return false;
	if (ld_freeptr == 0)
		lockdebug_init();

	if ((ld = lockdebug_lookup1(lock, &lk)) != NULL) {
		lockdebug_abort1(ld, lk, __func__, "already initialized", true);
		/* NOTREACHED */
	}

	ci = curcpu();

	/*
	 * Pinch a new debug structure.  We may recurse because we call
	 * kmem_alloc(), which may need to initialize new locks somewhere
	 * down the path.  If not recursing, we try to maintain at least
	 * LD_SLOP structures free, which should hopefully be enough to
	 * satisfy kmem_alloc().  If we can't provide a structure, not to
	 * worry: we'll just mark the lock as not having an ID.
	 */
	lockdebug_lock(&ld_free_lk);
	ci->ci_lkdebug_recurse++;

	if (TAILQ_EMPTY(&ld_free)) {
		if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
			ci->ci_lkdebug_recurse--;
			lockdebug_unlock(&ld_free_lk);
			return false;
		}
		lockdebug_more();
	} else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP)
		lockdebug_more();

	if ((ld = TAILQ_FIRST(&ld_free)) == NULL) {
		lockdebug_unlock(&ld_free_lk);
		return false;
	}

	TAILQ_REMOVE(&ld_free, ld, ld_chain);
	ld_nfree--;

	ci->ci_lkdebug_recurse--;
	lockdebug_unlock(&ld_free_lk);

	if (ld->ld_lock != NULL)
		panic("lockdebug_alloc: corrupt table");

	if (lo->lo_sleeplock)
		lockdebug_lock(&ld_sleeper_lk);
	else
		lockdebug_lock(&ld_spinner_lk);

	/* Initialise the structure. */
	ld->ld_lock = lock;
	ld->ld_lockops = lo;
	ld->ld_locked = 0;
	ld->ld_unlocked = 0;
	ld->ld_lwp = NULL;
	ld->ld_initaddr = initaddr;

	lockdebug_lock(&ld_tree_lk);
	rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(&ld->ld_rb_node));
	lockdebug_unlock(&ld_tree_lk);

	if (lo->lo_sleeplock) {
		ld->ld_flags = LD_SLEEPER;
		lockdebug_unlock(&ld_sleeper_lk);
	} else {
		ld->ld_flags = 0;
		lockdebug_unlock(&ld_spinner_lk);
	}

	return true;
}
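
/*
 * Sketch of how a lock primitive is expected to drive these hooks; the
 * names below (foo_lockops, foo_init, f_lock) are illustrative only, and
 * the use of __builtin_return_address(0) as the "initialized at" address
 * is an assumption rather than something taken from a real consumer:
 *
 *	static lockops_t foo_lockops = {
 *		.lo_name = "foo lock",
 *		.lo_sleeplock = 0,
 *		.lo_dump = NULL,
 *	};
 *
 *	void
 *	foo_init(struct foo *f)
 *	{
 *		(void)lockdebug_alloc(&f->f_lock, &foo_lockops,
 *		    (uintptr_t)__builtin_return_address(0));
 *	}
 *
 * The acquire path would then call lockdebug_wantlock() before spinning,
 * lockdebug_locked() once the lock is taken, lockdebug_unlocked() on
 * release, and lockdebug_free() from the destroy path.
 */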

/*
 * lockdebug_free:
 *
 *	A lock is being destroyed, so release debugging resources.
 */
void
lockdebug_free(volatile void *lock)
{
	lockdebug_t *ld;
	lockdebuglk_t *lk;

	if (panicstr != NULL)
		return;

	ld = lockdebug_lookup(lock, &lk);
	if (ld == NULL) {
		/*
		 * lockdebug_lookup() panics if the lock is unknown, so
		 * this is defensive only; don't dereference the NULL ld.
		 */
		panic("lockdebug_free: destroying uninitialized lock %p",
		    lock);
		/* NOTREACHED */
	}
	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0)
		lockdebug_abort1(ld, lk, __func__, "is locked", true);
	lockdebug_lock(&ld_tree_lk);
	rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(&ld->ld_rb_node));
	lockdebug_unlock(&ld_tree_lk);
	ld->ld_lock = NULL;
	lockdebug_unlock(lk);

	lockdebug_lock(&ld_free_lk);
	TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
	ld_nfree++;
	lockdebug_unlock(&ld_free_lk);
}

/*
 * lockdebug_more:
 *
 *	Allocate a batch of debug structures and add to the free list.
 *	Must be called with ld_free_lk held.
 */
static void
lockdebug_more(void)
{
	lockdebug_t *ld;
	void *block;
	int i, base, m;

	while (ld_nfree < LD_SLOP) {
		lockdebug_unlock(&ld_free_lk);
		block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
		lockdebug_lock(&ld_free_lk);

		if (block == NULL)
			return;

		if (ld_nfree > LD_SLOP) {
			/* Somebody beat us to it. */
			lockdebug_unlock(&ld_free_lk);
			kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
			lockdebug_lock(&ld_free_lk);
			continue;
		}

		base = ld_freeptr;
		ld_nfree += LD_BATCH;
		ld = block;
		base <<= LD_BATCH_SHIFT;
		m = min(LD_MAX_LOCKS, base + LD_BATCH);

		if (m == LD_MAX_LOCKS)
			ld_nomore = true;

		for (i = base; i < m; i++, ld++) {
			TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
			TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
		}

		mb_write();
		ld_table[ld_freeptr++] = block;
	}
}
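
/*
 * Note on lockdebug_more() above: ld_free_lk is dropped around the
 * kmem_zalloc()/kmem_free() calls because kmem_zalloc(..., KM_SLEEP) may
 * sleep and may itself initialize new locks (see the recursion note in
 * lockdebug_alloc()); once the lock is retaken, the free-list state is
 * re-checked in case another CPU replenished it in the meantime.
 */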

/*
 * lockdebug_wantlock:
 *
 *	Process the preamble to a lock acquire.
 */
void
lockdebug_wantlock(volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebuglk_t *lk;
	lockdebug_t *ld;
	bool recurse;

	(void)shared;
	recurse = false;

	if (panicstr != NULL)
		return;

	if ((ld = lockdebug_lookup(lock, &lk)) == NULL)
		return;

	if ((ld->ld_flags & LD_LOCKED) != 0) {
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp == l)
				recurse = true;
		} else if (ld->ld_cpu == (uint16_t)cpu_number())
			recurse = true;
	}

#ifdef notyet
	if (cpu_intr_p()) {
		if ((ld->ld_flags & LD_SLEEPER) != 0)
			lockdebug_abort1(ld, lk, __func__,
			    "acquiring sleep lock from interrupt context",
			    true);
	}
#endif

	if (shared)
		ld->ld_shwant++;
	else
		ld->ld_exwant++;

	if (recurse)
		lockdebug_abort1(ld, lk, __func__, "locking against myself",
		    true);

	lockdebug_unlock(lk);
}

/*
 * lockdebug_locked:
 *
 *	Process a lock acquire operation.
 */
void
lockdebug_locked(volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebuglk_t *lk;
	lockdebug_t *ld;

	if (panicstr != NULL)
		return;

	if ((ld = lockdebug_lookup(lock, &lk)) == NULL)
		return;

	if (shared) {
		l->l_shlocks++;
		ld->ld_shares++;
		ld->ld_shwant--;
	} else {
		if ((ld->ld_flags & LD_LOCKED) != 0)
			lockdebug_abort1(ld, lk, __func__,
			    "already locked", true);

		ld->ld_flags |= LD_LOCKED;
		ld->ld_locked = where;
		ld->ld_cpu = (uint16_t)cpu_number();
		ld->ld_lwp = l;
		ld->ld_exwant--;

		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			l->l_exlocks++;
			TAILQ_INSERT_TAIL(&ld_sleepers, ld, ld_chain);
		} else {
			curcpu()->ci_spin_locks2++;
			TAILQ_INSERT_TAIL(&ld_spinners, ld, ld_chain);
		}
	}

	lockdebug_unlock(lk);
}

/*
 * lockdebug_unlocked:
 *
 *	Process a lock release operation.
 */
void
lockdebug_unlocked(volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebuglk_t *lk;
	lockdebug_t *ld;

	if (panicstr != NULL)
		return;

	if ((ld = lockdebug_lookup(lock, &lk)) == NULL)
		return;

	if (shared) {
		if (l->l_shlocks == 0)
			lockdebug_abort1(ld, lk, __func__,
			    "no shared locks held by LWP", true);
		if (ld->ld_shares == 0)
			lockdebug_abort1(ld, lk, __func__,
			    "no shared holds on this lock", true);
		l->l_shlocks--;
		ld->ld_shares--;
	} else {
		if ((ld->ld_flags & LD_LOCKED) == 0)
			lockdebug_abort1(ld, lk, __func__, "not locked",
			    true);

		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp != curlwp)
				lockdebug_abort1(ld, lk, __func__,
				    "not held by current LWP", true);
			ld->ld_flags &= ~LD_LOCKED;
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
			curlwp->l_exlocks--;
			TAILQ_REMOVE(&ld_sleepers, ld, ld_chain);
		} else {
			if (ld->ld_cpu != (uint16_t)cpu_number())
				lockdebug_abort1(ld, lk, __func__,
				    "not held by current CPU", true);
			ld->ld_flags &= ~LD_LOCKED;
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
			curcpu()->ci_spin_locks2--;
			TAILQ_REMOVE(&ld_spinners, ld, ld_chain);
		}
	}

	lockdebug_unlock(lk);
}

/*
 * lockdebug_barrier:
 *
 *	Panic if we hold a spin lock other than the one specified, and
 *	optionally if we hold any sleep locks.
 */
void
lockdebug_barrier(volatile void *spinlock, int slplocks)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	uint16_t cpuno;

	if (panicstr != NULL)
		return;

	if (curcpu()->ci_spin_locks2 != 0) {
		cpuno = (uint16_t)cpu_number();

		lockdebug_lock(&ld_spinner_lk);
		TAILQ_FOREACH(ld, &ld_spinners, ld_chain) {
			if (ld->ld_lock == spinlock) {
				if (ld->ld_cpu != cpuno)
					lockdebug_abort1(ld, &ld_spinner_lk,
					    __func__,
					    "not held by current CPU", true);
				continue;
			}
			if (ld->ld_cpu == cpuno && (l->l_pflag & LP_INTR) == 0)
				lockdebug_abort1(ld, &ld_spinner_lk,
				    __func__, "spin lock held", true);
		}
		lockdebug_unlock(&ld_spinner_lk);
	}

	if (!slplocks) {
		if (l->l_exlocks != 0) {
			lockdebug_lock(&ld_sleeper_lk);
			TAILQ_FOREACH(ld, &ld_sleepers, ld_chain) {
				if (ld->ld_lwp == l)
					lockdebug_abort1(ld, &ld_sleeper_lk,
					    __func__, "sleep lock held", true);
			}
			lockdebug_unlock(&ld_sleeper_lk);
		}
		if (l->l_shlocks != 0)
			panic("lockdebug_barrier: holding %d shared locks",
			    l->l_shlocks);
	}
}
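
/*
 * In other words: the "spinlock" argument names the one spin lock the
 * caller is allowed to hold (and it must be held by this CPU); any other
 * spin lock held by this CPU is an error unless the LWP is running in
 * interrupt context (LP_INTR); and unless "slplocks" is nonzero, the
 * caller must hold no sleep locks and no shared locks at all.
 */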

/*
 * lockdebug_mem_check:
 *
 *	Check for in-use locks within a memory region that is
 *	being freed.
 */
void
lockdebug_mem_check(const char *func, void *base, size_t sz)
{
	lockdebug_t *ld;
	lockdebuglk_t *lk;
	uintptr_t lock;

	lockdebug_lock(&ld_tree_lk);
	ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base);
	lockdebug_unlock(&ld_tree_lk);
	if (ld == NULL)
		return;

	if ((ld->ld_flags & LD_SLEEPER) != 0)
		lk = &ld_sleeper_lk;
	else
		lk = &ld_spinner_lk;

	lockdebug_lock(lk);
	lock = (uintptr_t)ld->ld_lock;
	if ((uintptr_t)base <= lock && lock < (uintptr_t)base + sz) {
		lockdebug_abort1(ld, lk, func,
		    "allocation contains active lock", !cold);
		return;
	}
	lockdebug_unlock(lk);
}

/*
 * lockdebug_dump:
 *
 *	Dump information about a lock on panic, or for DDB.
 */
static void
lockdebug_dump(lockdebug_t *ld, void (*pr)(const char *, ...))
{
	int sleeper = (ld->ld_flags & LD_SLEEPER);

	(*pr)(
	    "lock address : %#018lx type     : %18s\n"
	    "shared holds : %18u exclusive: %18u\n"
	    "shares wanted: %18u exclusive: %18u\n"
	    "current cpu  : %18u last held: %18u\n"
	    "current lwp  : %#018lx last held: %#018lx\n"
	    "last locked  : %#018lx unlocked : %#018lx\n"
	    "initialized  : %#018lx\n",
	    (long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"),
	    (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
	    (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
	    (unsigned)cpu_number(), (unsigned)ld->ld_cpu,
	    (long)curlwp, (long)ld->ld_lwp,
	    (long)ld->ld_locked, (long)ld->ld_unlocked,
	    (long)ld->ld_initaddr);

	if (ld->ld_lockops->lo_dump != NULL)
		(*ld->ld_lockops->lo_dump)(ld->ld_lock);

	if (sleeper) {
		(*pr)("\n");
		turnstile_print(ld->ld_lock, pr);
	}
}

/*
 * lockdebug_abort1:
 *
 *	An error has been trapped - dump information about the offending
 *	lock, release the list lock and optionally panic.
 */
static void
lockdebug_abort1(lockdebug_t *ld, lockdebuglk_t *lk, const char *func,
    const char *msg, bool dopanic)
{

	printf_nolog("%s error: %s: %s\n\n", ld->ld_lockops->lo_name,
	    func, msg);
	lockdebug_dump(ld, printf_nolog);
	lockdebug_unlock(lk);
	printf_nolog("\n");
	if (dopanic)
		panic("LOCKDEBUG");
}

#endif	/* LOCKDEBUG */

/*
 * lockdebug_lock_print:
 *
 *	Handle the DDB 'show lock' command.
 */
#ifdef DDB
void
lockdebug_lock_print(void *addr, void (*pr)(const char *, ...))
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;

	TAILQ_FOREACH(ld, &ld_all, ld_achain) {
		if (ld->ld_lock == addr) {
			lockdebug_dump(ld, pr);
			return;
		}
	}
	(*pr)("Sorry, no record of a lock with address %p found.\n", addr);
#else
	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
#endif	/* LOCKDEBUG */
}
#endif	/* DDB */

/*
 * lockdebug_abort:
 *
 *	An error has been trapped - dump lock info and call panic().
 */
void
lockdebug_abort(volatile void *lock, lockops_t *ops, const char *func,
    const char *msg)
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;
	lockdebuglk_t *lk;

	if ((ld = lockdebug_lookup(lock, &lk)) != NULL) {
		lockdebug_abort1(ld, lk, func, msg, true);
		/* NOTREACHED */
	}
#endif	/* LOCKDEBUG */

	printf_nolog("%s error: %s: %s\n\n"
	    "lock address : %#018lx\n"
	    "current cpu  : %18d\n"
	    "current lwp  : %#018lx\n",
	    ops->lo_name, func, msg, (long)lock, (int)cpu_number(),
	    (long)curlwp);

	(*ops->lo_dump)(lock);

	printf_nolog("\n");
	panic("lock error");
}