/*	$NetBSD: subr_lockdebug.c,v 1.4.2.7 2008/01/21 09:46:19 yamt Exp $	*/

/*-
 * Copyright (c) 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Basic lock debugging code shared among lock primitives.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.4.2.7 2008/01/21 09:46:19 yamt Exp $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lockdebug.h>
#include <sys/sleepq.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/lock.h>

#include <lib/libkern/rb.h>

#include <machine/lock.h>
#ifdef LOCKDEBUG

#define	LD_BATCH_SHIFT	9
#define	LD_BATCH	(1 << LD_BATCH_SHIFT)
#define	LD_BATCH_MASK	(LD_BATCH - 1)
#define	LD_MAX_LOCKS	1048576
#define	LD_SLOP		16

#define	LD_LOCKED	0x01
#define	LD_SLEEPER	0x02

#define	LD_WRITE_LOCK	0x80000000

typedef union lockdebuglk {
	struct {
		u_int	lku_lock;
		int	lku_oldspl;
	} ul;
	uint8_t	lk_pad[CACHE_LINE_SIZE];
} volatile __aligned(CACHE_LINE_SIZE) lockdebuglk_t;

#define	lk_lock		ul.lku_lock
#define	lk_oldspl	ul.lku_oldspl

typedef struct lockdebug {
	struct rb_node	ld_rb_node;	/* must be the first member */
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
	volatile void	*ld_lock;
	lockops_t	*ld_lockops;
	struct lwp	*ld_lwp;
	uintptr_t	ld_locked;
	uintptr_t	ld_unlocked;
	uintptr_t	ld_initaddr;
	uint16_t	ld_shares;
	uint16_t	ld_cpu;
	uint8_t		ld_flags;
	uint8_t		ld_shwant;	/* advisory */
	uint8_t		ld_exwant;	/* advisory */
	uint8_t		ld_unused;
} volatile lockdebug_t;

typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;

lockdebuglk_t	ld_tree_lk;
lockdebuglk_t	ld_sleeper_lk;
lockdebuglk_t	ld_spinner_lk;
lockdebuglk_t	ld_free_lk;

lockdebuglist_t	ld_sleepers = TAILQ_HEAD_INITIALIZER(ld_sleepers);
lockdebuglist_t	ld_spinners = TAILQ_HEAD_INITIALIZER(ld_spinners);
lockdebuglist_t	ld_free = TAILQ_HEAD_INITIALIZER(ld_free);
lockdebuglist_t	ld_all = TAILQ_HEAD_INITIALIZER(ld_all);
int		ld_nfree;
int		ld_freeptr;
int		ld_recurse;
bool		ld_nomore;
lockdebug_t	*ld_table[LD_MAX_LOCKS / LD_BATCH];

lockdebug_t	ld_prime[LD_BATCH];

static void	lockdebug_abort1(lockdebug_t *, lockdebuglk_t *lk,
				 const char *, const char *, bool);
static void	lockdebug_more(void);
static void	lockdebug_init(void);

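/*
 * ld_rb_compare_nodes/ld_rb_compare_key:
 *
 *	Order lockdebug records in the rb-tree by the address of the lock
 *	they describe.  Note the inverted sign convention (positive when
 *	the first address is the lower one); the tree is only ever probed
 *	through this pair of comparators, so what matters is that they
 *	agree with each other.
 */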
static signed int
ld_rb_compare_nodes(const struct rb_node *n1, const struct rb_node *n2)
{
	const lockdebug_t *ld1 = (const void *)n1;
	const lockdebug_t *ld2 = (const void *)n2;
	const uintptr_t a = (uintptr_t)ld1->ld_lock;
	const uintptr_t b = (uintptr_t)ld2->ld_lock;

	if (a < b)
		return 1;
	if (a > b)
		return -1;
	return 0;
}

static signed int
ld_rb_compare_key(const struct rb_node *n, const void *key)
{
	const lockdebug_t *ld = (const void *)n;
	const uintptr_t a = (uintptr_t)ld->ld_lock;
	const uintptr_t b = (uintptr_t)key;

	if (a < b)
		return 1;
	if (a > b)
		return -1;
	return 0;
}

static struct rb_tree ld_rb_tree;

static const struct rb_tree_ops ld_rb_tree_ops = {
	.rb_compare_nodes = ld_rb_compare_nodes,
	.rb_compare_key = ld_rb_compare_key,
};

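/*
 * lockdebug_lock_init:
 *
 *	Initialize one of lockdebug's internal spin locks.
 */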
static void
lockdebug_lock_init(lockdebuglk_t *lk)
{

	lk->lk_lock = 0;
}

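/*
 * lockdebug_lock:
 *
 *	Acquire an internal lock exclusively.  Raises the SPL to splhigh()
 *	(the old level is stashed in the lock), spins until the lock word
 *	is clear, then swings it from 0 to LD_WRITE_LOCK with a
 *	compare-and-swap.  membar_enter() provides the acquire semantics.
 */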
static void
lockdebug_lock(lockdebuglk_t *lk)
{
	int s;

	s = splhigh();
	do {
		while (lk->lk_lock != 0) {
			SPINLOCK_SPIN_HOOK;
		}
	} while (atomic_cas_uint(&lk->lk_lock, 0, LD_WRITE_LOCK) != 0);
	lk->lk_oldspl = s;
	membar_enter();
}

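/*
 * lockdebug_unlock:
 *
 *	Release an internal lock held exclusively, and restore the SPL
 *	saved by lockdebug_lock().
 */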
static void
lockdebug_unlock(lockdebuglk_t *lk)
{
	int s;

	s = lk->lk_oldspl;
	membar_exit();
	lk->lk_lock = 0;
	splx(s);
}

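/*
 * lockdebug_lock_rd:
 *
 *	Acquire an internal lock for reading.  Spins while a writer holds
 *	the lock (lk_lock == LD_WRITE_LOCK), then bumps the reader count
 *	with a compare-and-swap.  Returns the previous SPL, which the
 *	caller must hand back to lockdebug_unlock_rd().
 */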
static int
lockdebug_lock_rd(lockdebuglk_t *lk)
{
	u_int val;
	int s;

	s = splhigh();
	do {
		while ((val = lk->lk_lock) == LD_WRITE_LOCK) {
			SPINLOCK_SPIN_HOOK;
		}
	} while (atomic_cas_uint(&lk->lk_lock, val, val + 1) != val);
	membar_enter();
	return s;
}

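/*
 * lockdebug_unlock_rd:
 *
 *	Drop a read hold on an internal lock and restore the SPL returned
 *	by the matching lockdebug_lock_rd().
 */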
static void
lockdebug_unlock_rd(lockdebuglk_t *lk, int s)
{

	membar_exit();
	atomic_dec_uint(&lk->lk_lock);
	splx(s);
}

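/*
 * lockdebug_lookup1:
 *
 *	Look up a lockdebug structure by lock address.  The rb-tree is
 *	searched under a read hold on ld_tree_lk; if a record is found,
 *	the sleeper or spinner lock covering it is acquired and must
 *	later be released by the caller.  Returns NULL if the lock is
 *	not known.
 */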
static inline lockdebug_t *
lockdebug_lookup1(volatile void *lock, lockdebuglk_t **lk)
{
	lockdebug_t *ld;
	int s;

	s = lockdebug_lock_rd(&ld_tree_lk);
	ld = (lockdebug_t *)rb_tree_find_node(&ld_rb_tree, __UNVOLATILE(lock));
	lockdebug_unlock_rd(&ld_tree_lk, s);
	if (ld == NULL)
		return NULL;

	if ((ld->ld_flags & LD_SLEEPER) != 0)
		*lk = &ld_sleeper_lk;
	else
		*lk = &ld_spinner_lk;

	lockdebug_lock(*lk);
	return ld;
}

/*
 * lockdebug_lookup:
 *
 *	Find a lockdebug structure by the pointer to a lock and return
 *	it locked.  Panics if the lock is not known.
 */
static inline lockdebug_t *
lockdebug_lookup(volatile void *lock, lockdebuglk_t **lk)
{
	lockdebug_t *ld;

	ld = lockdebug_lookup1(lock, lk);
	if (ld == NULL)
		panic("lockdebug_lookup: uninitialized lock (lock=%p)", lock);
	return ld;
}

/*
 * lockdebug_init:
 *
 *	Initialize the lockdebug system.  Allocate an initial pool of
 *	lockdebug structures before the VM system is up and running.
 */
static void
lockdebug_init(void)
{
	lockdebug_t *ld;
	int i;

	lockdebug_lock_init(&ld_tree_lk);
	lockdebug_lock_init(&ld_sleeper_lk);
	lockdebug_lock_init(&ld_spinner_lk);
	lockdebug_lock_init(&ld_free_lk);

	rb_tree_init(&ld_rb_tree, &ld_rb_tree_ops);

	ld = ld_prime;
	ld_table[0] = ld;
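	/*
	 * Entry 0 of the primed batch is deliberately kept off the free
	 * list, and ld_freeptr starts at 1: lockdebug_alloc() uses
	 * ld_freeptr == 0 to mean "not yet initialized".
	 */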
	for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
		TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
		TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
	}
	ld_freeptr = 1;
	ld_nfree = LD_BATCH - 1;
}

/*
 * lockdebug_alloc:
 *
 *	A lock is being initialized, so allocate an associated debug
 *	structure.
 */
bool
lockdebug_alloc(volatile void *lock, lockops_t *lo, uintptr_t initaddr)
{
	struct cpu_info *ci;
	lockdebug_t *ld;
	lockdebuglk_t *lk;

	if (lo == NULL || panicstr != NULL)
		return false;
	if (ld_freeptr == 0)
		lockdebug_init();

	if ((ld = lockdebug_lookup1(lock, &lk)) != NULL) {
		lockdebug_abort1(ld, lk, __func__, "already initialized", true);
		/* NOTREACHED */
	}

	/*
	 * Pinch a new debug structure.  We may recurse because we call
	 * kmem_alloc(), which may need to initialize new locks somewhere
	 * down the path.  If not recursing, we try to maintain at least
	 * LD_SLOP structures free, which should hopefully be enough to
	 * satisfy kmem_alloc().  If we can't provide a structure, not to
	 * worry: we'll just mark the lock as not having an ID.
	 */
	lockdebug_lock(&ld_free_lk);
	ci = curcpu();
	ci->ci_lkdebug_recurse++;

	if (TAILQ_EMPTY(&ld_free)) {
		if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
			ci->ci_lkdebug_recurse--;
			lockdebug_unlock(&ld_free_lk);
			return false;
		}
		lockdebug_more();
	} else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP)
		lockdebug_more();

	if ((ld = TAILQ_FIRST(&ld_free)) == NULL) {
		lockdebug_unlock(&ld_free_lk);
		return false;
	}

	TAILQ_REMOVE(&ld_free, ld, ld_chain);
	ld_nfree--;

	ci->ci_lkdebug_recurse--;
	lockdebug_unlock(&ld_free_lk);

	if (ld->ld_lock != NULL)
		panic("lockdebug_alloc: corrupt table");

	if (lo->lo_sleeplock)
		lockdebug_lock(&ld_sleeper_lk);
	else
		lockdebug_lock(&ld_spinner_lk);

	/* Initialise the structure. */
	ld->ld_lock = lock;
	ld->ld_lockops = lo;
	ld->ld_locked = 0;
	ld->ld_unlocked = 0;
	ld->ld_lwp = NULL;
	ld->ld_initaddr = initaddr;

	lockdebug_lock(&ld_tree_lk);
	rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(&ld->ld_rb_node));
	lockdebug_unlock(&ld_tree_lk);

	if (lo->lo_sleeplock) {
		ld->ld_flags = LD_SLEEPER;
		lockdebug_unlock(&ld_sleeper_lk);
	} else {
		ld->ld_flags = 0;
		lockdebug_unlock(&ld_spinner_lk);
	}

	return true;
}

/*
 * lockdebug_free:
 *
 *	A lock is being destroyed, so release debugging resources.
 */
void
lockdebug_free(volatile void *lock)
{
	lockdebug_t *ld;
	lockdebuglk_t *lk;

	if (panicstr != NULL)
		return;

	ld = lockdebug_lookup(lock, &lk);
	if (ld == NULL) {
		panic("lockdebug_free: destroying uninitialized lock %p",
		    lock);
		/* NOTREACHED */
	}
	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0)
		lockdebug_abort1(ld, lk, __func__, "is locked", true);
	lockdebug_lock(&ld_tree_lk);
	rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(&ld->ld_rb_node));
	lockdebug_unlock(&ld_tree_lk);
	ld->ld_lock = NULL;
	lockdebug_unlock(lk);

	lockdebug_lock(&ld_free_lk);
	TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
	ld_nfree++;
	lockdebug_unlock(&ld_free_lk);
}

/*
 * lockdebug_more:
 *
 *	Allocate a batch of debug structures and add to the free list.
 *	Must be called with ld_free_lk held.
 */
static void
lockdebug_more(void)
{
	lockdebug_t *ld;
	void *block;
	int i, base, m;

	while (ld_nfree < LD_SLOP) {
		lockdebug_unlock(&ld_free_lk);
		block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
		lockdebug_lock(&ld_free_lk);

		if (block == NULL)
			return;

		if (ld_nfree > LD_SLOP) {
			/* Somebody beat us to it. */
			lockdebug_unlock(&ld_free_lk);
			kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
			lockdebug_lock(&ld_free_lk);
			continue;
		}

		base = ld_freeptr;
		ld_nfree += LD_BATCH;
		ld = block;
		base <<= LD_BATCH_SHIFT;
		m = min(LD_MAX_LOCKS, base + LD_BATCH);

		if (m == LD_MAX_LOCKS)
			ld_nomore = true;

		for (i = base; i < m; i++, ld++) {
			TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
			TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
		}

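		/*
		 * Make sure the list insertions above are visible to
		 * other CPUs before the new block is published via
		 * ld_table[].
		 */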
		membar_producer();
		ld_table[ld_freeptr++] = block;
	}
}

/*
 * lockdebug_wantlock:
 *
 *	Process the preamble to a lock acquire operation.
 */
void
lockdebug_wantlock(volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebuglk_t *lk;
	lockdebug_t *ld;
	bool recurse;

	recurse = false;

	if (panicstr != NULL)
		return;

	if ((ld = lockdebug_lookup(lock, &lk)) == NULL)
		return;

	if ((ld->ld_flags & LD_LOCKED) != 0) {
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp == l)
				recurse = true;
		} else if (ld->ld_cpu == (uint16_t)cpu_number())
			recurse = true;
	}

	if (cpu_intr_p()) {
		if ((ld->ld_flags & LD_SLEEPER) != 0)
			lockdebug_abort1(ld, lk, __func__,
			    "acquiring sleep lock from interrupt context",
			    true);
	}

	if (shared)
		ld->ld_shwant++;
	else
		ld->ld_exwant++;

	if (recurse)
		lockdebug_abort1(ld, lk, __func__, "locking against myself",
		    true);

	lockdebug_unlock(lk);
}

/*
 * lockdebug_locked:
 *
 *	Process a lock acquire operation.
 */
void
lockdebug_locked(volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebuglk_t *lk;
	lockdebug_t *ld;

	if (panicstr != NULL)
		return;

	if ((ld = lockdebug_lookup(lock, &lk)) == NULL)
		return;

	if (shared) {
		l->l_shlocks++;
		ld->ld_shares++;
		ld->ld_shwant--;
	} else {
		if ((ld->ld_flags & LD_LOCKED) != 0)
			lockdebug_abort1(ld, lk, __func__,
			    "already locked", true);

		ld->ld_flags |= LD_LOCKED;
		ld->ld_locked = where;
		ld->ld_cpu = (uint16_t)cpu_number();
		ld->ld_lwp = l;
		ld->ld_exwant--;

		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			l->l_exlocks++;
			TAILQ_INSERT_TAIL(&ld_sleepers, ld, ld_chain);
		} else {
			curcpu()->ci_spin_locks2++;
			TAILQ_INSERT_TAIL(&ld_spinners, ld, ld_chain);
		}
	}

	lockdebug_unlock(lk);
}

/*
 * lockdebug_unlocked:
 *
 *	Process a lock release operation.
 */
void
lockdebug_unlocked(volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebuglk_t *lk;
	lockdebug_t *ld;

	if (panicstr != NULL)
		return;

	if ((ld = lockdebug_lookup(lock, &lk)) == NULL)
		return;

	if (shared) {
		if (l->l_shlocks == 0)
			lockdebug_abort1(ld, lk, __func__,
			    "no shared locks held by LWP", true);
		if (ld->ld_shares == 0)
			lockdebug_abort1(ld, lk, __func__,
			    "no shared holds on this lock", true);
		l->l_shlocks--;
		ld->ld_shares--;
	} else {
		if ((ld->ld_flags & LD_LOCKED) == 0)
			lockdebug_abort1(ld, lk, __func__, "not locked",
			    true);

		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp != curlwp)
				lockdebug_abort1(ld, lk, __func__,
				    "not held by current LWP", true);
			ld->ld_flags &= ~LD_LOCKED;
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
			curlwp->l_exlocks--;
			TAILQ_REMOVE(&ld_sleepers, ld, ld_chain);
		} else {
			if (ld->ld_cpu != (uint16_t)cpu_number())
				lockdebug_abort1(ld, lk, __func__,
				    "not held by current CPU", true);
			ld->ld_flags &= ~LD_LOCKED;
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
			curcpu()->ci_spin_locks2--;
			TAILQ_REMOVE(&ld_spinners, ld, ld_chain);
		}
	}

	lockdebug_unlock(lk);
}

/*
 * lockdebug_barrier:
 *
 *	Panic if we hold any spin lock other than the one specified,
 *	and optionally, if we hold any sleep locks.
 */
void
lockdebug_barrier(volatile void *spinlock, int slplocks)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	uint16_t cpuno;
	int s;

	if (panicstr != NULL)
		return;

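	/* Prevent preemption so cpu_number() stays stable below. */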
	crit_enter();

	if (curcpu()->ci_spin_locks2 != 0) {
		cpuno = (uint16_t)cpu_number();

		s = lockdebug_lock_rd(&ld_spinner_lk);
		TAILQ_FOREACH(ld, &ld_spinners, ld_chain) {
			if (ld->ld_lock == spinlock) {
				if (ld->ld_cpu != cpuno)
					lockdebug_abort1(ld, &ld_spinner_lk,
					    __func__,
					    "not held by current CPU", true);
				continue;
			}
			if (ld->ld_cpu == cpuno && (l->l_pflag & LP_INTR) == 0)
				lockdebug_abort1(ld, &ld_spinner_lk,
				    __func__, "spin lock held", true);
		}
		lockdebug_unlock_rd(&ld_spinner_lk, s);
	}

	if (!slplocks) {
		if (l->l_exlocks != 0) {
			s = lockdebug_lock_rd(&ld_sleeper_lk);
			TAILQ_FOREACH(ld, &ld_sleepers, ld_chain) {
				if (ld->ld_lwp == l)
					lockdebug_abort1(ld, &ld_sleeper_lk,
					    __func__, "sleep lock held", true);
			}
			lockdebug_unlock_rd(&ld_sleeper_lk, s);
		}
		if (l->l_shlocks != 0)
			panic("lockdebug_barrier: holding %d shared locks",
			    l->l_shlocks);
	}

	crit_exit();
}

/*
 * lockdebug_mem_check:
 *
 *	Check for in-use locks within a memory region that is
 *	being freed.
 */
void
lockdebug_mem_check(const char *func, void *base, size_t sz)
{
	lockdebug_t *ld;
	lockdebuglk_t *lk;
	int s;

	if (panicstr != NULL)
		return;

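	/*
	 * Find the lowest-addressed lock record at or above the base of
	 * the region; if it lies inside [base, base + sz) the region
	 * still contains an initialized lock.
	 */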
	s = lockdebug_lock_rd(&ld_tree_lk);
	ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base);
	if (ld != NULL) {
		const uintptr_t lock = (uintptr_t)ld->ld_lock;

		if ((uintptr_t)base > lock)
			panic("%s: corrupt tree ld=%p, base=%p, sz=%zu",
			    __func__, ld, base, sz);
		if (lock >= (uintptr_t)base + sz)
			ld = NULL;
	}
	lockdebug_unlock_rd(&ld_tree_lk, s);
	if (ld == NULL)
		return;

	if ((ld->ld_flags & LD_SLEEPER) != 0)
		lk = &ld_sleeper_lk;
	else
		lk = &ld_spinner_lk;

	lockdebug_lock(lk);
	lockdebug_abort1(ld, lk, func,
	    "allocation contains active lock", !cold);
}

/*
 * lockdebug_dump:
 *
 *	Dump information about a lock on panic, or for DDB.
 */
static void
lockdebug_dump(lockdebug_t *ld, void (*pr)(const char *, ...))
{
	int sleeper = (ld->ld_flags & LD_SLEEPER);

	(*pr)(
	    "lock address : %#018lx type     : %18s\n"
	    "shared holds : %18u exclusive: %18u\n"
	    "shares wanted: %18u exclusive: %18u\n"
	    "current cpu  : %18u last held: %18u\n"
	    "current lwp  : %#018lx last held: %#018lx\n"
	    "last locked  : %#018lx unlocked : %#018lx\n"
	    "initialized  : %#018lx\n",
	    (long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"),
	    (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
	    (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
	    (unsigned)cpu_number(), (unsigned)ld->ld_cpu,
	    (long)curlwp, (long)ld->ld_lwp,
	    (long)ld->ld_locked, (long)ld->ld_unlocked,
	    (long)ld->ld_initaddr);

	if (ld->ld_lockops->lo_dump != NULL)
		(*ld->ld_lockops->lo_dump)(ld->ld_lock);

	if (sleeper) {
		(*pr)("\n");
		turnstile_print(ld->ld_lock, pr);
	}
}

/*
 * lockdebug_abort1:
 *
 *	An error has been trapped - dump information about the lock,
 *	release the lockdebug lock, and panic if requested.
 */
static void
lockdebug_abort1(lockdebug_t *ld, lockdebuglk_t *lk, const char *func,
		 const char *msg, bool dopanic)
{

	printf_nolog("%s error: %s: %s\n\n", ld->ld_lockops->lo_name,
	    func, msg);
	lockdebug_dump(ld, printf_nolog);
	lockdebug_unlock(lk);
	printf_nolog("\n");
	if (dopanic)
		panic("LOCKDEBUG");
}

#endif	/* LOCKDEBUG */

/*
 * lockdebug_lock_print:
 *
 *	Handle the DDB 'show lock' command.
 */
#ifdef DDB
void
lockdebug_lock_print(void *addr, void (*pr)(const char *, ...))
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;

	TAILQ_FOREACH(ld, &ld_all, ld_achain) {
		if (ld->ld_lock == addr) {
			lockdebug_dump(ld, pr);
			return;
		}
	}
	(*pr)("Sorry, no record of a lock with address %p found.\n", addr);
#else
	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
#endif	/* LOCKDEBUG */
}
#endif	/* DDB */

/*
 * lockdebug_abort:
 *
 *	An error has been trapped - dump lock info and call panic().
 */
void
lockdebug_abort(volatile void *lock, lockops_t *ops, const char *func,
		const char *msg)
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;
	lockdebuglk_t *lk;

	if ((ld = lockdebug_lookup(lock, &lk)) != NULL) {
		lockdebug_abort1(ld, lk, func, msg, true);
		/* NOTREACHED */
	}
#endif	/* LOCKDEBUG */

	printf_nolog("%s error: %s: %s\n\n"
	    "lock address : %#018lx\n"
	    "current cpu  : %18d\n"
	    "current lwp  : %#018lx\n",
	    ops->lo_name, func, msg, (long)lock, (int)cpu_number(),
	    (long)curlwp);

	(*ops->lo_dump)(lock);

	printf_nolog("\n");
	panic("lock error");
}