/* $NetBSD: subr_lockdebug.c,v 1.45.2.2 2014/05/22 11:41:03 yamt Exp $ */

/*-
 * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Basic lock debugging code shared among lock primitives.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.45.2.2 2014/05/22 11:41:03 yamt Exp $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lockdebug.h>
#include <sys/sleepq.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/lock.h>
#include <sys/rbtree.h>

#include <machine/lock.h>

unsigned int            ld_panic;

#ifdef LOCKDEBUG

#define LD_BATCH_SHIFT  9
#define LD_BATCH        (1 << LD_BATCH_SHIFT)
#define LD_BATCH_MASK   (LD_BATCH - 1)
#define LD_MAX_LOCKS    1048576
#define LD_SLOP         16

#define LD_LOCKED       0x01
#define LD_SLEEPER      0x02

#define LD_WRITE_LOCK   0x80000000

typedef struct lockdebug {
        struct rb_node  ld_rb_node;
        __cpu_simple_lock_t ld_spinlock;
        _TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
        _TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
        volatile void   *ld_lock;
        lockops_t       *ld_lockops;
        struct lwp      *ld_lwp;
        uintptr_t       ld_locked;
        uintptr_t       ld_unlocked;
        uintptr_t       ld_initaddr;
        uint16_t        ld_shares;
        uint16_t        ld_cpu;
        uint8_t         ld_flags;
        uint8_t         ld_shwant;      /* advisory */
        uint8_t         ld_exwant;      /* advisory */
        uint8_t         ld_unused;
} volatile lockdebug_t;

typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;

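/*
 * ld_prime supplies the first LD_BATCH lockdebug structures so that the
 * subsystem can operate before kmem_alloc() is usable; lockdebug_init()
 * below never puts slot 0 of that array on the free list.  ld_free holds
 * unused structures, ld_all links every structure for the DDB dump, and
 * ld_mod_lk serializes allocation and release of structures.
 */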
__cpu_simple_lock_t     ld_mod_lk;
lockdebuglist_t         ld_free = TAILQ_HEAD_INITIALIZER(ld_free);
lockdebuglist_t         ld_all = TAILQ_HEAD_INITIALIZER(ld_all);
int                     ld_nfree;
int                     ld_freeptr;
int                     ld_recurse;
bool                    ld_nomore;
lockdebug_t             ld_prime[LD_BATCH];

static void     lockdebug_abort1(lockdebug_t *, int, const char *,
                    const char *, bool);
static int      lockdebug_more(int);
static void     lockdebug_init(void);

static signed int
ld_rbto_compare_nodes(void *ctx, const void *n1, const void *n2)
{
        const lockdebug_t *ld1 = n1;
        const lockdebug_t *ld2 = n2;
        const uintptr_t a = (uintptr_t)ld1->ld_lock;
        const uintptr_t b = (uintptr_t)ld2->ld_lock;

        if (a < b)
                return -1;
        if (a > b)
                return 1;
        return 0;
}

static signed int
ld_rbto_compare_key(void *ctx, const void *n, const void *key)
{
        const lockdebug_t *ld = n;
        const uintptr_t a = (uintptr_t)ld->ld_lock;
        const uintptr_t b = (uintptr_t)key;

        if (a < b)
                return -1;
        if (a > b)
                return 1;
        return 0;
}

static rb_tree_t ld_rb_tree;

static const rb_tree_ops_t ld_rb_tree_ops = {
        .rbto_compare_nodes = ld_rbto_compare_nodes,
        .rbto_compare_key = ld_rbto_compare_key,
        .rbto_node_offset = offsetof(lockdebug_t, ld_rb_node),
        .rbto_context = NULL
};

static inline lockdebug_t *
lockdebug_lookup1(volatile void *lock)
{
        lockdebug_t *ld;
        struct cpu_info *ci;

        ci = curcpu();
        __cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
        ld = (lockdebug_t *)rb_tree_find_node(&ld_rb_tree, __UNVOLATILE(lock));
        __cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
        if (ld == NULL) {
                return NULL;
        }
        __cpu_simple_lock(&ld->ld_spinlock);

        return ld;
}

static void
lockdebug_lock_cpus(void)
{
        CPU_INFO_ITERATOR cii;
        struct cpu_info *ci;

        for (CPU_INFO_FOREACH(cii, ci)) {
                __cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
        }
}

static void
lockdebug_unlock_cpus(void)
{
        CPU_INFO_ITERATOR cii;
        struct cpu_info *ci;

        for (CPU_INFO_FOREACH(cii, ci)) {
                __cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
        }
}

/*
 * lockdebug_lookup:
 *
 * Find a lockdebug structure by a pointer to a lock and return it
 * locked.  Panics if the lock has not been initialized.
 */
static inline lockdebug_t *
lockdebug_lookup(volatile void *lock, uintptr_t where)
{
        lockdebug_t *ld;

        ld = lockdebug_lookup1(lock);
        if (ld == NULL) {
                panic("lockdebug_lookup: uninitialized lock "
                    "(lock=%p, from=%08"PRIxPTR")", lock, where);
        }
        return ld;
}
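
/*
 * An illustrative sketch of the calling protocol used throughout this
 * file (not an additional interface): the caller raises the IPL before
 * the lookup and must release ld_spinlock itself before restoring it.
 *
 *	s = splhigh();
 *	if ((ld = lockdebug_lookup(lock, where)) == NULL) {
 *		splx(s);
 *		return;
 *	}
 *	... examine or update *ld ...
 *	__cpu_simple_unlock(&ld->ld_spinlock);
 *	splx(s);
 */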

/*
 * lockdebug_init:
 *
 * Initialize the lockdebug system.  Allocate an initial pool of
 * lockdebug structures before the VM system is up and running.
 */
static void
lockdebug_init(void)
{
        lockdebug_t *ld;
        int i;

        TAILQ_INIT(&curcpu()->ci_data.cpu_ld_locks);
        TAILQ_INIT(&curlwp->l_ld_locks);
        __cpu_simple_lock_init(&curcpu()->ci_data.cpu_ld_lock);
        __cpu_simple_lock_init(&ld_mod_lk);

        rb_tree_init(&ld_rb_tree, &ld_rb_tree_ops);

        ld = ld_prime;
        for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
                __cpu_simple_lock_init(&ld->ld_spinlock);
                TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
                TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
        }
        ld_freeptr = 1;
        ld_nfree = LD_BATCH - 1;
}
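
/*
 * Note that lockdebug_init() is static and is only called from
 * lockdebug_alloc() below, lazily, the first time a lock is registered
 * (while ld_freeptr is still 0).
 */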

/*
 * lockdebug_alloc:
 *
 * A lock is being initialized, so allocate an associated debug
 * structure.
 */
bool
lockdebug_alloc(volatile void *lock, lockops_t *lo, uintptr_t initaddr)
{
        struct cpu_info *ci;
        lockdebug_t *ld;
        int s;

        if (lo == NULL || panicstr != NULL || ld_panic)
                return false;
        if (ld_freeptr == 0)
                lockdebug_init();

        s = splhigh();
        __cpu_simple_lock(&ld_mod_lk);
        if ((ld = lockdebug_lookup1(lock)) != NULL) {
                __cpu_simple_unlock(&ld_mod_lk);
                lockdebug_abort1(ld, s, __func__, "already initialized", true);
                return false;
        }

        /*
         * Pinch a new debug structure.  We may recurse because we call
         * kmem_alloc(), which may need to initialize new locks somewhere
         * down the path.  If not recursing, we try to maintain at least
         * LD_SLOP structures free, which should hopefully be enough to
         * satisfy kmem_alloc().  If we can't provide a structure, not to
         * worry: we'll just mark the lock as not having an ID.
         */
        ci = curcpu();
        ci->ci_lkdebug_recurse++;
        if (TAILQ_EMPTY(&ld_free)) {
                if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
                        ci->ci_lkdebug_recurse--;
                        __cpu_simple_unlock(&ld_mod_lk);
                        splx(s);
                        return false;
                }
                s = lockdebug_more(s);
        } else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP) {
                s = lockdebug_more(s);
        }
        if ((ld = TAILQ_FIRST(&ld_free)) == NULL) {
                __cpu_simple_unlock(&ld_mod_lk);
                splx(s);
                return false;
        }
        TAILQ_REMOVE(&ld_free, ld, ld_chain);
        ld_nfree--;
        ci->ci_lkdebug_recurse--;

        if (ld->ld_lock != NULL) {
                panic("lockdebug_alloc: corrupt table ld %p", ld);
        }

        /* Initialise the structure. */
        ld->ld_lock = lock;
        ld->ld_lockops = lo;
        ld->ld_locked = 0;
        ld->ld_unlocked = 0;
        ld->ld_lwp = NULL;
        ld->ld_initaddr = initaddr;
        ld->ld_flags = (lo->lo_type == LOCKOPS_SLEEP ? LD_SLEEPER : 0);
        lockdebug_lock_cpus();
        (void)rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(ld));
        lockdebug_unlock_cpus();
        __cpu_simple_unlock(&ld_mod_lk);

        splx(s);
        return true;
}
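
/*
 * An illustrative sketch of how a lock primitive registers with this
 * code.  The "example_*" names are hypothetical; only lockops_t, its
 * lo_name/lo_type/lo_dump members, LOCKOPS_SLEEP and lockdebug_alloc()
 * come from this file and <sys/lockdebug.h>, and real primitives
 * typically reach these hooks through wrapper macros in that header.
 *
 *	static lockops_t example_lockops = {
 *		.lo_name = "example_lock",
 *		.lo_type = LOCKOPS_SLEEP,
 *		.lo_dump = example_dump,
 *	};
 *
 *	void
 *	example_lock_init(struct example_lock *l)
 *	{
 *		... initialize the lock itself ...
 *		(void)lockdebug_alloc(l, &example_lockops,
 *		    (uintptr_t)__builtin_return_address(0));
 *	}
 *
 * A false return simply leaves the lock undebugged; callers do not fail
 * because of it.
 */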

/*
 * lockdebug_free:
 *
 * A lock is being destroyed, so release debugging resources.
 */
void
lockdebug_free(volatile void *lock)
{
        lockdebug_t *ld;
        int s;

        if (panicstr != NULL || ld_panic)
                return;

        s = splhigh();
        __cpu_simple_lock(&ld_mod_lk);
        ld = lockdebug_lookup(lock, (uintptr_t) __builtin_return_address(0));
        if (ld == NULL) {
                __cpu_simple_unlock(&ld_mod_lk);
                panic("lockdebug_free: destroying uninitialized object %p",
                    lock);
                return;
        }
        if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
                __cpu_simple_unlock(&ld_mod_lk);
                lockdebug_abort1(ld, s, __func__, "is locked or in use", true);
                return;
        }
        lockdebug_lock_cpus();
        rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(ld));
        lockdebug_unlock_cpus();
        ld->ld_lock = NULL;
        TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
        ld_nfree++;
        __cpu_simple_unlock(&ld->ld_spinlock);
        __cpu_simple_unlock(&ld_mod_lk);
        splx(s);
}

/*
 * lockdebug_more:
 *
 * Allocate a batch of debug structures and add to the free list.
 * Must be called with ld_mod_lk held.
 */
static int
lockdebug_more(int s)
{
        lockdebug_t *ld;
        void *block;
        int i, base, m;

        /*
         * Can't call kmem_alloc() if in interrupt context.  XXX We could
         * deadlock, because we don't know which locks the caller holds.
         */
        if (cpu_intr_p() || (curlwp->l_pflag & LP_INTR) != 0) {
                return s;
        }

        while (ld_nfree < LD_SLOP) {
                __cpu_simple_unlock(&ld_mod_lk);
                splx(s);
                block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
                s = splhigh();
                __cpu_simple_lock(&ld_mod_lk);

                if (block == NULL)
                        return s;

                if (ld_nfree > LD_SLOP) {
                        /* Somebody beat us to it. */
                        __cpu_simple_unlock(&ld_mod_lk);
                        splx(s);
                        kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
                        s = splhigh();
                        __cpu_simple_lock(&ld_mod_lk);
                        continue;
                }

                base = ld_freeptr;
                ld_nfree += LD_BATCH;
                ld = block;
                base <<= LD_BATCH_SHIFT;
                m = min(LD_MAX_LOCKS, base + LD_BATCH);

                if (m == LD_MAX_LOCKS)
                        ld_nomore = true;

                for (i = base; i < m; i++, ld++) {
                        __cpu_simple_lock_init(&ld->ld_spinlock);
                        TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
                        TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
                }

                membar_producer();
        }

        return s;
}
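
/*
 * Sizing note: with LD_BATCH_SHIFT = 9, each batch holds 1 << 9 = 512
 * structures, so the LD_MAX_LOCKS ceiling of 1048576 corresponds to
 * 1048576 / 512 = 2048 batches; once that ceiling is reached ld_nomore
 * is set and no further batches are allocated.
 */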

/*
 * lockdebug_wantlock:
 *
 * Process the preamble to a lock acquire.
 */
void
lockdebug_wantlock(volatile void *lock, uintptr_t where, int shared)
{
        struct lwp *l = curlwp;
        lockdebug_t *ld;
        bool recurse;
        int s;

        (void)shared;
        recurse = false;

        if (panicstr != NULL || ld_panic)
                return;

        s = splhigh();
        if ((ld = lockdebug_lookup(lock, where)) == NULL) {
                splx(s);
                return;
        }
        if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
                if ((ld->ld_flags & LD_SLEEPER) != 0) {
                        if (ld->ld_lwp == l)
                                recurse = true;
                } else if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
                        recurse = true;
        }
        if (cpu_intr_p()) {
                if ((ld->ld_flags & LD_SLEEPER) != 0) {
                        lockdebug_abort1(ld, s, __func__,
                            "acquiring sleep lock from interrupt context",
                            true);
                        return;
                }
        }
        if (shared)
                ld->ld_shwant++;
        else
                ld->ld_exwant++;
        if (recurse) {
                lockdebug_abort1(ld, s, __func__, "locking against myself",
                    true);
                return;
        }
        __cpu_simple_unlock(&ld->ld_spinlock);
        splx(s);
}

/*
 * lockdebug_locked:
 *
 * Process a lock acquire operation.
 */
void
lockdebug_locked(volatile void *lock, void *cvlock, uintptr_t where,
    int shared)
{
        struct lwp *l = curlwp;
        lockdebug_t *ld;
        int s;

        if (panicstr != NULL || ld_panic)
                return;

        s = splhigh();
        if ((ld = lockdebug_lookup(lock, where)) == NULL) {
                splx(s);
                return;
        }
        if (cvlock) {
                KASSERT(ld->ld_lockops->lo_type == LOCKOPS_CV);
                if (lock == (void *)&lbolt) {
                        /* nothing */
                } else if (ld->ld_shares++ == 0) {
                        ld->ld_locked = (uintptr_t)cvlock;
                } else if (cvlock != (void *)ld->ld_locked) {
                        lockdebug_abort1(ld, s, __func__, "multiple locks used"
                            " with condition variable", true);
                        return;
                }
        } else if (shared) {
                l->l_shlocks++;
                ld->ld_locked = where;
                ld->ld_shares++;
                ld->ld_shwant--;
        } else {
                if ((ld->ld_flags & LD_LOCKED) != 0) {
                        lockdebug_abort1(ld, s, __func__, "already locked",
                            true);
                        return;
                }
                ld->ld_flags |= LD_LOCKED;
                ld->ld_locked = where;
                ld->ld_exwant--;
                if ((ld->ld_flags & LD_SLEEPER) != 0) {
                        TAILQ_INSERT_TAIL(&l->l_ld_locks, ld, ld_chain);
                } else {
                        TAILQ_INSERT_TAIL(&curcpu()->ci_data.cpu_ld_locks,
                            ld, ld_chain);
                }
        }
        ld->ld_cpu = (uint16_t)cpu_index(curcpu());
        ld->ld_lwp = l;
        __cpu_simple_unlock(&ld->ld_spinlock);
        splx(s);
}

/*
 * lockdebug_unlocked:
 *
 * Process a lock release operation.
 */
void
lockdebug_unlocked(volatile void *lock, uintptr_t where, int shared)
{
        struct lwp *l = curlwp;
        lockdebug_t *ld;
        int s;

        if (panicstr != NULL || ld_panic)
                return;

        s = splhigh();
        if ((ld = lockdebug_lookup(lock, where)) == NULL) {
                splx(s);
                return;
        }
        if (ld->ld_lockops->lo_type == LOCKOPS_CV) {
                if (lock == (void *)&lbolt) {
                        /* nothing */
                } else {
                        ld->ld_shares--;
                }
        } else if (shared) {
                if (l->l_shlocks == 0) {
                        lockdebug_abort1(ld, s, __func__,
                            "no shared locks held by LWP", true);
                        return;
                }
                if (ld->ld_shares == 0) {
                        lockdebug_abort1(ld, s, __func__,
                            "no shared holds on this lock", true);
                        return;
                }
                l->l_shlocks--;
                ld->ld_shares--;
                if (ld->ld_lwp == l) {
                        ld->ld_unlocked = where;
                        ld->ld_lwp = NULL;
                }
                if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
                        ld->ld_cpu = (uint16_t)-1;
        } else {
                if ((ld->ld_flags & LD_LOCKED) == 0) {
                        lockdebug_abort1(ld, s, __func__, "not locked", true);
                        return;
                }

                if ((ld->ld_flags & LD_SLEEPER) != 0) {
                        if (ld->ld_lwp != curlwp) {
                                lockdebug_abort1(ld, s, __func__,
                                    "not held by current LWP", true);
                                return;
                        }
                        TAILQ_REMOVE(&l->l_ld_locks, ld, ld_chain);
                } else {
                        if (ld->ld_cpu != (uint16_t)cpu_index(curcpu())) {
                                lockdebug_abort1(ld, s, __func__,
                                    "not held by current CPU", true);
                                return;
                        }
                        TAILQ_REMOVE(&curcpu()->ci_data.cpu_ld_locks, ld,
                            ld_chain);
                }
                ld->ld_flags &= ~LD_LOCKED;
                ld->ld_unlocked = where;
                ld->ld_lwp = NULL;
        }
        __cpu_simple_unlock(&ld->ld_spinlock);
        splx(s);
}
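
/*
 * An illustrative sketch of how a primitive drives the three hooks above
 * (hypothetical code, not a quote from any real primitive, which would
 * normally go through the wrapper macros in <sys/lockdebug.h>):
 *
 *	uintptr_t ra = (uintptr_t)__builtin_return_address(0);
 *
 *	lockdebug_wantlock(l, ra, 0);        about to take it exclusively
 *	... spin or sleep until the lock is owned ...
 *	lockdebug_locked(l, NULL, ra, 0);    now held; not a CV, so cvlock
 *	                                     is NULL
 *	... critical section ...
 *	lockdebug_unlocked(l, ra, 0);        released again
 */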

/*
 * lockdebug_wakeup:
 *
 * Process a wakeup on a condition variable.
 */
void
lockdebug_wakeup(volatile void *lock, uintptr_t where)
{
        lockdebug_t *ld;
        int s;

        if (panicstr != NULL || ld_panic || lock == (void *)&lbolt)
                return;

        s = splhigh();
        /* Find the CV... */
        if ((ld = lockdebug_lookup(lock, where)) == NULL) {
                splx(s);
                return;
        }
        /*
         * If it has any waiters, ensure that they are using the
         * same interlock.
         */
        if (ld->ld_shares != 0 && !mutex_owned((kmutex_t *)ld->ld_locked)) {
                lockdebug_abort1(ld, s, __func__, "interlocking mutex not "
                    "held during wakeup", true);
                return;
        }
        __cpu_simple_unlock(&ld->ld_spinlock);
        splx(s);
}
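
/*
 * For condition variables, ld_locked records the interlocking mutex that
 * was passed as "cvlock" to lockdebug_locked(), and ld_shares counts the
 * LWPs currently waiting, so the check above catches a wakeup issued
 * without holding the mutex the waiters went to sleep with.
 */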

/*
 * lockdebug_barrier:
 *
 * Panic if we hold any spin lock other than the one specified and,
 * optionally, if we hold any sleep locks.
 */
void
lockdebug_barrier(volatile void *spinlock, int slplocks)
{
        struct lwp *l = curlwp;
        lockdebug_t *ld;
        int s;

        if (panicstr != NULL || ld_panic)
                return;

        s = splhigh();
        if ((l->l_pflag & LP_INTR) == 0) {
                TAILQ_FOREACH(ld, &curcpu()->ci_data.cpu_ld_locks, ld_chain) {
                        if (ld->ld_lock == spinlock) {
                                continue;
                        }
                        __cpu_simple_lock(&ld->ld_spinlock);
                        lockdebug_abort1(ld, s, __func__,
                            "spin lock held", true);
                        return;
                }
        }
        if (slplocks) {
                splx(s);
                return;
        }
        if ((ld = TAILQ_FIRST(&l->l_ld_locks)) != NULL) {
                __cpu_simple_lock(&ld->ld_spinlock);
                lockdebug_abort1(ld, s, __func__, "sleep lock held", true);
                return;
        }
        splx(s);
        if (l->l_shlocks != 0) {
                panic("lockdebug_barrier: holding %d shared locks",
                    l->l_shlocks);
        }
}
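
/*
 * An illustrative sketch (not a quote from any caller): code that is
 * about to block can assert that it holds no spin locks, while still
 * permitting sleep locks, with
 *
 *	lockdebug_barrier(NULL, 1);
 *
 * whereas a caller that may legitimately hold one particular spin lock
 * names it as the first argument and passes 0 to forbid sleep locks too.
 */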

/*
 * lockdebug_mem_check:
 *
 * Check for in-use locks within a memory region that is
 * being freed.
 */
void
lockdebug_mem_check(const char *func, void *base, size_t sz)
{
        lockdebug_t *ld;
        struct cpu_info *ci;
        int s;

        if (panicstr != NULL || ld_panic)
                return;

        s = splhigh();
        ci = curcpu();
        __cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
        ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base);
        if (ld != NULL) {
                const uintptr_t lock = (uintptr_t)ld->ld_lock;

                if ((uintptr_t)base > lock)
                        panic("%s: corrupt tree ld=%p, base=%p, sz=%zu",
                            __func__, ld, base, sz);
                if (lock >= (uintptr_t)base + sz)
                        ld = NULL;
        }
        __cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
        if (ld != NULL) {
                __cpu_simple_lock(&ld->ld_spinlock);
                lockdebug_abort1(ld, s, func,
                    "allocation contains active lock", !cold);
                return;
        }
        splx(s);
}
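
/*
 * Typical use, sketched: an allocator that is about to return a region
 * to the system passes that region here so that a lock which was
 * initialized inside it but never destroyed is reported, e.g.
 *
 *	lockdebug_mem_check(__func__, buf, size);
 *
 * where "buf" and "size" describe the memory being freed.
 */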

/*
 * lockdebug_dump:
 *
 * Dump information about a lock on panic, or for DDB.
 */
static void
lockdebug_dump(lockdebug_t *ld, void (*pr)(const char *, ...)
    __printflike(1, 2))
{
        int sleeper = (ld->ld_flags & LD_SLEEPER);

        (*pr)(
            "lock address : %#018lx type     : %18s\n"
            "initialized  : %#018lx",
            (long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"),
            (long)ld->ld_initaddr);

        if (ld->ld_lockops->lo_type == LOCKOPS_CV) {
                (*pr)(" interlock: %#018lx\n", (long)ld->ld_locked);
        } else {
                (*pr)("\n"
                    "shared holds : %18u exclusive: %18u\n"
                    "shares wanted: %18u exclusive: %18u\n"
                    "current cpu  : %18u last held: %18u\n"
                    "current lwp  : %#018lx last held: %#018lx\n"
                    "last locked%c : %#018lx unlocked%c: %#018lx\n",
                    (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
                    (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
                    (unsigned)cpu_index(curcpu()), (unsigned)ld->ld_cpu,
                    (long)curlwp, (long)ld->ld_lwp,
                    ((ld->ld_flags & LD_LOCKED) ? '*' : ' '),
                    (long)ld->ld_locked,
                    ((ld->ld_flags & LD_LOCKED) ? ' ' : '*'),
                    (long)ld->ld_unlocked);
        }

        if (ld->ld_lockops->lo_dump != NULL)
                (*ld->ld_lockops->lo_dump)(ld->ld_lock);

        if (sleeper) {
                (*pr)("\n");
                turnstile_print(ld->ld_lock, pr);
        }
}

/*
 * lockdebug_abort1:
 *
 * An error has been trapped - dump lock info and panic.
 */
static void
lockdebug_abort1(lockdebug_t *ld, int s, const char *func,
    const char *msg, bool dopanic)
{

        /*
         * Don't make the situation worse if the system is already going
         * down in flames.  Once a panic is triggered, lockdebug state
         * becomes stale and cannot be trusted.
         */
        if (atomic_inc_uint_nv(&ld_panic) != 1) {
                __cpu_simple_unlock(&ld->ld_spinlock);
                splx(s);
                return;
        }

        printf_nolog("%s error: %s: %s\n\n", ld->ld_lockops->lo_name,
            func, msg);
        lockdebug_dump(ld, printf_nolog);
        __cpu_simple_unlock(&ld->ld_spinlock);
        splx(s);
        printf_nolog("\n");
        if (dopanic)
                panic("LOCKDEBUG: %s error: %s: %s", ld->ld_lockops->lo_name,
                    func, msg);
}
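
/*
 * Note that ld_panic doubles as a latch: once it is non-zero, every
 * public entry point above returns immediately, so only the first
 * trapped error produces a dump and a panic.
 */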

#endif  /* LOCKDEBUG */

/*
 * lockdebug_lock_print:
 *
 * Handle the DDB 'show lock' command.
 */
#ifdef DDB
void
lockdebug_lock_print(void *addr, void (*pr)(const char *, ...))
{
#ifdef LOCKDEBUG
        lockdebug_t *ld;

        TAILQ_FOREACH(ld, &ld_all, ld_achain) {
                if (ld->ld_lock == NULL)
                        continue;
                if (addr == NULL || ld->ld_lock == addr) {
                        lockdebug_dump(ld, pr);
                        if (addr != NULL)
                                return;
                }
        }
        if (addr != NULL) {
                (*pr)("Sorry, no record of a lock with address %p found.\n",
                    addr);
        }
#else
        (*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
#endif  /* LOCKDEBUG */
}
#endif  /* DDB */
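
/*
 * From DDB, lockdebug_lock_print() is reached via "show lock": given an
 * address it dumps the record for that lock only; with no address it
 * walks ld_all and dumps every lock that is currently initialized.
 */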

/*
 * lockdebug_abort:
 *
 * An error has been trapped - dump lock info and call panic().
 */
void
lockdebug_abort(volatile void *lock, lockops_t *ops, const char *func,
    const char *msg)
{
#ifdef LOCKDEBUG
        lockdebug_t *ld;
        int s;

        s = splhigh();
        if ((ld = lockdebug_lookup(lock,
            (uintptr_t) __builtin_return_address(0))) != NULL) {
                lockdebug_abort1(ld, s, func, msg, true);
                return;
        }
        splx(s);
#endif  /* LOCKDEBUG */

        /*
         * Complain only on the first occurrence.  Otherwise proceed to
         * panic where we will `rendezvous' with other CPUs if the machine
         * is going down in flames.
         */
        if (atomic_inc_uint_nv(&ld_panic) == 1) {
                printf_nolog("%s error: %s: %s\n\n"
                    "lock address : %#018lx\n"
                    "current cpu  : %18d\n"
                    "current lwp  : %#018lx\n",
                    ops->lo_name, func, msg, (long)lock,
                    (int)cpu_index(curcpu()), (long)curlwp);
                (*ops->lo_dump)(lock);
                printf_nolog("\n");
        }

        panic("lock error");
}