/*	$NetBSD: subr_lockdebug.c,v 1.29.2.2 2008/06/04 02:05:39 yamt Exp $	*/

/*-
 * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Basic lock debugging code shared among lock primitives.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.29.2.2 2008/06/04 02:05:39 yamt Exp $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lockdebug.h>
#include <sys/sleepq.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/lock.h>

#include <lib/libkern/rb.h>

#include <machine/lock.h>

unsigned int		ld_panic;

#ifdef LOCKDEBUG

#define	LD_BATCH_SHIFT	9
#define	LD_BATCH	(1 << LD_BATCH_SHIFT)
#define	LD_BATCH_MASK	(LD_BATCH - 1)
#define	LD_MAX_LOCKS	1048576
#define	LD_SLOP		16
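
/*
 * New records are allocated LD_BATCH at a time, up to LD_MAX_LOCKS in
 * total.  LD_SLOP is the reserve of free records kept on hand so that the
 * kmem_alloc() call in lockdebug_more() can initialize locks of its own
 * without running the free list dry (see lockdebug_alloc()).
 */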

#define	LD_LOCKED	0x01
#define	LD_SLEEPER	0x02

#define	LD_WRITE_LOCK	0x80000000

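/*
 * One record per tracked lock.  ld_spinlock protects the record itself,
 * ld_chain links it onto the free list or, while exclusively held, onto
 * the owning LWP's or CPU's list of held locks, and ld_achain links every
 * record onto ld_all for the DDB 'show lock' command.
 */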
typedef struct lockdebug {
	struct rb_node	ld_rb_node;	/* must be the first member */
	__cpu_simple_lock_t ld_spinlock;
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
	volatile void	*ld_lock;
	lockops_t	*ld_lockops;
	struct lwp	*ld_lwp;
	uintptr_t	ld_locked;
	uintptr_t	ld_unlocked;
	uintptr_t	ld_initaddr;
	uint16_t	ld_shares;
	uint16_t	ld_cpu;
	uint8_t		ld_flags;
	uint8_t		ld_shwant;	/* advisory */
	uint8_t		ld_exwant;	/* advisory */
	uint8_t		ld_unused;
} volatile lockdebug_t;

typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;

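/*
 * ld_mod_lk serializes allocation and freeing of records.  ld_free holds
 * unused records, ld_all links every record for the DDB 'show lock'
 * command, and ld_prime is a statically allocated first batch that is
 * used before kmem_alloc() is available.
 */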
__cpu_simple_lock_t	ld_mod_lk;
lockdebuglist_t		ld_free = TAILQ_HEAD_INITIALIZER(ld_free);
lockdebuglist_t		ld_all = TAILQ_HEAD_INITIALIZER(ld_all);
int			ld_nfree;
int			ld_freeptr;
int			ld_recurse;
bool			ld_nomore;
lockdebug_t		ld_prime[LD_BATCH];

static void	lockdebug_abort1(lockdebug_t *, int, const char *,
		    const char *, bool);
static int	lockdebug_more(int);
static void	lockdebug_init(void);

static signed int
ld_rb_compare_nodes(const struct rb_node *n1, const struct rb_node *n2)
{
	const lockdebug_t *ld1 = (const void *)n1;
	const lockdebug_t *ld2 = (const void *)n2;
	const uintptr_t a = (uintptr_t)ld1->ld_lock;
	const uintptr_t b = (uintptr_t)ld2->ld_lock;

	if (a < b)
		return 1;
	if (a > b)
		return -1;
	return 0;
}

static signed int
ld_rb_compare_key(const struct rb_node *n, const void *key)
{
	const lockdebug_t *ld = (const void *)n;
	const uintptr_t a = (uintptr_t)ld->ld_lock;
	const uintptr_t b = (uintptr_t)key;

	if (a < b)
		return 1;
	if (a > b)
		return -1;
	return 0;
}

static struct rb_tree ld_rb_tree;

static const struct rb_tree_ops ld_rb_tree_ops = {
	.rb_compare_nodes = ld_rb_compare_nodes,
	.rb_compare_key = ld_rb_compare_key,
};

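/*
 * Active records live in a red-black tree keyed on lock address.  Lookups
 * take only the current CPU's cpu_ld_lock; insertion and removal take
 * every CPU's lock via lockdebug_lock_cpus(), so a reader never sees the
 * tree change underneath it.
 */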
static inline lockdebug_t *
lockdebug_lookup1(volatile void *lock)
{
	lockdebug_t *ld;
	struct cpu_info *ci;

	ci = curcpu();
	__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
	ld = (lockdebug_t *)rb_tree_find_node(&ld_rb_tree, __UNVOLATILE(lock));
	__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
	if (ld == NULL) {
		return NULL;
	}
	__cpu_simple_lock(&ld->ld_spinlock);

	return ld;
}

static void
lockdebug_lock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
	}
}

static void
lockdebug_unlock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
	}
}

/*
 * lockdebug_lookup:
 *
 *	Find a lockdebug structure by a pointer to a lock and return it locked.
 */
static inline lockdebug_t *
lockdebug_lookup(volatile void *lock)
{
	lockdebug_t *ld;

	ld = lockdebug_lookup1(lock);
	if (ld == NULL)
		panic("lockdebug_lookup: uninitialized lock (lock=%p)", lock);
	return ld;
}

/*
 * lockdebug_init:
 *
 *	Initialize the lockdebug system.  Allocate an initial pool of
 *	lockdebug structures before the VM system is up and running.
 */
static void
lockdebug_init(void)
{
	lockdebug_t *ld;
	int i;

	TAILQ_INIT(&curcpu()->ci_data.cpu_ld_locks);
	TAILQ_INIT(&curlwp->l_ld_locks);
	__cpu_simple_lock_init(&curcpu()->ci_data.cpu_ld_lock);
	__cpu_simple_lock_init(&ld_mod_lk);

	rb_tree_init(&ld_rb_tree, &ld_rb_tree_ops);

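	/*
	 * Slot 0 of ld_prime is never handed out, and ld_freeptr is set to
	 * a non-zero value here: lockdebug_alloc() treats ld_freeptr == 0
	 * as "not yet initialized".
	 */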
	ld = ld_prime;
	for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
		__cpu_simple_lock_init(&ld->ld_spinlock);
		TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
		TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
	}
	ld_freeptr = 1;
	ld_nfree = LD_BATCH - 1;
}

/*
 * lockdebug_alloc:
 *
 *	A lock is being initialized, so allocate an associated debug
 *	structure.
 */
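/*
 * The boolean result tells the caller whether a debugging record was
 * attached; the caller is expected to remember this and avoid the other
 * lockdebug_*() calls for the lock otherwise, since lockdebug_lookup()
 * panics on a lock it does not know about.
 */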
bool
lockdebug_alloc(volatile void *lock, lockops_t *lo, uintptr_t initaddr)
{
	struct cpu_info *ci;
	lockdebug_t *ld;
	int s;

	if (lo == NULL || panicstr != NULL || ld_panic)
		return false;
	if (ld_freeptr == 0)
		lockdebug_init();

	s = splhigh();
	__cpu_simple_lock(&ld_mod_lk);
	if ((ld = lockdebug_lookup1(lock)) != NULL) {
		__cpu_simple_unlock(&ld_mod_lk);
		lockdebug_abort1(ld, s, __func__, "already initialized", true);
		return false;
	}

	/*
	 * Pinch a new debug structure.  We may recurse because we call
	 * kmem_alloc(), which may need to initialize new locks somewhere
	 * down the path.  If not recursing, we try to maintain at least
	 * LD_SLOP structures free, which should hopefully be enough to
	 * satisfy kmem_alloc().  If we can't provide a structure, not to
	 * worry: we'll just mark the lock as not having an ID.
	 */
	ci = curcpu();
	ci->ci_lkdebug_recurse++;
	if (TAILQ_EMPTY(&ld_free)) {
		if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
			ci->ci_lkdebug_recurse--;
			__cpu_simple_unlock(&ld_mod_lk);
			splx(s);
			return false;
		}
		s = lockdebug_more(s);
	} else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP) {
		s = lockdebug_more(s);
	}
	if ((ld = TAILQ_FIRST(&ld_free)) == NULL) {
		__cpu_simple_unlock(&ld_mod_lk);
		splx(s);
		return false;
	}
	TAILQ_REMOVE(&ld_free, ld, ld_chain);
	ld_nfree--;
	ci->ci_lkdebug_recurse--;

	if (ld->ld_lock != NULL) {
		panic("lockdebug_alloc: corrupt table");
	}

	/* Initialise the structure. */
	ld->ld_lock = lock;
	ld->ld_lockops = lo;
	ld->ld_locked = 0;
	ld->ld_unlocked = 0;
	ld->ld_lwp = NULL;
	ld->ld_initaddr = initaddr;
	ld->ld_flags = (lo->lo_type == LOCKOPS_SLEEP ? LD_SLEEPER : 0);
	lockdebug_lock_cpus();
	rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(&ld->ld_rb_node));
	lockdebug_unlock_cpus();
	__cpu_simple_unlock(&ld_mod_lk);

	splx(s);
	return true;
}

/*
 * lockdebug_free:
 *
 *	A lock is being destroyed, so release debugging resources.
 */
void
lockdebug_free(volatile void *lock)
{
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	__cpu_simple_lock(&ld_mod_lk);
	ld = lockdebug_lookup(lock);
	if (ld == NULL) {
		__cpu_simple_unlock(&ld_mod_lk);
		splx(s);
		panic("lockdebug_free: destroying uninitialized object %p",
		    lock);
		/* NOTREACHED */
		return;
	}
	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
		__cpu_simple_unlock(&ld_mod_lk);
		lockdebug_abort1(ld, s, __func__, "is locked or in use", true);
		return;
	}
	lockdebug_lock_cpus();
	rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(&ld->ld_rb_node));
	lockdebug_unlock_cpus();
	ld->ld_lock = NULL;
	TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
	ld_nfree++;
	__cpu_simple_unlock(&ld->ld_spinlock);
	__cpu_simple_unlock(&ld_mod_lk);
	splx(s);
}

/*
 * lockdebug_more:
 *
 *	Allocate a batch of debug structures and add to the free list.
 *	Must be called with ld_mod_lk held.
 */
static int
lockdebug_more(int s)
{
	lockdebug_t *ld;
	void *block;
	int i, base, m;

	/*
	 * Can't call kmem_alloc() if in interrupt context.  XXX We could
	 * deadlock, because we don't know which locks the caller holds.
	 */
	if (cpu_intr_p() || (curlwp->l_pflag & LP_INTR) != 0) {
		return s;
	}

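	/*
	 * kmem_alloc() may sleep and may itself need new lockdebug records,
	 * so drop ld_mod_lk and return to the caller's IPL around each
	 * allocation, then recheck the free count once the lock is retaken.
	 */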
	while (ld_nfree < LD_SLOP) {
		__cpu_simple_unlock(&ld_mod_lk);
		splx(s);
		block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
		s = splhigh();
		__cpu_simple_lock(&ld_mod_lk);

		if (block == NULL)
			return s;

		if (ld_nfree > LD_SLOP) {
			/* Somebody beat us to it. */
			__cpu_simple_unlock(&ld_mod_lk);
			splx(s);
			kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
			s = splhigh();
			__cpu_simple_lock(&ld_mod_lk);
			continue;
		}

		base = ld_freeptr;
		ld_nfree += LD_BATCH;
		ld = block;
		base <<= LD_BATCH_SHIFT;
		m = min(LD_MAX_LOCKS, base + LD_BATCH);

		if (m == LD_MAX_LOCKS)
			ld_nomore = true;

		for (i = base; i < m; i++, ld++) {
			__cpu_simple_lock_init(&ld->ld_spinlock);
			TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
			TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
		}

		membar_producer();
	}

	return s;
}

/*
 * lockdebug_wantlock:
 *
 *	Process the preamble to a lock acquire.
 */
void
lockdebug_wantlock(volatile void *lock, uintptr_t where, bool shared,
    bool trylock)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	bool recurse;
	int s;

	(void)shared;
	recurse = false;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	if ((ld = lockdebug_lookup(lock)) == NULL) {
		splx(s);
		return;
	}
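	/*
	 * Recursion check: the lock is already held by this LWP (sleep
	 * locks) or by this CPU (spin locks).  A shared trylock by the
	 * owner is tolerated, presumably because it can simply fail
	 * rather than deadlock.
	 */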
	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp == l && !(shared && trylock))
				recurse = true;
		} else if (ld->ld_cpu == (uint16_t)cpu_number())
			recurse = true;
	}
	if (cpu_intr_p()) {
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			lockdebug_abort1(ld, s, __func__,
			    "acquiring sleep lock from interrupt context",
			    true);
			return;
		}
	}
	if (shared)
		ld->ld_shwant++;
	else
		ld->ld_exwant++;
	if (recurse) {
		lockdebug_abort1(ld, s, __func__, "locking against myself",
		    true);
		return;
	}
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}

/*
 * lockdebug_locked:
 *
 *	Process a lock acquire operation.
 */
void
lockdebug_locked(volatile void *lock, void *cvlock, uintptr_t where,
    int shared)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	if ((ld = lockdebug_lookup(lock)) == NULL) {
		splx(s);
		return;
	}
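	/*
	 * For condition variables, ld_shares counts the current waiters and
	 * ld_locked records the interlock they passed in.  lbolt is
	 * exempted, presumably because it is waited on with many different
	 * interlocks.
	 */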
	if (cvlock) {
		KASSERT(ld->ld_lockops->lo_type == LOCKOPS_CV);
		if (lock == (void *)&lbolt) {
			/* nothing */
		} else if (ld->ld_shares++ == 0) {
			ld->ld_locked = (uintptr_t)cvlock;
		} else if (cvlock != (void *)ld->ld_locked) {
			lockdebug_abort1(ld, s, __func__, "multiple locks used"
			    " with condition variable", true);
			return;
		}
	} else if (shared) {
		l->l_shlocks++;
		ld->ld_shares++;
		ld->ld_shwant--;
	} else {
		if ((ld->ld_flags & LD_LOCKED) != 0) {
			lockdebug_abort1(ld, s, __func__, "already locked",
			    true);
			return;
		}
		ld->ld_flags |= LD_LOCKED;
		ld->ld_locked = where;
		ld->ld_exwant--;
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			TAILQ_INSERT_TAIL(&l->l_ld_locks, ld, ld_chain);
		} else {
			TAILQ_INSERT_TAIL(&curcpu()->ci_data.cpu_ld_locks,
			    ld, ld_chain);
		}
	}
	ld->ld_cpu = (uint16_t)cpu_number();
	ld->ld_lwp = l;
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}

/*
 * lockdebug_unlocked:
 *
 *	Process a lock release operation.
 */
void
lockdebug_unlocked(volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	if ((ld = lockdebug_lookup(lock)) == NULL) {
		splx(s);
		return;
	}
	if (ld->ld_lockops->lo_type == LOCKOPS_CV) {
		if (lock == (void *)&lbolt) {
			/* nothing */
		} else {
			ld->ld_shares--;
		}
	} else if (shared) {
		if (l->l_shlocks == 0) {
			lockdebug_abort1(ld, s, __func__,
			    "no shared locks held by LWP", true);
			return;
		}
		if (ld->ld_shares == 0) {
			lockdebug_abort1(ld, s, __func__,
			    "no shared holds on this lock", true);
			return;
		}
		l->l_shlocks--;
		ld->ld_shares--;
		if (ld->ld_lwp == l)
			ld->ld_lwp = NULL;
		if (ld->ld_cpu == (uint16_t)cpu_number())
			ld->ld_cpu = (uint16_t)-1;
	} else {
		if ((ld->ld_flags & LD_LOCKED) == 0) {
			lockdebug_abort1(ld, s, __func__, "not locked", true);
			return;
		}

		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp != curlwp) {
				lockdebug_abort1(ld, s, __func__,
				    "not held by current LWP", true);
				return;
			}
			ld->ld_flags &= ~LD_LOCKED;
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
			TAILQ_REMOVE(&l->l_ld_locks, ld, ld_chain);
		} else {
			if (ld->ld_cpu != (uint16_t)cpu_number()) {
				lockdebug_abort1(ld, s, __func__,
				    "not held by current CPU", true);
				return;
			}
			ld->ld_flags &= ~LD_LOCKED;
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
			TAILQ_REMOVE(&curcpu()->ci_data.cpu_ld_locks, ld,
			    ld_chain);
		}
	}
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}

/*
 * lockdebug_wakeup:
 *
 *	Process a wakeup on a condition variable.
 */
void
lockdebug_wakeup(volatile void *lock, uintptr_t where)
{
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic || lock == (void *)&lbolt)
		return;

	s = splhigh();
	/* Find the CV... */
	if ((ld = lockdebug_lookup(lock)) == NULL) {
		splx(s);
		return;
	}
	/*
	 * If it has any waiters, ensure that they are using the
	 * same interlock.
	 */
	if (ld->ld_shares != 0 && !mutex_owned((kmutex_t *)ld->ld_locked)) {
		lockdebug_abort1(ld, s, __func__, "interlocking mutex not "
		    "held during wakeup", true);
		return;
	}
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}

/*
 * lockdebug_barrier:
 *
 *	Panic if we hold any spin lock other than the one specified and,
 *	optionally, if we hold any sleep locks.
 */
void
lockdebug_barrier(volatile void *spinlock, int slplocks)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	if ((l->l_pflag & LP_INTR) == 0) {
		TAILQ_FOREACH(ld, &curcpu()->ci_data.cpu_ld_locks, ld_chain) {
			if (ld->ld_lock == spinlock) {
				continue;
			}
			__cpu_simple_lock(&ld->ld_spinlock);
			lockdebug_abort1(ld, s, __func__,
			    "spin lock held", true);
			return;
		}
	}
	if (slplocks) {
		splx(s);
		return;
	}
	if ((ld = TAILQ_FIRST(&l->l_ld_locks)) != NULL) {
		__cpu_simple_lock(&ld->ld_spinlock);
		lockdebug_abort1(ld, s, __func__, "sleep lock held", true);
		return;
	}
	splx(s);
	if (l->l_shlocks != 0) {
		panic("lockdebug_barrier: holding %d shared locks",
		    l->l_shlocks);
	}
}

/*
 * lockdebug_mem_check:
 *
 *	Check for in-use locks within a memory region that is
 *	being freed.
 */
void
lockdebug_mem_check(const char *func, void *base, size_t sz)
{
	lockdebug_t *ld;
	struct cpu_info *ci;
	int s;

	if (panicstr != NULL || ld_panic)
		return;

	s = splhigh();
	ci = curcpu();
	__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
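	/*
	 * Find the record with the lowest lock address at or above 'base';
	 * if it falls inside [base, base + sz) the region still contains an
	 * initialized lock and we abort.
	 */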
	ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base);
	if (ld != NULL) {
		const uintptr_t lock = (uintptr_t)ld->ld_lock;

		if ((uintptr_t)base > lock)
			panic("%s: corrupt tree ld=%p, base=%p, sz=%zu",
			    __func__, ld, base, sz);
		if (lock >= (uintptr_t)base + sz)
			ld = NULL;
	}
	__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
	if (ld != NULL) {
		__cpu_simple_lock(&ld->ld_spinlock);
		lockdebug_abort1(ld, s, func,
		    "allocation contains active lock", !cold);
		return;
	}
	splx(s);
}

/*
 * lockdebug_dump:
 *
 *	Dump information about a lock on panic, or for DDB.
 */
static void
lockdebug_dump(lockdebug_t *ld, void (*pr)(const char *, ...))
{
	int sleeper = (ld->ld_flags & LD_SLEEPER);

	(*pr)(
	    "lock address : %#018lx type     : %18s\n"
	    "initialized  : %#018lx",
	    (long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"),
	    (long)ld->ld_initaddr);

	if (ld->ld_lockops->lo_type == LOCKOPS_CV) {
		(*pr)(" interlock: %#018lx\n", ld->ld_locked);
	} else {
		(*pr)("\n"
		    "shared holds : %18u exclusive: %18u\n"
		    "shares wanted: %18u exclusive: %18u\n"
		    "current cpu  : %18u last held: %18u\n"
		    "current lwp  : %#018lx last held: %#018lx\n"
		    "last locked  : %#018lx unlocked : %#018lx\n",
		    (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
		    (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
		    (unsigned)cpu_number(), (unsigned)ld->ld_cpu,
		    (long)curlwp, (long)ld->ld_lwp,
		    (long)ld->ld_locked, (long)ld->ld_unlocked);
	}

	if (ld->ld_lockops->lo_dump != NULL)
		(*ld->ld_lockops->lo_dump)(ld->ld_lock);

	if (sleeper) {
		(*pr)("\n");
		turnstile_print(ld->ld_lock, pr);
	}
}

/*
 * lockdebug_abort1:
 *
 *	An error has been trapped - dump lock info and panic.
 */
static void
lockdebug_abort1(lockdebug_t *ld, int s, const char *func,
    const char *msg, bool dopanic)
{

	/*
	 * Don't make the situation worse if the system is already going
	 * down in flames.  Once a panic is triggered, lockdebug state
	 * becomes stale and cannot be trusted.
	 */
	if (atomic_inc_uint_nv(&ld_panic) != 1) {
		__cpu_simple_unlock(&ld->ld_spinlock);
		splx(s);
		return;
	}

	printf_nolog("%s error: %s: %s\n\n", ld->ld_lockops->lo_name,
	    func, msg);
	lockdebug_dump(ld, printf_nolog);
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
	printf_nolog("\n");
	if (dopanic)
		panic("LOCKDEBUG");
}

#endif	/* LOCKDEBUG */

/*
 * lockdebug_lock_print:
 *
 *	Handle the DDB 'show lock' command.
 */
#ifdef DDB
void
lockdebug_lock_print(void *addr, void (*pr)(const char *, ...))
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;

	TAILQ_FOREACH(ld, &ld_all, ld_achain) {
		if (ld->ld_lock == addr) {
			lockdebug_dump(ld, pr);
			return;
		}
	}
	(*pr)("Sorry, no record of a lock with address %p found.\n", addr);
#else
	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
#endif	/* LOCKDEBUG */
}
#endif	/* DDB */

/*
 * lockdebug_abort:
 *
 *	An error has been trapped - dump lock info and call panic().
 */
void
lockdebug_abort(volatile void *lock, lockops_t *ops, const char *func,
    const char *msg)
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;
	int s;

	s = splhigh();
	if ((ld = lockdebug_lookup(lock)) != NULL) {
		lockdebug_abort1(ld, s, func, msg, true);
		return;
	}
	splx(s);
#endif	/* LOCKDEBUG */

	/*
	 * Complain only on the first occurrence.  Otherwise proceed to
	 * panic, where we will `rendezvous' with other CPUs if the machine
	 * is going down in flames.
	 */
	if (atomic_inc_uint_nv(&ld_panic) == 1) {
		printf_nolog("%s error: %s: %s\n\n"
		    "lock address : %#018lx\n"
		    "current cpu  : %18d\n"
		    "current lwp  : %#018lx\n",
		    ops->lo_name, func, msg, (long)lock, (int)cpu_number(),
		    (long)curlwp);
		(*ops->lo_dump)(lock);
		printf_nolog("\n");
	}

	panic("lock error");
}