/*	$NetBSD: subr_lockdebug.c,v 1.75.2.1 2020/04/20 11:29:10 bouyer Exp $	*/

/*-
 * Copyright (c) 2006, 2007, 2008, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Basic lock debugging code shared among lock primitives.
 */
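/*
 * A sketch of how a lock primitive typically drives these hooks (in the
 * kernel proper this is normally done via the LOCKDEBUG_* wrapper macros
 * in <sys/lockdebug.h> rather than by calling the functions directly),
 * where 'ra' is usually (uintptr_t)__builtin_return_address(0):
 *
 *	lockdebug_alloc(__func__, __LINE__, lock, ops, ra);
 *	lockdebug_wantlock(__func__, __LINE__, lock, ra, 0);
 *	lockdebug_locked(__func__, __LINE__, lock, NULL, ra, 0);
 *	lockdebug_unlocked(__func__, __LINE__, lock, ra, 0);
 *	lockdebug_free(__func__, __LINE__, lock);
 */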

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.75.2.1 2020/04/20 11:29:10 bouyer Exp $");

#ifdef _KERNEL_OPT
#include "opt_ddb.h"
#endif

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lockdebug.h>
#include <sys/sleepq.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/lock.h>
#include <sys/rbtree.h>
#include <sys/ksyms.h>

#include <machine/lock.h>

unsigned int		ld_panic;

#ifdef LOCKDEBUG

#ifdef __ia64__
#define	LD_BATCH_SHIFT	16
#else
#define	LD_BATCH_SHIFT	9
#endif
#define	LD_BATCH	(1 << LD_BATCH_SHIFT)
#define	LD_BATCH_MASK	(LD_BATCH - 1)
#define	LD_MAX_LOCKS	1048576
#define	LD_SLOP		16

#define	LD_LOCKED	0x01
#define	LD_SLEEPER	0x02

#define	LD_WRITE_LOCK	0x80000000

typedef struct lockdebug {
	struct rb_node	ld_rb_node;
	__cpu_simple_lock_t ld_spinlock;
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
	volatile void	*ld_lock;
	lockops_t	*ld_lockops;
	struct lwp	*ld_lwp;
	uintptr_t	ld_locked;
	uintptr_t	ld_unlocked;
	uintptr_t	ld_initaddr;
	uint16_t	ld_shares;
	uint16_t	ld_cpu;
	uint8_t		ld_flags;
	uint8_t		ld_shwant;	/* advisory */
	uint8_t		ld_exwant;	/* advisory */
	uint8_t		ld_unused;
} volatile lockdebug_t;

typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;

__cpu_simple_lock_t	ld_mod_lk;
lockdebuglist_t		ld_free = TAILQ_HEAD_INITIALIZER(ld_free);
#ifdef _KERNEL
lockdebuglist_t		ld_all = TAILQ_HEAD_INITIALIZER(ld_all);
#else
extern lockdebuglist_t	ld_all;
#define	cpu_name(a)	"?"
#define	cpu_index(a)	-1
#define	curlwp		NULL
#endif /* _KERNEL */
int			ld_nfree;
int			ld_freeptr;
int			ld_recurse;
bool			ld_nomore;
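/* Static batch used to bootstrap LOCKDEBUG before the VM system is up. */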
lockdebug_t		ld_prime[LD_BATCH];

#ifdef _KERNEL
static void	lockdebug_abort1(const char *, size_t, lockdebug_t *, int,
    const char *, bool);
static int	lockdebug_more(int);
static void	lockdebug_init(void);
static void	lockdebug_dump(lwp_t *, lockdebug_t *,
    void (*)(const char *, ...)
    __printflike(1, 2));

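/*
 * rb-tree comparison callbacks, keyed on the address of the lock being
 * tracked by each lockdebug structure.
 */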
static signed int
ld_rbto_compare_nodes(void *ctx, const void *n1, const void *n2)
{
	const lockdebug_t *ld1 = n1;
	const lockdebug_t *ld2 = n2;
	const uintptr_t a = (uintptr_t)ld1->ld_lock;
	const uintptr_t b = (uintptr_t)ld2->ld_lock;

	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}

static signed int
ld_rbto_compare_key(void *ctx, const void *n, const void *key)
{
	const lockdebug_t *ld = n;
	const uintptr_t a = (uintptr_t)ld->ld_lock;
	const uintptr_t b = (uintptr_t)key;

	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}

static rb_tree_t ld_rb_tree;

static const rb_tree_ops_t ld_rb_tree_ops = {
	.rbto_compare_nodes = ld_rbto_compare_nodes,
	.rbto_compare_key = ld_rbto_compare_key,
	.rbto_node_offset = offsetof(lockdebug_t, ld_rb_node),
	.rbto_context = NULL
};

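/*
 * lockdebug_lookup1:
 *
 *	Find a lockdebug structure by the pointer to the lock it tracks.
 *	Return it with its spinlock held, or NULL if there is no match.
 */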
static inline lockdebug_t *
lockdebug_lookup1(const volatile void *lock)
{
	lockdebug_t *ld;
	struct cpu_info *ci;

	ci = curcpu();
	__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
	ld = rb_tree_find_node(&ld_rb_tree, (void *)(intptr_t)lock);
	__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
	if (ld == NULL) {
		return NULL;
	}
	__cpu_simple_lock(&ld->ld_spinlock);

	return ld;
}

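/*
 * lockdebug_lock_cpus / lockdebug_unlock_cpus:
 *
 *	Take or release every CPU's lockdebug lock, quiescing lookups
 *	while the lock table (ld_rb_tree) is being modified.
 */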
static void
lockdebug_lock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
	}
}

static void
lockdebug_unlock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
	}
}

/*
 * lockdebug_lookup:
 *
 *	Find a lockdebug structure by a pointer to a lock and return it locked.
 */
static inline lockdebug_t *
lockdebug_lookup(const char *func, size_t line, const volatile void *lock,
    uintptr_t where)
{
	lockdebug_t *ld;

	ld = lockdebug_lookup1(lock);
	if (__predict_false(ld == NULL)) {
		panic("%s,%zu: uninitialized lock (lock=%p, from=%08"
		    PRIxPTR ")", func, line, lock, where);
	}
	return ld;
}

/*
 * lockdebug_init:
 *
 *	Initialize the lockdebug system.  Allocate an initial pool of
 *	lockdebug structures before the VM system is up and running.
 */
static void
lockdebug_init(void)
{
	lockdebug_t *ld;
	int i;

	TAILQ_INIT(&curcpu()->ci_data.cpu_ld_locks);
	TAILQ_INIT(&curlwp->l_ld_locks);
	__cpu_simple_lock_init(&curcpu()->ci_data.cpu_ld_lock);
	__cpu_simple_lock_init(&ld_mod_lk);

	rb_tree_init(&ld_rb_tree, &ld_rb_tree_ops);

	ld = ld_prime;
	for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
		__cpu_simple_lock_init(&ld->ld_spinlock);
		TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
		TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
	}
	ld_freeptr = 1;
	ld_nfree = LD_BATCH - 1;
}

/*
 * lockdebug_alloc:
 *
 *	A lock is being initialized, so allocate an associated debug
 *	structure.
 */
bool
lockdebug_alloc(const char *func, size_t line, volatile void *lock,
    lockops_t *lo, uintptr_t initaddr)
{
	struct cpu_info *ci;
	lockdebug_t *ld;
	int s;

	if (__predict_false(lo == NULL || panicstr != NULL || ld_panic))
		return false;
	if (__predict_false(ld_freeptr == 0))
		lockdebug_init();

	s = splhigh();
	__cpu_simple_lock(&ld_mod_lk);
	if (__predict_false((ld = lockdebug_lookup1(lock)) != NULL)) {
		__cpu_simple_unlock(&ld_mod_lk);
		lockdebug_abort1(func, line, ld, s, "already initialized",
		    true);
		return false;
	}

	/*
	 * Pinch a new debug structure.  We may recurse because we call
	 * kmem_alloc(), which may need to initialize new locks somewhere
	 * down the path.  If not recursing, we try to maintain at least
	 * LD_SLOP structures free, which should hopefully be enough to
	 * satisfy kmem_alloc().  If we can't provide a structure, not to
	 * worry: we'll just mark the lock as not having an ID.
	 */
	ci = curcpu();
	ci->ci_lkdebug_recurse++;
	if (TAILQ_EMPTY(&ld_free)) {
		if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
			ci->ci_lkdebug_recurse--;
			__cpu_simple_unlock(&ld_mod_lk);
			splx(s);
			return false;
		}
		s = lockdebug_more(s);
	} else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP) {
		s = lockdebug_more(s);
	}
	if (__predict_false((ld = TAILQ_FIRST(&ld_free)) == NULL)) {
		__cpu_simple_unlock(&ld_mod_lk);
		splx(s);
		return false;
	}
	TAILQ_REMOVE(&ld_free, ld, ld_chain);
	ld_nfree--;
	ci->ci_lkdebug_recurse--;

	if (__predict_false(ld->ld_lock != NULL)) {
		panic("%s,%zu: corrupt table ld %p", func, line, ld);
	}

	/* Initialise the structure. */
	ld->ld_lock = lock;
	ld->ld_lockops = lo;
	ld->ld_locked = 0;
	ld->ld_unlocked = 0;
	ld->ld_lwp = NULL;
	ld->ld_initaddr = initaddr;
	ld->ld_flags = (lo->lo_type == LOCKOPS_SLEEP ? LD_SLEEPER : 0);
	lockdebug_lock_cpus();
	(void)rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(ld));
	lockdebug_unlock_cpus();
	__cpu_simple_unlock(&ld_mod_lk);

	splx(s);
	return true;
}

/*
 * lockdebug_free:
 *
 *	A lock is being destroyed, so release debugging resources.
 */
void
lockdebug_free(const char *func, size_t line, volatile void *lock)
{
	lockdebug_t *ld;
	int s;

	if (__predict_false(panicstr != NULL || ld_panic))
		return;

	s = splhigh();
	__cpu_simple_lock(&ld_mod_lk);
	ld = lockdebug_lookup(func, line, lock,
	    (uintptr_t) __builtin_return_address(0));
	if (__predict_false(ld == NULL)) {
		__cpu_simple_unlock(&ld_mod_lk);
		panic("%s,%zu: destroying uninitialized object %p",
		    func, line, lock);
		return;
	}
	if (__predict_false((ld->ld_flags & LD_LOCKED) != 0 ||
	    ld->ld_shares != 0)) {
		__cpu_simple_unlock(&ld_mod_lk);
		lockdebug_abort1(func, line, ld, s, "is locked or in use",
		    true);
		return;
	}
	lockdebug_lock_cpus();
	rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(ld));
	lockdebug_unlock_cpus();
	ld->ld_lock = NULL;
	TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
	ld_nfree++;
	__cpu_simple_unlock(&ld->ld_spinlock);
	__cpu_simple_unlock(&ld_mod_lk);
	splx(s);
}

/*
 * lockdebug_more:
 *
 *	Allocate a batch of debug structures and add to the free list.
 *	Must be called with ld_mod_lk held.
 */
static int
lockdebug_more(int s)
{
	lockdebug_t *ld;
	void *block;
	int i, base, m;

	/*
	 * Can't call kmem_alloc() if in interrupt context.  XXX We could
	 * deadlock, because we don't know which locks the caller holds.
	 */
	if (cpu_intr_p() || cpu_softintr_p()) {
		return s;
	}

	while (ld_nfree < LD_SLOP) {
		__cpu_simple_unlock(&ld_mod_lk);
		splx(s);
		block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
		s = splhigh();
		__cpu_simple_lock(&ld_mod_lk);

		if (ld_nfree > LD_SLOP) {
			/* Somebody beat us to it. */
			__cpu_simple_unlock(&ld_mod_lk);
			splx(s);
			kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
			s = splhigh();
			__cpu_simple_lock(&ld_mod_lk);
			continue;
		}

		base = ld_freeptr;
		ld_nfree += LD_BATCH;
		ld = block;
		base <<= LD_BATCH_SHIFT;
		m = uimin(LD_MAX_LOCKS, base + LD_BATCH);

		if (m == LD_MAX_LOCKS)
			ld_nomore = true;

		for (i = base; i < m; i++, ld++) {
			__cpu_simple_lock_init(&ld->ld_spinlock);
			TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
			TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
		}

		membar_producer();
	}

	return s;
}

/*
 * lockdebug_wantlock:
 *
 *	Process the preamble to a lock acquire.  The "shared"
 *	parameter controls which ld_{ex,sh}want counter is
 *	updated; a negative value of shared updates neither.
 */
void
lockdebug_wantlock(const char *func, size_t line,
    const volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	bool recurse;
	int s;

	(void)shared;
	recurse = false;

	if (__predict_false(panicstr != NULL || ld_panic))
		return;

	s = splhigh();
	if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
		splx(s);
		return;
	}
	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp == l)
				recurse = true;
		} else if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
			recurse = true;
	}
	if (cpu_intr_p()) {
		if (__predict_false((ld->ld_flags & LD_SLEEPER) != 0)) {
			lockdebug_abort1(func, line, ld, s,
			    "acquiring sleep lock from interrupt context",
			    true);
			return;
		}
	}
	if (shared > 0)
		ld->ld_shwant++;
	else if (shared == 0)
		ld->ld_exwant++;
	if (__predict_false(recurse)) {
		lockdebug_abort1(func, line, ld, s, "locking against myself",
		    true);
		return;
	}
	if (l->l_ld_wanted == NULL) {
		l->l_ld_wanted = ld;
	}
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}

/*
 * lockdebug_locked:
 *
 *	Process a lock acquire operation.
 */
void
lockdebug_locked(const char *func, size_t line,
    volatile void *lock, void *cvlock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	int s;

	if (__predict_false(panicstr != NULL || ld_panic))
		return;

	s = splhigh();
	if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
		splx(s);
		return;
	}
	if (shared) {
		l->l_shlocks++;
		ld->ld_locked = where;
		ld->ld_shares++;
		ld->ld_shwant--;
	} else {
		if (__predict_false((ld->ld_flags & LD_LOCKED) != 0)) {
			lockdebug_abort1(func, line, ld, s, "already locked",
			    true);
			return;
		}
		ld->ld_flags |= LD_LOCKED;
		ld->ld_locked = where;
		ld->ld_exwant--;
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			TAILQ_INSERT_TAIL(&l->l_ld_locks, ld, ld_chain);
		} else {
			TAILQ_INSERT_TAIL(&curcpu()->ci_data.cpu_ld_locks,
			    ld, ld_chain);
		}
	}
	ld->ld_cpu = (uint16_t)cpu_index(curcpu());
	ld->ld_lwp = l;
	__cpu_simple_unlock(&ld->ld_spinlock);
	if (l->l_ld_wanted == ld) {
		l->l_ld_wanted = NULL;
	}
	splx(s);
}

/*
 * lockdebug_unlocked:
 *
 *	Process a lock release operation.
 */
void
lockdebug_unlocked(const char *func, size_t line,
    volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	int s;

	if (__predict_false(panicstr != NULL || ld_panic))
		return;

	s = splhigh();
	if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
		splx(s);
		return;
	}
	if (shared) {
		if (__predict_false(l->l_shlocks == 0)) {
			lockdebug_abort1(func, line, ld, s,
			    "no shared locks held by LWP", true);
			return;
		}
		if (__predict_false(ld->ld_shares == 0)) {
			lockdebug_abort1(func, line, ld, s,
			    "no shared holds on this lock", true);
			return;
		}
		l->l_shlocks--;
		ld->ld_shares--;
		if (ld->ld_lwp == l) {
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
		}
		if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
			ld->ld_cpu = (uint16_t)-1;
	} else {
		if (__predict_false((ld->ld_flags & LD_LOCKED) == 0)) {
			lockdebug_abort1(func, line, ld, s, "not locked", true);
			return;
		}

		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (__predict_false(ld->ld_lwp != curlwp)) {
				lockdebug_abort1(func, line, ld, s,
				    "not held by current LWP", true);
				return;
			}
			TAILQ_REMOVE(&l->l_ld_locks, ld, ld_chain);
		} else {
			uint16_t idx = (uint16_t)cpu_index(curcpu());
			if (__predict_false(ld->ld_cpu != idx)) {
				lockdebug_abort1(func, line, ld, s,
				    "not held by current CPU", true);
				return;
			}
			TAILQ_REMOVE(&curcpu()->ci_data.cpu_ld_locks, ld,
			    ld_chain);
		}
		ld->ld_flags &= ~LD_LOCKED;
		ld->ld_unlocked = where;
		ld->ld_lwp = NULL;
	}
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}

/*
 * lockdebug_barrier:
 *
 *	Panic if we hold any spin lock other than the one specified and,
 *	unless 'slplocks' is set, if we hold any sleep lock other than the
 *	one specified or any shared (reader) holds.
 */
void
lockdebug_barrier(const char *func, size_t line, volatile void *onelock,
    int slplocks)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	int s;

	if (__predict_false(panicstr != NULL || ld_panic))
		return;

	s = splhigh();
	if ((l->l_pflag & LP_INTR) == 0) {
		TAILQ_FOREACH(ld, &curcpu()->ci_data.cpu_ld_locks, ld_chain) {
			if (ld->ld_lock == onelock) {
				continue;
			}
			__cpu_simple_lock(&ld->ld_spinlock);
			lockdebug_abort1(func, line, ld, s,
			    "spin lock held", true);
			return;
		}
	}
	if (slplocks) {
		splx(s);
		return;
	}
	ld = TAILQ_FIRST(&l->l_ld_locks);
	if (__predict_false(ld != NULL && ld->ld_lock != onelock)) {
		__cpu_simple_lock(&ld->ld_spinlock);
		lockdebug_abort1(func, line, ld, s, "sleep lock held", true);
		return;
	}
	splx(s);
	if (l->l_shlocks != 0) {
		TAILQ_FOREACH(ld, &ld_all, ld_achain) {
			if (ld->ld_lock == onelock) {
				continue;
			}
			if (ld->ld_lwp == l)
				lockdebug_dump(l, ld, printf);
		}
		panic("%s,%zu: holding %d shared locks", func, line,
		    l->l_shlocks);
	}
}

/*
 * lockdebug_mem_check:
 *
 *	Check for in-use locks within a memory region that is
 *	being freed.
 */
void
lockdebug_mem_check(const char *func, size_t line, void *base, size_t sz)
{
	lockdebug_t *ld;
	struct cpu_info *ci;
	int s;

	if (__predict_false(panicstr != NULL || ld_panic))
		return;

	s = splhigh();
	ci = curcpu();
	__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
	ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base);
	if (ld != NULL) {
		const uintptr_t lock = (uintptr_t)ld->ld_lock;

		if (__predict_false((uintptr_t)base > lock))
			panic("%s,%zu: corrupt tree ld=%p, base=%p, sz=%zu",
			    func, line, ld, base, sz);
		if (lock >= (uintptr_t)base + sz)
			ld = NULL;
	}
	__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
	if (__predict_false(ld != NULL)) {
		__cpu_simple_lock(&ld->ld_spinlock);
		lockdebug_abort1(func, line, ld, s,
		    "allocation contains active lock", !cold);
		return;
	}
	splx(s);
}
#endif /* _KERNEL */

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_interface.h>
#include <ddb/db_access.h>
#endif

/*
 * lockdebug_dump:
 *
 *	Dump information about a lock on panic, or for DDB.
 */
static void
lockdebug_dump(lwp_t *l, lockdebug_t *ld, void (*pr)(const char *, ...)
    __printflike(1, 2))
{
	int sleeper = (ld->ld_flags & LD_SLEEPER);
	lockops_t *lo = ld->ld_lockops;

	(*pr)(
	    "lock address : %#018lx type : %18s\n"
	    "initialized : %#018lx",
	    (long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"),
	    (long)ld->ld_initaddr);

#ifndef _KERNEL
	lockops_t los;
	lo = &los;
	db_read_bytes((db_addr_t)ld->ld_lockops, sizeof(los), (char *)lo);
#endif
	(*pr)("\n"
	    "shared holds : %18u exclusive: %18u\n"
	    "shares wanted: %18u exclusive: %18u\n"
	    "relevant cpu : %18u last held: %18u\n"
	    "relevant lwp : %#018lx last held: %#018lx\n"
	    "last locked%c : %#018lx unlocked%c: %#018lx\n",
	    (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
	    (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
	    (unsigned)cpu_index(l->l_cpu), (unsigned)ld->ld_cpu,
	    (long)l, (long)ld->ld_lwp,
	    ((ld->ld_flags & LD_LOCKED) ? '*' : ' '),
	    (long)ld->ld_locked,
	    ((ld->ld_flags & LD_LOCKED) ? ' ' : '*'),
	    (long)ld->ld_unlocked);

#ifdef _KERNEL
	if (lo->lo_dump != NULL)
		(*lo->lo_dump)(ld->ld_lock, pr);

	if (sleeper) {
		turnstile_print(ld->ld_lock, pr);
	}
#endif
}

#ifdef _KERNEL
/*
 * lockdebug_abort1:
 *
 *	An error has been trapped - dump lock info and panic.
 */
static void
lockdebug_abort1(const char *func, size_t line, lockdebug_t *ld, int s,
    const char *msg, bool dopanic)
{

	/*
	 * Don't make the situation worse if the system is already going
	 * down in flames.  Once a panic is triggered, lockdebug state
	 * becomes stale and cannot be trusted.
	 */
	if (atomic_inc_uint_nv(&ld_panic) != 1) {
		__cpu_simple_unlock(&ld->ld_spinlock);
		splx(s);
		return;
	}

	printf_nolog("%s error: %s,%zu: %s\n\n", ld->ld_lockops->lo_name,
	    func, line, msg);
	lockdebug_dump(curlwp, ld, printf_nolog);
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
	printf_nolog("\n");
	if (dopanic)
		panic("LOCKDEBUG: %s error: %s,%zu: %s",
		    ld->ld_lockops->lo_name, func, line, msg);
}

#endif /* _KERNEL */
#endif /* LOCKDEBUG */

/*
 * lockdebug_lock_print:
 *
 *	Handle the DDB 'show lock' command.
 */
#ifdef DDB
void
lockdebug_lock_print(void *addr,
    void (*pr)(const char *, ...) __printflike(1, 2))
{
#ifdef LOCKDEBUG
	lockdebug_t *ld, lds;

	TAILQ_FOREACH(ld, &ld_all, ld_achain) {
		db_read_bytes((db_addr_t)ld, sizeof(lds), __UNVOLATILE(&lds));
		ld = &lds;
		if (ld->ld_lock == NULL)
			continue;
		if (addr == NULL || ld->ld_lock == addr) {
			lockdebug_dump(curlwp, ld, pr);
			if (addr != NULL)
				return;
		}
	}
	if (addr != NULL) {
		(*pr)("Sorry, no record of a lock with address %p found.\n",
		    addr);
	}
#else
	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
#endif /* LOCKDEBUG */
}

#ifdef _KERNEL
#ifdef LOCKDEBUG
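/*
 * lockdebug_show_one:
 *
 *	Print a single tracked lock for DDB, resolving the address where
 *	it was initialized to a symbol name where possible.
 */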
static void
lockdebug_show_one(lwp_t *l, lockdebug_t *ld, int i,
    void (*pr)(const char *, ...) __printflike(1, 2))
{
	const char *sym;

#ifdef _KERNEL
	ksyms_getname(NULL, &sym, (vaddr_t)ld->ld_initaddr,
	    KSYMS_CLOSEST|KSYMS_PROC|KSYMS_ANY);
#endif
	(*pr)("* Lock %d (initialized at %s)\n", i++, sym);
	lockdebug_dump(l, ld, pr);
}

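/*
 * lockdebug_show_trace:
 *
 *	Print a DDB stack trace for the LWP at the given address.
 */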
static void
lockdebug_show_trace(const void *ptr,
    void (*pr)(const char *, ...) __printflike(1, 2))
{
	db_stack_trace_print((db_expr_t)(intptr_t)ptr, true, 32, "a", pr);
}

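/*
 * lockdebug_show_all_locks_lwp:
 *
 *	Walk all LWPs and print the locks each one holds or is waiting
 *	for, optionally with a stack trace.
 */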
static void
lockdebug_show_all_locks_lwp(void (*pr)(const char *, ...) __printflike(1, 2),
    bool show_trace)
{
	struct proc *p;

	LIST_FOREACH(p, &allproc, p_list) {
		struct lwp *l;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			lockdebug_t *ld;
			int i = 0;
			if (TAILQ_EMPTY(&l->l_ld_locks) &&
			    l->l_ld_wanted == NULL) {
				continue;
			}
			(*pr)("\n****** LWP %d.%d (%s) @ %p, l_stat=%d\n",
			    p->p_pid, l->l_lid,
			    l->l_name ? l->l_name : p->p_comm, l, l->l_stat);
			if (!TAILQ_EMPTY(&l->l_ld_locks)) {
				(*pr)("\n*** Locks held: \n");
				TAILQ_FOREACH(ld, &l->l_ld_locks, ld_chain) {
					(*pr)("\n");
					lockdebug_show_one(l, ld, i++, pr);
				}
			} else {
				(*pr)("\n*** Locks held: none\n");
			}

			if (l->l_ld_wanted != NULL) {
				(*pr)("\n*** Locks wanted: \n\n");
				lockdebug_show_one(l, l->l_ld_wanted, 0, pr);
			} else {
				(*pr)("\n*** Locks wanted: none\n");
			}
			if (show_trace) {
				(*pr)("\n*** Traceback: \n\n");
				lockdebug_show_trace(l, pr);
				(*pr)("\n");
			}
		}
	}
}

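/*
 * lockdebug_show_all_locks_cpu:
 *
 *	Walk all CPUs and print the spin locks recorded as held on each,
 *	optionally with a stack trace of the CPU's current LWP.
 */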
static void
lockdebug_show_all_locks_cpu(void (*pr)(const char *, ...) __printflike(1, 2),
    bool show_trace)
{
	lockdebug_t *ld;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		int i = 0;
		if (TAILQ_EMPTY(&ci->ci_data.cpu_ld_locks))
			continue;
		(*pr)("\n******* Locks held on %s:\n", cpu_name(ci));
		TAILQ_FOREACH(ld, &ci->ci_data.cpu_ld_locks, ld_chain) {
			(*pr)("\n");
#ifdef MULTIPROCESSOR
			lockdebug_show_one(ci->ci_curlwp, ld, i++, pr);
			if (show_trace)
				lockdebug_show_trace(ci->ci_curlwp, pr);
#else
			lockdebug_show_one(curlwp, ld, i++, pr);
			if (show_trace)
				lockdebug_show_trace(curlwp, pr);
#endif
		}
	}
}
#endif /* LOCKDEBUG */
#endif /* _KERNEL */

#ifdef _KERNEL
void
lockdebug_show_all_locks(void (*pr)(const char *, ...) __printflike(1, 2),
    const char *modif)
{
#ifdef LOCKDEBUG
	bool show_trace = false;
	if (modif[0] == 't')
		show_trace = true;

	(*pr)("[Locks tracked through LWPs]\n");
	lockdebug_show_all_locks_lwp(pr, show_trace);
	(*pr)("\n");

	(*pr)("[Locks tracked through CPUs]\n");
	lockdebug_show_all_locks_cpu(pr, show_trace);
	(*pr)("\n");
#else
	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
#endif /* LOCKDEBUG */
}

void
lockdebug_show_lockstats(void (*pr)(const char *, ...) __printflike(1, 2))
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;
	void *_ld;
	uint32_t n_null = 0;
	uint32_t n_spin_mutex = 0;
	uint32_t n_adaptive_mutex = 0;
	uint32_t n_rwlock = 0;
	uint32_t n_others = 0;

	RB_TREE_FOREACH(_ld, &ld_rb_tree) {
		ld = _ld;
		if (ld->ld_lock == NULL) {
			n_null++;
			continue;
		}
		if (ld->ld_lockops->lo_name[0] == 'M') {
			if (ld->ld_lockops->lo_type == LOCKOPS_SLEEP)
				n_adaptive_mutex++;
			else
				n_spin_mutex++;
			continue;
		}
		if (ld->ld_lockops->lo_name[0] == 'R') {
			n_rwlock++;
			continue;
		}
		n_others++;
	}
	(*pr)(
	    "spin mutex: %u\n"
	    "adaptive mutex: %u\n"
	    "rwlock: %u\n"
	    "null locks: %u\n"
	    "others: %u\n",
	    n_spin_mutex, n_adaptive_mutex, n_rwlock,
	    n_null, n_others);
#else
	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
#endif /* LOCKDEBUG */
}
#endif /* _KERNEL */
#endif /* DDB */

#ifdef _KERNEL
/*
 * lockdebug_dismiss:
 *
 *	The system is rebooting, potentially from an unsafe place, so
 *	avoid any future aborts.
 */
void
lockdebug_dismiss(void)
{

	atomic_inc_uint_nv(&ld_panic);
}

/*
 * lockdebug_abort:
 *
 *	An error has been trapped - dump lock info and call panic().
 */
void
lockdebug_abort(const char *func, size_t line, const volatile void *lock,
    lockops_t *ops, const char *msg)
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;
	int s;

	s = splhigh();
	if ((ld = lockdebug_lookup(func, line, lock,
	    (uintptr_t) __builtin_return_address(0))) != NULL) {
		lockdebug_abort1(func, line, ld, s, msg, true);
		return;
	}
	splx(s);
#endif /* LOCKDEBUG */

	/*
	 * Don't make the situation worse if the system is already going
	 * down in flames.  Once a panic is triggered, lockdebug state
	 * becomes stale and cannot be trusted.
	 */
	if (atomic_inc_uint_nv(&ld_panic) > 1)
		return;

	printf_nolog("%s error: %s,%zu: %s\n\n"
	    "lock address : %#018lx\n"
	    "current cpu : %18d\n"
	    "current lwp : %#018lx\n",
	    ops->lo_name, func, line, msg, (long)lock,
	    (int)cpu_index(curcpu()), (long)curlwp);
	(*ops->lo_dump)(lock, printf_nolog);
	printf_nolog("\n");

	panic("lock error: %s: %s,%zu: %s: lock %p cpu %d lwp %p",
	    ops->lo_name, func, line, msg, lock, cpu_index(curcpu()), curlwp);
}
#endif /* _KERNEL */