1 /* $NetBSD: subr_lockdebug.c,v 1.72 2019/05/28 07:39:16 ryo Exp $ */
2
3 /*-
4 * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Basic lock debugging code shared among lock primitives.
34 */
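/*
 * How lock primitives are expected to hook into this module, as a minimal
 * illustrative sketch (the "foolock" names are hypothetical): each lock is
 * registered with lockdebug_alloc() when it is initialized and its record
 * is released with lockdebug_free() when it is destroyed.
 */
#if 0
static lockops_t foolock_lockops = {
	.lo_name = "Foo lock",		/* printable name, also used by DDB lock stats */
	.lo_type = LOCKOPS_SLEEP,	/* sleep/adaptive rather than spin */
	.lo_dump = NULL,		/* optional primitive-specific dump hook */
};

static bool
foolock_register(volatile void *lock)
{

	/* Record the address of the caller that initialized the lock. */
	return lockdebug_alloc(__func__, __LINE__, lock, &foolock_lockops,
	    (uintptr_t)__builtin_return_address(0));
}
#endif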
35
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.72 2019/05/28 07:39:16 ryo Exp $");
38
39 #ifdef _KERNEL_OPT
40 #include "opt_ddb.h"
41 #endif
42
43 #include <sys/param.h>
44 #include <sys/proc.h>
45 #include <sys/systm.h>
46 #include <sys/kernel.h>
47 #include <sys/kmem.h>
48 #include <sys/lockdebug.h>
49 #include <sys/sleepq.h>
50 #include <sys/cpu.h>
51 #include <sys/atomic.h>
52 #include <sys/lock.h>
53 #include <sys/rbtree.h>
54 #include <sys/ksyms.h>
55
56 #include <machine/lock.h>
57
58 unsigned int ld_panic;
59
60 #ifdef LOCKDEBUG
61
62 #ifdef __ia64__
63 #define LD_BATCH_SHIFT 16
64 #else
65 #define LD_BATCH_SHIFT 9
66 #endif
67 #define LD_BATCH (1 << LD_BATCH_SHIFT)
68 #define LD_BATCH_MASK (LD_BATCH - 1)
69 #define LD_MAX_LOCKS 1048576
70 #define LD_SLOP 16
71
72 #define LD_LOCKED 0x01
73 #define LD_SLEEPER 0x02
74
75 #define LD_WRITE_LOCK 0x80000000
76
77 typedef struct lockdebug {
78 struct rb_node ld_rb_node;
79 __cpu_simple_lock_t ld_spinlock;
80 _TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
81 _TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
82 volatile void *ld_lock;
83 lockops_t *ld_lockops;
84 struct lwp *ld_lwp;
85 uintptr_t ld_locked;
86 uintptr_t ld_unlocked;
87 uintptr_t ld_initaddr;
88 uint16_t ld_shares;
89 uint16_t ld_cpu;
90 uint8_t ld_flags;
91 uint8_t ld_shwant; /* advisory */
92 uint8_t ld_exwant; /* advisory */
93 uint8_t ld_unused;
94 } volatile lockdebug_t;
95
96 typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;
97
98 __cpu_simple_lock_t ld_mod_lk;
99 lockdebuglist_t ld_free = TAILQ_HEAD_INITIALIZER(ld_free);
100 lockdebuglist_t ld_all = TAILQ_HEAD_INITIALIZER(ld_all);
101 int ld_nfree;
102 int ld_freeptr;
103 int ld_recurse;
104 bool ld_nomore;
105 lockdebug_t ld_prime[LD_BATCH];
106
107 static void lockdebug_abort1(const char *, size_t, lockdebug_t *, int,
108 const char *, bool);
109 static int lockdebug_more(int);
110 static void lockdebug_init(void);
111 static void lockdebug_dump(lockdebug_t *, void (*)(const char *, ...)
112 __printflike(1, 2));
113
114 static signed int
115 ld_rbto_compare_nodes(void *ctx, const void *n1, const void *n2)
116 {
117 const lockdebug_t *ld1 = n1;
118 const lockdebug_t *ld2 = n2;
119 const uintptr_t a = (uintptr_t)ld1->ld_lock;
120 const uintptr_t b = (uintptr_t)ld2->ld_lock;
121
122 if (a < b)
123 return -1;
124 if (a > b)
125 return 1;
126 return 0;
127 }
128
129 static signed int
130 ld_rbto_compare_key(void *ctx, const void *n, const void *key)
131 {
132 const lockdebug_t *ld = n;
133 const uintptr_t a = (uintptr_t)ld->ld_lock;
134 const uintptr_t b = (uintptr_t)key;
135
136 if (a < b)
137 return -1;
138 if (a > b)
139 return 1;
140 return 0;
141 }
142
143 static rb_tree_t ld_rb_tree;
144
145 static const rb_tree_ops_t ld_rb_tree_ops = {
146 .rbto_compare_nodes = ld_rbto_compare_nodes,
147 .rbto_compare_key = ld_rbto_compare_key,
148 .rbto_node_offset = offsetof(lockdebug_t, ld_rb_node),
149 .rbto_context = NULL
150 };
151
152 static inline lockdebug_t *
153 lockdebug_lookup1(const volatile void *lock)
154 {
155 lockdebug_t *ld;
156 struct cpu_info *ci;
157
158 ci = curcpu();
159 __cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
160 ld = rb_tree_find_node(&ld_rb_tree, (void *)(intptr_t)lock);
161 __cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
162 if (ld == NULL) {
163 return NULL;
164 }
165 __cpu_simple_lock(&ld->ld_spinlock);
166
167 return ld;
168 }
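/*
 * The lookup protocol assumed throughout this file, as a minimal sketch:
 * a successful lookup returns the entry with ld_spinlock held (at high
 * SPL raised by the caller), and the caller must drop both when done.
 */
#if 0
	int s = splhigh();
	lockdebug_t *ld = lockdebug_lookup1(lock);
	if (ld != NULL) {
		/* ... inspect or update *ld ... */
		__cpu_simple_unlock(&ld->ld_spinlock);
	}
	splx(s);
#endif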
169
170 static void
171 lockdebug_lock_cpus(void)
172 {
173 CPU_INFO_ITERATOR cii;
174 struct cpu_info *ci;
175
176 for (CPU_INFO_FOREACH(cii, ci)) {
177 __cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
178 }
179 }
180
181 static void
182 lockdebug_unlock_cpus(void)
183 {
184 CPU_INFO_ITERATOR cii;
185 struct cpu_info *ci;
186
187 for (CPU_INFO_FOREACH(cii, ci)) {
188 __cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
189 }
190 }
191
192 /*
193 * lockdebug_lookup:
194 *
195 * Find a lockdebug structure by a pointer to a lock and return it locked.
196 */
197 static inline lockdebug_t *
198 lockdebug_lookup(const char *func, size_t line, const volatile void *lock,
199 uintptr_t where)
200 {
201 lockdebug_t *ld;
202
203 ld = lockdebug_lookup1(lock);
204 if (__predict_false(ld == NULL)) {
205 panic("%s,%zu: uninitialized lock (lock=%p, from=%08"
206 PRIxPTR ")", func, line, lock, where);
207 }
208 return ld;
209 }
210
211 /*
212 * lockdebug_init:
213 *
214 * Initialize the lockdebug system. Allocate an initial pool of
215 * lockdebug structures before the VM system is up and running.
216 */
217 static void
218 lockdebug_init(void)
219 {
220 lockdebug_t *ld;
221 int i;
222
223 TAILQ_INIT(&curcpu()->ci_data.cpu_ld_locks);
224 TAILQ_INIT(&curlwp->l_ld_locks);
225 __cpu_simple_lock_init(&curcpu()->ci_data.cpu_ld_lock);
226 __cpu_simple_lock_init(&ld_mod_lk);
227
228 rb_tree_init(&ld_rb_tree, &ld_rb_tree_ops);
229
230 ld = ld_prime;
231 for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
232 __cpu_simple_lock_init(&ld->ld_spinlock);
233 TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
234 TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
235 }
236 ld_freeptr = 1;
237 ld_nfree = LD_BATCH - 1;
238 }
239
240 /*
241 * lockdebug_alloc:
242 *
243 * A lock is being initialized, so allocate an associated debug
244 * structure.
245 */
246 bool
247 lockdebug_alloc(const char *func, size_t line, volatile void *lock,
248 lockops_t *lo, uintptr_t initaddr)
249 {
250 struct cpu_info *ci;
251 lockdebug_t *ld;
252 int s;
253
254 if (__predict_false(lo == NULL || panicstr != NULL || ld_panic))
255 return false;
256 if (__predict_false(ld_freeptr == 0))
257 lockdebug_init();
258
259 s = splhigh();
260 __cpu_simple_lock(&ld_mod_lk);
261 if (__predict_false((ld = lockdebug_lookup1(lock)) != NULL)) {
262 __cpu_simple_unlock(&ld_mod_lk);
263 lockdebug_abort1(func, line, ld, s, "already initialized",
264 true);
265 return false;
266 }
267
268 /*
269 * Pinch a new debug structure. We may recurse because we call
270 * kmem_alloc(), which may need to initialize new locks somewhere
271 * down the path. If not recursing, we try to maintain at least
272 * LD_SLOP structures free, which should hopefully be enough to
273 * satisfy kmem_alloc(). If we can't provide a structure, not to
274 * worry: we'll just mark the lock as not having an ID.
275 */
276 ci = curcpu();
277 ci->ci_lkdebug_recurse++;
278 if (TAILQ_EMPTY(&ld_free)) {
279 if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
280 ci->ci_lkdebug_recurse--;
281 __cpu_simple_unlock(&ld_mod_lk);
282 splx(s);
283 return false;
284 }
285 s = lockdebug_more(s);
286 } else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP) {
287 s = lockdebug_more(s);
288 }
289 if (__predict_false((ld = TAILQ_FIRST(&ld_free)) == NULL)) {
290 __cpu_simple_unlock(&ld_mod_lk);
291 splx(s);
292 return false;
293 }
294 TAILQ_REMOVE(&ld_free, ld, ld_chain);
295 ld_nfree--;
296 ci->ci_lkdebug_recurse--;
297
298 if (__predict_false(ld->ld_lock != NULL)) {
299 panic("%s,%zu: corrupt table ld %p", func, line, ld);
300 }
301
302 /* Initialise the structure. */
303 ld->ld_lock = lock;
304 ld->ld_lockops = lo;
305 ld->ld_locked = 0;
306 ld->ld_unlocked = 0;
307 ld->ld_lwp = NULL;
308 ld->ld_initaddr = initaddr;
309 ld->ld_flags = (lo->lo_type == LOCKOPS_SLEEP ? LD_SLEEPER : 0);
310 lockdebug_lock_cpus();
311 (void)rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(ld));
312 lockdebug_unlock_cpus();
313 __cpu_simple_unlock(&ld_mod_lk);
314
315 splx(s);
316 return true;
317 }
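/*
 * Sketch of the alloc/free pairing in a (hypothetical) lock primitive:
 * the boolean result records whether a debug structure was attached, so
 * the destroy path only calls lockdebug_free() for locks that have one.
 */
#if 0
	bool dodebug;

	dodebug = lockdebug_alloc(__func__, __LINE__, &foo->f_lock,
	    &foolock_lockops, (uintptr_t)__builtin_return_address(0));
	/* ... the lock is used ... */
	if (dodebug)
		lockdebug_free(__func__, __LINE__, &foo->f_lock);
#endif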
318
319 /*
320 * lockdebug_free:
321 *
322 * A lock is being destroyed, so release debugging resources.
323 */
324 void
325 lockdebug_free(const char *func, size_t line, volatile void *lock)
326 {
327 lockdebug_t *ld;
328 int s;
329
330 if (__predict_false(panicstr != NULL || ld_panic))
331 return;
332
333 s = splhigh();
334 __cpu_simple_lock(&ld_mod_lk);
335 ld = lockdebug_lookup(func, line, lock,
336 (uintptr_t) __builtin_return_address(0));
337 if (__predict_false(ld == NULL)) {
338 __cpu_simple_unlock(&ld_mod_lk);
339 panic("%s,%zu: destroying uninitialized object %p"
340 "(ld_lock=%p)", func, line, lock, ld->ld_lock);
341 return;
342 }
343 if (__predict_false((ld->ld_flags & LD_LOCKED) != 0 ||
344 ld->ld_shares != 0)) {
345 __cpu_simple_unlock(&ld_mod_lk);
346 lockdebug_abort1(func, line, ld, s, "is locked or in use",
347 true);
348 return;
349 }
350 lockdebug_lock_cpus();
351 rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(ld));
352 lockdebug_unlock_cpus();
353 ld->ld_lock = NULL;
354 TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
355 ld_nfree++;
356 __cpu_simple_unlock(&ld->ld_spinlock);
357 __cpu_simple_unlock(&ld_mod_lk);
358 splx(s);
359 }
360
361 /*
362 * lockdebug_more:
363 *
364 * Allocate a batch of debug structures and add to the free list.
365 * Must be called with ld_mod_lk held.
366 */
367 static int
368 lockdebug_more(int s)
369 {
370 lockdebug_t *ld;
371 void *block;
372 int i, base, m;
373
374 /*
375 * Can't call kmem_alloc() if in interrupt context. XXX We could
376 * deadlock, because we don't know which locks the caller holds.
377 */
378 if (cpu_intr_p() || cpu_softintr_p()) {
379 return s;
380 }
381
382 while (ld_nfree < LD_SLOP) {
383 __cpu_simple_unlock(&ld_mod_lk);
384 splx(s);
385 block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
386 s = splhigh();
387 __cpu_simple_lock(&ld_mod_lk);
388
389 if (ld_nfree > LD_SLOP) {
390 /* Somebody beat us to it. */
391 __cpu_simple_unlock(&ld_mod_lk);
392 splx(s);
393 kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
394 s = splhigh();
395 __cpu_simple_lock(&ld_mod_lk);
396 continue;
397 }
398
399 base = ld_freeptr;
400 ld_nfree += LD_BATCH;
401 ld = block;
402 base <<= LD_BATCH_SHIFT;
403 m = uimin(LD_MAX_LOCKS, base + LD_BATCH);
404
405 if (m == LD_MAX_LOCKS)
406 ld_nomore = true;
407
408 for (i = base; i < m; i++, ld++) {
409 __cpu_simple_lock_init(&ld->ld_spinlock);
410 TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
411 TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
412 }
413
414 membar_producer();
415 }
416
417 return s;
418 }
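/*
 * The loop above is an instance of the usual "unlock, allocate, relock,
 * re-check" idiom for growing a pool when the pool lock cannot be held
 * across a sleeping allocation; condensed, with hypothetical helpers:
 */
#if 0
	while (pool_is_low()) {
		unlock();
		p = allocate_batch();		/* may sleep */
		lock();
		if (!pool_is_low()) {		/* somebody else refilled it */
			unlock();
			free_batch(p);
			lock();
			continue;
		}
		add_batch(p);
	}
#endif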
419
420 /*
421 * lockdebug_wantlock:
422 *
423 * Process the preamble to a lock acquire. The "shared"
424 * parameter controls which ld_{ex,sh}want counter is
425 * updated; a negative value of shared updates neither.
426 */
427 void
428 lockdebug_wantlock(const char *func, size_t line,
429 const volatile void *lock, uintptr_t where, int shared)
430 {
431 struct lwp *l = curlwp;
432 lockdebug_t *ld;
433 bool recurse;
434 int s;
435
436 (void)shared;
437 recurse = false;
438
439 if (__predict_false(panicstr != NULL || ld_panic))
440 return;
441
442 s = splhigh();
443 if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
444 splx(s);
445 return;
446 }
447 if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
448 if ((ld->ld_flags & LD_SLEEPER) != 0) {
449 if (ld->ld_lwp == l)
450 recurse = true;
451 } else if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
452 recurse = true;
453 }
454 if (cpu_intr_p()) {
455 if (__predict_false((ld->ld_flags & LD_SLEEPER) != 0)) {
456 lockdebug_abort1(func, line, ld, s,
457 "acquiring sleep lock from interrupt context",
458 true);
459 return;
460 }
461 }
462 if (shared > 0)
463 ld->ld_shwant++;
464 else if (shared == 0)
465 ld->ld_exwant++;
466 if (__predict_false(recurse)) {
467 lockdebug_abort1(func, line, ld, s, "locking against myself",
468 true);
469 return;
470 }
471 __cpu_simple_unlock(&ld->ld_spinlock);
472 splx(s);
473 }
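/*
 * Sketch of how an exclusive acquire path in a (hypothetical) primitive
 * drives these hooks: announce the intent first, then record the
 * acquisition once the lock is actually held.
 */
#if 0
	uintptr_t where = (uintptr_t)__builtin_return_address(0);

	lockdebug_wantlock(__func__, __LINE__, lock, where, 0);	/* exclusive */
	/* ... spin or sleep until the lock has been acquired ... */
	lockdebug_locked(__func__, __LINE__, lock, NULL, where, 0);
#endif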
474
475 /*
476 * lockdebug_locked:
477 *
478 * Process a lock acquire operation.
479 */
480 void
481 lockdebug_locked(const char *func, size_t line,
482 volatile void *lock, void *cvlock, uintptr_t where, int shared)
483 {
484 struct lwp *l = curlwp;
485 lockdebug_t *ld;
486 int s;
487
488 if (__predict_false(panicstr != NULL || ld_panic))
489 return;
490
491 s = splhigh();
492 if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
493 splx(s);
494 return;
495 }
496 if (cvlock) {
497 KASSERT(ld->ld_lockops->lo_type == LOCKOPS_CV);
498 if (lock == (void *)&lbolt) {
499 /* nothing */
500 } else if (ld->ld_shares++ == 0) {
501 ld->ld_locked = (uintptr_t)cvlock;
502 } else if (__predict_false(cvlock != (void *)ld->ld_locked)) {
503 lockdebug_abort1(func, line, ld, s,
504 "multiple locks used with condition variable",
505 true);
506 return;
507 }
508 } else if (shared) {
509 l->l_shlocks++;
510 ld->ld_locked = where;
511 ld->ld_shares++;
512 ld->ld_shwant--;
513 } else {
514 if (__predict_false((ld->ld_flags & LD_LOCKED) != 0)) {
515 lockdebug_abort1(func, line, ld, s, "already locked",
516 true);
517 return;
518 }
519 ld->ld_flags |= LD_LOCKED;
520 ld->ld_locked = where;
521 ld->ld_exwant--;
522 if ((ld->ld_flags & LD_SLEEPER) != 0) {
523 TAILQ_INSERT_TAIL(&l->l_ld_locks, ld, ld_chain);
524 } else {
525 TAILQ_INSERT_TAIL(&curcpu()->ci_data.cpu_ld_locks,
526 ld, ld_chain);
527 }
528 }
529 ld->ld_cpu = (uint16_t)cpu_index(curcpu());
530 ld->ld_lwp = l;
531 __cpu_simple_unlock(&ld->ld_spinlock);
532 splx(s);
533 }
534
535 /*
536 * lockdebug_unlocked:
537 *
538 * Process a lock release operation.
539 */
540 void
541 lockdebug_unlocked(const char *func, size_t line,
542 volatile void *lock, uintptr_t where, int shared)
543 {
544 struct lwp *l = curlwp;
545 lockdebug_t *ld;
546 int s;
547
548 if (__predict_false(panicstr != NULL || ld_panic))
549 return;
550
551 s = splhigh();
552 if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
553 splx(s);
554 return;
555 }
556 if (ld->ld_lockops->lo_type == LOCKOPS_CV) {
557 if (lock == (void *)&lbolt) {
558 /* nothing */
559 } else {
560 ld->ld_shares--;
561 }
562 } else if (shared) {
563 if (__predict_false(l->l_shlocks == 0)) {
564 lockdebug_abort1(func, line, ld, s,
565 "no shared locks held by LWP", true);
566 return;
567 }
568 if (__predict_false(ld->ld_shares == 0)) {
569 lockdebug_abort1(func, line, ld, s,
570 "no shared holds on this lock", true);
571 return;
572 }
573 l->l_shlocks--;
574 ld->ld_shares--;
575 if (ld->ld_lwp == l) {
576 ld->ld_unlocked = where;
577 ld->ld_lwp = NULL;
578 }
579 if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
580 ld->ld_cpu = (uint16_t)-1;
581 } else {
582 if (__predict_false((ld->ld_flags & LD_LOCKED) == 0)) {
583 lockdebug_abort1(func, line, ld, s, "not locked", true);
584 return;
585 }
586
587 if ((ld->ld_flags & LD_SLEEPER) != 0) {
588 if (__predict_false(ld->ld_lwp != curlwp)) {
589 lockdebug_abort1(func, line, ld, s,
590 "not held by current LWP", true);
591 return;
592 }
593 TAILQ_REMOVE(&l->l_ld_locks, ld, ld_chain);
594 } else {
595 uint16_t idx = (uint16_t)cpu_index(curcpu());
596 if (__predict_false(ld->ld_cpu != idx)) {
597 lockdebug_abort1(func, line, ld, s,
598 "not held by current CPU", true);
599 return;
600 }
601 TAILQ_REMOVE(&curcpu()->ci_data.cpu_ld_locks, ld,
602 ld_chain);
603 }
604 ld->ld_flags &= ~LD_LOCKED;
605 ld->ld_unlocked = where;
606 ld->ld_lwp = NULL;
607 }
608 __cpu_simple_unlock(&ld->ld_spinlock);
609 splx(s);
610 }
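/*
 * Sketch of the shared (reader) accounting in a hypothetical
 * reader/writer primitive: "shared" is nonzero for the wantlock/locked
 * calls and for the matching unlocked call, which keeps ld_shares and
 * the LWP's l_shlocks count balanced.
 */
#if 0
	uintptr_t where = (uintptr_t)__builtin_return_address(0);

	lockdebug_wantlock(__func__, __LINE__, lock, where, 1);	/* shared */
	/* ... obtain a read hold ... */
	lockdebug_locked(__func__, __LINE__, lock, NULL, where, 1);
	/* ... and later, when the read hold is dropped ... */
	lockdebug_unlocked(__func__, __LINE__, lock,
	    (uintptr_t)__builtin_return_address(0), 1);
#endif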
611
612 /*
613 * lockdebug_wakeup:
614 *
615 * Process a wakeup on a condition variable.
616 */
617 void
618 lockdebug_wakeup(const char *func, size_t line, volatile void *lock,
619 uintptr_t where)
620 {
621 lockdebug_t *ld;
622 int s;
623
624 if (__predict_false(panicstr != NULL || ld_panic || lock == (void *)&lbolt))
625 return;
626
627 s = splhigh();
628 /* Find the CV... */
629 if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
630 splx(s);
631 return;
632 }
633 /*
634 * If it has any waiters, ensure that they are using the
635 * same interlock.
636 */
637 if (__predict_false(ld->ld_shares != 0 &&
638 !mutex_owned((kmutex_t *)ld->ld_locked))) {
639 lockdebug_abort1(func, line, ld, s, "interlocking mutex not "
640 "held during wakeup", true);
641 return;
642 }
643 __cpu_simple_unlock(&ld->ld_spinlock);
644 splx(s);
645 }
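/*
 * The check above enforces the usual condition variable protocol: the
 * mutex passed to cv_wait() as the interlock must also be held around
 * the wakeup.  A minimal sketch (mtx and cv are hypothetical):
 */
#if 0
	mutex_enter(&mtx);
	/* ... make the awaited condition true ... */
	cv_broadcast(&cv);		/* the wakeup checked above */
	mutex_exit(&mtx);
#endif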
646
647 /*
648 * lockdebug_barrier:
649 *
650 * Panic if we hold any spin lock other than the one specified (if any),
651 * and optionally, if we hold any sleep locks or shared locks.
652 */
653 void
654 lockdebug_barrier(const char *func, size_t line, volatile void *spinlock,
655 int slplocks)
656 {
657 struct lwp *l = curlwp;
658 lockdebug_t *ld;
659 int s;
660
661 if (__predict_false(panicstr != NULL || ld_panic))
662 return;
663
664 s = splhigh();
665 if ((l->l_pflag & LP_INTR) == 0) {
666 TAILQ_FOREACH(ld, &curcpu()->ci_data.cpu_ld_locks, ld_chain) {
667 if (ld->ld_lock == spinlock) {
668 continue;
669 }
670 __cpu_simple_lock(&ld->ld_spinlock);
671 lockdebug_abort1(func, line, ld, s,
672 "spin lock held", true);
673 return;
674 }
675 }
676 if (slplocks) {
677 splx(s);
678 return;
679 }
680 ld = TAILQ_FIRST(&l->l_ld_locks);
681 if (__predict_false(ld != NULL)) {
682 __cpu_simple_lock(&ld->ld_spinlock);
683 lockdebug_abort1(func, line, ld, s, "sleep lock held", true);
684 return;
685 }
686 splx(s);
687 if (l->l_shlocks != 0) {
688 TAILQ_FOREACH(ld, &ld_all, ld_achain) {
689 if (ld->ld_lockops->lo_type == LOCKOPS_CV)
690 continue;
691 if (ld->ld_lwp == l)
692 lockdebug_dump(ld, printf);
693 }
694 panic("%s,%zu: holding %d shared locks", func, line,
695 l->l_shlocks);
696 }
697 }
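/*
 * Sketch of typical barrier use: code that is about to sleep or switch
 * away asserts that no spin locks are held, optionally tolerating one
 * named spin lock (sc_lock is hypothetical).
 */
#if 0
	/* About to block: no spin locks allowed, sleep locks are fine. */
	lockdebug_barrier(__func__, __LINE__, NULL, 1);

	/* No locks of any kind may be held here, except sc_lock. */
	lockdebug_barrier(__func__, __LINE__, &sc->sc_lock, 0);
#endif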
698
699 /*
700 * lockdebug_mem_check:
701 *
702 * Check for in-use locks within a memory region that is
703 * being freed.
704 */
705 void
706 lockdebug_mem_check(const char *func, size_t line, void *base, size_t sz)
707 {
708 lockdebug_t *ld;
709 struct cpu_info *ci;
710 int s;
711
712 if (__predict_false(panicstr != NULL || ld_panic))
713 return;
714
715 s = splhigh();
716 ci = curcpu();
717 __cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
718 ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base);
719 if (ld != NULL) {
720 const uintptr_t lock = (uintptr_t)ld->ld_lock;
721
722 if (__predict_false((uintptr_t)base > lock))
723 panic("%s,%zu: corrupt tree ld=%p, base=%p, sz=%zu",
724 func, line, ld, base, sz);
725 if (lock >= (uintptr_t)base + sz)
726 ld = NULL;
727 }
728 __cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
729 if (__predict_false(ld != NULL)) {
730 __cpu_simple_lock(&ld->ld_spinlock);
731 lockdebug_abort1(func, line, ld, s,
732 "allocation contains active lock", !cold);
733 return;
734 }
735 splx(s);
736 }
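/*
 * Sketch of the intended caller: an allocator's free path checks that
 * the region being returned does not still contain an initialized lock,
 * which would indicate a missing destroy call for that lock.
 */
#if 0
	lockdebug_mem_check(__func__, __LINE__, base, size);
	/* ... then actually release the memory ... */
#endif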
737
738 /*
739 * lockdebug_dump:
740 *
741 * Dump information about a lock on panic, or for DDB.
742 */
743 static void
744 lockdebug_dump(lockdebug_t *ld, void (*pr)(const char *, ...)
745 __printflike(1, 2))
746 {
747 int sleeper = (ld->ld_flags & LD_SLEEPER);
748
749 (*pr)(
750 "lock address : %#018lx type : %18s\n"
751 "initialized : %#018lx",
752 (long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"),
753 (long)ld->ld_initaddr);
754
755 if (ld->ld_lockops->lo_type == LOCKOPS_CV) {
756 (*pr)(" interlock: %#018lx\n", (long)ld->ld_locked);
757 } else {
758 (*pr)("\n"
759 "shared holds : %18u exclusive: %18u\n"
760 "shares wanted: %18u exclusive: %18u\n"
761 "current cpu : %18u last held: %18u\n"
762 "current lwp : %#018lx last held: %#018lx\n"
763 "last locked%c : %#018lx unlocked%c: %#018lx\n",
764 (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
765 (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
766 (unsigned)cpu_index(curcpu()), (unsigned)ld->ld_cpu,
767 (long)curlwp, (long)ld->ld_lwp,
768 ((ld->ld_flags & LD_LOCKED) ? '*' : ' '),
769 (long)ld->ld_locked,
770 ((ld->ld_flags & LD_LOCKED) ? ' ' : '*'),
771 (long)ld->ld_unlocked);
772 }
773
774 if (ld->ld_lockops->lo_dump != NULL)
775 (*ld->ld_lockops->lo_dump)(ld->ld_lock, pr);
776
777 if (sleeper) {
778 (*pr)("\n");
779 turnstile_print(ld->ld_lock, pr);
780 }
781 }
782
783 /*
784 * lockdebug_abort1:
785 *
786 * An error has been trapped - dump lock info and panic.
787 */
788 static void
789 lockdebug_abort1(const char *func, size_t line, lockdebug_t *ld, int s,
790 const char *msg, bool dopanic)
791 {
792
793 /*
794 * Don't make the situation worse if the system is already going
795 * down in flames. Once a panic is triggered, lockdebug state
796 * becomes stale and cannot be trusted.
797 */
798 if (atomic_inc_uint_nv(&ld_panic) != 1) {
799 __cpu_simple_unlock(&ld->ld_spinlock);
800 splx(s);
801 return;
802 }
803
804 printf_nolog("%s error: %s,%zu: %s\n\n", ld->ld_lockops->lo_name,
805 func, line, msg);
806 lockdebug_dump(ld, printf_nolog);
807 __cpu_simple_unlock(&ld->ld_spinlock);
808 splx(s);
809 printf_nolog("\n");
810 if (dopanic)
811 panic("LOCKDEBUG: %s error: %s,%zu: %s",
812 ld->ld_lockops->lo_name, func, line, msg);
813 }
814
815 #endif /* LOCKDEBUG */
816
817 /*
818 * lockdebug_lock_print:
819 *
820 * Handle the DDB 'show lock' command.
821 */
822 #ifdef DDB
823 #include <machine/db_machdep.h>
824 #include <ddb/db_interface.h>
825
826 void
827 lockdebug_lock_print(void *addr,
828 void (*pr)(const char *, ...) __printflike(1, 2))
829 {
830 #ifdef LOCKDEBUG
831 lockdebug_t *ld;
832
833 TAILQ_FOREACH(ld, &ld_all, ld_achain) {
834 if (ld->ld_lock == NULL)
835 continue;
836 if (addr == NULL || ld->ld_lock == addr) {
837 lockdebug_dump(ld, pr);
838 if (addr != NULL)
839 return;
840 }
841 }
842 if (addr != NULL) {
843 (*pr)("Sorry, no record of a lock with address %p found.\n",
844 addr);
845 }
846 #else
847 (*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
848 #endif /* LOCKDEBUG */
849 }
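/*
 * Illustrative DDB usage: "show lock" with an address dumps a single
 * record, and the commands below walk every record (the address shown
 * is only an example):
 *
 *	db{0}> show lock 0xffffe4a5d49188c0
 *	db{0}> show all locks/t
 */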
850
851 #ifdef LOCKDEBUG
852 static void
853 lockdebug_show_one(lockdebug_t *ld, int i,
854 void (*pr)(const char *, ...) __printflike(1, 2))
855 {
856 const char *sym;
857
858 ksyms_getname(NULL, &sym, (vaddr_t)ld->ld_initaddr,
859 KSYMS_CLOSEST|KSYMS_PROC|KSYMS_ANY);
860 (*pr)("Lock %d (initialized at %s)\n", i++, sym);
861 lockdebug_dump(ld, pr);
862 }
863
864 static void
865 lockdebug_show_trace(const void *ptr,
866 void (*pr)(const char *, ...) __printflike(1, 2))
867 {
868 db_stack_trace_print((db_expr_t)(intptr_t)ptr, true, 32, "a", pr);
869 }
870
871 static void
872 lockdebug_show_all_locks_lwp(void (*pr)(const char *, ...) __printflike(1, 2),
873 bool show_trace)
874 {
875 struct proc *p;
876
877 LIST_FOREACH(p, &allproc, p_list) {
878 struct lwp *l;
879 LIST_FOREACH(l, &p->p_lwps, l_sibling) {
880 lockdebug_t *ld;
881 int i = 0;
882 if (TAILQ_EMPTY(&l->l_ld_locks))
883 continue;
884 (*pr)("Locks held by an LWP (%s):\n",
885 l->l_name ? l->l_name : p->p_comm);
886 TAILQ_FOREACH(ld, &l->l_ld_locks, ld_chain) {
887 lockdebug_show_one(ld, i++, pr);
888 }
889 if (show_trace)
890 lockdebug_show_trace(l, pr);
891 (*pr)("\n");
892 }
893 }
894 }
895
896 static void
897 lockdebug_show_all_locks_cpu(void (*pr)(const char *, ...) __printflike(1, 2),
898 bool show_trace)
899 {
900 lockdebug_t *ld;
901 CPU_INFO_ITERATOR cii;
902 struct cpu_info *ci;
903
904 for (CPU_INFO_FOREACH(cii, ci)) {
905 int i = 0;
906 if (TAILQ_EMPTY(&ci->ci_data.cpu_ld_locks))
907 continue;
908 (*pr)("Locks held on CPU %u:\n", ci->ci_index);
909 TAILQ_FOREACH(ld, &ci->ci_data.cpu_ld_locks, ld_chain) {
910 lockdebug_show_one(ld, i++, pr);
911 if (show_trace)
912 #ifdef MULTIPROCESSOR
913 lockdebug_show_trace(ci->ci_curlwp, pr);
914 #else
915 lockdebug_show_trace(curlwp, pr);
916 #endif
917 (*pr)("\n");
918 }
919 }
920 }
921 #endif /* LOCKDEBUG */
922
923 void
924 lockdebug_show_all_locks(void (*pr)(const char *, ...) __printflike(1, 2),
925 const char *modif)
926 {
927 #ifdef LOCKDEBUG
928 bool show_trace = false;
929 if (modif[0] == 't')
930 show_trace = true;
931
932 (*pr)("[Locks tracked through LWPs]\n");
933 lockdebug_show_all_locks_lwp(pr, show_trace);
934 (*pr)("\n");
935
936 (*pr)("[Locks tracked through CPUs]\n");
937 lockdebug_show_all_locks_cpu(pr, show_trace);
938 (*pr)("\n");
939 #else
940 (*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
941 #endif /* LOCKDEBUG */
942 }
943
944 void
945 lockdebug_show_lockstats(void (*pr)(const char *, ...) __printflike(1, 2))
946 {
947 #ifdef LOCKDEBUG
948 lockdebug_t *ld;
949 void *_ld;
950 uint32_t n_null = 0;
951 uint32_t n_spin_mutex = 0;
952 uint32_t n_adaptive_mutex = 0;
953 uint32_t n_rwlock = 0;
954 uint32_t n_cv = 0;
955 uint32_t n_others = 0;
956
957 RB_TREE_FOREACH(_ld, &ld_rb_tree) {
958 ld = _ld;
959 if (ld->ld_lock == NULL) {
960 n_null++;
961 continue;
962 }
963 if (ld->ld_lockops->lo_type == LOCKOPS_CV) {
964 n_cv++;
965 continue;
966 }
967 if (ld->ld_lockops->lo_name[0] == 'M') {
968 if (ld->ld_lockops->lo_type == LOCKOPS_SLEEP)
969 n_adaptive_mutex++;
970 else
971 n_spin_mutex++;
972 continue;
973 }
974 if (ld->ld_lockops->lo_name[0] == 'R') {
975 n_rwlock++;
976 continue;
977 }
978 n_others++;
979 }
980 (*pr)(
981 "condvar: %u\n"
982 "spin mutex: %u\n"
983 "adaptive mutex: %u\n"
984 "rwlock: %u\n"
985 "null locks: %u\n"
986 "others: %u\n",
987 n_cv, n_spin_mutex, n_adaptive_mutex, n_rwlock,
988 n_null, n_others);
989 #else
990 (*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
991 #endif /* LOCKDEBUG */
992 }
993 #endif /* DDB */
994
995 /*
996 * lockdebug_dismiss:
997 *
998 * The system is rebooting, potentially from an unsafe
999 * place, so suppress any future aborts.
1000 */
1001 void
1002 lockdebug_dismiss(void)
1003 {
1004
1005 atomic_inc_uint_nv(&ld_panic);
1006 }
1007
1008 /*
1009 * lockdebug_abort:
1010 *
1011 * An error has been trapped - dump lock info and call panic().
1012 */
1013 void
1014 lockdebug_abort(const char *func, size_t line, const volatile void *lock,
1015 lockops_t *ops, const char *msg)
1016 {
1017 #ifdef LOCKDEBUG
1018 lockdebug_t *ld;
1019 int s;
1020
1021 s = splhigh();
1022 if ((ld = lockdebug_lookup(func, line, lock,
1023 (uintptr_t) __builtin_return_address(0))) != NULL) {
1024 lockdebug_abort1(func, line, ld, s, msg, true);
1025 return;
1026 }
1027 splx(s);
1028 #endif /* LOCKDEBUG */
1029
1030 /*
1031 * Don't make the situation worse if the system is already going
1032 * down in flames. Once a panic is triggered, lockdebug state
1033 * becomes stale and cannot be trusted.
1034 */
1035 if (atomic_inc_uint_nv(&ld_panic) > 1)
1036 return;
1037
1038 printf_nolog("%s error: %s,%zu: %s\n\n"
1039 "lock address : %#018lx\n"
1040 "current cpu : %18d\n"
1041 "current lwp : %#018lx\n",
1042 ops->lo_name, func, line, msg, (long)lock,
1043 (int)cpu_index(curcpu()), (long)curlwp);
1044 (*ops->lo_dump)(lock, printf_nolog);
1045 printf_nolog("\n");
1046
1047 panic("lock error: %s: %s,%zu: %s: lock %p cpu %d lwp %p",
1048 ops->lo_name, func, line, msg, lock, cpu_index(curcpu()), curlwp);
1049 }
1050