/*	$NetBSD: subr_lockdebug.c,v 1.61 2018/03/16 04:43:37 ozaki-r Exp $	*/

/*-
 * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Basic lock debugging code shared among lock primitives.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.61 2018/03/16 04:43:37 ozaki-r Exp $");

#ifdef _KERNEL_OPT
#include "opt_ddb.h"
#endif

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lockdebug.h>
#include <sys/sleepq.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/lock.h>
#include <sys/rbtree.h>

#include <machine/lock.h>

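/*
 * Set to non-zero once a lockdebug error has been reported.  Further
 * checking is then suppressed, since the recorded state can no longer
 * be trusted.
 */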
unsigned int	ld_panic;

#ifdef LOCKDEBUG

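/*
 * Tunable constants: lockdebug structures are allocated in batches of
 * LD_BATCH, up to a maximum of LD_MAX_LOCKS tracked locks.  LD_SLOP is
 * the low-water mark for the free list, below which lockdebug_more()
 * replenishes it.
 */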
#define	LD_BATCH_SHIFT	9
#define	LD_BATCH	(1 << LD_BATCH_SHIFT)
#define	LD_BATCH_MASK	(LD_BATCH - 1)
#define	LD_MAX_LOCKS	1048576
#define	LD_SLOP		16

#define	LD_LOCKED	0x01
#define	LD_SLEEPER	0x02

#define	LD_WRITE_LOCK	0x80000000

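/*
 * One lockdebug structure is kept for every tracked lock.  Each is keyed
 * by the address of the lock it describes and is protected by its own
 * spin lock.
 */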
typedef struct lockdebug {
	struct rb_node	ld_rb_node;
	__cpu_simple_lock_t ld_spinlock;
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
	volatile void	*ld_lock;
	lockops_t	*ld_lockops;
	struct lwp	*ld_lwp;
	uintptr_t	ld_locked;
	uintptr_t	ld_unlocked;
	uintptr_t	ld_initaddr;
	uint16_t	ld_shares;
	uint16_t	ld_cpu;
	uint8_t		ld_flags;
	uint8_t		ld_shwant;	/* advisory */
	uint8_t		ld_exwant;	/* advisory */
	uint8_t		ld_unused;
} volatile lockdebug_t;

typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;

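/*
 * Global state: the free list, the list of all structures, and the
 * bootstrap batch in ld_prime.  Modifications are serialized by
 * ld_mod_lk.
 */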
__cpu_simple_lock_t	ld_mod_lk;
lockdebuglist_t		ld_free = TAILQ_HEAD_INITIALIZER(ld_free);
lockdebuglist_t		ld_all = TAILQ_HEAD_INITIALIZER(ld_all);
int			ld_nfree;
int			ld_freeptr;
int			ld_recurse;
bool			ld_nomore;
lockdebug_t		ld_prime[LD_BATCH];

static void	lockdebug_abort1(const char *, size_t, lockdebug_t *, int,
    const char *, bool);
static int	lockdebug_more(int);
static void	lockdebug_init(void);
static void	lockdebug_dump(lockdebug_t *, void (*)(const char *, ...)
    __printflike(1, 2));

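/*
 * Red-black tree comparison routines: lockdebug structures are ordered
 * by the address of the lock they describe.
 */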
static signed int
ld_rbto_compare_nodes(void *ctx, const void *n1, const void *n2)
{
	const lockdebug_t *ld1 = n1;
	const lockdebug_t *ld2 = n2;
	const uintptr_t a = (uintptr_t)ld1->ld_lock;
	const uintptr_t b = (uintptr_t)ld2->ld_lock;

	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}

static signed int
ld_rbto_compare_key(void *ctx, const void *n, const void *key)
{
	const lockdebug_t *ld = n;
	const uintptr_t a = (uintptr_t)ld->ld_lock;
	const uintptr_t b = (uintptr_t)key;

	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}

static rb_tree_t ld_rb_tree;

static const rb_tree_ops_t ld_rb_tree_ops = {
	.rbto_compare_nodes = ld_rbto_compare_nodes,
	.rbto_compare_key = ld_rbto_compare_key,
	.rbto_node_offset = offsetof(lockdebug_t, ld_rb_node),
	.rbto_context = NULL
};

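/*
 * lockdebug_lookup1:
 *
 *	Look up a lockdebug structure by lock address, taking the current
 *	CPU's lockdebug lock to stabilize the tree.  Return the structure
 *	with its spin lock held, or NULL if the lock is not tracked.
 */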
static inline lockdebug_t *
lockdebug_lookup1(const volatile void *lock)
{
	lockdebug_t *ld;
	struct cpu_info *ci;

	ci = curcpu();
	__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
	ld = rb_tree_find_node(&ld_rb_tree, (void *)(intptr_t)lock);
	__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
	if (ld == NULL) {
		return NULL;
	}
	__cpu_simple_lock(&ld->ld_spinlock);

	return ld;
}

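/*
 * lockdebug_lock_cpus:
 *
 *	Take every CPU's lockdebug lock, so that the rb-tree can be
 *	modified safely while lookups are in progress on other CPUs.
 */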
static void
lockdebug_lock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
	}
}

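/*
 * lockdebug_unlock_cpus:
 *
 *	Release every CPU's lockdebug lock.
 */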
static void
lockdebug_unlock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
	}
}

/*
 * lockdebug_lookup:
 *
 *	Find a lockdebug structure by a pointer to a lock and return it locked.
 */
static inline lockdebug_t *
lockdebug_lookup(const char *func, size_t line, const volatile void *lock,
    uintptr_t where)
{
	lockdebug_t *ld;

	ld = lockdebug_lookup1(lock);
	if (__predict_false(ld == NULL)) {
		panic("%s,%zu: uninitialized lock (lock=%p, from=%08"
		    PRIxPTR ")", func, line, lock, where);
	}
	return ld;
}

/*
 * lockdebug_init:
 *
 *	Initialize the lockdebug system.  Allocate an initial pool of
 *	lockdebug structures before the VM system is up and running.
 */
static void
lockdebug_init(void)
{
	lockdebug_t *ld;
	int i;

	TAILQ_INIT(&curcpu()->ci_data.cpu_ld_locks);
	TAILQ_INIT(&curlwp->l_ld_locks);
	__cpu_simple_lock_init(&curcpu()->ci_data.cpu_ld_lock);
	__cpu_simple_lock_init(&ld_mod_lk);

	rb_tree_init(&ld_rb_tree, &ld_rb_tree_ops);

	ld = ld_prime;
	for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
		__cpu_simple_lock_init(&ld->ld_spinlock);
		TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
		TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
	}
	ld_freeptr = 1;
	ld_nfree = LD_BATCH - 1;
}

/*
 * lockdebug_alloc:
 *
 *	A lock is being initialized, so allocate an associated debug
 *	structure.
 */
bool
lockdebug_alloc(const char *func, size_t line, volatile void *lock,
    lockops_t *lo, uintptr_t initaddr)
{
	struct cpu_info *ci;
	lockdebug_t *ld;
	int s;

	if (__predict_false(lo == NULL || panicstr != NULL || ld_panic))
		return false;
	if (__predict_false(ld_freeptr == 0))
		lockdebug_init();

	s = splhigh();
	__cpu_simple_lock(&ld_mod_lk);
	if (__predict_false((ld = lockdebug_lookup1(lock)) != NULL)) {
		__cpu_simple_unlock(&ld_mod_lk);
		lockdebug_abort1(func, line, ld, s, "already initialized",
		    true);
		return false;
	}

	/*
	 * Pinch a new debug structure.  We may recurse because we call
	 * kmem_alloc(), which may need to initialize new locks somewhere
	 * down the path.  If not recursing, we try to maintain at least
	 * LD_SLOP structures free, which should hopefully be enough to
	 * satisfy kmem_alloc().  If we can't provide a structure, not to
	 * worry: we'll just mark the lock as not having an ID.
	 */
	ci = curcpu();
	ci->ci_lkdebug_recurse++;
	if (TAILQ_EMPTY(&ld_free)) {
		if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
			ci->ci_lkdebug_recurse--;
			__cpu_simple_unlock(&ld_mod_lk);
			splx(s);
			return false;
		}
		s = lockdebug_more(s);
	} else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP) {
		s = lockdebug_more(s);
	}
	if (__predict_false((ld = TAILQ_FIRST(&ld_free)) == NULL)) {
		__cpu_simple_unlock(&ld_mod_lk);
		splx(s);
		return false;
	}
	TAILQ_REMOVE(&ld_free, ld, ld_chain);
	ld_nfree--;
	ci->ci_lkdebug_recurse--;

	if (__predict_false(ld->ld_lock != NULL)) {
		panic("%s,%zu: corrupt table ld %p", func, line, ld);
	}

	/* Initialise the structure. */
	ld->ld_lock = lock;
	ld->ld_lockops = lo;
	ld->ld_locked = 0;
	ld->ld_unlocked = 0;
	ld->ld_lwp = NULL;
	ld->ld_initaddr = initaddr;
	ld->ld_flags = (lo->lo_type == LOCKOPS_SLEEP ? LD_SLEEPER : 0);
	lockdebug_lock_cpus();
	(void)rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(ld));
	lockdebug_unlock_cpus();
	__cpu_simple_unlock(&ld_mod_lk);

	splx(s);
	return true;
}

/*
 * lockdebug_free:
 *
 *	A lock is being destroyed, so release debugging resources.
 */
void
lockdebug_free(const char *func, size_t line, volatile void *lock)
{
	lockdebug_t *ld;
	int s;

	if (__predict_false(panicstr != NULL || ld_panic))
		return;

	s = splhigh();
	__cpu_simple_lock(&ld_mod_lk);
	ld = lockdebug_lookup(func, line, lock,
	    (uintptr_t) __builtin_return_address(0));
	if (__predict_false(ld == NULL)) {
		__cpu_simple_unlock(&ld_mod_lk);
		panic("%s,%zu: destroying uninitialized object %p",
		    func, line, lock);
		return;
	}
	if (__predict_false((ld->ld_flags & LD_LOCKED) != 0 ||
	    ld->ld_shares != 0)) {
		__cpu_simple_unlock(&ld_mod_lk);
		lockdebug_abort1(func, line, ld, s, "is locked or in use",
		    true);
		return;
	}
	lockdebug_lock_cpus();
	rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(ld));
	lockdebug_unlock_cpus();
	ld->ld_lock = NULL;
	TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
	ld_nfree++;
	__cpu_simple_unlock(&ld->ld_spinlock);
	__cpu_simple_unlock(&ld_mod_lk);
	splx(s);
}

/*
 * lockdebug_more:
 *
 *	Allocate a batch of debug structures and add to the free list.
 *	Must be called with ld_mod_lk held.
 */
static int
lockdebug_more(int s)
{
	lockdebug_t *ld;
	void *block;
	int i, base, m;

	/*
	 * Can't call kmem_alloc() if in interrupt context.  XXX We could
	 * deadlock, because we don't know which locks the caller holds.
	 */
	if (cpu_intr_p() || cpu_softintr_p()) {
		return s;
	}

	while (ld_nfree < LD_SLOP) {
		__cpu_simple_unlock(&ld_mod_lk);
		splx(s);
		block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
		s = splhigh();
		__cpu_simple_lock(&ld_mod_lk);

		if (ld_nfree > LD_SLOP) {
			/* Somebody beat us to it. */
			__cpu_simple_unlock(&ld_mod_lk);
			splx(s);
			kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
			s = splhigh();
			__cpu_simple_lock(&ld_mod_lk);
			continue;
		}

		base = ld_freeptr;
		ld_nfree += LD_BATCH;
		ld = block;
		base <<= LD_BATCH_SHIFT;
		m = min(LD_MAX_LOCKS, base + LD_BATCH);

		if (m == LD_MAX_LOCKS)
			ld_nomore = true;

		for (i = base; i < m; i++, ld++) {
			__cpu_simple_lock_init(&ld->ld_spinlock);
			TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
			TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
		}

		membar_producer();
	}

	return s;
}

/*
 * lockdebug_wantlock:
 *
 *	Process the preamble to a lock acquire.  The "shared"
 *	parameter controls which ld_{ex,sh}want counter is
 *	updated; a negative value of shared updates neither.
 */
void
lockdebug_wantlock(const char *func, size_t line,
    const volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	bool recurse;
	int s;

	(void)shared;
	recurse = false;

	if (__predict_false(panicstr != NULL || ld_panic))
		return;

	s = splhigh();
	if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
		splx(s);
		return;
	}
	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp == l)
				recurse = true;
		} else if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
			recurse = true;
	}
	if (cpu_intr_p()) {
		if (__predict_false((ld->ld_flags & LD_SLEEPER) != 0)) {
			lockdebug_abort1(func, line, ld, s,
			    "acquiring sleep lock from interrupt context",
			    true);
			return;
		}
	}
	if (shared > 0)
		ld->ld_shwant++;
	else if (shared == 0)
		ld->ld_exwant++;
	if (__predict_false(recurse)) {
		lockdebug_abort1(func, line, ld, s, "locking against myself",
		    true);
		return;
	}
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}

/*
 * lockdebug_locked:
 *
 *	Process a lock acquire operation.
 */
void
lockdebug_locked(const char *func, size_t line,
    volatile void *lock, void *cvlock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	int s;

	if (__predict_false(panicstr != NULL || ld_panic))
		return;

	s = splhigh();
	if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
		splx(s);
		return;
	}
	if (cvlock) {
		KASSERT(ld->ld_lockops->lo_type == LOCKOPS_CV);
		if (lock == (void *)&lbolt) {
			/* nothing */
		} else if (ld->ld_shares++ == 0) {
			ld->ld_locked = (uintptr_t)cvlock;
		} else if (__predict_false(cvlock != (void *)ld->ld_locked)) {
			lockdebug_abort1(func, line, ld, s,
			    "multiple locks used with condition variable",
			    true);
			return;
		}
	} else if (shared) {
		l->l_shlocks++;
		ld->ld_locked = where;
		ld->ld_shares++;
		ld->ld_shwant--;
	} else {
		if (__predict_false((ld->ld_flags & LD_LOCKED) != 0)) {
			lockdebug_abort1(func, line, ld, s, "already locked",
			    true);
			return;
		}
		ld->ld_flags |= LD_LOCKED;
		ld->ld_locked = where;
		ld->ld_exwant--;
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			TAILQ_INSERT_TAIL(&l->l_ld_locks, ld, ld_chain);
		} else {
			TAILQ_INSERT_TAIL(&curcpu()->ci_data.cpu_ld_locks,
			    ld, ld_chain);
		}
	}
	ld->ld_cpu = (uint16_t)cpu_index(curcpu());
	ld->ld_lwp = l;
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}

/*
 * lockdebug_unlocked:
 *
 *	Process a lock release operation.
 */
void
lockdebug_unlocked(const char *func, size_t line,
    volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	int s;

	if (__predict_false(panicstr != NULL || ld_panic))
		return;

	s = splhigh();
	if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
		splx(s);
		return;
	}
	if (ld->ld_lockops->lo_type == LOCKOPS_CV) {
		if (lock == (void *)&lbolt) {
			/* nothing */
		} else {
			ld->ld_shares--;
		}
	} else if (shared) {
		if (__predict_false(l->l_shlocks == 0)) {
			lockdebug_abort1(func, line, ld, s,
			    "no shared locks held by LWP", true);
			return;
		}
		if (__predict_false(ld->ld_shares == 0)) {
			lockdebug_abort1(func, line, ld, s,
			    "no shared holds on this lock", true);
			return;
		}
		l->l_shlocks--;
		ld->ld_shares--;
		if (ld->ld_lwp == l) {
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
		}
		if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
			ld->ld_cpu = (uint16_t)-1;
	} else {
		if (__predict_false((ld->ld_flags & LD_LOCKED) == 0)) {
			lockdebug_abort1(func, line, ld, s, "not locked", true);
			return;
		}

		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (__predict_false(ld->ld_lwp != curlwp)) {
				lockdebug_abort1(func, line, ld, s,
				    "not held by current LWP", true);
				return;
			}
			TAILQ_REMOVE(&l->l_ld_locks, ld, ld_chain);
		} else {
			uint16_t idx = (uint16_t)cpu_index(curcpu());
			if (__predict_false(ld->ld_cpu != idx)) {
				lockdebug_abort1(func, line, ld, s,
				    "not held by current CPU", true);
				return;
			}
			TAILQ_REMOVE(&curcpu()->ci_data.cpu_ld_locks, ld,
			    ld_chain);
		}
		ld->ld_flags &= ~LD_LOCKED;
		ld->ld_unlocked = where;
		ld->ld_lwp = NULL;
	}
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}

/*
 * lockdebug_wakeup:
 *
 *	Process a wakeup on a condition variable.
 */
void
lockdebug_wakeup(const char *func, size_t line, volatile void *lock,
    uintptr_t where)
{
	lockdebug_t *ld;
	int s;

	if (__predict_false(panicstr != NULL || ld_panic || lock == (void *)&lbolt))
		return;

	s = splhigh();
	/* Find the CV... */
	if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
		splx(s);
		return;
	}
	/*
	 * If it has any waiters, ensure that they are using the
	 * same interlock.
	 */
	if (__predict_false(ld->ld_shares != 0 &&
	    !mutex_owned((kmutex_t *)ld->ld_locked))) {
		lockdebug_abort1(func, line, ld, s, "interlocking mutex not "
		    "held during wakeup", true);
		return;
	}
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}

/*
 * lockdebug_barrier:
 *
 *	Panic if we hold any spin lock other than the one specified, and
 *	optionally, if we hold any sleep locks.
 */
void
lockdebug_barrier(const char *func, size_t line, volatile void *spinlock,
    int slplocks)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	int s;

	if (__predict_false(panicstr != NULL || ld_panic))
		return;

	s = splhigh();
	if ((l->l_pflag & LP_INTR) == 0) {
		TAILQ_FOREACH(ld, &curcpu()->ci_data.cpu_ld_locks, ld_chain) {
			if (ld->ld_lock == spinlock) {
				continue;
			}
			__cpu_simple_lock(&ld->ld_spinlock);
			lockdebug_abort1(func, line, ld, s,
			    "spin lock held", true);
			return;
		}
	}
	if (slplocks) {
		splx(s);
		return;
	}
	ld = TAILQ_FIRST(&l->l_ld_locks);
	if (__predict_false(ld != NULL)) {
		__cpu_simple_lock(&ld->ld_spinlock);
		lockdebug_abort1(func, line, ld, s, "sleep lock held", true);
		return;
	}
	splx(s);
	if (l->l_shlocks != 0) {
		TAILQ_FOREACH(ld, &ld_all, ld_achain) {
			if (ld->ld_lockops->lo_type == LOCKOPS_CV)
				continue;
			if (ld->ld_lwp == l)
				lockdebug_dump(ld, printf);
		}
		panic("%s,%zu: holding %d shared locks", func, line,
		    l->l_shlocks);
	}
}

/*
 * lockdebug_mem_check:
 *
 *	Check for in-use locks within a memory region that is
 *	being freed.
 */
void
lockdebug_mem_check(const char *func, size_t line, void *base, size_t sz)
{
	lockdebug_t *ld;
	struct cpu_info *ci;
	int s;

	if (__predict_false(panicstr != NULL || ld_panic))
		return;

	s = splhigh();
	ci = curcpu();
	__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
	ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base);
	if (ld != NULL) {
		const uintptr_t lock = (uintptr_t)ld->ld_lock;

		if (__predict_false((uintptr_t)base > lock))
			panic("%s,%zu: corrupt tree ld=%p, base=%p, sz=%zu",
			    func, line, ld, base, sz);
		if (lock >= (uintptr_t)base + sz)
			ld = NULL;
	}
	__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
	if (__predict_false(ld != NULL)) {
		__cpu_simple_lock(&ld->ld_spinlock);
		lockdebug_abort1(func, line, ld, s,
		    "allocation contains active lock", !cold);
		return;
	}
	splx(s);
}

/*
 * lockdebug_dump:
 *
 *	Dump information about a lock on panic, or for DDB.
 */
static void
lockdebug_dump(lockdebug_t *ld, void (*pr)(const char *, ...)
    __printflike(1, 2))
{
	int sleeper = (ld->ld_flags & LD_SLEEPER);

	(*pr)(
	    "lock address : %#018lx type     : %18s\n"
	    "initialized  : %#018lx",
	    (long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"),
	    (long)ld->ld_initaddr);

	if (ld->ld_lockops->lo_type == LOCKOPS_CV) {
		(*pr)(" interlock: %#018lx\n", (long)ld->ld_locked);
	} else {
		(*pr)("\n"
		    "shared holds : %18u exclusive: %18u\n"
		    "shares wanted: %18u exclusive: %18u\n"
		    "current cpu  : %18u last held: %18u\n"
		    "current lwp  : %#018lx last held: %#018lx\n"
		    "last locked%c : %#018lx unlocked%c: %#018lx\n",
		    (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
		    (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
		    (unsigned)cpu_index(curcpu()), (unsigned)ld->ld_cpu,
		    (long)curlwp, (long)ld->ld_lwp,
		    ((ld->ld_flags & LD_LOCKED) ? '*' : ' '),
		    (long)ld->ld_locked,
		    ((ld->ld_flags & LD_LOCKED) ? ' ' : '*'),
		    (long)ld->ld_unlocked);
	}

	if (ld->ld_lockops->lo_dump != NULL)
		(*ld->ld_lockops->lo_dump)(ld->ld_lock);

	if (sleeper) {
		(*pr)("\n");
		turnstile_print(ld->ld_lock, pr);
	}
}

/*
 * lockdebug_abort1:
 *
 *	An error has been trapped - dump lock info and panic.
 */
static void
lockdebug_abort1(const char *func, size_t line, lockdebug_t *ld, int s,
    const char *msg, bool dopanic)
{

	/*
	 * Don't make the situation worse if the system is already going
	 * down in flames.  Once a panic is triggered, lockdebug state
	 * becomes stale and cannot be trusted.
	 */
	if (atomic_inc_uint_nv(&ld_panic) != 1) {
		__cpu_simple_unlock(&ld->ld_spinlock);
		splx(s);
		return;
	}

	printf_nolog("%s error: %s,%zu: %s\n\n", ld->ld_lockops->lo_name,
	    func, line, msg);
	lockdebug_dump(ld, printf_nolog);
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
	printf_nolog("\n");
	if (dopanic)
		panic("LOCKDEBUG: %s error: %s,%zu: %s",
		    ld->ld_lockops->lo_name, func, line, msg);
}

#endif	/* LOCKDEBUG */

/*
 * lockdebug_lock_print:
 *
 *	Handle the DDB 'show lock' command.
 */
#ifdef DDB
void
lockdebug_lock_print(void *addr, void (*pr)(const char *, ...))
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;

	TAILQ_FOREACH(ld, &ld_all, ld_achain) {
		if (ld->ld_lock == NULL)
			continue;
		if (addr == NULL || ld->ld_lock == addr) {
			lockdebug_dump(ld, pr);
			if (addr != NULL)
				return;
		}
	}
	if (addr != NULL) {
		(*pr)("Sorry, no record of a lock with address %p found.\n",
		    addr);
	}
#else
	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
#endif	/* LOCKDEBUG */
}

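/*
 * lockdebug_show_lockstat:
 *
 *	Print a summary of all tracked locks, counted by type, for DDB.
 */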
void
lockdebug_show_lockstat(void (*pr)(const char *, ...))
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;
	void *_ld;
	uint32_t n_null = 0;
	uint32_t n_spin_mutex = 0;
	uint32_t n_adaptive_mutex = 0;
	uint32_t n_rwlock = 0;
	uint32_t n_cv = 0;
	uint32_t n_others = 0;

	RB_TREE_FOREACH(_ld, &ld_rb_tree) {
		ld = _ld;
		if (ld->ld_lock == NULL) {
			n_null++;
			continue;
		}
		if (ld->ld_lockops->lo_type == LOCKOPS_CV) {
			n_cv++;
			continue;
		}
		if (ld->ld_lockops->lo_name[0] == 'M') {
			if (ld->ld_lockops->lo_type == LOCKOPS_SLEEP)
				n_adaptive_mutex++;
			else
				n_spin_mutex++;
			continue;
		}
		if (ld->ld_lockops->lo_name[0] == 'R') {
			n_rwlock++;
			continue;
		}
		n_others++;
	}
	(*pr)(
	    "condvar: %u\n"
	    "spin mutex: %u\n"
	    "adaptive mutex: %u\n"
	    "rwlock: %u\n"
	    "null locks: %u\n"
	    "others: %u\n",
	    n_cv, n_spin_mutex, n_adaptive_mutex, n_rwlock,
	    n_null, n_others);
#else
	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
#endif	/* LOCKDEBUG */
}
#endif	/* DDB */

/*
 * lockdebug_abort:
 *
 *	An error has been trapped - dump lock info and call panic().
 */
void
lockdebug_abort(const char *func, size_t line, const volatile void *lock,
    lockops_t *ops, const char *msg)
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;
	int s;

	s = splhigh();
	if ((ld = lockdebug_lookup(func, line, lock,
	    (uintptr_t) __builtin_return_address(0))) != NULL) {
		lockdebug_abort1(func, line, ld, s, msg, true);
		return;
	}
	splx(s);
#endif	/* LOCKDEBUG */

	/*
	 * Complain only on the first occurrence.  Otherwise proceed to
	 * panic where we will `rendezvous' with other CPUs if the machine
	 * is going down in flames.
	 */
	if (atomic_inc_uint_nv(&ld_panic) == 1) {
		printf_nolog("%s error: %s,%zu: %s\n\n"
		    "lock address : %#018lx\n"
		    "current cpu  : %18d\n"
		    "current lwp  : %#018lx\n",
		    ops->lo_name, func, line, msg, (long)lock,
		    (int)cpu_index(curcpu()), (long)curlwp);
		(*ops->lo_dump)(lock);
		printf_nolog("\n");
	}

	panic("lock error: %s: %s,%zu: %s: lock %p cpu %d lwp %p",
	    ops->lo_name, func, line, msg, lock, cpu_index(curcpu()), curlwp);
}