/*	$NetBSD: subr_lockdebug.c,v 1.4.2.3 2007/09/03 14:41:03 yamt Exp $	*/

/*-
 * Copyright (c) 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Basic lock debugging code shared among lock primitives.
 */

#include "opt_multiprocessor.h"
#include "opt_ddb.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.4.2.3 2007/09/03 14:41:03 yamt Exp $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kmem.h>
#include <sys/lock.h>
#include <sys/lockdebug.h>
#include <sys/sleepq.h>

#include <machine/cpu.h>

#ifdef LOCKDEBUG

#define	LD_BATCH_SHIFT	9
#define	LD_BATCH	(1 << LD_BATCH_SHIFT)
#define	LD_BATCH_MASK	(LD_BATCH - 1)
#define	LD_MAX_LOCKS	1048576
#define	LD_SLOP		16

#define	LD_LOCKED	0x01
#define	LD_SLEEPER	0x02

#define	LD_NOID		(LD_MAX_LOCKS + 1)

typedef union lockdebuglk {
	struct {
		__cpu_simple_lock_t	lku_lock;
		int			lku_oldspl;
	} ul;
	uint8_t	lk_pad[64];
} volatile __aligned(64) lockdebuglk_t;

#define	lk_lock		ul.lku_lock
#define	lk_oldspl	ul.lku_oldspl

typedef struct lockdebug {
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
	volatile void	*ld_lock;
	lockops_t	*ld_lockops;
	struct lwp	*ld_lwp;
	uintptr_t	ld_locked;
	uintptr_t	ld_unlocked;
	u_int		ld_id;
	uint16_t	ld_shares;
	uint16_t	ld_cpu;
	uint8_t		ld_flags;
	uint8_t		ld_shwant;	/* advisory */
	uint8_t		ld_exwant;	/* advisory */
	uint8_t		ld_unused;
} volatile lockdebug_t;

typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;

lockdebuglk_t	ld_sleeper_lk;
lockdebuglk_t	ld_spinner_lk;
lockdebuglk_t	ld_free_lk;

lockdebuglist_t	ld_sleepers;
lockdebuglist_t	ld_spinners;
lockdebuglist_t	ld_free;
lockdebuglist_t	ld_all;
int		ld_nfree;
int		ld_freeptr;
int		ld_recurse;
bool		ld_nomore;
lockdebug_t	*ld_table[LD_MAX_LOCKS / LD_BATCH];

lockdebug_t	ld_prime[LD_BATCH];

static void	lockdebug_abort1(lockdebug_t *, lockdebuglk_t *lk,
		    const char *, const char *);
static void	lockdebug_more(void);
static void	lockdebug_init(void);

static inline void
lockdebug_lock(lockdebuglk_t *lk)
{
	int s;

	s = splhigh();
	__cpu_simple_lock(&lk->lk_lock);
	lk->lk_oldspl = s;
}

static inline void
lockdebug_unlock(lockdebuglk_t *lk)
{
	int s;

	s = lk->lk_oldspl;
	__cpu_simple_unlock(&lk->lk_lock);
	splx(s);
}

/*
 * lockdebug_lookup:
 *
 *	Find a lockdebug structure by ID and return it locked.
 */
static inline lockdebug_t *
lockdebug_lookup(u_int id, lockdebuglk_t **lk)
{
	lockdebug_t *base, *ld;

	if (id == LD_NOID)
		return NULL;

	if (id == 0 || id >= LD_MAX_LOCKS)
		panic("lockdebug_lookup: uninitialized lock (1, id=%d)", id);

	base = ld_table[id >> LD_BATCH_SHIFT];
	ld = base + (id & LD_BATCH_MASK);

	if (base == NULL || ld->ld_lock == NULL || ld->ld_id != id)
		panic("lockdebug_lookup: uninitialized lock (2, id=%d)", id);

	if ((ld->ld_flags & LD_SLEEPER) != 0)
		*lk = &ld_sleeper_lk;
	else
		*lk = &ld_spinner_lk;

	lockdebug_lock(*lk);
	return ld;
}
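
/*
 * For illustration: with LD_BATCH_SHIFT == 9, an ID decomposes into a
 * batch index and a slot within that batch.  ID 1037, say, lives at
 * ld_table[1037 >> 9] == ld_table[2], slot 1037 & LD_BATCH_MASK == 13,
 * so the lookup above amounts to:
 *
 *	lockdebug_t *ld = ld_table[id >> LD_BATCH_SHIFT] +
 *	    (id & LD_BATCH_MASK);
 */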

/*
 * lockdebug_init:
 *
 *	Initialize the lockdebug system.  Allocate an initial pool of
 *	lockdebug structures before the VM system is up and running.
 */
static void
lockdebug_init(void)
{
	lockdebug_t *ld;
	int i;

	__cpu_simple_lock_init(&ld_sleeper_lk.lk_lock);
	__cpu_simple_lock_init(&ld_spinner_lk.lk_lock);
	__cpu_simple_lock_init(&ld_free_lk.lk_lock);

	TAILQ_INIT(&ld_free);
	TAILQ_INIT(&ld_all);
	TAILQ_INIT(&ld_sleepers);
	TAILQ_INIT(&ld_spinners);

	ld = ld_prime;
	ld_table[0] = ld;
	for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
		ld->ld_id = i;
		TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
		TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
	}
	ld_freeptr = 1;
	ld_nfree = LD_BATCH - 1;
}

/*
 * lockdebug_alloc:
 *
 *	A lock is being initialized, so allocate an associated debug
 *	structure.
 */
u_int
lockdebug_alloc(volatile void *lock, lockops_t *lo)
{
	struct cpu_info *ci;
	lockdebug_t *ld;

	if (lo == NULL || panicstr != NULL)
		return LD_NOID;
	if (ld_freeptr == 0)
		lockdebug_init();

	ci = curcpu();

	/*
	 * Pinch a new debug structure.  We may recurse because we call
	 * kmem_alloc(), which may need to initialize new locks somewhere
	 * down the path.  If not recursing, we try to maintain at least
	 * LD_SLOP structures free, which should hopefully be enough to
	 * satisfy kmem_alloc().  If we can't provide a structure, not to
	 * worry: we'll just mark the lock as not having an ID.
	 */
	lockdebug_lock(&ld_free_lk);
	ci->ci_lkdebug_recurse++;

	if (TAILQ_EMPTY(&ld_free)) {
		if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
			ci->ci_lkdebug_recurse--;
			lockdebug_unlock(&ld_free_lk);
			return LD_NOID;
		}
		lockdebug_more();
	} else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP)
		lockdebug_more();

	if ((ld = TAILQ_FIRST(&ld_free)) == NULL) {
		lockdebug_unlock(&ld_free_lk);
		return LD_NOID;
	}

	TAILQ_REMOVE(&ld_free, ld, ld_chain);
	ld_nfree--;

	ci->ci_lkdebug_recurse--;
	lockdebug_unlock(&ld_free_lk);

	if (ld->ld_lock != NULL)
		panic("lockdebug_alloc: corrupt table");

	if (lo->lo_sleeplock)
		lockdebug_lock(&ld_sleeper_lk);
	else
		lockdebug_lock(&ld_spinner_lk);

	/* Initialise the structure. */
	ld->ld_lock = lock;
	ld->ld_lockops = lo;
	ld->ld_locked = 0;
	ld->ld_unlocked = 0;
	ld->ld_lwp = NULL;

	if (lo->lo_sleeplock) {
		ld->ld_flags = LD_SLEEPER;
		lockdebug_unlock(&ld_sleeper_lk);
	} else {
		ld->ld_flags = 0;
		lockdebug_unlock(&ld_spinner_lk);
	}

	return ld->ld_id;
}
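
/*
 * A minimal usage sketch (the "foolock" primitive and its lockops are
 * hypothetical, not part of this file): a primitive's init routine
 * allocates a debug record and stores the returned ID, tolerating
 * LD_NOID, which simply means no record could be provided.
 *
 *	void
 *	foolock_init(struct foolock *fl)
 *	{
 *
 *		fl->fl_id = lockdebug_alloc(fl, &foolock_lockops);
 *	}
 */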

/*
 * lockdebug_free:
 *
 *	A lock is being destroyed, so release debugging resources.
 */
void
lockdebug_free(volatile void *lock, u_int id)
{
	lockdebug_t *ld;
	lockdebuglk_t *lk;

	if (panicstr != NULL)
		return;

	if ((ld = lockdebug_lookup(id, &lk)) == NULL)
		return;

	if (ld->ld_lock != lock)
		lockdebug_abort1(ld, lk, __func__, "lock record follows");
	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0)
		lockdebug_abort1(ld, lk, __func__, "is locked");

	ld->ld_lock = NULL;

	lockdebug_unlock(lk);

	lockdebug_lock(&ld_free_lk);
	TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
	ld_nfree++;
	lockdebug_unlock(&ld_free_lk);
}
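
/*
 * The matching teardown for the sketch above: the destroy routine hands
 * the ID back so the record returns to the free list.
 *
 *	void
 *	foolock_destroy(struct foolock *fl)
 *	{
 *
 *		lockdebug_free(fl, fl->fl_id);
 *		fl->fl_id = LD_NOID;
 *	}
 */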

/*
 * lockdebug_more:
 *
 *	Allocate a batch of debug structures and add to the free list.
 *	Must be called with ld_free_lk held.
 */
static void
lockdebug_more(void)
{
	lockdebug_t *ld;
	void *block;
	int i, base, m;

	while (ld_nfree < LD_SLOP) {
		lockdebug_unlock(&ld_free_lk);
		block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
		lockdebug_lock(&ld_free_lk);

		if (block == NULL)
			return;

		if (ld_nfree > LD_SLOP) {
			/* Somebody beat us to it. */
			lockdebug_unlock(&ld_free_lk);
			kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
			lockdebug_lock(&ld_free_lk);
			continue;
		}

		base = ld_freeptr;
		ld_nfree += LD_BATCH;
		ld = block;
		base <<= LD_BATCH_SHIFT;
		m = min(LD_MAX_LOCKS, base + LD_BATCH);

		if (m == LD_MAX_LOCKS)
			ld_nomore = true;

		for (i = base; i < m; i++, ld++) {
			ld->ld_id = i;
			TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
			TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
		}

		/* Make the new records visible before publishing the block. */
		mb_write();
		ld_table[ld_freeptr++] = block;
	}
}
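
/*
 * To illustrate the growth arithmetic above: each pass publishes one
 * batch of LD_BATCH (512) records.  With ld_freeptr == 3, for example,
 * base becomes 3 << LD_BATCH_SHIFT == 1536, so the new block covers IDs
 * 1536..2047 and is published as ld_table[3].  Once an ID would reach
 * LD_MAX_LOCKS, ld_nomore is set and the table never grows again.
 */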

/*
 * lockdebug_wantlock:
 *
 *	Process the preamble to a lock acquire.
 */
void
lockdebug_wantlock(u_int id, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebuglk_t *lk;
	lockdebug_t *ld;
	bool recurse;

	(void)shared;
	recurse = false;

	if (panicstr != NULL)
		return;

	if ((ld = lockdebug_lookup(id, &lk)) == NULL)
		return;

	if ((ld->ld_flags & LD_LOCKED) != 0) {
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp == l)
				recurse = true;
		} else if (ld->ld_cpu == (uint16_t)cpu_number())
			recurse = true;
	}

	if (shared)
		ld->ld_shwant++;
	else
		ld->ld_exwant++;

	if (recurse)
		lockdebug_abort1(ld, lk, __func__, "locking against myself");

	lockdebug_unlock(lk);
}

/*
 * lockdebug_locked:
 *
 *	Process a lock acquire operation.
 */
void
lockdebug_locked(u_int id, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebuglk_t *lk;
	lockdebug_t *ld;

	if (panicstr != NULL)
		return;

	if ((ld = lockdebug_lookup(id, &lk)) == NULL)
		return;

	if (shared) {
		l->l_shlocks++;
		ld->ld_shares++;
		ld->ld_shwant--;
	} else {
		if ((ld->ld_flags & LD_LOCKED) != 0)
			lockdebug_abort1(ld, lk, __func__,
			    "already locked");

		ld->ld_flags |= LD_LOCKED;
		ld->ld_locked = where;
		ld->ld_cpu = (uint16_t)cpu_number();
		ld->ld_lwp = l;
		ld->ld_exwant--;

		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			l->l_exlocks++;
			TAILQ_INSERT_TAIL(&ld_sleepers, ld, ld_chain);
		} else {
			curcpu()->ci_spin_locks2++;
			TAILQ_INSERT_TAIL(&ld_spinners, ld, ld_chain);
		}
	}

	lockdebug_unlock(lk);
}
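
/*
 * Taken together, the expected order in a hypothetical primitive's
 * acquire path is a sketch like the following, where "where" records
 * the caller's address for later diagnostics:
 *
 *	uintptr_t where = (uintptr_t)__builtin_return_address(0);
 *
 *	lockdebug_wantlock(fl->fl_id, where, 0);
 *	... spin or sleep until the real lock is acquired ...
 *	lockdebug_locked(fl->fl_id, where, 0);
 *
 * lockdebug_wantlock() checks for recursion and bumps the advisory
 * "wanted" counters before the acquire; lockdebug_locked() records
 * ownership and decrements them once the lock is held.
 */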

/*
 * lockdebug_unlocked:
 *
 *	Process a lock release operation.
 */
void
lockdebug_unlocked(u_int id, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebuglk_t *lk;
	lockdebug_t *ld;

	if (panicstr != NULL)
		return;

	if ((ld = lockdebug_lookup(id, &lk)) == NULL)
		return;

	if (shared) {
		if (l->l_shlocks == 0)
			lockdebug_abort1(ld, lk, __func__,
			    "no shared locks held by LWP");
		if (ld->ld_shares == 0)
			lockdebug_abort1(ld, lk, __func__,
			    "no shared holds on this lock");
		l->l_shlocks--;
		ld->ld_shares--;
	} else {
		if ((ld->ld_flags & LD_LOCKED) == 0)
			lockdebug_abort1(ld, lk, __func__, "not locked");

		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp != curlwp)
				lockdebug_abort1(ld, lk, __func__,
				    "not held by current LWP");
			ld->ld_flags &= ~LD_LOCKED;
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
			curlwp->l_exlocks--;
			TAILQ_REMOVE(&ld_sleepers, ld, ld_chain);
		} else {
			if (ld->ld_cpu != (uint16_t)cpu_number())
				lockdebug_abort1(ld, lk, __func__,
				    "not held by current CPU");
			ld->ld_flags &= ~LD_LOCKED;
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
			curcpu()->ci_spin_locks2--;
			TAILQ_REMOVE(&ld_spinners, ld, ld_chain);
		}
	}

	lockdebug_unlock(lk);
}

/*
 * lockdebug_barrier:
 *
 *	Panic if we hold any spin lock other than the one specified, and
 *	optionally, if we hold any sleep locks.
 */
void
lockdebug_barrier(volatile void *spinlock, int slplocks)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	uint16_t cpuno;

	if (panicstr != NULL)
		return;

	if (curcpu()->ci_spin_locks2 != 0) {
		cpuno = (uint16_t)cpu_number();

		lockdebug_lock(&ld_spinner_lk);
		TAILQ_FOREACH(ld, &ld_spinners, ld_chain) {
			if (ld->ld_lock == spinlock) {
				if (ld->ld_cpu != cpuno)
					lockdebug_abort1(ld, &ld_spinner_lk,
					    __func__,
					    "not held by current CPU");
				continue;
			}
			if (ld->ld_cpu == cpuno)
				lockdebug_abort1(ld, &ld_spinner_lk,
				    __func__, "spin lock held");
		}
		lockdebug_unlock(&ld_spinner_lk);
	}

	if (!slplocks) {
		if (l->l_exlocks != 0) {
			lockdebug_lock(&ld_sleeper_lk);
			TAILQ_FOREACH(ld, &ld_sleepers, ld_chain) {
				if (ld->ld_lwp == l)
					lockdebug_abort1(ld, &ld_sleeper_lk,
					    __func__, "sleep lock held");
			}
			lockdebug_unlock(&ld_sleeper_lk);
		}
		if (l->l_shlocks != 0)
			panic("lockdebug_barrier: holding %d shared locks",
			    l->l_shlocks);
	}
}
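
/*
 * A typical use, sketched: code about to cross a blocking point can
 * assert that it holds no spin locks and no sleep locks at all by
 * passing no exemptions:
 *
 *	lockdebug_barrier(NULL, 0);
 *
 * Passing a specific spin lock exempts that one lock (it must still be
 * held by the current CPU); slplocks != 0 skips the sleep lock checks.
 */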

/*
 * lockdebug_dump:
 *
 *	Dump information about a lock on panic, or for DDB.
 */
static void
lockdebug_dump(lockdebug_t *ld, void (*pr)(const char *, ...))
{
	int sleeper = (ld->ld_flags & LD_SLEEPER);

	(*pr)(
	    "lock address : %#018lx type     : %18s\n"
	    "shared holds : %18u exclusive: %18u\n"
	    "shares wanted: %18u exclusive: %18u\n"
	    "current cpu  : %18u last held: %18u\n"
	    "current lwp  : %#018lx last held: %#018lx\n"
	    "last locked  : %#018lx unlocked : %#018lx\n",
	    (long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"),
	    (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
	    (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
	    (unsigned)cpu_number(), (unsigned)ld->ld_cpu,
	    (long)curlwp, (long)ld->ld_lwp,
	    (long)ld->ld_locked, (long)ld->ld_unlocked);

	if (ld->ld_lockops->lo_dump != NULL)
		(*ld->ld_lockops->lo_dump)(ld->ld_lock);

	if (sleeper) {
		(*pr)("\n");
		turnstile_print(ld->ld_lock, pr);
	}
}

/*
 * lockdebug_abort1:
 *
 *	An error has been trapped - dump lock info and call panic().
 */
static void
lockdebug_abort1(lockdebug_t *ld, lockdebuglk_t *lk, const char *func,
    const char *msg)
{

	printf_nolog("%s error: %s: %s\n\n", ld->ld_lockops->lo_name,
	    func, msg);
	lockdebug_dump(ld, printf_nolog);
	lockdebug_unlock(lk);
	printf_nolog("\n");
	panic("LOCKDEBUG");
}

#endif	/* LOCKDEBUG */

/*
 * lockdebug_lock_print:
 *
 *	Handle the DDB 'show lock' command.
 */
#ifdef DDB
void
lockdebug_lock_print(void *addr, void (*pr)(const char *, ...))
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;

	TAILQ_FOREACH(ld, &ld_all, ld_achain) {
		if (ld->ld_lock == addr) {
			lockdebug_dump(ld, pr);
			return;
		}
	}
	(*pr)("Sorry, no record of a lock with address %p found.\n", addr);
#else
	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
#endif	/* LOCKDEBUG */
}
#endif	/* DDB */
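
/*
 * From DDB, lockdebug_lock_print() is reached via the "show lock"
 * command, e.g. (a sketch; the address shown is hypothetical):
 *
 *	db> show lock 0xffffffff80d2c000
 *
 * which prints the matching record via lockdebug_dump(), if any.
 */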

/*
 * lockdebug_abort:
 *
 *	An error has been trapped - dump lock info and call panic().
 */
void
lockdebug_abort(u_int id, volatile void *lock, lockops_t *ops,
    const char *func, const char *msg)
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;
	lockdebuglk_t *lk;

	if ((ld = lockdebug_lookup(id, &lk)) != NULL) {
		lockdebug_abort1(ld, lk, func, msg);
		/* NOTREACHED */
	}
#endif	/* LOCKDEBUG */

	printf_nolog("%s error: %s: %s\n\n"
	    "lock address : %#018lx\n"
	    "current cpu  : %18d\n"
	    "current lwp  : %#018lx\n",
	    ops->lo_name, func, msg, (long)lock, (int)cpu_number(),
	    (long)curlwp);

	(*ops->lo_dump)(lock);

	printf_nolog("\n");
	panic("lock error");
}