/*	$NetBSD: subr_lockdebug.c,v 1.1.2.2 2006/10/24 19:07:49 ad Exp $	*/

/*-
 * Copyright (c) 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Basic lock debugging code shared among lock primitives.
 *
 * XXX malloc() may want to initialize new mutexes.  To be fixed.
 */
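
/*
 * Each lock primitive drives this interface itself.  An illustrative
 * sketch of the expected call pattern (names here are hypothetical and
 * not taken from any particular primitive):
 *
 *	id = lockdebug_alloc(lock, ops, sleeplock);	when the lock is set up
 *	lockdebug_locked(id, where, shared);		after each acquire
 *	lockdebug_unlocked(id, where, shared);		before each release
 *	lockdebug_free(lock, id);			when the lock is destroyed
 *
 * "where" is typically the caller's return address; it is reported as the
 * "last locked" / "last unlocked" point when an error is trapped.
 */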

#include "opt_multiprocessor.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.1.2.2 2006/10/24 19:07:49 ad Exp $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/lockdebug.h>

#include <machine/cpu.h>
#ifdef LOCKDEBUG

#define	LD_BATCH_SHIFT	9
#define	LD_BATCH	(1 << LD_BATCH_SHIFT)
#define	LD_BATCH_MASK	(LD_BATCH - 1)
#define	LD_MAX_LOCKS	1048576
#define	LD_SLOP		16

#define	LD_LOCKED	0x01
#define	LD_SLEEPER	0x02
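
/*
 * Each tracked lock is identified by a small integer ID handed out by
 * lockdebug_alloc().  Debug records are allocated in batches of LD_BATCH:
 * the upper bits of an ID (id >> LD_BATCH_SHIFT) select a batch via
 * ld_table[], and the low bits (id & LD_BATCH_MASK) select an entry within
 * the batch.  ID zero is never handed out: lockdebug_init() skips entry
 * zero of the first batch, so a zero ID can be treated as invalid.
 */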

typedef struct lockdebuglk {
	__cpu_simple_lock_t	lk_lock;
	int			lk_oldspl;
} volatile lockdebuglk_t;

typedef struct lockdebug {
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
	void		*ld_lock;
	lockops_t	*ld_lockops;
	struct lwp	*ld_lwp;
	uintptr_t	ld_locked;
	uintptr_t	ld_unlocked;
	u_int		ld_id;
	u_short		ld_cpu;
	u_short		ld_shares;
	u_char		ld_flags;
} volatile lockdebug_t;

typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;

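/*
 * Global state.  ld_sleeper_lk protects the list of records for sleep
 * locks (ld_sleepers), ld_spinner_lk protects the list of records for
 * spin locks (ld_spinners), and ld_free_lk protects the free list and
 * the allocation bookkeeping (ld_nfree, ld_freeptr, ld_table).  ld_prime
 * is a statically allocated first batch, installed by lockdebug_init()
 * so that locks can be tracked before the VM system (and hence malloc())
 * is up and running.
 */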
lockdebuglk_t	ld_sleeper_lk;
lockdebuglk_t	ld_spinner_lk;
lockdebuglk_t	ld_free_lk;

lockdebuglist_t	ld_sleepers;
lockdebuglist_t	ld_spinners;
lockdebuglist_t	ld_free;
int		ld_nfree;
int		ld_freeptr;
int		ld_recurse;
lockdebug_t	*ld_table[LD_MAX_LOCKS / LD_BATCH];

lockdebug_t	ld_prime[LD_BATCH];

MALLOC_DEFINE(M_LOCKDEBUG, "lockdebug", "lockdebug structures");

void	lockdebug_abort1(lockdebug_t *, lockdebuglk_t *lk, const char *,
	    const char *);
void	lockdebug_more(void);

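/*
 * lockdebug_lock:
 *
 *	Acquire one of the metadata locks, raising the interrupt priority
 *	level with spllock() and recording the old level so that
 *	lockdebug_unlock() can restore it.
 */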
static inline void
lockdebug_lock(lockdebuglk_t *lk)
{
	int s;

	s = spllock();
	__cpu_simple_lock(&lk->lk_lock);
	lk->lk_oldspl = s;
}

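/*
 * lockdebug_unlock:
 *
 *	Release a metadata lock and restore the interrupt priority level
 *	saved by lockdebug_lock().
 */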
static inline void
lockdebug_unlock(lockdebuglk_t *lk)
{
	int s;

	s = lk->lk_oldspl;
	__cpu_simple_unlock(&lk->lk_lock);
	splx(s);
}

/*
 * lockdebug_lookup:
 *
 *	Find a lockdebug structure by ID and return it locked.
 */
static inline lockdebug_t *
lockdebug_lookup(u_int id, lockdebuglk_t **lk)
{
	lockdebug_t *base, *ld;

	/* Validate the ID before using it to index ld_table[]. */
	if (id == 0 || id >= LD_MAX_LOCKS)
		panic("lockdebug_lookup: uninitialized lock (id=%d)", id);

	base = ld_table[id >> LD_BATCH_SHIFT];
	if (base == NULL)
		panic("lockdebug_lookup: uninitialized lock (id=%d)", id);

	ld = base + (id & LD_BATCH_MASK);
	if (ld->ld_lock == NULL)
		panic("lockdebug_lookup: uninitialized lock (id=%d)", id);

	if (ld->ld_id != id)
		panic("lockdebug_lookup: corrupt table");

	if ((ld->ld_flags & LD_SLEEPER) != 0)
		*lk = &ld_sleeper_lk;
	else
		*lk = &ld_spinner_lk;

	lockdebug_lock(*lk);
	return ld;
}

/*
 * lockdebug_init:
 *
 *	Initialize the lockdebug system.  Allocate an initial pool of
 *	lockdebug structures before the VM system is up and running.
 */
void
lockdebug_init(void)
{
	lockdebug_t *ld;
	int i;

	__cpu_simple_lock_init(&ld_sleeper_lk.lk_lock);
	__cpu_simple_lock_init(&ld_spinner_lk.lk_lock);
	__cpu_simple_lock_init(&ld_free_lk.lk_lock);

	TAILQ_INIT(&ld_free);
	TAILQ_INIT(&ld_sleepers);
	TAILQ_INIT(&ld_spinners);

	ld = ld_prime;
	ld_table[0] = ld;
	/* Skip entry zero, so that an ID of zero is never handed out. */
	for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
		ld->ld_id = i;
		TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
	}
	ld_freeptr = 1;
	ld_nfree = LD_BATCH - 1;
}

/*
 * lockdebug_alloc:
 *
 *	A lock is being initialized, so allocate an associated debug
 *	structure.
 */
u_int
lockdebug_alloc(void *lock, lockops_t *lo, int sleeplock)
{
	lockdebug_t *ld;

	if (panicstr != NULL)
		return 0;

	/* Pinch a new debug structure. */
	lockdebug_lock(&ld_free_lk);
	if (TAILQ_EMPTY(&ld_free))
		lockdebug_more();
	ld = TAILQ_FIRST(&ld_free);
	TAILQ_REMOVE(&ld_free, ld, ld_chain);
	ld_nfree--;
	lockdebug_unlock(&ld_free_lk);

	if (ld->ld_lock != NULL)
		panic("lockdebug_alloc: corrupt table");

	if (sleeplock)
		lockdebug_lock(&ld_sleeper_lk);
	else
		lockdebug_lock(&ld_spinner_lk);

	/* Initialise the structure. */
	ld->ld_lock = lock;
	ld->ld_lockops = lo;
	ld->ld_locked = 0;
	ld->ld_unlocked = 0;
	ld->ld_lwp = NULL;

	if (sleeplock) {
		ld->ld_flags = LD_SLEEPER;
		lockdebug_unlock(&ld_sleeper_lk);
	} else {
		ld->ld_flags = 0;
		lockdebug_unlock(&ld_spinner_lk);
	}

	return ld->ld_id;
}

/*
 * lockdebug_free:
 *
 *	A lock is being destroyed, so release debugging resources.
 */
void
lockdebug_free(void *lock, u_int id)
{
	lockdebug_t *ld;
	lockdebuglk_t *lk;

	if (panicstr != NULL)
		return;

	ld = lockdebug_lookup(id, &lk);

	if (ld->ld_lock != lock)
		lockdebug_abort1(ld, lk, __FUNCTION__,
		    "destroying uninitialized lock");
	if ((ld->ld_flags & LD_LOCKED) != 0)
		lockdebug_abort1(ld, lk, __FUNCTION__, "is locked");

	ld->ld_lock = NULL;

	lockdebug_unlock(lk);

	lockdebug_lock(&ld_free_lk);
	TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
	ld_nfree++;
	lockdebug_unlock(&ld_free_lk);
}

/*
 * lockdebug_more:
 *
 *	Allocate a batch of debug structures and add to the free list.  Must
 *	be called with ld_free_lk held.
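 *	The lock is dropped around the call to malloc() and re-taken
 *	afterwards, so the table slot is re-checked in case another CPU
 *	installed a batch in the meantime.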
 */
void
lockdebug_more(void)
{
	lockdebug_t *ld;
	void *block;
	int i, base;

	while (TAILQ_EMPTY(&ld_free)) {
		lockdebug_unlock(&ld_free_lk);
		block = malloc(LD_BATCH * sizeof(lockdebug_t), M_LOCKDEBUG,
		    M_NOWAIT | M_ZERO);	/* XXX M_NOWAIT */
		lockdebug_lock(&ld_free_lk);

		if (block == NULL)
			panic("lockdebug_more: cannot allocate structures");

		base = ld_freeptr;
		if (ld_table[base] != NULL) {
			/* Somebody beat us to it. */
			lockdebug_unlock(&ld_free_lk);
			free(block, M_LOCKDEBUG);
			lockdebug_lock(&ld_free_lk);
			continue;
		}
		ld_table[base] = block;
		ld_freeptr++;
		ld_nfree += LD_BATCH;
		ld = block;
		base <<= LD_BATCH_SHIFT;

		for (i = 0; i < LD_BATCH; i++, ld++) {
			ld->ld_id = i + base;
			ld->ld_lock = NULL;
			TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
		}

		/* Make the new records globally visible before handing them out. */
		mb_write();
	}
}

/*
 * lockdebug_locked:
 *
 *	Process a lock acquire operation.
 */
void
lockdebug_locked(u_int id, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebuglk_t *lk;
	lockdebug_t *ld;

	if (panicstr != NULL)
		return;

	ld = lockdebug_lookup(id, &lk);

	if ((ld->ld_flags & LD_LOCKED) != 0)
		lockdebug_abort1(ld, lk, __FUNCTION__, "already locked");

	if (shared) {
		if (l == NULL)
			lockdebug_abort1(ld, lk, __FUNCTION__, "acquiring "
			    "shared lock from interrupt context");

		l->l_shlocks++;
		ld->ld_shares++;
	} else {
		ld->ld_flags |= LD_LOCKED;
		ld->ld_locked = where;
		ld->ld_cpu = (u_short)cpu_number();
		ld->ld_lwp = l;

		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			l->l_exlocks++;
			TAILQ_INSERT_TAIL(&ld_sleepers, ld, ld_chain);
		} else {
			curcpu()->ci_spin_locks2++;
			TAILQ_INSERT_TAIL(&ld_spinners, ld, ld_chain);
		}
	}

	lockdebug_unlock(lk);
}

/*
 * lockdebug_unlocked:
 *
 *	Process a lock release operation.
 */
void
lockdebug_unlocked(u_int id, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebuglk_t *lk;
	lockdebug_t *ld;

	if (panicstr != NULL)
		return;

	ld = lockdebug_lookup(id, &lk);

	if (shared) {
		if (l == NULL)
			lockdebug_abort1(ld, lk, __FUNCTION__, "releasing "
			    "shared lock from interrupt context");
		if (l->l_shlocks == 0)
			lockdebug_abort1(ld, lk, __FUNCTION__, "no shared "
			    "locks held by LWP");
		if (ld->ld_shares == 0)
			lockdebug_abort1(ld, lk, __FUNCTION__, "no shared "
			    "holds on this lock");
		l->l_shlocks--;
		ld->ld_shares--;
	} else {
		if ((ld->ld_flags & LD_LOCKED) == 0)
			lockdebug_abort1(ld, lk, __FUNCTION__, "not locked");

		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp != curlwp)
				lockdebug_abort1(ld, lk, __FUNCTION__,
				    "not held by current LWP");
			ld->ld_flags &= ~LD_LOCKED;
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
			curlwp->l_exlocks--;
			TAILQ_REMOVE(&ld_sleepers, ld, ld_chain);
		} else {
			if (ld->ld_cpu != (u_short)cpu_number())
				lockdebug_abort1(ld, lk, __FUNCTION__,
				    "not held by current CPU");
			ld->ld_flags &= ~LD_LOCKED;
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
			curcpu()->ci_spin_locks2--;
			TAILQ_REMOVE(&ld_spinners, ld, ld_chain);
		}
	}

	lockdebug_unlock(lk);
}

/*
 * lockdebug_barrier:
 *
 *	Panic if we hold any spin lock other than the one specified and,
 *	optionally, if we hold any sleep locks.
 */
void
lockdebug_barrier(void *spinlock, int slplocks)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	u_short cpuno;

	if (panicstr != NULL)
		return;

	if (curcpu()->ci_spin_locks2 != 0) {
		cpuno = (u_short)cpu_number();

		lockdebug_lock(&ld_spinner_lk);
		TAILQ_FOREACH(ld, &ld_spinners, ld_chain) {
			if (ld->ld_lock == spinlock) {
				if (ld->ld_cpu != cpuno)
					lockdebug_abort1(ld, &ld_spinner_lk,
					    __FUNCTION__,
					    "not held by current CPU");
				continue;
			}
			if (ld->ld_cpu == cpuno)
				lockdebug_abort1(ld, &ld_spinner_lk,
				    __FUNCTION__, "spin lock held");
		}
		lockdebug_unlock(&ld_spinner_lk);
	}

	if (!slplocks) {
		if (l->l_exlocks != 0) {
			lockdebug_lock(&ld_sleeper_lk);
			TAILQ_FOREACH(ld, &ld_sleepers, ld_chain) {
				if (ld->ld_lwp == l)
					lockdebug_abort1(ld, &ld_sleeper_lk,
					    __FUNCTION__, "sleep lock held");
			}
			lockdebug_unlock(&ld_sleeper_lk);
		}
		if (l->l_shlocks != 0)
			panic("lockdebug_barrier: holding %d shared locks",
			    l->l_shlocks);
	}
}

/*
 * lockdebug_abort:
 *
 *	An error has been trapped - dump lock info and call panic().
 */
void
lockdebug_abort(int id, void *lock, lockops_t *ops, const char *func,
    const char *msg)
{
	lockdebug_t *ld;
	lockdebuglk_t *lk;

	(void)lock;
	(void)ops;

	ld = lockdebug_lookup(id, &lk);

	lockdebug_abort1(ld, lk, func, msg);
}

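/*
 * lockdebug_abort1:
 *
 *	Dump the contents of a lockdebug structure, drop the metadata lock,
 *	and call panic().  Does not return.
 */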
void
lockdebug_abort1(lockdebug_t *ld, lockdebuglk_t *lk, const char *func,
    const char *msg)
{
	static char buf[1024];
	int p;

	/*
	 * The kernel is about to fall flat on its face, so assume that 1k
	 * will be enough to hold the dump and abuse the return value from
	 * snprintf.
	 */
	p = snprintf(buf, sizeof(buf), "%s error: %s: %s\n",
	    ld->ld_lockops->lo_name, func, msg);

	p += snprintf(buf + p, sizeof(buf) - p,
	    "lock address : %#018lx type : %18s\n"
	    "shared holds : %18d exclusive: %12slocked\n"
	    "last locked : %#018lx unlocked : %#018lx\n"
	    "current cpu : %18d last held: %18d\n"
	    "current lwp : %#018lx last held: %#018lx\n",
	    (long)ld->ld_lock,
	    ((ld->ld_flags & LD_SLEEPER) == 0 ? "spin" : "sleep"),
	    ld->ld_shares, ((ld->ld_flags & LD_LOCKED) == 0 ? "un" : " "),
	    (long)ld->ld_locked, (long)ld->ld_unlocked,
	    (int)cpu_number(), (int)ld->ld_cpu,
	    (long)curlwp, (long)ld->ld_lwp);

	(void)(*ld->ld_lockops->lo_dump)(ld->ld_lock, buf + p, sizeof(buf) - p);

	lockdebug_unlock(lk);

	printf("%s", buf);
	panic("LOCKDEBUG");
}

#else	/* LOCKDEBUG */

/*
 * lockdebug_abort:
 *
 *	An error has been trapped - dump lock info and call panic().
 *	Called in the non-LOCKDEBUG case to print basic information.
 */
void
lockdebug_abort(int id, void *lock, lockops_t *ops, const char *func,
    const char *msg)
{
	static char buf[1024];
	int p;

	/*
	 * The kernel is about to fall flat on its face, so assume that 1k
	 * will be enough to hold the dump and abuse the return value from
	 * snprintf.
	 */
	p = snprintf(buf, sizeof(buf), "%s error: %s: %s\n",
	    ops->lo_name, func, msg);

	p += snprintf(buf + p, sizeof(buf) - p,
	    "lock address : %#018lx\n"
	    "current cpu : %18d\n"
	    "current lwp : %#018lx\n",
	    (long)lock, (int)cpu_number(), (long)curlwp);

	(void)(*ops->lo_dump)(lock, buf + p, sizeof(buf) - p);

	printf("%s", buf);
	panic("lock error");
}

#endif	/* LOCKDEBUG */