/*	$NetBSD: kern_lock.c,v 1.51.2.2 2001/06/21 20:06:50 nathanw Exp $	*/

/*-
 * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Ross Harvey.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/lwp.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/systm.h>
#include <machine/cpu.h>

#if defined(LOCKDEBUG)
#include <sys/syslog.h>
/*
 * note that stdarg.h and the ANSI-style va_start macro are used for both
 * ANSI and traditional C compiles.
 * XXX: this requires that stdarg.h define: va_alist and va_dcl
 */
#include <machine/stdarg.h>

void	lock_printf(const char *fmt, ...)
    __attribute__((__format__(__printf__,1,2)));

int	lock_debug_syslog = 0;	/* defaults to printf, but can be patched */

#ifdef DDB
#include <ddb/ddbvar.h>
#include <machine/db_machdep.h>
#include <ddb/db_command.h>
#include <ddb/db_interface.h>
#endif /* DDB */
#endif /* LOCKDEBUG */

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */
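
/*
 * Illustrative sketch (not compiled): a reader/writer pattern built on
 * lockmgr().  "struct foo" and its "f_lock" member are hypothetical; the
 * lock is assumed to have been set up with lockinit().  Readers take
 * LK_SHARED, a writer takes LK_EXCLUSIVE; both drop with LK_RELEASE.
 */
#if 0
void
foo_read(struct foo *f)
{

	(void) lockmgr(&f->f_lock, LK_SHARED, NULL);
	/* ... examine f; a sleep lock may block here ... */
	(void) lockmgr(&f->f_lock, LK_RELEASE, NULL);
}

void
foo_write(struct foo *f)
{

	(void) lockmgr(&f->f_lock, LK_EXCLUSIVE, NULL);
	/* ... modify f under exclusive hold ... */
	(void) lockmgr(&f->f_lock, LK_RELEASE, NULL);
}
#endif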

#if defined(LOCKDEBUG) || defined(DIAGNOSTIC) /* { */
#if defined(MULTIPROCESSOR) /* { */
#define	COUNT_CPU(cpu_id, x)						\
	curcpu()->ci_spin_locks += (x)
#else
u_long	spin_locks;
#define	COUNT_CPU(cpu_id, x)	spin_locks += (x)
#endif /* MULTIPROCESSOR */ /* } */

#define	COUNT(lkp, p, cpu_id, x)					\
do {									\
	if ((lkp)->lk_flags & LK_SPIN)					\
		COUNT_CPU((cpu_id), (x));				\
	else								\
		(p)->p_locks += (x);					\
} while (/*CONSTCOND*/0)
#else
#define COUNT(lkp, p, cpu_id, x)
#define COUNT_CPU(cpu_id, x)
#endif /* LOCKDEBUG || DIAGNOSTIC */ /* } */

#ifndef SPINLOCK_SPIN_HOOK		/* from <machine/lock.h> */
#define	SPINLOCK_SPIN_HOOK		/* nothing */
#endif

#define	INTERLOCK_ACQUIRE(lkp, flags, s)				\
do {									\
	if ((flags) & LK_SPIN)						\
		s = splsched();						\
	simple_lock(&(lkp)->lk_interlock);				\
} while (0)

#define	INTERLOCK_RELEASE(lkp, flags, s)				\
do {									\
	simple_unlock(&(lkp)->lk_interlock);				\
	if ((flags) & LK_SPIN)						\
		splx(s);						\
} while (0)

#if defined(LOCKDEBUG)
#if defined(DDB)
#define	SPINLOCK_SPINCHECK_DEBUGGER	Debugger()
#else
#define	SPINLOCK_SPINCHECK_DEBUGGER	/* nothing */
#endif

#define	SPINLOCK_SPINCHECK_DECL						\
	/* 32-bits of count -- wrap constitutes a "spinout" */		\
	uint32_t __spinc = 0

#define	SPINLOCK_SPINCHECK						\
do {									\
	if (++__spinc == 0) {						\
		printf("LK_SPIN spinout, excl %d, share %d\n",		\
		    lkp->lk_exclusivecount, lkp->lk_sharecount);	\
		if (lkp->lk_exclusivecount)				\
			printf("held by CPU %lu\n",			\
			    (u_long) lkp->lk_cpu);			\
		if (lkp->lk_lock_file)					\
			printf("last locked at %s:%d\n",		\
			    lkp->lk_lock_file, lkp->lk_lock_line);	\
		if (lkp->lk_unlock_file)				\
			printf("last unlocked at %s:%d\n",		\
			    lkp->lk_unlock_file, lkp->lk_unlock_line);	\
		SPINLOCK_SPINCHECK_DEBUGGER;				\
	}								\
} while (0)
#else
#define SPINLOCK_SPINCHECK_DECL			/* nothing */
#define SPINLOCK_SPINCHECK			/* nothing */
#endif /* LOCKDEBUG */

/*
 * Acquire a resource.
 */
#define	ACQUIRE(lkp, error, extflags, drain, wanted)			\
	if ((extflags) & LK_SPIN) {					\
		int interlocked;					\
		SPINLOCK_SPINCHECK_DECL;				\
									\
		if ((drain) == 0)					\
			(lkp)->lk_waitcount++;				\
		for (interlocked = 1;;) {				\
			SPINLOCK_SPINCHECK;				\
			if (wanted) {					\
				if (interlocked) {			\
					INTERLOCK_RELEASE((lkp),	\
					    LK_SPIN, s);		\
					interlocked = 0;		\
				}					\
				SPINLOCK_SPIN_HOOK;			\
			} else if (interlocked) {			\
				break;					\
			} else {					\
				INTERLOCK_ACQUIRE((lkp), LK_SPIN, s);	\
				interlocked = 1;			\
			}						\
		}							\
		if ((drain) == 0)					\
			(lkp)->lk_waitcount--;				\
		KASSERT((wanted) == 0);					\
		error = 0;	/* sanity */				\
	} else {							\
		for (error = 0; wanted; ) {				\
			if ((drain))					\
				(lkp)->lk_flags |= LK_WAITDRAIN;	\
			else						\
				(lkp)->lk_waitcount++;			\
			/* XXX Cast away volatile. */			\
			error = ltsleep((drain) ?			\
			    (void *)&(lkp)->lk_flags :			\
			    (void *)(lkp), (lkp)->lk_prio,		\
			    (lkp)->lk_wmesg, (lkp)->lk_timo,		\
			    &(lkp)->lk_interlock);			\
			if ((drain) == 0)				\
				(lkp)->lk_waitcount--;			\
			if (error)					\
				break;					\
			if ((extflags) & LK_SLEEPFAIL) {		\
				error = ENOLCK;				\
				break;					\
			}						\
		}							\
	}

#define	SETHOLDER(lkp, pid, cpu_id)					\
do {									\
	if ((lkp)->lk_flags & LK_SPIN)					\
		(lkp)->lk_cpu = cpu_id;					\
	else								\
		(lkp)->lk_lockholder = pid;				\
} while (/*CONSTCOND*/0)

#define	WEHOLDIT(lkp, pid, cpu_id)					\
	(((lkp)->lk_flags & LK_SPIN) != 0 ?				\
	    ((lkp)->lk_cpu == (cpu_id)) : ((lkp)->lk_lockholder == (pid)))

#define	WAKEUP_WAITER(lkp)						\
do {									\
	if (((lkp)->lk_flags & LK_SPIN) == 0 && (lkp)->lk_waitcount) {	\
		/* XXX Cast away volatile. */				\
		wakeup_one((void *)(lkp));				\
	}								\
} while (/*CONSTCOND*/0)

#if defined(LOCKDEBUG) /* { */
#if defined(MULTIPROCESSOR) /* { */
struct simplelock spinlock_list_slock = SIMPLELOCK_INITIALIZER;

#define	SPINLOCK_LIST_LOCK()						\
	__cpu_simple_lock(&spinlock_list_slock.lock_data)

#define	SPINLOCK_LIST_UNLOCK()						\
	__cpu_simple_unlock(&spinlock_list_slock.lock_data)
#else
#define	SPINLOCK_LIST_LOCK()	/* nothing */

#define	SPINLOCK_LIST_UNLOCK()	/* nothing */
#endif /* MULTIPROCESSOR */ /* } */

TAILQ_HEAD(, lock) spinlock_list =
    TAILQ_HEAD_INITIALIZER(spinlock_list);

#define	HAVEIT(lkp)							\
do {									\
	if ((lkp)->lk_flags & LK_SPIN) {				\
		int s = spllock();					\
		SPINLOCK_LIST_LOCK();					\
		/* XXX Cast away volatile. */				\
		TAILQ_INSERT_TAIL(&spinlock_list, (struct lock *)(lkp),	\
		    lk_list);						\
		SPINLOCK_LIST_UNLOCK();					\
		splx(s);						\
	}								\
} while (/*CONSTCOND*/0)

#define	DONTHAVEIT(lkp)							\
do {									\
	if ((lkp)->lk_flags & LK_SPIN) {				\
		int s = spllock();					\
		SPINLOCK_LIST_LOCK();					\
		/* XXX Cast away volatile. */				\
		TAILQ_REMOVE(&spinlock_list, (struct lock *)(lkp),	\
		    lk_list);						\
		SPINLOCK_LIST_UNLOCK();					\
		splx(s);						\
	}								\
} while (/*CONSTCOND*/0)
#else
#define	HAVEIT(lkp)		/* nothing */

#define	DONTHAVEIT(lkp)		/* nothing */
#endif /* LOCKDEBUG */ /* } */

#if defined(LOCKDEBUG)
/*
 * Lock debug printing routine; can be configured to print to console
 * or log to syslog.
 */
void
lock_printf(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	if (lock_debug_syslog)
		vlog(LOG_DEBUG, fmt, ap);
	else
		vprintf(fmt, ap);
	va_end(ap);
}
#endif /* LOCKDEBUG */

/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, int prio, const char *wmesg, int timo, int flags)
{

	memset(lkp, 0, sizeof(struct lock));
	simple_lock_init(&lkp->lk_interlock);
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	if (flags & LK_SPIN)
		lkp->lk_cpu = LK_NOCPU;
	else {
		lkp->lk_lockholder = LK_NOPROC;
		lkp->lk_prio = prio;
		lkp->lk_timo = timo;
	}
	lkp->lk_wmesg = wmesg;	/* just a name for spin locks */
#if defined(LOCKDEBUG)
	lkp->lk_lock_file = NULL;
	lkp->lk_unlock_file = NULL;
#endif
}
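
/*
 * Illustrative sketch (not compiled): initializing the two flavours of
 * lockmgr lock.  A sleep lock takes a priority and timeout for ltsleep();
 * a spin lock (LK_SPIN) ignores both and uses the wmesg only as a name.
 * "foo_lock" and "bar_slock" are hypothetical.
 */
#if 0
struct lock foo_lock;			/* sleep lock */
struct lock bar_slock;			/* spin lock */

void
foo_locks_init(void)
{

	lockinit(&foo_lock, PRIBIO, "foolck", 0, 0);
	lockinit(&bar_slock, 0, "barslk", 0, LK_SPIN);
}
#endif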

/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp)
{
	int s, lock_type = 0;

	INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
	if (lkp->lk_exclusivecount != 0)
		lock_type = LK_EXCLUSIVE;
	else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
	return (lock_type);
}

#if defined(LOCKDEBUG) || defined(DIAGNOSTIC)
/*
 * Make sure no spin locks are held by a CPU that is about
 * to context switch.
 */
void
spinlock_switchcheck(void)
{
	u_long cnt;
	int s;

	s = spllock();
#if defined(MULTIPROCESSOR)
	cnt = curcpu()->ci_spin_locks;
#else
	cnt = spin_locks;
#endif
	splx(s);

	if (cnt != 0)
		panic("spinlock_switchcheck: CPU %lu has %lu spin locks",
		    (u_long) cpu_number(), cnt);
}
#endif /* LOCKDEBUG || DIAGNOSTIC */

/*
 * Locks and IPLs (interrupt priority levels):
 *
 * Locks which may be taken from interrupt context must be handled
 * very carefully; you must spl to the highest IPL where the lock
 * is needed before acquiring the lock.
 *
 * It is also important to avoid deadlock, since certain (very high
 * priority) interrupts are often needed to keep the system as a whole
 * from deadlocking, and must not be blocked while you are spinning
 * waiting for a lower-priority lock.
 *
 * In addition, the lock-debugging hooks themselves need to use locks!
 *
 * A raw __cpu_simple_lock may be used from interrupts as long as it
 * is acquired and held at a single IPL.
 *
 * A simple_lock (which is a __cpu_simple_lock wrapped with some
 * debugging hooks) may be used at or below spllock(), which is
 * typically at or just below splhigh() (i.e. blocks everything
 * but certain machine-dependent extremely high priority interrupts).
 *
 * spinlockmgr spinlocks should be used at or below splsched().
 *
 * Some platforms may have interrupts of higher priority than splsched(),
 * including hard serial interrupts, inter-processor interrupts, and
 * kernel debugger traps.
 */
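
/*
 * Illustrative sketch (not compiled) of the IPL discipline above for a
 * simple_lock shared with interrupt context.  Assume a hypothetical
 * driver whose interrupt handler runs at splbio(); every acquirer must
 * first raise to that IPL so the handler cannot interrupt a lock holder
 * on the same CPU and spin forever against it.
 */
#if 0
struct simplelock foo_intr_slock = SIMPLELOCK_INITIALIZER;

void
foo_kick(struct foo_softc *sc)		/* foo_softc is hypothetical */
{
	int s;

	s = splbio();			/* block our interrupt handler */
	simple_lock(&foo_intr_slock);
	/* ... touch state shared with the interrupt handler ... */
	simple_unlock(&foo_intr_slock);
	splx(s);
}
#endif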

/*
 * XXX XXX kludge around another kludge..
 *
 * vfs_shutdown() may be called from interrupt context, either as a result
 * of a panic, or from the debugger.  It proceeds to call
 * sys_sync(&proc0, ...), pretending it's running on behalf of proc0.
 *
 * We would like to make an attempt to sync the filesystems in this case, so
 * if this happens, we treat attempts to acquire locks specially.
 * All locks are acquired on behalf of proc0.
 *
 * If we've already panicked, we don't block waiting for locks, but
 * just barge right ahead since we're already going down in flames.
 */

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
#if defined(LOCKDEBUG)
_lockmgr(__volatile struct lock *lkp, u_int flags,
    struct simplelock *interlkp, const char *file, int line)
#else
lockmgr(__volatile struct lock *lkp, u_int flags,
    struct simplelock *interlkp)
#endif
{
	int error;
	pid_t pid;
	int extflags;
	cpuid_t cpu_id;
	struct lwp *l = curproc;
	struct proc *p = (l == NULL) ? NULL : l->l_proc;
	int lock_shutdown_noblock = 0;
	int s;

	error = 0;

	INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
	if (flags & LK_INTERLOCK)
		simple_unlock(interlkp);
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

#ifdef DIAGNOSTIC /* { */
	/*
	 * Don't allow spins on sleep locks and don't allow sleeps
	 * on spin locks.
	 */
	if ((flags ^ lkp->lk_flags) & LK_SPIN)
		panic("lockmgr: sleep/spin mismatch\n");
#endif /* } */

	if (extflags & LK_SPIN)
		pid = LK_KERNPROC;
	else {
		if (p == NULL) {
			if (!doing_shutdown) {
#ifdef DIAGNOSTIC
				panic("lockmgr: no context");
#endif
			} else {
				p = &proc0;
				if (panicstr && (!(flags & LK_NOWAIT))) {
					flags |= LK_NOWAIT;
					lock_shutdown_noblock = 1;
				}
			}
		}
		pid = p->p_pid;
	}
	cpu_id = cpu_number();

	/*
	 * Once a lock has drained, the LK_DRAINING flag is set and an
	 * exclusive lock is returned. The only valid operation thereafter
	 * is a single release of that exclusive lock. This final release
	 * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
	 * further requests of any sort will result in a panic. The bits
	 * selected for these two flags are chosen so that they will be set
	 * in memory that is freed (freed memory is filled with 0xdeadbeef).
	 * The final release is permitted to give a new lease on life to
	 * the lock by specifying LK_REENABLE.
	 */
	if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
#ifdef DIAGNOSTIC /* { */
		if (lkp->lk_flags & LK_DRAINED)
			panic("lockmgr: using decommissioned lock");
		if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
		    WEHOLDIT(lkp, pid, cpu_id) == 0)
			panic("lockmgr: non-release on draining lock: %d\n",
			    flags & LK_TYPE_MASK);
#endif /* DIAGNOSTIC */ /* } */
		lkp->lk_flags &= ~LK_DRAINING;
		if ((flags & LK_REENABLE) == 0)
			lkp->lk_flags |= LK_DRAINED;
	}

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (WEHOLDIT(lkp, pid, cpu_id) == 0) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
				error = EBUSY;
				break;
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
			if (error)
				break;
			lkp->lk_sharecount++;
			COUNT(lkp, p, cpu_id, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;
		COUNT(lkp, p, cpu_id, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (WEHOLDIT(lkp, pid, cpu_id) == 0 ||
		    lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_exclusivecount = 0;
		lkp->lk_recurselevel = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
#if defined(LOCKDEBUG)
		lkp->lk_unlock_file = file;
		lkp->lk_unlock_line = line;
#endif
		DONTHAVEIT(lkp);
		WAKEUP_WAITER(lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			lkp->lk_sharecount--;
			COUNT(lkp, p, cpu_id, -1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the file
		 * will always be unlocked.
		 */
		if (WEHOLDIT(lkp, pid, cpu_id) || lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade exclusive lock");
		lkp->lk_sharecount--;
		COUNT(lkp, p, cpu_id, -1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		    lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are first shared lock to request an upgrade, so
			 * request upgrade and wait for the shared count to
			 * drop to zero, then take exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			ACQUIRE(lkp, error, extflags, 0, lkp->lk_sharecount);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;
			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			SETHOLDER(lkp, pid, cpu_id);
#if defined(LOCKDEBUG)
			lkp->lk_lock_file = file;
			lkp->lk_lock_line = line;
#endif
			HAVEIT(lkp);
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
			if (extflags & LK_SETRECURSE)
				lkp->lk_recurselevel = 1;
			COUNT(lkp, p, cpu_id, 1);
			break;
		}
		/*
		 * Someone else has requested upgrade. Release our shared
		 * lock, awaken upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if (lkp->lk_sharecount == 0)
			WAKEUP_WAITER(lkp);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (WEHOLDIT(lkp, pid, cpu_id)) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0 &&
			    lkp->lk_recurselevel == 0) {
				if (extflags & LK_RECURSEFAIL) {
					error = EDEADLK;
					break;
				} else
					panic("lockmgr: locking against myself");
			}
			lkp->lk_exclusivecount++;
			if (extflags & LK_SETRECURSE &&
			    lkp->lk_recurselevel == 0)
				lkp->lk_recurselevel = lkp->lk_exclusivecount;
			COUNT(lkp, p, cpu_id, 1);
			break;
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0)) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		ACQUIRE(lkp, error, extflags, 0, lkp->lk_sharecount != 0 ||
		    (lkp->lk_flags & LK_WANT_UPGRADE));
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, cpu_id);
#if defined(LOCKDEBUG)
		lkp->lk_lock_file = file;
		lkp->lk_lock_line = line;
#endif
		HAVEIT(lkp);
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		if (extflags & LK_SETRECURSE)
			lkp->lk_recurselevel = 1;
		COUNT(lkp, p, cpu_id, 1);
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (WEHOLDIT(lkp, pid, cpu_id) == 0) {
				if (lkp->lk_flags & LK_SPIN) {
					panic("lockmgr: processor %lu, not "
					    "exclusive lock holder %lu "
					    "unlocking", cpu_id, lkp->lk_cpu);
				} else {
					panic("lockmgr: pid %d, not "
					    "exclusive lock holder %d "
					    "unlocking", pid,
					    lkp->lk_lockholder);
				}
			}
			if (lkp->lk_exclusivecount == lkp->lk_recurselevel)
				lkp->lk_recurselevel = 0;
			lkp->lk_exclusivecount--;
			COUNT(lkp, p, cpu_id, -1);
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
#if defined(LOCKDEBUG)
				lkp->lk_unlock_file = file;
				lkp->lk_unlock_line = line;
#endif
				DONTHAVEIT(lkp);
			}
		} else if (lkp->lk_sharecount != 0) {
			lkp->lk_sharecount--;
			COUNT(lkp, p, cpu_id, -1);
		}
#ifdef DIAGNOSTIC
		else
			panic("lockmgr: release of unlocked lock!");
#endif
		WAKEUP_WAITER(lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do. Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (WEHOLDIT(lkp, pid, cpu_id))
			panic("lockmgr: draining against myself");
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
			error = EBUSY;
			break;
		}
		ACQUIRE(lkp, error, extflags, 1,
		    ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0 ||
		     lkp->lk_waitcount != 0));
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, cpu_id);
#if defined(LOCKDEBUG)
		lkp->lk_lock_file = file;
		lkp->lk_lock_line = line;
#endif
		HAVEIT(lkp);
		lkp->lk_exclusivecount = 1;
		/* XXX unlikely that we'd want this */
		if (extflags & LK_SETRECURSE)
			lkp->lk_recurselevel = 1;
		COUNT(lkp, p, cpu_id, 1);
		break;

	default:
		INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & (LK_WAITDRAIN|LK_SPIN)) == LK_WAITDRAIN &&
	    ((lkp->lk_flags &
	     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
	    lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup_one((void *)&lkp->lk_flags);
	}
	/*
	 * Note that this panic will be a recursive panic, since
	 * we only set lock_shutdown_noblock above if panicstr != NULL.
	 */
	if (error && lock_shutdown_noblock)
		panic("lockmgr: deadlock (see previous panic)");

	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
	return (error);
}
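
/*
 * Illustrative sketch (not compiled): the usual LK_DRAIN pattern.  Before
 * freeing a structure containing a lock, drain it so no other thread is
 * holding or waiting on it; the final LK_RELEASE then decommissions the
 * lock (LK_DRAINED) unless LK_REENABLE is given.  "struct foo", "f_lock",
 * and the M_TEMP malloc type are hypothetical here.
 */
#if 0
void
foo_destroy(struct foo *f)
{

	(void) lockmgr(&f->f_lock, LK_DRAIN, NULL);
	(void) lockmgr(&f->f_lock, LK_RELEASE, NULL);
	free(f, M_TEMP);
}
#endif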

/*
 * For a recursive spinlock held one or more times by the current CPU,
 * release all N locks, and return N.
 * Intended for use in mi_switch() shortly before context switching.
 */

int
#if defined(LOCKDEBUG)
_spinlock_release_all(__volatile struct lock *lkp, const char *file, int line)
#else
spinlock_release_all(__volatile struct lock *lkp)
#endif
{
	int s, count;
	cpuid_t cpu_id;

	KASSERT(lkp->lk_flags & LK_SPIN);

	INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);

	cpu_id = cpu_number();
	count = lkp->lk_exclusivecount;

	if (count != 0) {
#ifdef DIAGNOSTIC
		if (WEHOLDIT(lkp, 0, cpu_id) == 0) {
			panic("spinlock_release_all: processor %lu, not "
			    "exclusive lock holder %lu "
			    "unlocking", (long)cpu_id, lkp->lk_cpu);
		}
#endif
		lkp->lk_recurselevel = 0;
		lkp->lk_exclusivecount = 0;
		COUNT_CPU(cpu_id, -count);
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
#if defined(LOCKDEBUG)
		lkp->lk_unlock_file = file;
		lkp->lk_unlock_line = line;
#endif
		DONTHAVEIT(lkp);
	}
#ifdef DIAGNOSTIC
	else if (lkp->lk_sharecount != 0)
		panic("spinlock_release_all: release of shared lock!");
	else
		panic("spinlock_release_all: release of unlocked lock!");
#endif
	INTERLOCK_RELEASE(lkp, LK_SPIN, s);

	return (count);
}

/*
 * For a spinlock previously shed with spinlock_release_all(), reacquire
 * it and restore N recursive holds on behalf of the current CPU.
 * Intended for use in mi_switch() right after resuming execution.
 */

void
#if defined(LOCKDEBUG)
_spinlock_acquire_count(__volatile struct lock *lkp, int count,
    const char *file, int line)
#else
spinlock_acquire_count(__volatile struct lock *lkp, int count)
#endif
{
	int s, error;
	cpuid_t cpu_id;

	KASSERT(lkp->lk_flags & LK_SPIN);

	INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);

	cpu_id = cpu_number();

#ifdef DIAGNOSTIC
	if (WEHOLDIT(lkp, LK_NOPROC, cpu_id))
		panic("spinlock_acquire_count: processor %lu already holds "
		    "lock\n", (long)cpu_id);
#endif
	/*
	 * Try to acquire the want_exclusive flag.
	 */
	ACQUIRE(lkp, error, LK_SPIN, 0, lkp->lk_flags &
	    (LK_HAVE_EXCL | LK_WANT_EXCL));
	lkp->lk_flags |= LK_WANT_EXCL;
	/*
	 * Wait for shared locks and upgrades to finish.
	 */
	ACQUIRE(lkp, error, LK_SPIN, 0, lkp->lk_sharecount != 0 ||
	    (lkp->lk_flags & LK_WANT_UPGRADE));
	lkp->lk_flags &= ~LK_WANT_EXCL;
	lkp->lk_flags |= LK_HAVE_EXCL;
	SETHOLDER(lkp, LK_NOPROC, cpu_id);
#if defined(LOCKDEBUG)
	lkp->lk_lock_file = file;
	lkp->lk_lock_line = line;
#endif
	HAVEIT(lkp);
	if (lkp->lk_exclusivecount != 0)
		panic("spinlock_acquire_count: non-zero exclusive count");
	lkp->lk_exclusivecount = count;
	lkp->lk_recurselevel = 1;
	COUNT_CPU(cpu_id, count);

	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
}
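
/*
 * Illustrative sketch (not compiled): how the two routines above are
 * meant to pair around a context switch.  A hypothetical LK_SPIN lock
 * "big_lock" is shed completely before switching away, and the same
 * number of recursive holds is reacquired on resumption.
 */
#if 0
	extern struct lock big_lock;	/* hypothetical spin lock */
	int count;

	count = spinlock_release_all(&big_lock);
	/* ... switch to some other process; eventually we resume ... */
	spinlock_acquire_count(&big_lock, count);
#endif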

/*
 * Print out information about state of a lock. Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(__volatile struct lock *lkp)
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL) {
		printf(" lock type %s: EXCL (count %d) by ",
		    lkp->lk_wmesg, lkp->lk_exclusivecount);
		if (lkp->lk_flags & LK_SPIN)
			printf("processor %lu", lkp->lk_cpu);
		else
			printf("pid %d", lkp->lk_lockholder);
	} else
		printf(" not locked");
	if ((lkp->lk_flags & LK_SPIN) == 0 && lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}

#if defined(LOCKDEBUG) /* { */
TAILQ_HEAD(, simplelock) simplelock_list =
    TAILQ_HEAD_INITIALIZER(simplelock_list);

#if defined(MULTIPROCESSOR) /* { */
struct simplelock simplelock_list_slock = SIMPLELOCK_INITIALIZER;

#define	SLOCK_LIST_LOCK()						\
	__cpu_simple_lock(&simplelock_list_slock.lock_data)

#define	SLOCK_LIST_UNLOCK()						\
	__cpu_simple_unlock(&simplelock_list_slock.lock_data)

#define	SLOCK_COUNT(x)							\
	curcpu()->ci_simple_locks += (x)
#else
u_long simple_locks;

#define	SLOCK_LIST_LOCK()	/* nothing */

#define	SLOCK_LIST_UNLOCK()	/* nothing */

#define	SLOCK_COUNT(x)		simple_locks += (x)
#endif /* MULTIPROCESSOR */ /* } */

#ifdef DDB /* { */
#ifdef MULTIPROCESSOR
int simple_lock_debugger = 1;	/* more serious on MP */
#else
int simple_lock_debugger = 0;
#endif
#define	SLOCK_DEBUGGER()	if (simple_lock_debugger) Debugger()
#else
#define	SLOCK_DEBUGGER()	/* nothing */
#endif /* } */

#ifdef MULTIPROCESSOR
#define	SLOCK_MP()		lock_printf("on cpu %ld\n",		\
				    (u_long) cpu_number())
#else
#define	SLOCK_MP()		/* nothing */
#endif

#define	SLOCK_WHERE(str, alp, id, l)					\
do {									\
	lock_printf(str);						\
	lock_printf("lock: %p, currently at: %s:%d\n", (alp), (id), (l)); \
	SLOCK_MP();							\
	if ((alp)->lock_file != NULL)					\
		lock_printf("last locked: %s:%d\n", (alp)->lock_file,	\
		    (alp)->lock_line);					\
	if ((alp)->unlock_file != NULL)					\
		lock_printf("last unlocked: %s:%d\n", (alp)->unlock_file, \
		    (alp)->unlock_line);				\
	SLOCK_DEBUGGER();						\
} while (/*CONSTCOND*/0)

/*
 * Simple lock functions so that the debugger can see from whence
 * they are being called.
 */
void
simple_lock_init(struct simplelock *alp)
{

#if defined(MULTIPROCESSOR) /* { */
	__cpu_simple_lock_init(&alp->lock_data);
#else
	alp->lock_data = __SIMPLELOCK_UNLOCKED;
#endif /* } */
	alp->lock_file = NULL;
	alp->lock_line = 0;
	alp->unlock_file = NULL;
	alp->unlock_line = 0;
	alp->lock_holder = LK_NOCPU;
}
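
/*
 * Illustrative sketch (not compiled): under LOCKDEBUG, <sys/lock.h> maps
 * the simple_lock() family onto the _simple_lock() functions below,
 * passing the call site so SLOCK_WHERE() can report it.  Roughly:
 */
#if 0
#define	simple_lock(alp)	_simple_lock((alp), __FILE__, __LINE__)
#define	simple_lock_try(alp)	_simple_lock_try((alp), __FILE__, __LINE__)
#define	simple_unlock(alp)	_simple_unlock((alp), __FILE__, __LINE__)
#endif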

void
_simple_lock(__volatile struct simplelock *alp, const char *id, int l)
{
	cpuid_t cpu_id = cpu_number();
	int s;

	s = spllock();

	/*
	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
	 * don't take any action, and just fall into the normal spin case.
	 */
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
#if defined(MULTIPROCESSOR) /* { */
		if (alp->lock_holder == cpu_id) {
			SLOCK_WHERE("simple_lock: locking against myself\n",
			    alp, id, l);
			goto out;
		}
#else
		SLOCK_WHERE("simple_lock: lock held\n", alp, id, l);
		goto out;
#endif /* MULTIPROCESSOR */ /* } */
	}

#if defined(MULTIPROCESSOR) /* { */
	/* Acquire the lock before modifying any fields. */
	__cpu_simple_lock(&alp->lock_data);
#else
	alp->lock_data = __SIMPLELOCK_LOCKED;
#endif /* } */

	if (alp->lock_holder != LK_NOCPU) {
		SLOCK_WHERE("simple_lock: uninitialized lock\n",
		    alp, id, l);
	}
	alp->lock_file = id;
	alp->lock_line = l;
	alp->lock_holder = cpu_id;

	SLOCK_LIST_LOCK();
	/* XXX Cast away volatile */
	TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(1);

 out:
	splx(s);
}

int
_simple_lock_held(__volatile struct simplelock *alp)
{
#if defined(MULTIPROCESSOR) || defined(DIAGNOSTIC)
	cpuid_t cpu_id = cpu_number();
#endif
	int s, locked = 0;

	s = spllock();

#if defined(MULTIPROCESSOR)
	if (__cpu_simple_lock_try(&alp->lock_data) == 0)
		locked = (alp->lock_holder == cpu_id);
	else
		__cpu_simple_unlock(&alp->lock_data);
#else
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
		locked = 1;
		KASSERT(alp->lock_holder == cpu_id);
	}
#endif

	splx(s);

	return (locked);
}

int
_simple_lock_try(__volatile struct simplelock *alp, const char *id, int l)
{
	cpuid_t cpu_id = cpu_number();
	int s, rv = 0;

	s = spllock();

	/*
	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
	 * don't take any action.
	 */
#if defined(MULTIPROCESSOR) /* { */
	if ((rv = __cpu_simple_lock_try(&alp->lock_data)) == 0) {
		if (alp->lock_holder == cpu_id)
			SLOCK_WHERE("simple_lock_try: locking against myself\n",
			    alp, id, l);
		goto out;
	}
#else
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
		SLOCK_WHERE("simple_lock_try: lock held\n", alp, id, l);
		goto out;
	}
	alp->lock_data = __SIMPLELOCK_LOCKED;
#endif /* MULTIPROCESSOR */ /* } */

	/*
	 * At this point, we have acquired the lock.
	 */

	rv = 1;

	alp->lock_file = id;
	alp->lock_line = l;
	alp->lock_holder = cpu_id;

	SLOCK_LIST_LOCK();
	/* XXX Cast away volatile. */
	TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(1);

 out:
	splx(s);
	return (rv);
}

void
_simple_unlock(__volatile struct simplelock *alp, const char *id, int l)
{
	int s;

	s = spllock();

	/*
	 * MULTIPROCESSOR case: This is `safe' because we think we hold
	 * the lock, and if we don't, we don't take any action.
	 */
	if (alp->lock_data == __SIMPLELOCK_UNLOCKED) {
		SLOCK_WHERE("simple_unlock: lock not held\n",
		    alp, id, l);
		goto out;
	}

	SLOCK_LIST_LOCK();
	TAILQ_REMOVE(&simplelock_list, alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(-1);

	alp->list.tqe_next = NULL;	/* sanity */
	alp->list.tqe_prev = NULL;	/* sanity */

	alp->unlock_file = id;
	alp->unlock_line = l;

#if defined(MULTIPROCESSOR) /* { */
	alp->lock_holder = LK_NOCPU;
	/* Now that we've modified all fields, release the lock. */
	__cpu_simple_unlock(&alp->lock_data);
#else
	alp->lock_data = __SIMPLELOCK_UNLOCKED;
	KASSERT(alp->lock_holder == cpu_number());
	alp->lock_holder = LK_NOCPU;
#endif /* } */

 out:
	splx(s);
}

void
simple_lock_dump(void)
{
	struct simplelock *alp;
	int s;

	s = spllock();
	SLOCK_LIST_LOCK();
	lock_printf("all simple locks:\n");
	for (alp = TAILQ_FIRST(&simplelock_list); alp != NULL;
	    alp = TAILQ_NEXT(alp, list)) {
		lock_printf("%p CPU %lu %s:%d\n", alp, alp->lock_holder,
		    alp->lock_file, alp->lock_line);
	}
	SLOCK_LIST_UNLOCK();
	splx(s);
}

void
simple_lock_freecheck(void *start, void *end)
{
	struct simplelock *alp;
	int s;

	s = spllock();
	SLOCK_LIST_LOCK();
	for (alp = TAILQ_FIRST(&simplelock_list); alp != NULL;
	    alp = TAILQ_NEXT(alp, list)) {
		if ((void *)alp >= start && (void *)alp < end) {
			lock_printf("freeing simple_lock %p CPU %lu %s:%d\n",
			    alp, alp->lock_holder, alp->lock_file,
			    alp->lock_line);
			SLOCK_DEBUGGER();
		}
	}
	SLOCK_LIST_UNLOCK();
	splx(s);
}

/*
 * We must be holding exactly one lock: the sched_lock.
 */

void
simple_lock_switchcheck(void)
{

	simple_lock_only_held(&sched_lock, "switching");
}

void
simple_lock_only_held(volatile struct simplelock *lp, const char *where)
{
	struct simplelock *alp;
	cpuid_t cpu_id = cpu_number();
	int s;

	if (lp) {
		LOCK_ASSERT(simple_lock_held(lp));
	}
	s = spllock();
	SLOCK_LIST_LOCK();
	for (alp = TAILQ_FIRST(&simplelock_list); alp != NULL;
	    alp = TAILQ_NEXT(alp, list)) {
		if (alp == lp)
			continue;
		if (alp->lock_holder == cpu_id)
			break;
	}
	SLOCK_LIST_UNLOCK();
	splx(s);

	if (alp != NULL) {
		lock_printf("%s with held simple_lock %p "
		    "CPU %lu %s:%d\n",
		    where, alp, alp->lock_holder, alp->lock_file,
		    alp->lock_line);
#ifdef DDB
		db_stack_trace_print((db_expr_t)__builtin_frame_address(0),
		    TRUE, 65535, "", printf);
#endif
		SLOCK_DEBUGGER();
	}
}
#endif /* LOCKDEBUG */ /* } */
1275