/*	$NetBSD: kern_lock.c,v 1.43 2000/08/22 17:31:32 thorpej Exp $	*/

/*-
 * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Ross Harvey.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/systm.h>
#include <machine/cpu.h>

#if defined(__HAVE_ATOMIC_OPERATIONS)
#include <machine/atomic.h>
#endif

#if defined(LOCKDEBUG)
#include <sys/syslog.h>
/*
 * note that stdarg.h and the ansi style va_start macro are used for both
 * ansi and traditional c compiles.
 * XXX: this requires that stdarg.h define: va_alist and va_dcl
 */
#include <machine/stdarg.h>

void	lock_printf(const char *fmt, ...)
    __attribute__((__format__(__printf__,1,2)));

int	lock_debug_syslog = 0;	/* defaults to printf, but can be patched */
#endif

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#if defined(LOCKDEBUG) || defined(DIAGNOSTIC) /* { */
#if defined(MULTIPROCESSOR) /* { */
#if defined(__HAVE_ATOMIC_OPERATIONS) /* { */
#define	COUNT_CPU(cpu_id, x)						\
	atomic_add_ulong(&curcpu()->ci_spin_locks, (x))
#else
#define	COUNT_CPU(cpu_id, x)	/* not safe */
#endif /* __HAVE_ATOMIC_OPERATIONS */ /* } */
#else
u_long	spin_locks;
#define	COUNT_CPU(cpu_id, x)	spin_locks += (x)
#endif /* MULTIPROCESSOR */ /* } */

#define	COUNT(lkp, p, cpu_id, x)					\
do {									\
	if ((lkp)->lk_flags & LK_SPIN)					\
		COUNT_CPU((cpu_id), (x));				\
	else								\
		(p)->p_locks += (x);					\
} while (/*CONSTCOND*/0)
#else
#define COUNT(lkp, p, cpu_id, x)
#endif /* LOCKDEBUG || DIAGNOSTIC */ /* } */

#define	INTERLOCK_ACQUIRE(lkp, flags, s)				\
do {									\
	if ((flags) & LK_SPIN)						\
		s = splsched();						\
	simple_lock(&(lkp)->lk_interlock);				\
} while (0)

#define	INTERLOCK_RELEASE(lkp, flags, s)				\
do {									\
	simple_unlock(&(lkp)->lk_interlock);				\
	if ((flags) & LK_SPIN)						\
		splx(s);						\
} while (0)

/*
 * Acquire a resource.
 */
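/*
 * ACQUIRE() comes in two variants, selected by LK_SPIN in "extflags".
 * The spin variant busy-waits until the "wanted" condition clears,
 * dropping the interlock while it spins and re-taking it before each
 * re-check so that other CPUs can make progress.  The sleep variant
 * ltsleep()s on the lock (the interlock is released across the sleep),
 * re-evaluates "wanted" on wakeup, and honours LK_SLEEPFAIL by
 * returning ENOLCK after any successful sleep.  "drain" selects the
 * LK_DRAIN bookkeeping (LK_WAITDRAIN instead of the wait count);
 * "error" receives the result.
 */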
#define	ACQUIRE(lkp, error, extflags, drain, wanted)			\
	if ((extflags) & LK_SPIN) {					\
		int interlocked;					\
									\
		if ((drain) == 0)					\
			(lkp)->lk_waitcount++;				\
		for (interlocked = 1;;) {				\
			if (wanted) {					\
				if (interlocked) {			\
					INTERLOCK_RELEASE((lkp),	\
					    LK_SPIN, s);		\
					interlocked = 0;		\
				}					\
			} else if (interlocked) {			\
				break;					\
			} else {					\
				INTERLOCK_ACQUIRE((lkp), LK_SPIN, s);	\
				interlocked = 1;			\
			}						\
		}							\
		if ((drain) == 0)					\
			(lkp)->lk_waitcount--;				\
		KASSERT((wanted) == 0);					\
		error = 0;	/* sanity */				\
	} else {							\
		for (error = 0; wanted; ) {				\
			if ((drain))					\
				(lkp)->lk_flags |= LK_WAITDRAIN;	\
			else						\
				(lkp)->lk_waitcount++;			\
			/* XXX Cast away volatile. */			\
			error = ltsleep((drain) ? &(lkp)->lk_flags :	\
			    (void *)(lkp), (lkp)->lk_prio,		\
			    (lkp)->lk_wmesg, (lkp)->lk_timo,		\
			    &(lkp)->lk_interlock);			\
			if ((drain) == 0)				\
				(lkp)->lk_waitcount--;			\
			if (error)					\
				break;					\
			if ((extflags) & LK_SLEEPFAIL) {		\
				error = ENOLCK;				\
				break;					\
			}						\
		}							\
	}

#define	SETHOLDER(lkp, pid, cpu_id)					\
do {									\
	if ((lkp)->lk_flags & LK_SPIN)					\
		(lkp)->lk_cpu = cpu_id;					\
	else								\
		(lkp)->lk_lockholder = pid;				\
} while (/*CONSTCOND*/0)

#define	WEHOLDIT(lkp, pid, cpu_id)					\
	(((lkp)->lk_flags & LK_SPIN) != 0 ?				\
	    ((lkp)->lk_cpu == (cpu_id)) : ((lkp)->lk_lockholder == (pid)))

#define	WAKEUP_WAITER(lkp)						\
do {									\
	if (((lkp)->lk_flags & LK_SPIN) == 0 && (lkp)->lk_waitcount) {	\
		/* XXX Cast away volatile. */				\
		wakeup_one((void *)(lkp));				\
	}								\
} while (/*CONSTCOND*/0)

#if defined(LOCKDEBUG) /* { */
#if defined(MULTIPROCESSOR) /* { */
struct simplelock spinlock_list_slock = SIMPLELOCK_INITIALIZER;

#define	SPINLOCK_LIST_LOCK()						\
	__cpu_simple_lock(&spinlock_list_slock.lock_data)

#define	SPINLOCK_LIST_UNLOCK()						\
	__cpu_simple_unlock(&spinlock_list_slock.lock_data)
#else
#define	SPINLOCK_LIST_LOCK()	/* nothing */

#define	SPINLOCK_LIST_UNLOCK()	/* nothing */
#endif /* MULTIPROCESSOR */ /* } */

TAILQ_HEAD(, lock) spinlock_list =
    TAILQ_HEAD_INITIALIZER(spinlock_list);

#define	HAVEIT(lkp)							\
do {									\
	if ((lkp)->lk_flags & LK_SPIN) {				\
		int s = splhigh();					\
		SPINLOCK_LIST_LOCK();					\
		/* XXX Cast away volatile. */				\
		TAILQ_INSERT_TAIL(&spinlock_list, (struct lock *)(lkp),	\
		    lk_list);						\
		SPINLOCK_LIST_UNLOCK();					\
		splx(s);						\
	}								\
} while (/*CONSTCOND*/0)

#define	DONTHAVEIT(lkp)							\
do {									\
	if ((lkp)->lk_flags & LK_SPIN) {				\
		int s = splhigh();					\
		SPINLOCK_LIST_LOCK();					\
		/* XXX Cast away volatile. */				\
		TAILQ_REMOVE(&spinlock_list, (struct lock *)(lkp),	\
		    lk_list);						\
		SPINLOCK_LIST_UNLOCK();					\
		splx(s);						\
	}								\
} while (/*CONSTCOND*/0)
#else
#define	HAVEIT(lkp)		/* nothing */

#define	DONTHAVEIT(lkp)		/* nothing */
#endif /* LOCKDEBUG */ /* } */

#if defined(LOCKDEBUG)
/*
 * Lock debug printing routine; can be configured to print to console
 * or log to syslog.
 */
void
lock_printf(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	if (lock_debug_syslog)
		vlog(LOG_DEBUG, fmt, ap);
	else
		vprintf(fmt, ap);
	va_end(ap);
}
#endif /* LOCKDEBUG */

/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, int prio, const char *wmesg, int timo, int flags)
{

	memset(lkp, 0, sizeof(struct lock));
	simple_lock_init(&lkp->lk_interlock);
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	if (flags & LK_SPIN)
		lkp->lk_cpu = LK_NOCPU;
	else {
		lkp->lk_lockholder = LK_NOPROC;
		lkp->lk_prio = prio;
		lkp->lk_timo = timo;
	}
	lkp->lk_wmesg = wmesg;	/* just a name for spin locks */
}
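
/*
 * Illustrative sketch: a typical sleep lock is set up once with
 * lockinit() and then manipulated through lockmgr().  The lock name
 * "example_lock", the "exlock" wmesg and the PLOCK priority are
 * hypothetical choices, not anything this file mandates.
 *
 *	struct lock example_lock;
 *
 *	lockinit(&example_lock, PLOCK, "exlock", 0, 0);
 *
 *	(void) lockmgr(&example_lock, LK_EXCLUSIVE, NULL);
 *	... modify the data the lock protects ...
 *	(void) lockmgr(&example_lock, LK_RELEASE, NULL);
 *
 * A spin lock is initialized the same way with LK_SPIN in "flags";
 * as the code above shows, prio and timo are then ignored and wmesg
 * is kept only as a name.
 */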

/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp)
{
	int s, lock_type = 0;

	INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
	if (lkp->lk_exclusivecount != 0)
		lock_type = LK_EXCLUSIVE;
	else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
	return (lock_type);
}
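
/*
 * Illustrative sketch: lockstatus() only reports whether the lock is
 * currently held shared or exclusive, not whether the caller is the
 * holder, so it is mostly useful for assertions such as
 *
 *	KASSERT(lockstatus(&sc->sc_lock) == LK_EXCLUSIVE);
 *
 * where "sc->sc_lock" stands for some hypothetical lock the caller
 * believes it holds.
 */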

#if defined(LOCKDEBUG) || defined(DIAGNOSTIC)
/*
 * Make sure no spin locks are held by a CPU that is about
 * to context switch.
 */
void
spinlock_switchcheck(void)
{
	u_long cnt;
	int s;

	s = splhigh();
#if defined(MULTIPROCESSOR)
	cnt = curcpu()->ci_spin_locks;
#else
	cnt = spin_locks;
#endif
	splx(s);

	if (cnt != 0)
		panic("spinlock_switchcheck: CPU %lu has %lu spin locks",
		    (u_long) cpu_number(), cnt);
}
#endif /* LOCKDEBUG || DIAGNOSTIC */

/*
 * XXX XXX kludge around another kludge..
 *
 * vfs_shutdown() may be called from interrupt context, either as a result
 * of a panic, or from the debugger.  It proceeds to call
 * sys_sync(&proc0, ...), pretending it's running on behalf of proc0.
 *
 * We would like to make an attempt to sync the filesystems in this case, so
 * if this happens, we treat attempts to acquire locks specially.
 * All locks are acquired on behalf of proc0.
 *
 * If we've already panicked, we don't block waiting for locks, but
 * just barge right ahead since we're already going down in flames.
 */

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
lockmgr(__volatile struct lock *lkp, u_int flags,
    struct simplelock *interlkp)
{
	int error;
	pid_t pid;
	int extflags;
	cpuid_t cpu_id;
	struct proc *p = curproc;
	int lock_shutdown_noblock = 0;
	int s;

	error = 0;

	INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
	if (flags & LK_INTERLOCK)
		simple_unlock(interlkp);
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

#ifdef DIAGNOSTIC /* { */
	/*
	 * Don't allow spins on sleep locks and don't allow sleeps
	 * on spin locks.
	 */
	if ((flags ^ lkp->lk_flags) & LK_SPIN)
		panic("lockmgr: sleep/spin mismatch\n");
#endif /* } */

	if (extflags & LK_SPIN)
		pid = LK_KERNPROC;
	else {
		if (p == NULL) {
			if (!doing_shutdown) {
#ifdef DIAGNOSTIC
				panic("lockmgr: no context");
#endif
			} else {
				p = &proc0;
				if (panicstr && (!(flags & LK_NOWAIT))) {
					flags |= LK_NOWAIT;
					lock_shutdown_noblock = 1;
				}
			}
		}
		pid = p->p_pid;
	}
	cpu_id = cpu_number();

	/*
	 * Once a lock has drained, the LK_DRAINING flag is set and an
	 * exclusive lock is returned. The only valid operation thereafter
	 * is a single release of that exclusive lock. This final release
	 * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
	 * further requests of any sort will result in a panic. The bits
	 * selected for these two flags are chosen so that they will be set
	 * in memory that is freed (freed memory is filled with 0xdeadbeef).
	 * The final release is permitted to give a new lease on life to
	 * the lock by specifying LK_REENABLE.
	 */
	if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
#ifdef DIAGNOSTIC /* { */
		if (lkp->lk_flags & LK_DRAINED)
			panic("lockmgr: using decommissioned lock");
		if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
		    WEHOLDIT(lkp, pid, cpu_id) == 0)
			panic("lockmgr: non-release on draining lock: %d\n",
			    flags & LK_TYPE_MASK);
#endif /* DIAGNOSTIC */ /* } */
		lkp->lk_flags &= ~LK_DRAINING;
		if ((flags & LK_REENABLE) == 0)
			lkp->lk_flags |= LK_DRAINED;
	}

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (WEHOLDIT(lkp, pid, cpu_id) == 0) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
				error = EBUSY;
				break;
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
			if (error)
				break;
			lkp->lk_sharecount++;
			COUNT(lkp, p, cpu_id, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;
		COUNT(lkp, p, cpu_id, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (WEHOLDIT(lkp, pid, cpu_id) == 0 ||
		    lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_exclusivecount = 0;
		lkp->lk_recurselevel = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
		DONTHAVEIT(lkp);
		WAKEUP_WAITER(lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			lkp->lk_sharecount--;
			COUNT(lkp, p, cpu_id, -1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the file
		 * will always be unlocked.
		 */
		if (WEHOLDIT(lkp, pid, cpu_id) || lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade exclusive lock");
		lkp->lk_sharecount--;
		COUNT(lkp, p, cpu_id, -1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		    lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are first shared lock to request an upgrade, so
			 * request upgrade and wait for the shared count to
			 * drop to zero, then take exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			ACQUIRE(lkp, error, extflags, 0, lkp->lk_sharecount);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;
			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			SETHOLDER(lkp, pid, cpu_id);
			HAVEIT(lkp);
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
			if (extflags & LK_SETRECURSE)
				lkp->lk_recurselevel = 1;
			COUNT(lkp, p, cpu_id, 1);
			break;
		}
		/*
		 * Someone else has requested upgrade. Release our shared
		 * lock, awaken upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if (lkp->lk_sharecount == 0)
			WAKEUP_WAITER(lkp);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (WEHOLDIT(lkp, pid, cpu_id)) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0 &&
			    lkp->lk_recurselevel == 0) {
				if (extflags & LK_RECURSEFAIL) {
					error = EDEADLK;
					break;
				} else
					panic("lockmgr: locking against myself");
			}
			lkp->lk_exclusivecount++;
			if (extflags & LK_SETRECURSE &&
			    lkp->lk_recurselevel == 0)
				lkp->lk_recurselevel = lkp->lk_exclusivecount;
			COUNT(lkp, p, cpu_id, 1);
			break;
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0)) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		ACQUIRE(lkp, error, extflags, 0, lkp->lk_sharecount != 0 ||
		    (lkp->lk_flags & LK_WANT_UPGRADE));
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, cpu_id);
		HAVEIT(lkp);
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		if (extflags & LK_SETRECURSE)
			lkp->lk_recurselevel = 1;
		COUNT(lkp, p, cpu_id, 1);
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (WEHOLDIT(lkp, pid, cpu_id) == 0) {
				if (lkp->lk_flags & LK_SPIN) {
					panic("lockmgr: processor %lu, not "
					    "exclusive lock holder %lu "
					    "unlocking", cpu_id, lkp->lk_cpu);
				} else {
					panic("lockmgr: pid %d, not "
					    "exclusive lock holder %d "
					    "unlocking", pid,
					    lkp->lk_lockholder);
				}
			}
			if (lkp->lk_exclusivecount == lkp->lk_recurselevel)
				lkp->lk_recurselevel = 0;
			lkp->lk_exclusivecount--;
			COUNT(lkp, p, cpu_id, -1);
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
				DONTHAVEIT(lkp);
			}
		} else if (lkp->lk_sharecount != 0) {
			lkp->lk_sharecount--;
			COUNT(lkp, p, cpu_id, -1);
		}
#ifdef DIAGNOSTIC
		else
			panic("lockmgr: release of unlocked lock!");
#endif
		WAKEUP_WAITER(lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do. Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (WEHOLDIT(lkp, pid, cpu_id))
			panic("lockmgr: draining against myself");
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		    lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
			error = EBUSY;
			break;
		}
		ACQUIRE(lkp, error, extflags, 1,
		    ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0 ||
		     lkp->lk_waitcount != 0));
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, cpu_id);
		HAVEIT(lkp);
		lkp->lk_exclusivecount = 1;
		/* XXX unlikely that we'd want this */
		if (extflags & LK_SETRECURSE)
			lkp->lk_recurselevel = 1;
		COUNT(lkp, p, cpu_id, 1);
		break;

	default:
		INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & (LK_WAITDRAIN|LK_SPIN)) == LK_WAITDRAIN &&
	    ((lkp->lk_flags &
	    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
	    lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup_one((void *)&lkp->lk_flags);
	}
	/*
	 * Note that this panic will be a recursive panic, since
	 * we only set lock_shutdown_noblock above if panicstr != NULL.
	 */
	if (error && lock_shutdown_noblock)
		panic("lockmgr: deadlock (see previous panic)");

	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
	return (error);
}
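
/*
 * Illustrative sketch of the shared/upgrade/drain protocol described
 * above, using the hypothetical "example_lock" from the lockinit()
 * sketch; error handling is abbreviated.
 *
 *	(void) lockmgr(&example_lock, LK_SHARED, NULL);
 *	... read-only access ...
 *	if (lockmgr(&example_lock, LK_UPGRADE, NULL) == 0) {
 *		... exclusive access ...
 *		(void) lockmgr(&example_lock, LK_RELEASE, NULL);
 *	} else {
 *		... the failed upgrade already dropped our shared
 *		    hold, so there is nothing left to release ...
 *	}
 *
 * Before freeing the memory containing a lock, drain out all other
 * users; the drain leaves the caller holding the lock exclusively,
 * and the final release marks it LK_DRAINED (unless LK_REENABLE is
 * given):
 *
 *	(void) lockmgr(&example_lock, LK_DRAIN, NULL);
 *	(void) lockmgr(&example_lock, LK_RELEASE, NULL);
 */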

/*
 * Print out information about state of a lock. Used by VOP_PRINT
 * routines to display the status of contained locks.
 */
void
lockmgr_printinfo(__volatile struct lock *lkp)
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL) {
		printf(" lock type %s: EXCL (count %d) by ",
		    lkp->lk_wmesg, lkp->lk_exclusivecount);
		if (lkp->lk_flags & LK_SPIN)
			printf("processor %lu", lkp->lk_cpu);
		else
			printf("pid %d", lkp->lk_lockholder);
	} else
		printf(" not locked");
	if ((lkp->lk_flags & LK_SPIN) == 0 && lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}

#if defined(LOCKDEBUG) /* { */
TAILQ_HEAD(, simplelock) simplelock_list =
    TAILQ_HEAD_INITIALIZER(simplelock_list);

#if defined(MULTIPROCESSOR) /* { */
struct simplelock simplelock_list_slock = SIMPLELOCK_INITIALIZER;

#define	SLOCK_LIST_LOCK()						\
	__cpu_simple_lock(&simplelock_list_slock.lock_data)

#define	SLOCK_LIST_UNLOCK()						\
	__cpu_simple_unlock(&simplelock_list_slock.lock_data)

#if defined(__HAVE_ATOMIC_OPERATIONS) /* { */
#define	SLOCK_COUNT(x)							\
	atomic_add_ulong(&curcpu()->ci_simple_locks, (x))
#else
#define	SLOCK_COUNT(x)		/* not safe */
#endif /* __HAVE_ATOMIC_OPERATIONS */ /* } */
#else
u_long simple_locks;

#define	SLOCK_LIST_LOCK()	/* nothing */

#define	SLOCK_LIST_UNLOCK()	/* nothing */

#define	SLOCK_COUNT(x)		simple_locks += (x)
#endif /* MULTIPROCESSOR */ /* } */

#ifdef DDB /* { */
int	simple_lock_debugger = 0;
#define	SLOCK_DEBUGGER()	if (simple_lock_debugger) Debugger()
#else
#define	SLOCK_DEBUGGER()	/* nothing */
#endif /* } */

#ifdef MULTIPROCESSOR
#define SLOCK_MP()		lock_printf("on cpu %d\n", cpu_number())
#else
#define SLOCK_MP()		/* nothing */
#endif

#define	SLOCK_WHERE(str, alp, id, l)					\
do {									\
	lock_printf(str);						\
	lock_printf("lock: %p, currently at: %s:%d\n", (alp), (id), (l)); \
	SLOCK_MP();							\
	if ((alp)->lock_file != NULL)					\
		lock_printf("last locked: %s:%d\n", (alp)->lock_file,	\
		    (alp)->lock_line);					\
	if ((alp)->unlock_file != NULL)					\
		lock_printf("last unlocked: %s:%d\n", (alp)->unlock_file, \
		    (alp)->unlock_line);				\
	SLOCK_DEBUGGER();						\
} while (/*CONSTCOND*/0)

/*
 * Simple lock functions so that the debugger can see from whence
 * they are being called.
 */
void
simple_lock_init(struct simplelock *alp)
{

#if defined(MULTIPROCESSOR) /* { */
	__cpu_simple_lock_init(&alp->lock_data);
#else
	alp->lock_data = __SIMPLELOCK_UNLOCKED;
#endif /* } */
	alp->lock_file = NULL;
	alp->lock_line = 0;
	alp->unlock_file = NULL;
	alp->unlock_line = 0;
	alp->lock_holder = LK_NOCPU;
}
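
/*
 * Illustrative sketch: callers normally go through the simple_lock()
 * and simple_unlock() macros in <sys/lock.h>, which in LOCKDEBUG
 * kernels are expected to expand to the _simple_lock()/_simple_unlock()
 * functions below and to supply the caller's file and line for the
 * "last locked"/"last unlocked" diagnostics.  A hypothetical use
 * protecting a shared counter:
 *
 *	struct simplelock counter_slock;
 *	int counter;
 *
 *	simple_lock_init(&counter_slock);
 *	...
 *	simple_lock(&counter_slock);
 *	counter++;
 *	simple_unlock(&counter_slock);
 *
 * These are spinning locks, so they should only be held across short
 * code sections.
 */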

void
_simple_lock(__volatile struct simplelock *alp, const char *id, int l)
{
	cpuid_t cpu_id = cpu_number();
	int s;

	s = splhigh();

	/*
	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
	 * don't take any action, and just fall into the normal spin case.
	 */
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
#if defined(MULTIPROCESSOR) /* { */
		if (alp->lock_holder == cpu_id) {
			SLOCK_WHERE("simple_lock: locking against myself\n",
			    alp, id, l);
			goto out;
		}
#else
		SLOCK_WHERE("simple_lock: lock held\n", alp, id, l);
		goto out;
#endif /* MULTIPROCESSOR */ /* } */
	}

#if defined(MULTIPROCESSOR) /* { */
	/* Acquire the lock before modifying any fields. */
	__cpu_simple_lock(&alp->lock_data);
#else
	alp->lock_data = __SIMPLELOCK_LOCKED;
#endif /* } */

	KASSERT(alp->lock_holder == LK_NOCPU);

	alp->lock_file = id;
	alp->lock_line = l;
	alp->lock_holder = cpu_id;

	SLOCK_LIST_LOCK();
	/* XXX Cast away volatile */
	TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(1);

 out:
	splx(s);
}

int
_simple_lock_held(__volatile struct simplelock *alp)
{
	cpuid_t cpu_id = cpu_number();
	int s, locked = 0;

	s = splhigh();

#if defined(MULTIPROCESSOR)
	if (__cpu_simple_lock_try(&alp->lock_data) == 0)
		locked = (alp->lock_holder == cpu_id);
	else
		__cpu_simple_unlock(&alp->lock_data);
#else
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
		locked = 1;
		KASSERT(alp->lock_holder == cpu_id);
	}
#endif

	splx(s);

	return (locked);
}

int
_simple_lock_try(__volatile struct simplelock *alp, const char *id, int l)
{
	cpuid_t cpu_id = cpu_number();
	int s, rv = 0;

	s = splhigh();

	/*
	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
	 * don't take any action.
	 */
#if defined(MULTIPROCESSOR) /* { */
	if ((rv = __cpu_simple_lock_try(&alp->lock_data)) == 0) {
		if (alp->lock_holder == cpu_id)
			SLOCK_WHERE("simple_lock_try: locking against myself\n",
			    alp, id, l);
		goto out;
	}
#else
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
		SLOCK_WHERE("simple_lock_try: lock held\n", alp, id, l);
		goto out;
	}
	alp->lock_data = __SIMPLELOCK_LOCKED;
#endif /* MULTIPROCESSOR */ /* } */

	/*
	 * At this point, we have acquired the lock.
	 */

	rv = 1;

	alp->lock_file = id;
	alp->lock_line = l;
	alp->lock_holder = cpu_id;

	SLOCK_LIST_LOCK();
	/* XXX Cast away volatile. */
	TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(1);

 out:
	splx(s);
	return (rv);
}

void
_simple_unlock(__volatile struct simplelock *alp, const char *id, int l)
{
	int s;

	s = splhigh();

	/*
	 * MULTIPROCESSOR case: This is `safe' because we think we hold
	 * the lock, and if we don't, we don't take any action.
	 */
	if (alp->lock_data == __SIMPLELOCK_UNLOCKED) {
		SLOCK_WHERE("simple_unlock: lock not held\n",
		    alp, id, l);
		goto out;
	}

	SLOCK_LIST_LOCK();
	TAILQ_REMOVE(&simplelock_list, alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(-1);

	alp->list.tqe_next = NULL;	/* sanity */
	alp->list.tqe_prev = NULL;	/* sanity */

	alp->unlock_file = id;
	alp->unlock_line = l;

#if defined(MULTIPROCESSOR) /* { */
	alp->lock_holder = LK_NOCPU;
	/* Now that we've modified all fields, release the lock. */
	__cpu_simple_unlock(&alp->lock_data);
#else
	alp->lock_data = __SIMPLELOCK_UNLOCKED;
	KASSERT(alp->lock_holder == cpu_number());
	alp->lock_holder = LK_NOCPU;
#endif /* } */

 out:
	splx(s);
}

void
simple_lock_dump(void)
{
	struct simplelock *alp;
	int s;

	s = splhigh();
	SLOCK_LIST_LOCK();
	lock_printf("all simple locks:\n");
	for (alp = TAILQ_FIRST(&simplelock_list); alp != NULL;
	     alp = TAILQ_NEXT(alp, list)) {
		lock_printf("%p CPU %lu %s:%d\n", alp, alp->lock_holder,
		    alp->lock_file, alp->lock_line);
	}
	SLOCK_LIST_UNLOCK();
	splx(s);
}

void
simple_lock_freecheck(void *start, void *end)
{
	struct simplelock *alp;
	int s;

	s = splhigh();
	SLOCK_LIST_LOCK();
	for (alp = TAILQ_FIRST(&simplelock_list); alp != NULL;
	     alp = TAILQ_NEXT(alp, list)) {
		if ((void *)alp >= start && (void *)alp < end) {
			lock_printf("freeing simple_lock %p CPU %lu %s:%d\n",
			    alp, alp->lock_holder, alp->lock_file,
			    alp->lock_line);
			SLOCK_DEBUGGER();
		}
	}
	SLOCK_LIST_UNLOCK();
	splx(s);
}

void
simple_lock_switchcheck(void)
{
	struct simplelock *alp;
	cpuid_t cpu_id = cpu_number();
	int s;

	/*
	 * We must be holding exactly one lock: the sched_lock.
	 */

	SCHED_ASSERT_LOCKED();

	s = splhigh();		/* XXX spllock */
	SLOCK_LIST_LOCK();
	for (alp = TAILQ_FIRST(&simplelock_list); alp != NULL;
	     alp = TAILQ_NEXT(alp, list)) {
		if (alp == &sched_lock)
			continue;
		if (alp->lock_holder == cpu_id) {
			lock_printf("switching with held simple_lock %p "
			    "CPU %lu %s:%d\n",
			    alp, alp->lock_holder, alp->lock_file,
			    alp->lock_line);
			SLOCK_DEBUGGER();
		}
	}
	SLOCK_LIST_UNLOCK();
	splx(s);
}
#endif /* LOCKDEBUG */ /* } */