/*	$NetBSD: kern_lock.c,v 1.75.2.1.2.1 2007/08/11 14:03:48 bouyer Exp $	*/

/*-
 * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Ross Harvey.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1995
 *	The Regents of the University of California. All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.75.2.1.2.1 2007/08/11 14:03:48 bouyer Exp $");

#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/systm.h>
#include <machine/cpu.h>

#if defined(LOCKDEBUG)
#include <sys/syslog.h>
/*
 * note that stdarg.h and the ansi style va_start macro are used for both
 * ansi and traditional c compiles.
 * XXX: this requires that stdarg.h define: va_alist and va_dcl
 */
#include <machine/stdarg.h>

void	lock_printf(const char *fmt, ...)
    __attribute__((__format__(__printf__,1,2)));

static int acquire(__volatile struct lock *, int *, int, int, int);

int	lock_debug_syslog = 0;	/* defaults to printf, but can be patched */

#ifdef DDB
#include <ddb/ddbvar.h>
#include <machine/db_machdep.h>
#include <ddb/db_command.h>
#include <ddb/db_interface.h>
#endif
#endif

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */
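
/*
 * Illustrative sketch (not compiled): basic use of these primitives with
 * a hypothetical sleep lock "example_lock"; PRIBIO is just one plausible
 * sleep priority.
 *
 *	struct lock example_lock;
 *
 *	lockinit(&example_lock, PRIBIO, "exlock", 0, 0);
 *
 *	(void) lockmgr(&example_lock, LK_SHARED, NULL);		(readers)
 *	(void) lockmgr(&example_lock, LK_RELEASE, NULL);
 *
 *	(void) lockmgr(&example_lock, LK_EXCLUSIVE, NULL);	(one writer)
 *	(void) lockmgr(&example_lock, LK_RELEASE, NULL);
 */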

#if defined(LOCKDEBUG) || defined(DIAGNOSTIC) /* { */
#if defined(MULTIPROCESSOR) /* { */
#define COUNT_CPU(cpu_id, x) \
	curcpu()->ci_spin_locks += (x)
#else
u_long spin_locks;
#define COUNT_CPU(cpu_id, x) spin_locks += (x)
#endif /* MULTIPROCESSOR */ /* } */

#define COUNT(lkp, l, cpu_id, x) \
do { \
	if ((lkp)->lk_flags & LK_SPIN) \
		COUNT_CPU((cpu_id), (x)); \
	else \
		(l)->l_locks += (x); \
} while (/*CONSTCOND*/0)
#else
#define COUNT(lkp, p, cpu_id, x)
#define COUNT_CPU(cpu_id, x)
#endif /* LOCKDEBUG || DIAGNOSTIC */ /* } */

#ifndef SPINLOCK_SPIN_HOOK /* from <machine/lock.h> */
#define SPINLOCK_SPIN_HOOK /* nothing */
#endif

#define INTERLOCK_ACQUIRE(lkp, flags, s) \
do { \
	if ((flags) & LK_SPIN) \
		s = spllock(); \
	simple_lock(&(lkp)->lk_interlock); \
} while (/*CONSTCOND*/ 0)

#define INTERLOCK_RELEASE(lkp, flags, s) \
do { \
	simple_unlock(&(lkp)->lk_interlock); \
	if ((flags) & LK_SPIN) \
		splx(s); \
} while (/*CONSTCOND*/ 0)

#ifdef DDB /* { */
#ifdef MULTIPROCESSOR
int simple_lock_debugger = 1;	/* more serious on MP */
#else
int simple_lock_debugger = 0;
#endif
#define SLOCK_DEBUGGER()	if (simple_lock_debugger) Debugger()
#define SLOCK_TRACE() \
	db_stack_trace_print((db_expr_t)__builtin_frame_address(0), \
	    TRUE, 65535, "", lock_printf);
#else
#define SLOCK_DEBUGGER()	/* nothing */
#define SLOCK_TRACE()		/* nothing */
#endif /* } */

#if defined(LOCKDEBUG)
#if defined(DDB)
#define SPINLOCK_SPINCHECK_DEBUGGER	Debugger()
#else
#define SPINLOCK_SPINCHECK_DEBUGGER	/* nothing */
#endif

#define SPINLOCK_SPINCHECK_DECL \
	/* 32-bits of count -- wrap constitutes a "spinout" */ \
	uint32_t __spinc = 0

#define SPINLOCK_SPINCHECK \
do { \
	if (++__spinc == 0) { \
		lock_printf("LK_SPIN spinout, excl %d, share %d\n", \
		    lkp->lk_exclusivecount, lkp->lk_sharecount); \
		if (lkp->lk_exclusivecount) \
			lock_printf("held by CPU %lu\n", \
			    (u_long) lkp->lk_cpu); \
		if (lkp->lk_lock_file) \
			lock_printf("last locked at %s:%d\n", \
			    lkp->lk_lock_file, lkp->lk_lock_line); \
		if (lkp->lk_unlock_file) \
			lock_printf("last unlocked at %s:%d\n", \
			    lkp->lk_unlock_file, lkp->lk_unlock_line); \
		SLOCK_TRACE(); \
		SPINLOCK_SPINCHECK_DEBUGGER; \
	} \
} while (/*CONSTCOND*/ 0)
#else
#define SPINLOCK_SPINCHECK_DECL /* nothing */
#define SPINLOCK_SPINCHECK /* nothing */
#endif /* LOCKDEBUG */

/*
 * Acquire a resource.
 */
static int
acquire(__volatile struct lock *lkp, int *s, int extflags,
    int drain, int wanted)
{
	int error;

	KASSERT(drain || (wanted & LK_WAIT_NONZERO) == 0);

	if (extflags & LK_SPIN) {
		int interlocked;

		SPINLOCK_SPINCHECK_DECL;

		if (!drain) {
			lkp->lk_waitcount++;
			lkp->lk_flags |= LK_WAIT_NONZERO;
		}
		for (interlocked = 1;;) {
			SPINLOCK_SPINCHECK;
			if ((lkp->lk_flags & wanted) != 0) {
				if (interlocked) {
					INTERLOCK_RELEASE(lkp, LK_SPIN, *s);
					interlocked = 0;
				}
				SPINLOCK_SPIN_HOOK;
			} else if (interlocked) {
				break;
			} else {
				INTERLOCK_ACQUIRE(lkp, LK_SPIN, *s);
				interlocked = 1;
			}
		}
		if (!drain) {
			lkp->lk_waitcount--;
			if (lkp->lk_waitcount == 0)
				lkp->lk_flags &= ~LK_WAIT_NONZERO;
		}
		KASSERT((lkp->lk_flags & wanted) == 0);
		error = 0;	/* sanity */
	} else {
		for (error = 0; (lkp->lk_flags & wanted) != 0; ) {
			if (drain)
				lkp->lk_flags |= LK_WAITDRAIN;
			else {
				lkp->lk_waitcount++;
				lkp->lk_flags |= LK_WAIT_NONZERO;
			}
			/* XXX Cast away volatile. */
			error = ltsleep(drain ?
			    (void *)&lkp->lk_flags :
			    (void *)lkp, lkp->lk_prio,
			    lkp->lk_wmesg, lkp->lk_timo, &lkp->lk_interlock);
			if (!drain) {
				lkp->lk_waitcount--;
				if (lkp->lk_waitcount == 0)
					lkp->lk_flags &= ~LK_WAIT_NONZERO;
			}
			if (error)
				break;
			if (extflags & LK_SLEEPFAIL) {
				error = ENOLCK;
				break;
			}
		}
	}

	return error;
}

#define SETHOLDER(lkp, pid, lid, cpu_id) \
do { \
	if ((lkp)->lk_flags & LK_SPIN) \
		(lkp)->lk_cpu = cpu_id; \
	else { \
		(lkp)->lk_lockholder = pid; \
		(lkp)->lk_locklwp = lid; \
	} \
} while (/*CONSTCOND*/0)

#define WEHOLDIT(lkp, pid, lid, cpu_id) \
	(((lkp)->lk_flags & LK_SPIN) != 0 ? \
	 ((lkp)->lk_cpu == (cpu_id)) : \
	 ((lkp)->lk_lockholder == (pid) && (lkp)->lk_locklwp == (lid)))

#define WAKEUP_WAITER(lkp) \
do { \
	if (((lkp)->lk_flags & (LK_SPIN | LK_WAIT_NONZERO)) == \
	    LK_WAIT_NONZERO) { \
		/* XXX Cast away volatile. */ \
		wakeup((void *)(lkp)); \
	} \
} while (/*CONSTCOND*/0)

#if defined(LOCKDEBUG) /* { */
#if defined(MULTIPROCESSOR) /* { */
struct simplelock spinlock_list_slock = SIMPLELOCK_INITIALIZER;

#define SPINLOCK_LIST_LOCK() \
	__cpu_simple_lock(&spinlock_list_slock.lock_data)

#define SPINLOCK_LIST_UNLOCK() \
	__cpu_simple_unlock(&spinlock_list_slock.lock_data)
#else
#define SPINLOCK_LIST_LOCK()	/* nothing */

#define SPINLOCK_LIST_UNLOCK()	/* nothing */
#endif /* MULTIPROCESSOR */ /* } */

TAILQ_HEAD(, lock) spinlock_list =
    TAILQ_HEAD_INITIALIZER(spinlock_list);

#define HAVEIT(lkp) \
do { \
	if ((lkp)->lk_flags & LK_SPIN) { \
		int s = spllock(); \
		SPINLOCK_LIST_LOCK(); \
		/* XXX Cast away volatile. */ \
		TAILQ_INSERT_TAIL(&spinlock_list, (struct lock *)(lkp), \
		    lk_list); \
		SPINLOCK_LIST_UNLOCK(); \
		splx(s); \
	} \
} while (/*CONSTCOND*/0)

#define DONTHAVEIT(lkp) \
do { \
	if ((lkp)->lk_flags & LK_SPIN) { \
		int s = spllock(); \
		SPINLOCK_LIST_LOCK(); \
		/* XXX Cast away volatile. */ \
		TAILQ_REMOVE(&spinlock_list, (struct lock *)(lkp), \
		    lk_list); \
		SPINLOCK_LIST_UNLOCK(); \
		splx(s); \
	} \
} while (/*CONSTCOND*/0)
#else
#define HAVEIT(lkp)		/* nothing */

#define DONTHAVEIT(lkp)		/* nothing */
#endif /* LOCKDEBUG */ /* } */

#if defined(LOCKDEBUG)
/*
 * Lock debug printing routine; can be configured to print to console
 * or log to syslog.
 */
void
lock_printf(const char *fmt, ...)
{
	char b[150];
	va_list ap;

	va_start(ap, fmt);
	if (lock_debug_syslog)
		vlog(LOG_DEBUG, fmt, ap);
	else {
		vsnprintf(b, sizeof(b), fmt, ap);
		printf_nolog("%s", b);
	}
	va_end(ap);
}
#endif /* LOCKDEBUG */

/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, int prio, const char *wmesg, int timo, int flags)
{

	memset(lkp, 0, sizeof(struct lock));
	simple_lock_init(&lkp->lk_interlock);
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	if (flags & LK_SPIN)
		lkp->lk_cpu = LK_NOCPU;
	else {
		lkp->lk_lockholder = LK_NOPROC;
		lkp->lk_prio = prio;
		lkp->lk_timo = timo;
	}
	lkp->lk_wmesg = wmesg;	/* just a name for spin locks */
#if defined(LOCKDEBUG)
	lkp->lk_lock_file = NULL;
	lkp->lk_unlock_file = NULL;
#endif
}

/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp)
{
	int s = 0, lock_type = 0;

	INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
	if (lkp->lk_exclusivecount != 0)
		lock_type = LK_EXCLUSIVE;
	else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
	return (lock_type);
}

#if defined(LOCKDEBUG) || defined(DIAGNOSTIC)
/*
 * Make sure no spin locks are held by a CPU that is about
 * to context switch.
 */
void
spinlock_switchcheck(void)
{
	u_long cnt;
	int s;

	s = spllock();
#if defined(MULTIPROCESSOR)
	cnt = curcpu()->ci_spin_locks;
#else
	cnt = spin_locks;
#endif
	splx(s);

	if (cnt != 0)
		panic("spinlock_switchcheck: CPU %lu has %lu spin locks",
		    (u_long) cpu_number(), cnt);
}
#endif /* LOCKDEBUG || DIAGNOSTIC */

/*
 * Locks and IPLs (interrupt priority levels):
 *
 * Locks which may be taken from interrupt context must be handled
 * very carefully; you must spl to the highest IPL where the lock
 * is needed before acquiring the lock.
 *
 * It is also important to avoid deadlock, since certain (very high
 * priority) interrupts are often needed to keep the system as a whole
 * from deadlocking, and must not be blocked while you are spinning
 * waiting for a lower-priority lock.
 *
 * In addition, the lock-debugging hooks themselves need to use locks!
 *
 * A raw __cpu_simple_lock may be used from interrupts as long as it
 * is acquired and held at a single IPL.
 *
 * A simple_lock (which is a __cpu_simple_lock wrapped with some
 * debugging hooks) may be used at or below spllock(), which is
 * typically at or just below splhigh() (i.e. blocks everything
 * but certain machine-dependent extremely high priority interrupts).
 *
 * spinlockmgr spinlocks should be used at or below splsched().
 *
 * Some platforms may have interrupts of higher priority than splsched(),
 * including hard serial interrupts, inter-processor interrupts, and
 * kernel debugger traps.
 */
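
/*
 * Illustrative sketch (not compiled) of the rule above, for a lock
 * shared with a block I/O interrupt handler; the softc member and the
 * splbio() level are hypothetical:
 *
 *	int s;
 *
 *	s = splbio();			(raise to the handler's IPL first)
 *	simple_lock(&sc->sc_slock);
 *	... touch driver state ...
 *	simple_unlock(&sc->sc_slock);
 *	splx(s);
 */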

/*
 * XXX XXX kludge around another kludge..
 *
 * vfs_shutdown() may be called from interrupt context, either as a result
 * of a panic, or from the debugger. It proceeds to call
 * sys_sync(&proc0, ...), pretending it's running on behalf of proc0.
 *
 * We would like to make an attempt to sync the filesystems in this case, so
 * if this happens, we treat attempts to acquire locks specially.
 * All locks are acquired on behalf of proc0.
 *
 * If we've already panicked, we don't block waiting for locks, but
 * just barge right ahead since we're already going down in flames.
 */

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
#if defined(LOCKDEBUG)
_lockmgr(__volatile struct lock *lkp, u_int flags,
    struct simplelock *interlkp, const char *file, int line)
#else
lockmgr(__volatile struct lock *lkp, u_int flags,
    struct simplelock *interlkp)
#endif
{
	int error;
	pid_t pid;
	lwpid_t lid;
	int extflags;
	cpuid_t cpu_id;
	struct lwp *l = curlwp;
	int lock_shutdown_noblock = 0;
	int s = 0;

	error = 0;

	INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
	if (flags & LK_INTERLOCK)
		simple_unlock(interlkp);
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

#ifdef DIAGNOSTIC /* { */
	/*
	 * Don't allow spins on sleep locks and don't allow sleeps
	 * on spin locks.
	 */
	if ((flags ^ lkp->lk_flags) & LK_SPIN)
		panic("lockmgr: sleep/spin mismatch");
#endif /* } */

	if (extflags & LK_SPIN) {
		pid = LK_KERNPROC;
		lid = 0;
	} else {
		if (l == NULL) {
			if (!doing_shutdown) {
				panic("lockmgr: no context");
			} else {
				l = &lwp0;
				if (panicstr && (!(flags & LK_NOWAIT))) {
					flags |= LK_NOWAIT;
					lock_shutdown_noblock = 1;
				}
			}
		}
		lid = l->l_lid;
		pid = l->l_proc->p_pid;
	}
	cpu_id = cpu_number();

	/*
	 * Once a lock has drained, the LK_DRAINING flag is set and an
	 * exclusive lock is returned. The only valid operation thereafter
	 * is a single release of that exclusive lock. This final release
	 * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
	 * further requests of any sort will result in a panic. The bits
	 * selected for these two flags are chosen so that they will be set
	 * in memory that is freed (freed memory is filled with 0xdeadbeef).
	 * The final release is permitted to give a new lease on life to
	 * the lock by specifying LK_REENABLE.
	 */
	if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
#ifdef DIAGNOSTIC /* { */
		if (lkp->lk_flags & LK_DRAINED)
			panic("lockmgr: using decommissioned lock");
		if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
		    WEHOLDIT(lkp, pid, lid, cpu_id) == 0)
			panic("lockmgr: non-release on draining lock: %d",
			    flags & LK_TYPE_MASK);
#endif /* DIAGNOSTIC */ /* } */
		lkp->lk_flags &= ~LK_DRAINING;
		if ((flags & LK_REENABLE) == 0)
			lkp->lk_flags |= LK_DRAINED;
	}

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (WEHOLDIT(lkp, pid, lid, cpu_id) == 0) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
				error = EBUSY;
				break;
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			error = acquire(lkp, &s, extflags, 0,
			    LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE);
			if (error)
				break;
			lkp->lk_sharecount++;
			lkp->lk_flags |= LK_SHARE_NONZERO;
			COUNT(lkp, l, cpu_id, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;
		lkp->lk_flags |= LK_SHARE_NONZERO;
		COUNT(lkp, l, cpu_id, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (WEHOLDIT(lkp, pid, lid, cpu_id) == 0 ||
		    lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_flags |= LK_SHARE_NONZERO;
		lkp->lk_exclusivecount = 0;
		lkp->lk_recurselevel = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
#if defined(LOCKDEBUG)
		lkp->lk_unlock_file = file;
		lkp->lk_unlock_line = line;
#endif
		DONTHAVEIT(lkp);
		WAKEUP_WAITER(lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			lkp->lk_sharecount--;
			if (lkp->lk_sharecount == 0)
				lkp->lk_flags &= ~LK_SHARE_NONZERO;
			COUNT(lkp, l, cpu_id, -1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the file
		 * will always be unlocked.
		 */
		if (WEHOLDIT(lkp, pid, lid, cpu_id) || lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade exclusive lock");
		lkp->lk_sharecount--;
		if (lkp->lk_sharecount == 0)
			lkp->lk_flags &= ~LK_SHARE_NONZERO;
		COUNT(lkp, l, cpu_id, -1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		    lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an upgrade, so
			 * request upgrade and wait for the shared count to
			 * drop to zero, then take exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			error = acquire(lkp, &s, extflags, 0, LK_SHARE_NONZERO);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;
			if (error) {
				WAKEUP_WAITER(lkp);
				break;
			}
			lkp->lk_flags |= LK_HAVE_EXCL;
			SETHOLDER(lkp, pid, lid, cpu_id);
#if defined(LOCKDEBUG)
			lkp->lk_lock_file = file;
			lkp->lk_lock_line = line;
#endif
			HAVEIT(lkp);
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
			if (extflags & LK_SETRECURSE)
				lkp->lk_recurselevel = 1;
			COUNT(lkp, l, cpu_id, 1);
			break;
		}
		/*
		 * Someone else has requested upgrade. Release our shared
		 * lock, awaken upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if (lkp->lk_sharecount == 0)
			WAKEUP_WAITER(lkp);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (WEHOLDIT(lkp, pid, lid, cpu_id)) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0 &&
			    lkp->lk_recurselevel == 0) {
				if (extflags & LK_RECURSEFAIL) {
					error = EDEADLK;
					break;
				} else
					panic("lockmgr: locking against myself");
			}
			lkp->lk_exclusivecount++;
			if (extflags & LK_SETRECURSE &&
			    lkp->lk_recurselevel == 0)
				lkp->lk_recurselevel = lkp->lk_exclusivecount;
			COUNT(lkp, l, cpu_id, 1);
			break;
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
		    LK_SHARE_NONZERO))) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		error = acquire(lkp, &s, extflags, 0,
		    LK_HAVE_EXCL | LK_WANT_EXCL);
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		error = acquire(lkp, &s, extflags, 0,
		    LK_HAVE_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error) {
			WAKEUP_WAITER(lkp);
			break;
		}
		lkp->lk_flags |= LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, lid, cpu_id);
#if defined(LOCKDEBUG)
		lkp->lk_lock_file = file;
		lkp->lk_lock_line = line;
#endif
		HAVEIT(lkp);
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		if (extflags & LK_SETRECURSE)
			lkp->lk_recurselevel = 1;
		COUNT(lkp, l, cpu_id, 1);
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (WEHOLDIT(lkp, pid, lid, cpu_id) == 0) {
				if (lkp->lk_flags & LK_SPIN) {
					panic("lockmgr: processor %lu, not "
					    "exclusive lock holder %lu "
					    "unlocking", cpu_id, lkp->lk_cpu);
				} else {
					panic("lockmgr: pid %d, not "
					    "exclusive lock holder %d "
					    "unlocking", pid,
					    lkp->lk_lockholder);
				}
			}
			if (lkp->lk_exclusivecount == lkp->lk_recurselevel)
				lkp->lk_recurselevel = 0;
			lkp->lk_exclusivecount--;
			COUNT(lkp, l, cpu_id, -1);
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
#if defined(LOCKDEBUG)
				lkp->lk_unlock_file = file;
				lkp->lk_unlock_line = line;
#endif
				DONTHAVEIT(lkp);
			}
		} else if (lkp->lk_sharecount != 0) {
			lkp->lk_sharecount--;
			if (lkp->lk_sharecount == 0)
				lkp->lk_flags &= ~LK_SHARE_NONZERO;
			COUNT(lkp, l, cpu_id, -1);
		}
#ifdef DIAGNOSTIC
		else
			panic("lockmgr: release of unlocked lock!");
#endif
		WAKEUP_WAITER(lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do. Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (WEHOLDIT(lkp, pid, lid, cpu_id))
			panic("lockmgr: draining against myself");
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
		    LK_SHARE_NONZERO | LK_WAIT_NONZERO))) {
			error = EBUSY;
			break;
		}
		error = acquire(lkp, &s, extflags, 1,
		    LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
		    LK_SHARE_NONZERO | LK_WAIT_NONZERO);
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		if ((extflags & LK_RESURRECT) == 0)
			lkp->lk_flags |= LK_DRAINING;
		SETHOLDER(lkp, pid, lid, cpu_id);
#if defined(LOCKDEBUG)
		lkp->lk_lock_file = file;
		lkp->lk_lock_line = line;
#endif
		HAVEIT(lkp);
		lkp->lk_exclusivecount = 1;
		/* XXX unlikely that we'd want this */
		if (extflags & LK_SETRECURSE)
			lkp->lk_recurselevel = 1;
		COUNT(lkp, l, cpu_id, 1);
		break;

	default:
		INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & (LK_WAITDRAIN|LK_SPIN)) == LK_WAITDRAIN &&
	    ((lkp->lk_flags &
	    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
	    LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0)) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	/*
	 * Note that this panic will be a recursive panic, since
	 * we only set lock_shutdown_noblock above if panicstr != NULL.
	 */
	if (error && lock_shutdown_noblock)
		panic("lockmgr: deadlock (see previous panic)");

	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
	return (error);
}
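
/*
 * Illustrative sketch (not compiled): the upgrade, downgrade and drain
 * requests described above, with error handling mostly elided.  Note
 * that a failed LK_UPGRADE leaves the lock entirely unlocked.
 *
 *	(void) lockmgr(&lk, LK_SHARED, NULL);
 *	if (lockmgr(&lk, LK_UPGRADE, NULL) == 0) {
 *		... exclusive access ...
 *		(void) lockmgr(&lk, LK_DOWNGRADE, NULL);
 *		(void) lockmgr(&lk, LK_RELEASE, NULL);
 *	}
 *	(else the failed upgrade has already dropped our shared lock)
 *
 *	(void) lockmgr(&lk, LK_DRAIN, NULL);	(before freeing the lock)
 *	(void) lockmgr(&lk, LK_RELEASE, NULL);	(decommissions it)
 */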

/*
 * For a recursive spinlock held one or more times by the current CPU,
 * release all N locks, and return N.
 * Intended for use in mi_switch() shortly before context switching.
 */

int
#if defined(LOCKDEBUG)
_spinlock_release_all(__volatile struct lock *lkp, const char *file, int line)
#else
spinlock_release_all(__volatile struct lock *lkp)
#endif
{
	int s, count;
	cpuid_t cpu_id;

	KASSERT(lkp->lk_flags & LK_SPIN);

	INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);

	cpu_id = cpu_number();
	count = lkp->lk_exclusivecount;

	if (count != 0) {
#ifdef DIAGNOSTIC
		if (WEHOLDIT(lkp, 0, 0, cpu_id) == 0) {
			panic("spinlock_release_all: processor %lu, not "
			    "exclusive lock holder %lu "
			    "unlocking", (long)cpu_id, lkp->lk_cpu);
		}
#endif
		lkp->lk_recurselevel = 0;
		lkp->lk_exclusivecount = 0;
		COUNT_CPU(cpu_id, -count);
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
#if defined(LOCKDEBUG)
		lkp->lk_unlock_file = file;
		lkp->lk_unlock_line = line;
#endif
		DONTHAVEIT(lkp);
	}
#ifdef DIAGNOSTIC
	else if (lkp->lk_sharecount != 0)
		panic("spinlock_release_all: release of shared lock!");
	else
		panic("spinlock_release_all: release of unlocked lock!");
#endif
	INTERLOCK_RELEASE(lkp, LK_SPIN, s);

	return (count);
}

/*
 * For a recursive spinlock previously released by spinlock_release_all(),
 * reacquire it the given number of times on behalf of the current CPU.
 * Intended for use in mi_switch() right after resuming execution.
 */

void
#if defined(LOCKDEBUG)
_spinlock_acquire_count(__volatile struct lock *lkp, int count,
    const char *file, int line)
#else
spinlock_acquire_count(__volatile struct lock *lkp, int count)
#endif
{
	int s, error;
	cpuid_t cpu_id;

	KASSERT(lkp->lk_flags & LK_SPIN);

	INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);

	cpu_id = cpu_number();

#ifdef DIAGNOSTIC
	if (WEHOLDIT(lkp, LK_NOPROC, 0, cpu_id))
		panic("spinlock_acquire_count: processor %lu already holds lock", (long)cpu_id);
#endif
	/*
	 * Try to acquire the want_exclusive flag.
	 */
	error = acquire(lkp, &s, LK_SPIN, 0, LK_HAVE_EXCL | LK_WANT_EXCL);
	lkp->lk_flags |= LK_WANT_EXCL;
	/*
	 * Wait for shared locks and upgrades to finish.
	 */
	error = acquire(lkp, &s, LK_SPIN, 0,
	    LK_HAVE_EXCL | LK_SHARE_NONZERO | LK_WANT_UPGRADE);
	lkp->lk_flags &= ~LK_WANT_EXCL;
	lkp->lk_flags |= LK_HAVE_EXCL;
	SETHOLDER(lkp, LK_NOPROC, 0, cpu_id);
#if defined(LOCKDEBUG)
	lkp->lk_lock_file = file;
	lkp->lk_lock_line = line;
#endif
	HAVEIT(lkp);
	if (lkp->lk_exclusivecount != 0)
		panic("lockmgr: non-zero exclusive count");
	lkp->lk_exclusivecount = count;
	lkp->lk_recurselevel = 1;
	COUNT_CPU(cpu_id, count);

	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
}
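
/*
 * Illustrative sketch (not compiled): the intended pairing of the two
 * routines above around a context switch; that kernel_lock is the lock
 * handled this way is an assumption drawn from their comments.
 *
 *	int hold_count;
 *
 *	hold_count = spinlock_release_all(&kernel_lock);
 *	... switch to another LWP and eventually resume ...
 *	spinlock_acquire_count(&kernel_lock, hold_count);
 */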



/*
 * Print out information about state of a lock. Used by VOP_PRINT
 * routines to display the status of contained locks.
 */
void
lockmgr_printinfo(__volatile struct lock *lkp)
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL) {
		printf(" lock type %s: EXCL (count %d) by ",
		    lkp->lk_wmesg, lkp->lk_exclusivecount);
		if (lkp->lk_flags & LK_SPIN)
			printf("processor %lu", lkp->lk_cpu);
		else
			printf("pid %d.%d", lkp->lk_lockholder,
			    lkp->lk_locklwp);
	} else
		printf(" not locked");
	if ((lkp->lk_flags & LK_SPIN) == 0 && lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}

#if defined(LOCKDEBUG) /* { */
TAILQ_HEAD(, simplelock) simplelock_list =
    TAILQ_HEAD_INITIALIZER(simplelock_list);

#if defined(MULTIPROCESSOR) /* { */
struct simplelock simplelock_list_slock = SIMPLELOCK_INITIALIZER;

#define SLOCK_LIST_LOCK() \
	__cpu_simple_lock(&simplelock_list_slock.lock_data)

#define SLOCK_LIST_UNLOCK() \
	__cpu_simple_unlock(&simplelock_list_slock.lock_data)

#define SLOCK_COUNT(x) \
	curcpu()->ci_simple_locks += (x)
#else
u_long simple_locks;

#define SLOCK_LIST_LOCK()	/* nothing */

#define SLOCK_LIST_UNLOCK()	/* nothing */

#define SLOCK_COUNT(x)		simple_locks += (x)
#endif /* MULTIPROCESSOR */ /* } */

#ifdef MULTIPROCESSOR
#define SLOCK_MP()		lock_printf("on CPU %ld\n", \
				    (u_long) cpu_number())
#else
#define SLOCK_MP()		/* nothing */
#endif

#define SLOCK_WHERE(str, alp, id, l) \
do { \
	lock_printf("\n"); \
	lock_printf(str); \
	lock_printf("lock: %p, currently at: %s:%d\n", (alp), (id), (l)); \
	SLOCK_MP(); \
	if ((alp)->lock_file != NULL) \
		lock_printf("last locked: %s:%d\n", (alp)->lock_file, \
		    (alp)->lock_line); \
	if ((alp)->unlock_file != NULL) \
		lock_printf("last unlocked: %s:%d\n", (alp)->unlock_file, \
		    (alp)->unlock_line); \
	SLOCK_TRACE() \
	SLOCK_DEBUGGER(); \
} while (/*CONSTCOND*/0)

/*
 * Simple lock functions so that the debugger can see from whence
 * they are being called.
 */
void
simple_lock_init(struct simplelock *alp)
{

#if defined(MULTIPROCESSOR) /* { */
	__cpu_simple_lock_init(&alp->lock_data);
#else
	alp->lock_data = __SIMPLELOCK_UNLOCKED;
#endif /* } */
	alp->lock_file = NULL;
	alp->lock_line = 0;
	alp->unlock_file = NULL;
	alp->unlock_line = 0;
	alp->lock_holder = LK_NOCPU;
}
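
/*
 * Illustrative sketch (not compiled): callers are expected to use the
 * simple_lock() family of macros from <sys/lock.h>, which under
 * LOCKDEBUG pass __FILE__ and __LINE__ through to the functions below:
 *
 *	struct simplelock sl;
 *
 *	simple_lock_init(&sl);
 *	simple_lock(&sl);	(records the holder and lock site)
 *	simple_unlock(&sl);	(records the unlock site)
 */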

void
_simple_lock(__volatile struct simplelock *alp, const char *id, int l)
{
	cpuid_t cpu_id = cpu_number();
	int s;

	s = spllock();

	/*
	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
	 * don't take any action, and just fall into the normal spin case.
	 */
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
#if defined(MULTIPROCESSOR) /* { */
		if (alp->lock_holder == cpu_id) {
			SLOCK_WHERE("simple_lock: locking against myself\n",
			    alp, id, l);
			goto out;
		}
#else
		SLOCK_WHERE("simple_lock: lock held\n", alp, id, l);
		goto out;
#endif /* MULTIPROCESSOR */ /* } */
	}

#if defined(MULTIPROCESSOR) /* { */
	/* Acquire the lock before modifying any fields. */
	splx(s);
	__cpu_simple_lock(&alp->lock_data);
	s = spllock();
#else
	alp->lock_data = __SIMPLELOCK_LOCKED;
#endif /* } */

	if (alp->lock_holder != LK_NOCPU) {
		SLOCK_WHERE("simple_lock: uninitialized lock\n",
		    alp, id, l);
	}
	alp->lock_file = id;
	alp->lock_line = l;
	alp->lock_holder = cpu_id;

	SLOCK_LIST_LOCK();
	/* XXX Cast away volatile */
	TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(1);

 out:
	splx(s);
}

int
_simple_lock_held(__volatile struct simplelock *alp)
{
#if defined(MULTIPROCESSOR) || defined(DIAGNOSTIC)
	cpuid_t cpu_id = cpu_number();
#endif
	int s, locked = 0;

	s = spllock();

#if defined(MULTIPROCESSOR)
	if (__cpu_simple_lock_try(&alp->lock_data) == 0)
		locked = (alp->lock_holder == cpu_id);
	else
		__cpu_simple_unlock(&alp->lock_data);
#else
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
		locked = 1;
		KASSERT(alp->lock_holder == cpu_id);
	}
#endif

	splx(s);

	return (locked);
}

int
_simple_lock_try(__volatile struct simplelock *alp, const char *id, int l)
{
	cpuid_t cpu_id = cpu_number();
	int s, rv = 0;

	s = spllock();

	/*
	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
	 * don't take any action.
	 */
#if defined(MULTIPROCESSOR) /* { */
	if ((rv = __cpu_simple_lock_try(&alp->lock_data)) == 0) {
		if (alp->lock_holder == cpu_id)
			SLOCK_WHERE("simple_lock_try: locking against myself\n",
			    alp, id, l);
		goto out;
	}
#else
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
		SLOCK_WHERE("simple_lock_try: lock held\n", alp, id, l);
		goto out;
	}
	alp->lock_data = __SIMPLELOCK_LOCKED;
#endif /* MULTIPROCESSOR */ /* } */

	/*
	 * At this point, we have acquired the lock.
	 */

	rv = 1;

	alp->lock_file = id;
	alp->lock_line = l;
	alp->lock_holder = cpu_id;

	SLOCK_LIST_LOCK();
	/* XXX Cast away volatile. */
	TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(1);

 out:
	splx(s);
	return (rv);
}

void
_simple_unlock(__volatile struct simplelock *alp, const char *id, int l)
{
	int s;

	s = spllock();

	/*
	 * MULTIPROCESSOR case: This is `safe' because we think we hold
	 * the lock, and if we don't, we don't take any action.
	 */
	if (alp->lock_data == __SIMPLELOCK_UNLOCKED) {
		SLOCK_WHERE("simple_unlock: lock not held\n",
		    alp, id, l);
		goto out;
	}

	SLOCK_LIST_LOCK();
	TAILQ_REMOVE(&simplelock_list, alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(-1);

	alp->list.tqe_next = NULL;	/* sanity */
	alp->list.tqe_prev = NULL;	/* sanity */

	alp->unlock_file = id;
	alp->unlock_line = l;

#if defined(MULTIPROCESSOR) /* { */
	alp->lock_holder = LK_NOCPU;
	/* Now that we've modified all fields, release the lock. */
	__cpu_simple_unlock(&alp->lock_data);
#else
	alp->lock_data = __SIMPLELOCK_UNLOCKED;
	KASSERT(alp->lock_holder == cpu_number());
	alp->lock_holder = LK_NOCPU;
#endif /* } */

 out:
	splx(s);
}

void
simple_lock_dump(void)
{
	struct simplelock *alp;
	int s;

	s = spllock();
	SLOCK_LIST_LOCK();
	lock_printf("all simple locks:\n");
	TAILQ_FOREACH(alp, &simplelock_list, list) {
		lock_printf("%p CPU %lu %s:%d\n", alp, alp->lock_holder,
		    alp->lock_file, alp->lock_line);
	}
	SLOCK_LIST_UNLOCK();
	splx(s);
}

void
simple_lock_freecheck(void *start, void *end)
{
	struct simplelock *alp;
	int s;

	s = spllock();
	SLOCK_LIST_LOCK();
	TAILQ_FOREACH(alp, &simplelock_list, list) {
		if ((void *)alp >= start && (void *)alp < end) {
			lock_printf("freeing simple_lock %p CPU %lu %s:%d\n",
			    alp, alp->lock_holder, alp->lock_file,
			    alp->lock_line);
			SLOCK_DEBUGGER();
		}
	}
	SLOCK_LIST_UNLOCK();
	splx(s);
}

/*
 * We must be holding exactly one lock: the sched_lock.
 */

void
simple_lock_switchcheck(void)
{

	simple_lock_only_held(&sched_lock, "switching");
}

void
simple_lock_only_held(volatile struct simplelock *lp, const char *where)
{
	struct simplelock *alp;
	cpuid_t cpu_id = cpu_number();
	int s;

	if (lp) {
		LOCK_ASSERT(simple_lock_held(lp));
	}
	s = spllock();
	SLOCK_LIST_LOCK();
	TAILQ_FOREACH(alp, &simplelock_list, list) {
		if (alp == lp)
			continue;
		if (alp->lock_holder == cpu_id)
			break;
	}
	SLOCK_LIST_UNLOCK();
	splx(s);

	if (alp != NULL) {
		lock_printf("\n%s with held simple_lock %p "
		    "CPU %lu %s:%d\n",
		    where, alp, alp->lock_holder, alp->lock_file,
		    alp->lock_line);
		SLOCK_TRACE();
		SLOCK_DEBUGGER();
	}
}
#endif /* LOCKDEBUG */ /* } */

#if defined(MULTIPROCESSOR)
/*
 * Functions for manipulating the kernel_lock. We put them here
 * so that they show up in profiles.
 */

struct lock kernel_lock;

void
_kernel_lock_init(void)
{

	spinlockinit(&kernel_lock, "klock", 0);
}

/*
 * Acquire/release the kernel lock. Intended for use in the scheduler
 * and the lower half of the kernel.
 */
void
_kernel_lock(int flag)
{

	SCHED_ASSERT_UNLOCKED();
	spinlockmgr(&kernel_lock, flag, 0);
}

void
_kernel_unlock(void)
{

	spinlockmgr(&kernel_lock, LK_RELEASE, 0);
}

/*
 * Acquire/release the kernel_lock on behalf of a process. Intended for
 * use in the top half of the kernel.
 */
void
_kernel_proc_lock(struct lwp *l)
{

	SCHED_ASSERT_UNLOCKED();
	spinlockmgr(&kernel_lock, LK_EXCLUSIVE, 0);
	l->l_flag |= L_BIGLOCK;
}

void
_kernel_proc_unlock(struct lwp *l)
{

	l->l_flag &= ~L_BIGLOCK;
	spinlockmgr(&kernel_lock, LK_RELEASE, 0);
}
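
/*
 * Illustrative sketch (not compiled): the intended split between the
 * two interfaces above.  Passing LK_EXCLUSIVE to _kernel_lock() is an
 * assumed example value for its flag argument.
 *
 *	_kernel_proc_lock(curlwp);	(top half, sets L_BIGLOCK)
 *	... syscall work ...
 *	_kernel_proc_unlock(curlwp);
 *
 *	_kernel_lock(LK_EXCLUSIVE);	(scheduler / lower half)
 *	...
 *	_kernel_unlock();
 */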
#endif /* MULTIPROCESSOR */