kern_lock.c revision 1.63 1 /* $NetBSD: kern_lock.c,v 1.63 2002/09/14 21:42:42 chs Exp $ */
2
3 /*-
4 * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * This code is derived from software contributed to The NetBSD Foundation
12 * by Ross Harvey.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions
16 * are met:
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in the
21 * documentation and/or other materials provided with the distribution.
22 * 3. All advertising materials mentioning features or use of this software
23 * must display the following acknowledgement:
24 * This product includes software developed by the NetBSD
25 * Foundation, Inc. and its contributors.
26 * 4. Neither the name of The NetBSD Foundation nor the names of its
27 * contributors may be used to endorse or promote products derived
28 * from this software without specific prior written permission.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
31 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
32 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
33 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
34 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
35 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
36 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
37 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
38 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
39 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
40 * POSSIBILITY OF SUCH DAMAGE.
41 */
42
43 /*
44 * Copyright (c) 1995
45 * The Regents of the University of California. All rights reserved.
46 *
47 * This code contains ideas from software contributed to Berkeley by
48 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
49 * System project at Carnegie-Mellon University.
50 *
51 * Redistribution and use in source and binary forms, with or without
52 * modification, are permitted provided that the following conditions
53 * are met:
54 * 1. Redistributions of source code must retain the above copyright
55 * notice, this list of conditions and the following disclaimer.
56 * 2. Redistributions in binary form must reproduce the above copyright
57 * notice, this list of conditions and the following disclaimer in the
58 * documentation and/or other materials provided with the distribution.
59 * 3. All advertising materials mentioning features or use of this software
60 * must display the following acknowledgement:
61 * This product includes software developed by the University of
62 * California, Berkeley and its contributors.
63 * 4. Neither the name of the University nor the names of its contributors
64 * may be used to endorse or promote products derived from this software
65 * without specific prior written permission.
66 *
67 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
68 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
69 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
70 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
71 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
72 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
73 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
74 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
75 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
76 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
77 * SUCH DAMAGE.
78 *
79 * @(#)kern_lock.c 8.18 (Berkeley) 5/21/95
80 */
81
82 #include <sys/cdefs.h>
83 __KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.63 2002/09/14 21:42:42 chs Exp $");
84
85 #include "opt_multiprocessor.h"
86 #include "opt_lockdebug.h"
87 #include "opt_ddb.h"
88
89 #include <sys/param.h>
90 #include <sys/proc.h>
91 #include <sys/lock.h>
92 #include <sys/systm.h>
93 #include <machine/cpu.h>
94
95 #if defined(LOCKDEBUG)
96 #include <sys/syslog.h>
97 /*
98 * note that stdarg.h and the ansi style va_start macro is used for both
99 * ansi and traditional c compiles.
100 * XXX: this requires that stdarg.h define: va_alist and va_dcl
101 */
102 #include <machine/stdarg.h>
103
104 void lock_printf(const char *fmt, ...)
105 __attribute__((__format__(__printf__,1,2)));
106
107 int lock_debug_syslog = 0; /* defaults to printf, but can be patched */
108
109 #ifdef DDB
110 #include <ddb/ddbvar.h>
111 #include <machine/db_machdep.h>
112 #include <ddb/db_command.h>
113 #include <ddb/db_interface.h>
114 #endif
115 #endif
116
117 /*
118 * Locking primitives implementation.
119 * Locks provide shared/exclusive synchronization.
120 */
121
#if defined(LOCKDEBUG) || defined(DIAGNOSTIC) /* { */
#if defined(MULTIPROCESSOR) /* { */
/*
 * Per-CPU count of spin locks held; kept in the CPU's cpu_info so no
 * cross-CPU synchronization is needed to update it.
 */
#define	COUNT_CPU(cpu_id, x)						\
	curcpu()->ci_spin_locks += (x)
#else
u_long	spin_locks;
#define	COUNT_CPU(cpu_id, x)	spin_locks += (x)
#endif /* MULTIPROCESSOR */ /* } */

/*
 * Lock accounting: spin locks are counted per-CPU (COUNT_CPU), sleep
 * locks per-process (p_locks).  `x' is +1 on acquire, -1 on release.
 */
#define	COUNT(lkp, p, cpu_id, x)					\
do {									\
	if ((lkp)->lk_flags & LK_SPIN)					\
		COUNT_CPU((cpu_id), (x));				\
	else								\
		(p)->p_locks += (x);					\
} while (/*CONSTCOND*/0)
#else
#define COUNT(lkp, p, cpu_id, x)
#define COUNT_CPU(cpu_id, x)
#endif /* LOCKDEBUG || DIAGNOSTIC */ /* } */

#ifndef SPINLOCK_SPIN_HOOK		/* from <machine/lock.h> */
#define	SPINLOCK_SPIN_HOOK		/* nothing */
#endif
146
/*
 * Acquire/release a lock's interlock.  For spin locks the interlock is
 * taken at splsched() so the holder cannot be preempted or re-entered
 * from interrupt context while holding it; `s' saves/restores the IPL.
 */
#define	INTERLOCK_ACQUIRE(lkp, flags, s)				\
do {									\
	if ((flags) & LK_SPIN)						\
		s = splsched();						\
	simple_lock(&(lkp)->lk_interlock);				\
} while (0)

#define	INTERLOCK_RELEASE(lkp, flags, s)				\
do {									\
	simple_unlock(&(lkp)->lk_interlock);				\
	if ((flags) & LK_SPIN)						\
		splx(s);						\
} while (0)
160
#ifdef DDB /* { */
#ifdef MULTIPROCESSOR
int simple_lock_debugger = 1;	/* more serious on MP */
#else
int simple_lock_debugger = 0;
#endif
#define	SLOCK_DEBUGGER()	if (simple_lock_debugger) Debugger()
#define	SLOCK_TRACE()							\
	db_stack_trace_print((db_expr_t)__builtin_frame_address(0),	\
	    TRUE, 65535, "", printf);
#else
#define	SLOCK_DEBUGGER()	/* nothing */
#define	SLOCK_TRACE()		/* nothing */
#endif /* } */

#if defined(LOCKDEBUG)
#if defined(DDB)
#define	SPINLOCK_SPINCHECK_DEBUGGER	Debugger()
#else
#define	SPINLOCK_SPINCHECK_DEBUGGER	/* nothing */
#endif

/*
 * Spinout detection for LK_SPIN busy-wait loops: a per-acquisition
 * 32-bit counter is bumped each iteration; when it wraps to zero we
 * assume the lock will never be released, dump the lock state and the
 * stack, and (with DDB) drop into the debugger.
 */
#define	SPINLOCK_SPINCHECK_DECL						\
	/* 32-bits of count -- wrap constitutes a "spinout" */		\
	uint32_t __spinc = 0

#define	SPINLOCK_SPINCHECK						\
do {									\
	if (++__spinc == 0) {						\
		printf("LK_SPIN spinout, excl %d, share %d\n",		\
		    lkp->lk_exclusivecount, lkp->lk_sharecount);	\
		if (lkp->lk_exclusivecount)				\
			printf("held by CPU %lu\n",			\
			    (u_long) lkp->lk_cpu);			\
		if (lkp->lk_lock_file)					\
			printf("last locked at %s:%d\n",		\
			    lkp->lk_lock_file, lkp->lk_lock_line);	\
		if (lkp->lk_unlock_file)				\
			printf("last unlocked at %s:%d\n",		\
			    lkp->lk_unlock_file, lkp->lk_unlock_line);	\
		SLOCK_TRACE();						\
		SPINLOCK_SPINCHECK_DEBUGGER;				\
	}								\
} while (0)
#else
#define	SPINLOCK_SPINCHECK_DECL			/* nothing */
#define	SPINLOCK_SPINCHECK			/* nothing */
#endif /* LOCKDEBUG && DDB */
209
/*
 * Acquire a resource: wait until the `wanted' condition clears.
 *
 * Spin case (LK_SPIN): busy-wait, dropping the interlock while the
 * condition holds (so the current owner can make progress) and
 * re-taking it before re-testing; exits the loop only with the
 * interlock held and the condition false.  lk_waitcount is maintained
 * unless this is a drain request.
 *
 * Sleep case: record ourselves as a waiter (lk_waitcount, or the
 * LK_WAITDRAIN flag for drain requests) and ltsleep() on the lock
 * (drainers sleep on &lk_flags instead); ltsleep() drops and re-takes
 * the interlock around the sleep.  LK_SLEEPFAIL turns a successful
 * wakeup into ENOLCK so the caller re-evaluates from scratch.
 */
#define	ACQUIRE(lkp, error, extflags, drain, wanted)			\
	if ((extflags) & LK_SPIN) {					\
		int interlocked;					\
		SPINLOCK_SPINCHECK_DECL;				\
									\
		if ((drain) == 0)					\
			(lkp)->lk_waitcount++;				\
		for (interlocked = 1;;) {				\
			SPINLOCK_SPINCHECK;				\
			if (wanted) {					\
				if (interlocked) {			\
					INTERLOCK_RELEASE((lkp),	\
					    LK_SPIN, s);		\
					interlocked = 0;		\
				}					\
				SPINLOCK_SPIN_HOOK;			\
			} else if (interlocked) {			\
				break;					\
			} else {					\
				INTERLOCK_ACQUIRE((lkp), LK_SPIN, s);	\
				interlocked = 1;			\
			}						\
		}							\
		if ((drain) == 0)					\
			(lkp)->lk_waitcount--;				\
		KASSERT((wanted) == 0);					\
		error = 0;	/* sanity */				\
	} else {							\
		for (error = 0; wanted; ) {				\
			if ((drain))					\
				(lkp)->lk_flags |= LK_WAITDRAIN;	\
			else						\
				(lkp)->lk_waitcount++;			\
			/* XXX Cast away volatile. */			\
			error = ltsleep((drain) ?			\
			    (void *)&(lkp)->lk_flags :			\
			    (void *)(lkp), (lkp)->lk_prio,		\
			    (lkp)->lk_wmesg, (lkp)->lk_timo,		\
			    &(lkp)->lk_interlock);			\
			if ((drain) == 0)				\
				(lkp)->lk_waitcount--;			\
			if (error)					\
				break;					\
			if ((extflags) & LK_SLEEPFAIL) {		\
				error = ENOLCK;				\
				break;					\
			}						\
		}							\
	}
262
/*
 * Record/test lock ownership: spin locks are owned by a CPU (lk_cpu),
 * sleep locks by a process (lk_lockholder).
 */
#define	SETHOLDER(lkp, pid, cpu_id)					\
do {									\
	if ((lkp)->lk_flags & LK_SPIN)					\
		(lkp)->lk_cpu = cpu_id;					\
	else								\
		(lkp)->lk_lockholder = pid;				\
} while (/*CONSTCOND*/0)

#define	WEHOLDIT(lkp, pid, cpu_id)					\
	(((lkp)->lk_flags & LK_SPIN) != 0 ?				\
	 ((lkp)->lk_cpu == (cpu_id)) : ((lkp)->lk_lockholder == (pid)))

/*
 * Wake up any sleepers; spin-lock waiters poll, so no wakeup is needed
 * (or performed) for LK_SPIN locks.
 */
#define	WAKEUP_WAITER(lkp)						\
do {									\
	if (((lkp)->lk_flags & LK_SPIN) == 0 && (lkp)->lk_waitcount) {	\
		/* XXX Cast away volatile. */				\
		wakeup((void *)(lkp));					\
	}								\
} while (/*CONSTCOND*/0)
282
#if defined(LOCKDEBUG) /* { */
#if defined(MULTIPROCESSOR) /* { */
struct simplelock spinlock_list_slock = SIMPLELOCK_INITIALIZER;

#define	SPINLOCK_LIST_LOCK()						\
	__cpu_simple_lock(&spinlock_list_slock.lock_data)

#define	SPINLOCK_LIST_UNLOCK()						\
	__cpu_simple_unlock(&spinlock_list_slock.lock_data)
#else
#define	SPINLOCK_LIST_LOCK()	/* nothing */

#define	SPINLOCK_LIST_UNLOCK()	/* nothing */
#endif /* MULTIPROCESSOR */ /* } */

/* List of all spinlockmgr locks currently held, for LOCKDEBUG reporting. */
TAILQ_HEAD(, lock) spinlock_list =
    TAILQ_HEAD_INITIALIZER(spinlock_list);

/* Add a just-acquired spin lock to spinlock_list (no-op for sleep locks). */
#define	HAVEIT(lkp)							\
do {									\
	if ((lkp)->lk_flags & LK_SPIN) {				\
		int s = spllock();					\
		SPINLOCK_LIST_LOCK();					\
		/* XXX Cast away volatile. */				\
		TAILQ_INSERT_TAIL(&spinlock_list, (struct lock *)(lkp),	\
		    lk_list);						\
		SPINLOCK_LIST_UNLOCK();					\
		splx(s);						\
	}								\
} while (/*CONSTCOND*/0)

/* Remove a just-released spin lock from spinlock_list. */
#define	DONTHAVEIT(lkp)							\
do {									\
	if ((lkp)->lk_flags & LK_SPIN) {				\
		int s = spllock();					\
		SPINLOCK_LIST_LOCK();					\
		/* XXX Cast away volatile. */				\
		TAILQ_REMOVE(&spinlock_list, (struct lock *)(lkp),	\
		    lk_list);						\
		SPINLOCK_LIST_UNLOCK();					\
		splx(s);						\
	}								\
} while (/*CONSTCOND*/0)
#else
#define	HAVEIT(lkp)		/* nothing */

#define	DONTHAVEIT(lkp)		/* nothing */
#endif /* LOCKDEBUG */ /* } */
331
#if defined(LOCKDEBUG)
/*
 * Lock debug printing routine; the destination is selected at run time
 * by the patchable global lock_debug_syslog: zero means the console
 * (vprintf), non-zero means syslog at LOG_DEBUG priority.
 */
void
lock_printf(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	if (lock_debug_syslog == 0)
		vprintf(fmt, ap);
	else
		vlog(LOG_DEBUG, fmt, ap);
	va_end(ap);
}
#endif /* LOCKDEBUG */
350
351 /*
352 * Initialize a lock; required before use.
353 */
354 void
355 lockinit(struct lock *lkp, int prio, const char *wmesg, int timo, int flags)
356 {
357
358 memset(lkp, 0, sizeof(struct lock));
359 simple_lock_init(&lkp->lk_interlock);
360 lkp->lk_flags = flags & LK_EXTFLG_MASK;
361 if (flags & LK_SPIN)
362 lkp->lk_cpu = LK_NOCPU;
363 else {
364 lkp->lk_lockholder = LK_NOPROC;
365 lkp->lk_prio = prio;
366 lkp->lk_timo = timo;
367 }
368 lkp->lk_wmesg = wmesg; /* just a name for spin locks */
369 #if defined(LOCKDEBUG)
370 lkp->lk_lock_file = NULL;
371 lkp->lk_unlock_file = NULL;
372 #endif
373 }
374
375 /*
376 * Determine the status of a lock.
377 */
378 int
379 lockstatus(struct lock *lkp)
380 {
381 int s, lock_type = 0;
382
383 INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
384 if (lkp->lk_exclusivecount != 0)
385 lock_type = LK_EXCLUSIVE;
386 else if (lkp->lk_sharecount != 0)
387 lock_type = LK_SHARED;
388 INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
389 return (lock_type);
390 }
391
#if defined(LOCKDEBUG) || defined(DIAGNOSTIC)
/*
 * Make sure no spin locks are held by a CPU that is about to context
 * switch: snapshot this CPU's spin-lock count at spllock() and panic
 * if any are still held.
 */
void
spinlock_switchcheck(void)
{
	u_long heldcount;
	int s;

	s = spllock();
#if defined(MULTIPROCESSOR)
	heldcount = curcpu()->ci_spin_locks;
#else
	heldcount = spin_locks;
#endif
	splx(s);

	if (heldcount != 0)
		panic("spinlock_switchcheck: CPU %lu has %lu spin locks",
		    (u_long) cpu_number(), heldcount);
}
#endif /* LOCKDEBUG || DIAGNOSTIC */
416
417 /*
418 * Locks and IPLs (interrupt priority levels):
419 *
420 * Locks which may be taken from interrupt context must be handled
421 * very carefully; you must spl to the highest IPL where the lock
422 * is needed before acquiring the lock.
423 *
424 * It is also important to avoid deadlock, since certain (very high
425 * priority) interrupts are often needed to keep the system as a whole
426 * from deadlocking, and must not be blocked while you are spinning
427 * waiting for a lower-priority lock.
428 *
429 * In addition, the lock-debugging hooks themselves need to use locks!
430 *
 * A raw __cpu_simple_lock may be used from interrupts as long as it
432 * is acquired and held at a single IPL.
433 *
434 * A simple_lock (which is a __cpu_simple_lock wrapped with some
435 * debugging hooks) may be used at or below spllock(), which is
436 * typically at or just below splhigh() (i.e. blocks everything
437 * but certain machine-dependent extremely high priority interrupts).
438 *
439 * spinlockmgr spinlocks should be used at or below splsched().
440 *
441 * Some platforms may have interrupts of higher priority than splsched(),
442 * including hard serial interrupts, inter-processor interrupts, and
443 * kernel debugger traps.
444 */
445
446 /*
447 * XXX XXX kludge around another kludge..
448 *
449 * vfs_shutdown() may be called from interrupt context, either as a result
450 * of a panic, or from the debugger. It proceeds to call
 * sys_sync(&proc0, ...), pretending it's running on behalf of proc0
452 *
453 * We would like to make an attempt to sync the filesystems in this case, so
454 * if this happens, we treat attempts to acquire locks specially.
455 * All locks are acquired on behalf of proc0.
456 *
457 * If we've already paniced, we don't block waiting for locks, but
458 * just barge right ahead since we're already going down in flames.
459 */
460
/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 *
 * lkp		the lock, as set up by lockinit()
 * flags	LK_* request type plus modifiers (LK_NOWAIT, LK_CANRECURSE,
 *		LK_SETRECURSE, ...)
 * interlkp	if LK_INTERLOCK is set, a simple lock held by the caller
 *		that is dropped once lk_interlock is held
 *
 * Returns 0 on success, or an errno: EBUSY for a failed LK_NOWAIT or
 * LK_EXCLUPGRADE request, ENOLCK via LK_SLEEPFAIL, EDEADLK via
 * LK_RECURSEFAIL, or whatever ltsleep() returned.
 */
int
#if defined(LOCKDEBUG)
_lockmgr(__volatile struct lock *lkp, u_int flags,
    struct simplelock *interlkp, const char *file, int line)
#else
lockmgr(__volatile struct lock *lkp, u_int flags,
    struct simplelock *interlkp)
#endif
{
	int error;
	pid_t pid;
	int extflags;
	cpuid_t cpu_id;
	struct proc *p = curproc;
	int lock_shutdown_noblock = 0;
	int s;

	error = 0;

	INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
	if (flags & LK_INTERLOCK)
		simple_unlock(interlkp);
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

#ifdef DIAGNOSTIC /* { */
	/*
	 * Don't allow spins on sleep locks and don't allow sleeps
	 * on spin locks.
	 */
	if ((flags ^ lkp->lk_flags) & LK_SPIN)
		panic("lockmgr: sleep/spin mismatch\n");
#endif /* } */

	/*
	 * Establish our identity: spin locks are owned by a CPU, so no
	 * pid is needed.  Sleep locks are owned by a process; if there
	 * is no process context during shutdown, act as proc0 and turn
	 * on LK_NOWAIT after a panic (see the kludge comment above).
	 */
	if (extflags & LK_SPIN)
		pid = LK_KERNPROC;
	else {
		if (p == NULL) {
			if (!doing_shutdown) {
				panic("lockmgr: no context");
			} else {
				p = &proc0;
				if (panicstr && (!(flags & LK_NOWAIT))) {
					flags |= LK_NOWAIT;
					lock_shutdown_noblock = 1;
				}
			}
		}
		pid = p->p_pid;
	}
	cpu_id = cpu_number();

	/*
	 * Once a lock has drained, the LK_DRAINING flag is set and an
	 * exclusive lock is returned. The only valid operation thereafter
	 * is a single release of that exclusive lock. This final release
	 * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
	 * further requests of any sort will result in a panic. The bits
	 * selected for these two flags are chosen so that they will be set
	 * in memory that is freed (freed memory is filled with 0xdeadbeef).
	 * The final release is permitted to give a new lease on life to
	 * the lock by specifying LK_REENABLE.
	 */
	if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
#ifdef DIAGNOSTIC /* { */
		if (lkp->lk_flags & LK_DRAINED)
			panic("lockmgr: using decommissioned lock");
		if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
		    WEHOLDIT(lkp, pid, cpu_id) == 0)
			panic("lockmgr: non-release on draining lock: %d\n",
			    flags & LK_TYPE_MASK);
#endif /* DIAGNOSTIC */ /* } */
		lkp->lk_flags &= ~LK_DRAINING;
		if ((flags & LK_REENABLE) == 0)
			lkp->lk_flags |= LK_DRAINED;
	}

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (WEHOLDIT(lkp, pid, cpu_id) == 0) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
				error = EBUSY;
				break;
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
			if (error)
				break;
			lkp->lk_sharecount++;
			COUNT(lkp, p, cpu_id, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;
		COUNT(lkp, p, cpu_id, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (WEHOLDIT(lkp, pid, cpu_id) == 0 ||
		    lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		/* Convert every recursive exclusive hold into a share. */
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_exclusivecount = 0;
		lkp->lk_recurselevel = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
#if defined(LOCKDEBUG)
		lkp->lk_unlock_file = file;
		lkp->lk_unlock_line = line;
#endif
		DONTHAVEIT(lkp);
		WAKEUP_WAITER(lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			lkp->lk_sharecount--;
			COUNT(lkp, p, cpu_id, -1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the file
		 * will always be unlocked.
		 */
		if (WEHOLDIT(lkp, pid, cpu_id) || lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade exclusive lock");
		lkp->lk_sharecount--;
		COUNT(lkp, p, cpu_id, -1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		    lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are first shared lock to request an upgrade, so
			 * request upgrade and wait for the shared count to
			 * drop to zero, then take exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			ACQUIRE(lkp, error, extflags, 0, lkp->lk_sharecount);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;
			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			SETHOLDER(lkp, pid, cpu_id);
#if defined(LOCKDEBUG)
			lkp->lk_lock_file = file;
			lkp->lk_lock_line = line;
#endif
			HAVEIT(lkp);
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
			if (extflags & LK_SETRECURSE)
				lkp->lk_recurselevel = 1;
			COUNT(lkp, p, cpu_id, 1);
			break;
		}
		/*
		 * Someone else has requested upgrade. Release our shared
		 * lock, awaken upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if (lkp->lk_sharecount == 0)
			WAKEUP_WAITER(lkp);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (WEHOLDIT(lkp, pid, cpu_id)) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0 &&
			     lkp->lk_recurselevel == 0) {
				if (extflags & LK_RECURSEFAIL) {
					error = EDEADLK;
					break;
				} else
					panic("lockmgr: locking against myself");
			}
			lkp->lk_exclusivecount++;
			if (extflags & LK_SETRECURSE &&
			    lkp->lk_recurselevel == 0)
				lkp->lk_recurselevel = lkp->lk_exclusivecount;
			COUNT(lkp, p, cpu_id, 1);
			break;
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0)) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		ACQUIRE(lkp, error, extflags, 0, lkp->lk_sharecount != 0 ||
		    (lkp->lk_flags & LK_WANT_UPGRADE));
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, cpu_id);
#if defined(LOCKDEBUG)
		lkp->lk_lock_file = file;
		lkp->lk_lock_line = line;
#endif
		HAVEIT(lkp);
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		if (extflags & LK_SETRECURSE)
			lkp->lk_recurselevel = 1;
		COUNT(lkp, p, cpu_id, 1);
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (WEHOLDIT(lkp, pid, cpu_id) == 0) {
				if (lkp->lk_flags & LK_SPIN) {
					panic("lockmgr: processor %lu, not "
					    "exclusive lock holder %lu "
					    "unlocking", cpu_id, lkp->lk_cpu);
				} else {
					panic("lockmgr: pid %d, not "
					    "exclusive lock holder %d "
					    "unlocking", pid,
					    lkp->lk_lockholder);
				}
			}
			/* Dropping below the recorded level ends recursion. */
			if (lkp->lk_exclusivecount == lkp->lk_recurselevel)
				lkp->lk_recurselevel = 0;
			lkp->lk_exclusivecount--;
			COUNT(lkp, p, cpu_id, -1);
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
#if defined(LOCKDEBUG)
				lkp->lk_unlock_file = file;
				lkp->lk_unlock_line = line;
#endif
				DONTHAVEIT(lkp);
			}
		} else if (lkp->lk_sharecount != 0) {
			lkp->lk_sharecount--;
			COUNT(lkp, p, cpu_id, -1);
		}
#ifdef DIAGNOSTIC
		else
			panic("lockmgr: release of unlocked lock!");
#endif
		WAKEUP_WAITER(lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do. Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (WEHOLDIT(lkp, pid, cpu_id))
			panic("lockmgr: draining against myself");
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
			error = EBUSY;
			break;
		}
		/* Wait until nobody holds or waits for the lock at all. */
		ACQUIRE(lkp, error, extflags, 1,
		    ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0 ||
		     lkp->lk_waitcount != 0));
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, cpu_id);
#if defined(LOCKDEBUG)
		lkp->lk_lock_file = file;
		lkp->lk_lock_line = line;
#endif
		HAVEIT(lkp);
		lkp->lk_exclusivecount = 1;
		/* XXX unlikely that we'd want this */
		if (extflags & LK_SETRECURSE)
			lkp->lk_recurselevel = 1;
		COUNT(lkp, p, cpu_id, 1);
		break;

	default:
		INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	/*
	 * If a drainer is waiting (LK_WAITDRAIN; sleep locks only) and
	 * the lock just became completely idle, wake the drainer, which
	 * sleeps on &lk_flags rather than on the lock itself.
	 */
	if ((lkp->lk_flags & (LK_WAITDRAIN|LK_SPIN)) == LK_WAITDRAIN &&
	    ((lkp->lk_flags &
	     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
	     lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	/*
	 * Note that this panic will be a recursive panic, since
	 * we only set lock_shutdown_noblock above if panicstr != NULL.
	 */
	if (error && lock_shutdown_noblock)
		panic("lockmgr: deadlock (see previous panic)");

	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
	return (error);
}
823
/*
 * For a recursive spinlock held one or more times by the current CPU,
 * release all N locks, and return N.
 * Intended for use in mi_switch() shortly before context switching;
 * the returned count is handed back to spinlock_acquire_count() after
 * resuming.
 */

int
#if defined(LOCKDEBUG)
_spinlock_release_all(__volatile struct lock *lkp, const char *file, int line)
#else
spinlock_release_all(__volatile struct lock *lkp)
#endif
{
	int s, count;
	cpuid_t cpu_id;

	KASSERT(lkp->lk_flags & LK_SPIN);

	INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);

	cpu_id = cpu_number();
	count = lkp->lk_exclusivecount;	/* number of recursive holds */

	if (count != 0) {
#ifdef DIAGNOSTIC
		/* pid argument is ignored for spin locks; pass 0. */
		if (WEHOLDIT(lkp, 0, cpu_id) == 0) {
			panic("spinlock_release_all: processor %lu, not "
			    "exclusive lock holder %lu "
			    "unlocking", (long)cpu_id, lkp->lk_cpu);
		}
#endif
		lkp->lk_recurselevel = 0;
		lkp->lk_exclusivecount = 0;
		COUNT_CPU(cpu_id, -count);
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
#if defined(LOCKDEBUG)
		lkp->lk_unlock_file = file;
		lkp->lk_unlock_line = line;
#endif
		DONTHAVEIT(lkp);
	}
#ifdef DIAGNOSTIC
	else if (lkp->lk_sharecount != 0)
		panic("spinlock_release_all: release of shared lock!");
	else
		panic("spinlock_release_all: release of unlocked lock!");
#endif
	INTERLOCK_RELEASE(lkp, LK_SPIN, s);

	return (count);
}
876
/*
 * Acquire a recursive spinlock `count' times on behalf of the current
 * CPU -- the counterpart of spinlock_release_all().
 * Intended for use in mi_switch() right after resuming execution.
 */

void
#if defined(LOCKDEBUG)
_spinlock_acquire_count(__volatile struct lock *lkp, int count,
    const char *file, int line)
#else
spinlock_acquire_count(__volatile struct lock *lkp, int count)
#endif
{
	int s, error;
	cpuid_t cpu_id;

	KASSERT(lkp->lk_flags & LK_SPIN);

	INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);

	cpu_id = cpu_number();

#ifdef DIAGNOSTIC
	if (WEHOLDIT(lkp, LK_NOPROC, cpu_id))
		panic("spinlock_acquire_count: processor %lu already holds lock\n", (long)cpu_id);
#endif
	/*
	 * Try to acquire the want_exclusive flag.
	 */
	ACQUIRE(lkp, error, LK_SPIN, 0, lkp->lk_flags &
	    (LK_HAVE_EXCL | LK_WANT_EXCL));
	lkp->lk_flags |= LK_WANT_EXCL;
	/*
	 * Wait for shared locks and upgrades to finish.
	 */
	ACQUIRE(lkp, error, LK_SPIN, 0, lkp->lk_sharecount != 0 ||
	    (lkp->lk_flags & LK_WANT_UPGRADE));
	lkp->lk_flags &= ~LK_WANT_EXCL;
	lkp->lk_flags |= LK_HAVE_EXCL;
	SETHOLDER(lkp, LK_NOPROC, cpu_id);
#if defined(LOCKDEBUG)
	lkp->lk_lock_file = file;
	lkp->lk_lock_line = line;
#endif
	HAVEIT(lkp);
	if (lkp->lk_exclusivecount != 0)
		panic("lockmgr: non-zero exclusive count");
		/* XXX panic text says "lockmgr"; inherited from lockmgr() */
	/* Restore the full recursion depth in one step. */
	lkp->lk_exclusivecount = count;
	lkp->lk_recurselevel = 1;
	COUNT_CPU(cpu_id, count);

	/* lk_flags has LK_SPIN set, so this is an LK_SPIN release. */
	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
}
931
932
933
/*
 * Print out information about state of a lock. Used by VOP_PRINT
 * routines to display status about contained locks.
 *
 * No interlock is taken here, so the values printed are only a
 * (possibly stale) snapshot.
 */
void
lockmgr_printinfo(__volatile struct lock *lkp)
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL) {
		printf(" lock type %s: EXCL (count %d) by ",
		    lkp->lk_wmesg, lkp->lk_exclusivecount);
		if (lkp->lk_flags & LK_SPIN)
			printf("processor %lu", lkp->lk_cpu);
		else
			printf("pid %d", lkp->lk_lockholder);
	} else
		printf(" not locked");
	/* Spin locks do not maintain sleeping waiters to report. */
	if ((lkp->lk_flags & LK_SPIN) == 0 && lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}
957
#if defined(LOCKDEBUG) /* { */
/* List of all simple locks currently held, for LOCKDEBUG reporting. */
TAILQ_HEAD(, simplelock) simplelock_list =
    TAILQ_HEAD_INITIALIZER(simplelock_list);

#if defined(MULTIPROCESSOR) /* { */
struct simplelock simplelock_list_slock = SIMPLELOCK_INITIALIZER;

#define	SLOCK_LIST_LOCK()						\
	__cpu_simple_lock(&simplelock_list_slock.lock_data)

#define	SLOCK_LIST_UNLOCK()						\
	__cpu_simple_unlock(&simplelock_list_slock.lock_data)

/* Per-CPU count of simple locks held, kept in cpu_info. */
#define	SLOCK_COUNT(x)							\
	curcpu()->ci_simple_locks += (x)
#else
u_long simple_locks;

#define	SLOCK_LIST_LOCK()	/* nothing */

#define	SLOCK_LIST_UNLOCK()	/* nothing */

#define	SLOCK_COUNT(x)		simple_locks += (x)
#endif /* MULTIPROCESSOR */ /* } */

#ifdef MULTIPROCESSOR
#define SLOCK_MP()		lock_printf("on cpu %ld\n", 		\
				    (u_long) cpu_number())
#else
#define SLOCK_MP()		/* nothing */
#endif

/*
 * Report a simple-lock error: the message, where we are now (id:l),
 * where the lock was last locked/unlocked, a stack trace, and
 * optionally drop into the debugger (see simple_lock_debugger).
 */
#define	SLOCK_WHERE(str, alp, id, l)					\
do {									\
	lock_printf("\n");						\
	lock_printf(str);						\
	lock_printf("lock: %p, currently at: %s:%d\n", (alp), (id), (l)); \
	SLOCK_MP();							\
	if ((alp)->lock_file != NULL)					\
		lock_printf("last locked: %s:%d\n", (alp)->lock_file,	\
		    (alp)->lock_line);					\
	if ((alp)->unlock_file != NULL)					\
		lock_printf("last unlocked: %s:%d\n", (alp)->unlock_file, \
		    (alp)->unlock_line);				\
	SLOCK_TRACE()							\
	SLOCK_DEBUGGER();						\
} while (/*CONSTCOND*/0)
1005
1006 /*
1007 * Simple lock functions so that the debugger can see from whence
1008 * they are being called.
1009 */
1010 void
1011 simple_lock_init(struct simplelock *alp)
1012 {
1013
1014 #if defined(MULTIPROCESSOR) /* { */
1015 __cpu_simple_lock_init(&alp->lock_data);
1016 #else
1017 alp->lock_data = __SIMPLELOCK_UNLOCKED;
1018 #endif /* } */
1019 alp->lock_file = NULL;
1020 alp->lock_line = 0;
1021 alp->unlock_file = NULL;
1022 alp->unlock_line = 0;
1023 alp->lock_holder = LK_NOCPU;
1024 }
1025
/*
 * LOCKDEBUG simple_lock(): detect recursion (same CPU already holds
 * the lock) and apparently-uninitialized locks, record the acquisition
 * site (id:l) and holder, and add the lock to simplelock_list.
 * Runs entirely at spllock().
 */
void
_simple_lock(__volatile struct simplelock *alp, const char *id, int l)
{
	cpuid_t cpu_id = cpu_number();
	int s;

	s = spllock();

	/*
	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
	 * don't take any action, and just fall into the normal spin case.
	 */
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
#if defined(MULTIPROCESSOR) /* { */
		if (alp->lock_holder == cpu_id) {
			SLOCK_WHERE("simple_lock: locking against myself\n",
			    alp, id, l);
			goto out;
		}
#else
		SLOCK_WHERE("simple_lock: lock held\n", alp, id, l);
		goto out;
#endif /* MULTIPROCESSOR */ /* } */
	}

#if defined(MULTIPROCESSOR) /* { */
	/* Acquire the lock before modifying any fields. */
	__cpu_simple_lock(&alp->lock_data);
#else
	alp->lock_data = __SIMPLELOCK_LOCKED;
#endif /* } */

	/* An unlocked lock should have no recorded holder. */
	if (alp->lock_holder != LK_NOCPU) {
		SLOCK_WHERE("simple_lock: uninitialized lock\n",
		    alp, id, l);
	}
	alp->lock_file = id;
	alp->lock_line = l;
	alp->lock_holder = cpu_id;

	SLOCK_LIST_LOCK();
	/* XXX Cast away volatile */
	TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(1);

 out:
	splx(s);
}
1076
/*
 * Return nonzero iff the calling CPU currently holds `alp'.
 */
int
_simple_lock_held(__volatile struct simplelock *alp)
{
#if defined(MULTIPROCESSOR) || defined(DIAGNOSTIC)
	cpuid_t cpu_id = cpu_number();
#endif
	int s, locked = 0;

	s = spllock();

#if defined(MULTIPROCESSOR)
	/*
	 * Probe with a try-lock: if it fails the lock is held by
	 * someone, and it is ours iff lock_holder matches.  If it
	 * succeeds the lock was free, so release it again at once.
	 */
	if (__cpu_simple_lock_try(&alp->lock_data) == 0)
		locked = (alp->lock_holder == cpu_id);
	else
		__cpu_simple_unlock(&alp->lock_data);
#else
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
		locked = 1;
		/* On a UP kernel the only possible holder is ourselves. */
		KASSERT(alp->lock_holder == cpu_id);
	}
#endif

	splx(s);

	return (locked);
}
1103
/*
 * Debugging version of simple_lock_try: attempt to acquire `alp'
 * without blocking.  Returns nonzero on success, recording the call
 * site and owner and linking the lock onto the held-locks list;
 * returns 0 if the lock is already held.  Diagnoses an attempt to
 * try-lock a lock this CPU already holds.
 */
int
_simple_lock_try(__volatile struct simplelock *alp, const char *id, int l)
{
	cpuid_t cpu_id = cpu_number();
	int s, rv = 0;

	s = spllock();

	/*
	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
	 * don't take any action.
	 */
#if defined(MULTIPROCESSOR) /* { */
	/* Try-lock failed: complain only if we are the current holder. */
	if ((rv = __cpu_simple_lock_try(&alp->lock_data)) == 0) {
		if (alp->lock_holder == cpu_id)
			SLOCK_WHERE("simple_lock_try: locking against myself\n",
			    alp, id, l);
		goto out;
	}
#else
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
		SLOCK_WHERE("simple_lock_try: lock held\n", alp, id, l);
		goto out;
	}
	alp->lock_data = __SIMPLELOCK_LOCKED;
#endif /* MULTIPROCESSOR */ /* } */

	/*
	 * At this point, we have acquired the lock.
	 */

	rv = 1;

	/* Record the acquisition site and owner. */
	alp->lock_file = id;
	alp->lock_line = l;
	alp->lock_holder = cpu_id;

	SLOCK_LIST_LOCK();
	/* XXX Cast away volatile. */
	TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(1);

 out:
	splx(s);
	return (rv);
}
1152
/*
 * Debugging version of simple_unlock: release `alp', recording the
 * release site (`id':`l'), unlinking the lock from the held-locks
 * list, and clearing the holder.  Diagnoses unlocking a lock that is
 * not held.
 */
void
_simple_unlock(__volatile struct simplelock *alp, const char *id, int l)
{
	int s;

	s = spllock();

	/*
	 * MULTIPROCESSOR case: This is `safe' because we think we hold
	 * the lock, and if we don't, we don't take any action.
	 */
	if (alp->lock_data == __SIMPLELOCK_UNLOCKED) {
		SLOCK_WHERE("simple_unlock: lock not held\n",
		    alp, id, l);
		goto out;
	}

	SLOCK_LIST_LOCK();
	TAILQ_REMOVE(&simplelock_list, alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(-1);

	alp->list.tqe_next = NULL;	/* sanity */
	alp->list.tqe_prev = NULL;	/* sanity */

	/* Record the release site. */
	alp->unlock_file = id;
	alp->unlock_line = l;

#if defined(MULTIPROCESSOR) /* { */
	alp->lock_holder = LK_NOCPU;
	/* Now that we've modified all fields, release the lock. */
	__cpu_simple_unlock(&alp->lock_data);
#else
	alp->lock_data = __SIMPLELOCK_UNLOCKED;
	KASSERT(alp->lock_holder == cpu_number());
	alp->lock_holder = LK_NOCPU;
#endif /* } */

 out:
	splx(s);
}
1195
1196 void
1197 simple_lock_dump(void)
1198 {
1199 struct simplelock *alp;
1200 int s;
1201
1202 s = spllock();
1203 SLOCK_LIST_LOCK();
1204 lock_printf("all simple locks:\n");
1205 TAILQ_FOREACH(alp, &simplelock_list, list) {
1206 lock_printf("%p CPU %lu %s:%d\n", alp, alp->lock_holder,
1207 alp->lock_file, alp->lock_line);
1208 }
1209 SLOCK_LIST_UNLOCK();
1210 splx(s);
1211 }
1212
1213 void
1214 simple_lock_freecheck(void *start, void *end)
1215 {
1216 struct simplelock *alp;
1217 int s;
1218
1219 s = spllock();
1220 SLOCK_LIST_LOCK();
1221 TAILQ_FOREACH(alp, &simplelock_list, list) {
1222 if ((void *)alp >= start && (void *)alp < end) {
1223 lock_printf("freeing simple_lock %p CPU %lu %s:%d\n",
1224 alp, alp->lock_holder, alp->lock_file,
1225 alp->lock_line);
1226 SLOCK_DEBUGGER();
1227 }
1228 }
1229 SLOCK_LIST_UNLOCK();
1230 splx(s);
1231 }
1232
1233 /*
1234 * We must be holding exactly one lock: the sched_lock.
1235 */
1236
/*
 * Called around context switch: verify that sched_lock is the only
 * simple lock held by this CPU.
 */
void
simple_lock_switchcheck(void)
{

	simple_lock_only_held(&sched_lock, "switching");
}
1243
/*
 * Verify that `lp' (which may be NULL) is the only simple lock held by
 * the calling CPU.  `where' names the operation for the diagnostic.
 * On a violation, print the offending lock, dump a stack trace, and
 * enter the debugger if so configured.
 */
void
simple_lock_only_held(volatile struct simplelock *lp, const char *where)
{
	struct simplelock *alp;
	cpuid_t cpu_id = cpu_number();
	int s;

	/* If a lock was named, we had better actually hold it. */
	if (lp) {
		LOCK_ASSERT(simple_lock_held(lp));
	}
	s = spllock();
	SLOCK_LIST_LOCK();
	TAILQ_FOREACH(alp, &simplelock_list, list) {
		if (alp == lp)
			continue;	/* the one permitted lock */
		if (alp->lock_holder == cpu_id)
			break;		/* offender found */
	}
	SLOCK_LIST_UNLOCK();
	splx(s);

	/* alp is non-NULL iff the loop broke out on an offending lock. */
	if (alp != NULL) {
		lock_printf("\n%s with held simple_lock %p "
		    "CPU %lu %s:%d\n",
		    where, alp, alp->lock_holder, alp->lock_file,
		    alp->lock_line);
		SLOCK_TRACE();
		SLOCK_DEBUGGER();
	}
}
1274 #endif /* LOCKDEBUG */ /* } */
1275
1276 #if defined(MULTIPROCESSOR)
1277 /*
1278 * Functions for manipulating the kernel_lock. We put them here
1279 * so that they show up in profiles.
1280 */
1281
/* The "big lock": serializes entry into the kernel on MP systems. */
struct lock kernel_lock;
1283
/*
 * Initialize kernel_lock as a spin lock named "klock".
 */
void
_kernel_lock_init(void)
{

	spinlockinit(&kernel_lock, "klock", 0);
}
1290
1291 /*
1292 * Acquire/release the kernel lock. Intended for use in the scheduler
1293 * and the lower half of the kernel.
1294 */
/*
 * Acquire the kernel lock; `flag' is passed through to spinlockmgr()
 * (e.g. LK_EXCLUSIVE).  The scheduler lock must not be held here.
 */
void
_kernel_lock(int flag)
{

	SCHED_ASSERT_UNLOCKED();
	spinlockmgr(&kernel_lock, flag, 0);
}
1302
/*
 * Release the kernel lock.
 */
void
_kernel_unlock(void)
{

	spinlockmgr(&kernel_lock, LK_RELEASE, 0);
}
1309
1310 /*
1311 * Acquire/release the kernel_lock on behalf of a process. Intended for
1312 * use in the top half of the kernel.
1313 */
/*
 * Acquire the kernel lock exclusively on behalf of process `p' and
 * mark the process with P_BIGLOCK.  (The flag presumably lets the
 * scheduler release/reacquire the big lock across context switches —
 * NOTE(review): confirm against the context-switch code.)
 */
void
_kernel_proc_lock(struct proc *p)
{

	SCHED_ASSERT_UNLOCKED();
	spinlockmgr(&kernel_lock, LK_EXCLUSIVE, 0);
	p->p_flag |= P_BIGLOCK;
}
1322
/*
 * Release the kernel lock on behalf of process `p', clearing
 * P_BIGLOCK first so the process is no longer marked as holding it.
 */
void
_kernel_proc_unlock(struct proc *p)
{

	p->p_flag &= ~P_BIGLOCK;
	spinlockmgr(&kernel_lock, LK_RELEASE, 0);
}
1330 #endif /* MULTIPROCESSOR */
1331