1 /* $NetBSD: kern_lock.c,v 1.68 2003/01/15 23:11:05 pk Exp $ */
2
3 /*-
4 * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * This code is derived from software contributed to The NetBSD Foundation
12 * by Ross Harvey.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions
16 * are met:
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in the
21 * documentation and/or other materials provided with the distribution.
22 * 3. All advertising materials mentioning features or use of this software
23 * must display the following acknowledgement:
24 * This product includes software developed by the NetBSD
25 * Foundation, Inc. and its contributors.
26 * 4. Neither the name of The NetBSD Foundation nor the names of its
27 * contributors may be used to endorse or promote products derived
28 * from this software without specific prior written permission.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
31 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
32 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
33 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
34 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
35 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
36 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
37 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
38 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
39 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
40 * POSSIBILITY OF SUCH DAMAGE.
41 */
42
43 /*
44 * Copyright (c) 1995
45 * The Regents of the University of California. All rights reserved.
46 *
47 * This code contains ideas from software contributed to Berkeley by
48 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
49 * System project at Carnegie-Mellon University.
50 *
51 * Redistribution and use in source and binary forms, with or without
52 * modification, are permitted provided that the following conditions
53 * are met:
54 * 1. Redistributions of source code must retain the above copyright
55 * notice, this list of conditions and the following disclaimer.
56 * 2. Redistributions in binary form must reproduce the above copyright
57 * notice, this list of conditions and the following disclaimer in the
58 * documentation and/or other materials provided with the distribution.
59 * 3. All advertising materials mentioning features or use of this software
60 * must display the following acknowledgement:
61 * This product includes software developed by the University of
62 * California, Berkeley and its contributors.
63 * 4. Neither the name of the University nor the names of its contributors
64 * may be used to endorse or promote products derived from this software
65 * without specific prior written permission.
66 *
67 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
68 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
69 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
70 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
71 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
72 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
73 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
74 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
75 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
76 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
77 * SUCH DAMAGE.
78 *
79 * @(#)kern_lock.c 8.18 (Berkeley) 5/21/95
80 */
81
82 #include <sys/cdefs.h>
83 __KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.68 2003/01/15 23:11:05 pk Exp $");
84
85 #include "opt_multiprocessor.h"
86 #include "opt_lockdebug.h"
87 #include "opt_ddb.h"
88
89 #include <sys/param.h>
90 #include <sys/proc.h>
91 #include <sys/lock.h>
92 #include <sys/systm.h>
93 #include <machine/cpu.h>
94
95 #if defined(LOCKDEBUG)
96 #include <sys/syslog.h>
97 /*
98 * Note that stdarg.h and the ANSI-style va_start macro are used for both
99 * ANSI and traditional C compiles.
100 * XXX: this requires that stdarg.h define: va_alist and va_dcl
101 */
102 #include <machine/stdarg.h>
103
104 void lock_printf(const char *fmt, ...)
105 __attribute__((__format__(__printf__,1,2)));
106
107 int lock_debug_syslog = 0; /* defaults to printf, but can be patched */
108
109 #ifdef DDB
110 #include <ddb/ddbvar.h>
111 #include <machine/db_machdep.h>
112 #include <ddb/db_command.h>
113 #include <ddb/db_interface.h>
114 #endif
115 #endif
116
117 /*
118 * Locking primitives implementation.
119 * Locks provide shared/exclusive synchronization.
120 */
121
122 #if defined(LOCKDEBUG) || defined(DIAGNOSTIC) /* { */
123 #if defined(MULTIPROCESSOR) /* { */
124 #define COUNT_CPU(cpu_id, x) \
125 curcpu()->ci_spin_locks += (x)
126 #else
127 u_long spin_locks;
128 #define COUNT_CPU(cpu_id, x) spin_locks += (x)
129 #endif /* MULTIPROCESSOR */ /* } */
130
131 #define COUNT(lkp, p, cpu_id, x) \
132 do { \
133 if ((lkp)->lk_flags & LK_SPIN) \
134 COUNT_CPU((cpu_id), (x)); \
135 else \
136 (p)->p_locks += (x); \
137 } while (/*CONSTCOND*/0)
138 #else
139 #define COUNT(lkp, p, cpu_id, x)
140 #define COUNT_CPU(cpu_id, x)
141 #endif /* LOCKDEBUG || DIAGNOSTIC */ /* } */
142
143 #ifndef SPINLOCK_SPIN_HOOK /* from <machine/lock.h> */
144 #define SPINLOCK_SPIN_HOOK /* nothing */
145 #endif
146
147 #define INTERLOCK_ACQUIRE(lkp, flags, s) \
148 do { \
149 if ((flags) & LK_SPIN) \
150 s = spllock(); \
151 simple_lock(&(lkp)->lk_interlock); \
152 } while (/*CONSTCOND*/ 0)
153
154 #define INTERLOCK_RELEASE(lkp, flags, s) \
155 do { \
156 simple_unlock(&(lkp)->lk_interlock); \
157 if ((flags) & LK_SPIN) \
158 splx(s); \
159 } while (/*CONSTCOND*/ 0)
160
161 #ifdef DDB /* { */
162 #ifdef MULTIPROCESSOR
163 int simple_lock_debugger = 1; /* more serious on MP */
164 #else
165 int simple_lock_debugger = 0;
166 #endif
167 #define SLOCK_DEBUGGER() if (simple_lock_debugger) Debugger()
168 #define SLOCK_TRACE() \
169 db_stack_trace_print((db_expr_t)__builtin_frame_address(0), \
170 TRUE, 65535, "", printf);
171 #else
172 #define SLOCK_DEBUGGER() /* nothing */
173 #define SLOCK_TRACE() /* nothing */
174 #endif /* } */
175
176 #if defined(LOCKDEBUG)
177 #if defined(DDB)
178 #define SPINLOCK_SPINCHECK_DEBUGGER Debugger()
179 #else
180 #define SPINLOCK_SPINCHECK_DEBUGGER /* nothing */
181 #endif
182
183 #define SPINLOCK_SPINCHECK_DECL \
184 /* 32-bits of count -- wrap constitutes a "spinout" */ \
185 uint32_t __spinc = 0
186
187 #define SPINLOCK_SPINCHECK \
188 do { \
189 if (++__spinc == 0) { \
190 printf("LK_SPIN spinout, excl %d, share %d\n", \
191 lkp->lk_exclusivecount, lkp->lk_sharecount); \
192 if (lkp->lk_exclusivecount) \
193 printf("held by CPU %lu\n", \
194 (u_long) lkp->lk_cpu); \
195 if (lkp->lk_lock_file) \
196 printf("last locked at %s:%d\n", \
197 lkp->lk_lock_file, lkp->lk_lock_line); \
198 if (lkp->lk_unlock_file) \
199 printf("last unlocked at %s:%d\n", \
200 lkp->lk_unlock_file, lkp->lk_unlock_line); \
201 SLOCK_TRACE(); \
202 SPINLOCK_SPINCHECK_DEBUGGER; \
203 } \
204 } while (/*CONSTCOND*/ 0)
205 #else
206 #define SPINLOCK_SPINCHECK_DECL /* nothing */
207 #define SPINLOCK_SPINCHECK /* nothing */
208 #endif /* LOCKDEBUG */
209
210 /*
211 * Acquire a resource.
212 */
213 #define ACQUIRE(lkp, error, extflags, drain, wanted) \
214 if ((extflags) & LK_SPIN) { \
215 int interlocked; \
216 SPINLOCK_SPINCHECK_DECL; \
217 \
218 if ((drain) == 0) \
219 (lkp)->lk_waitcount++; \
220 for (interlocked = 1;;) { \
221 SPINLOCK_SPINCHECK; \
222 if (wanted) { \
223 if (interlocked) { \
224 INTERLOCK_RELEASE((lkp), \
225 LK_SPIN, s); \
226 interlocked = 0; \
227 } \
228 SPINLOCK_SPIN_HOOK; \
229 } else if (interlocked) { \
230 break; \
231 } else { \
232 INTERLOCK_ACQUIRE((lkp), LK_SPIN, s); \
233 interlocked = 1; \
234 } \
235 } \
236 if ((drain) == 0) \
237 (lkp)->lk_waitcount--; \
238 KASSERT((wanted) == 0); \
239 error = 0; /* sanity */ \
240 } else { \
241 for (error = 0; wanted; ) { \
242 if ((drain)) \
243 (lkp)->lk_flags |= LK_WAITDRAIN; \
244 else \
245 (lkp)->lk_waitcount++; \
246 /* XXX Cast away volatile. */ \
247 error = ltsleep((drain) ? \
248 (void *)&(lkp)->lk_flags : \
249 (void *)(lkp), (lkp)->lk_prio, \
250 (lkp)->lk_wmesg, (lkp)->lk_timo, \
251 &(lkp)->lk_interlock); \
252 if ((drain) == 0) \
253 (lkp)->lk_waitcount--; \
254 if (error) \
255 break; \
256 if ((extflags) & LK_SLEEPFAIL) { \
257 error = ENOLCK; \
258 break; \
259 } \
260 } \
261 }
262
263 #define SETHOLDER(lkp, pid, cpu_id) \
264 do { \
265 if ((lkp)->lk_flags & LK_SPIN) \
266 (lkp)->lk_cpu = cpu_id; \
267 else \
268 (lkp)->lk_lockholder = pid; \
269 } while (/*CONSTCOND*/0)
270
271 #define WEHOLDIT(lkp, pid, cpu_id) \
272 (((lkp)->lk_flags & LK_SPIN) != 0 ? \
273 ((lkp)->lk_cpu == (cpu_id)) : ((lkp)->lk_lockholder == (pid)))
274
275 #define WAKEUP_WAITER(lkp) \
276 do { \
277 if (((lkp)->lk_flags & LK_SPIN) == 0 && (lkp)->lk_waitcount) { \
278 /* XXX Cast away volatile. */ \
279 wakeup((void *)(lkp)); \
280 } \
281 } while (/*CONSTCOND*/0)
282
283 #if defined(LOCKDEBUG) /* { */
284 #if defined(MULTIPROCESSOR) /* { */
285 struct simplelock spinlock_list_slock = SIMPLELOCK_INITIALIZER;
286
287 #define SPINLOCK_LIST_LOCK() \
288 __cpu_simple_lock(&spinlock_list_slock.lock_data)
289
290 #define SPINLOCK_LIST_UNLOCK() \
291 __cpu_simple_unlock(&spinlock_list_slock.lock_data)
292 #else
293 #define SPINLOCK_LIST_LOCK() /* nothing */
294
295 #define SPINLOCK_LIST_UNLOCK() /* nothing */
296 #endif /* MULTIPROCESSOR */ /* } */
297
298 TAILQ_HEAD(, lock) spinlock_list =
299 TAILQ_HEAD_INITIALIZER(spinlock_list);
300
301 #define HAVEIT(lkp) \
302 do { \
303 if ((lkp)->lk_flags & LK_SPIN) { \
304 int s = spllock(); \
305 SPINLOCK_LIST_LOCK(); \
306 /* XXX Cast away volatile. */ \
307 TAILQ_INSERT_TAIL(&spinlock_list, (struct lock *)(lkp), \
308 lk_list); \
309 SPINLOCK_LIST_UNLOCK(); \
310 splx(s); \
311 } \
312 } while (/*CONSTCOND*/0)
313
314 #define DONTHAVEIT(lkp) \
315 do { \
316 if ((lkp)->lk_flags & LK_SPIN) { \
317 int s = spllock(); \
318 SPINLOCK_LIST_LOCK(); \
319 /* XXX Cast away volatile. */ \
320 TAILQ_REMOVE(&spinlock_list, (struct lock *)(lkp), \
321 lk_list); \
322 SPINLOCK_LIST_UNLOCK(); \
323 splx(s); \
324 } \
325 } while (/*CONSTCOND*/0)
326 #else
327 #define HAVEIT(lkp) /* nothing */
328
329 #define DONTHAVEIT(lkp) /* nothing */
330 #endif /* LOCKDEBUG */ /* } */
331
332 #if defined(LOCKDEBUG)
333 /*
334 * Lock debug printing routine; can be configured to print to console
335 * or log to syslog.
336 */
337 void
338 lock_printf(const char *fmt, ...)
339 {
340 char b[150];
341 va_list ap;
342
343 va_start(ap, fmt);
344 if (lock_debug_syslog)
345 vlog(LOG_DEBUG, fmt, ap);
346 else {
347 vsnprintf(b, sizeof(b), fmt, ap);
348 printf_nolog("%s", b);
349 }
350 va_end(ap);
351 }
352 #endif /* LOCKDEBUG */
353
354 /*
355 * Initialize a lock; required before use.
356 */
357 void
358 lockinit(struct lock *lkp, int prio, const char *wmesg, int timo, int flags)
359 {
360
361 memset(lkp, 0, sizeof(struct lock));
362 simple_lock_init(&lkp->lk_interlock);
363 lkp->lk_flags = flags & LK_EXTFLG_MASK;
364 if (flags & LK_SPIN)
365 lkp->lk_cpu = LK_NOCPU;
366 else {
367 lkp->lk_lockholder = LK_NOPROC;
368 lkp->lk_prio = prio;
369 lkp->lk_timo = timo;
370 }
371 lkp->lk_wmesg = wmesg; /* just a name for spin locks */
372 #if defined(LOCKDEBUG)
373 lkp->lk_lock_file = NULL;
374 lkp->lk_unlock_file = NULL;
375 #endif
376 }
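/*
 * Illustrative sketch (not compiled): a lock must be initialized with
 * lockinit() before any lockmgr() operation.  The names and the use of
 * PRIBIO as the sleep priority below are examples only, not part of this
 * file.
 */
#if 0
struct lock example_sleep_lock;
struct lock example_spin_lock;

void
example_lock_setup(void)
{

	/* Sleep lock: sleep at priority PRIBIO, no timeout, no flags. */
	lockinit(&example_sleep_lock, PRIBIO, "exlock", 0, 0);

	/* Spin lock: prio and timo are ignored; wmesg is just a name. */
	lockinit(&example_spin_lock, 0, "exspin", 0, LK_SPIN);
}
#endif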
377
378 /*
379 * Determine the status of a lock.
380 */
381 int
382 lockstatus(struct lock *lkp)
383 {
384 int s = 0, lock_type = 0;
385
386 INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
387 if (lkp->lk_exclusivecount != 0)
388 lock_type = LK_EXCLUSIVE;
389 else if (lkp->lk_sharecount != 0)
390 lock_type = LK_SHARED;
391 INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
392 return (lock_type);
393 }
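/*
 * Illustrative sketch (not compiled): lockstatus() reports whether a
 * lock is currently held exclusively, held shared, or not held at all.
 */
#if 0
void
example_report_lockstatus(struct lock *lkp)
{

	switch (lockstatus(lkp)) {
	case LK_EXCLUSIVE:
		printf("held exclusively\n");
		break;
	case LK_SHARED:
		printf("held shared\n");
		break;
	default:
		printf("not held\n");
		break;
	}
}
#endif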
394
395 #if defined(LOCKDEBUG) || defined(DIAGNOSTIC)
396 /*
397 * Make sure no spin locks are held by a CPU that is about
398 * to context switch.
399 */
400 void
401 spinlock_switchcheck(void)
402 {
403 u_long cnt;
404 int s;
405
406 s = spllock();
407 #if defined(MULTIPROCESSOR)
408 cnt = curcpu()->ci_spin_locks;
409 #else
410 cnt = spin_locks;
411 #endif
412 splx(s);
413
414 if (cnt != 0)
415 panic("spinlock_switchcheck: CPU %lu has %lu spin locks",
416 (u_long) cpu_number(), cnt);
417 }
418 #endif /* LOCKDEBUG || DIAGNOSTIC */
419
420 /*
421 * Locks and IPLs (interrupt priority levels):
422 *
423 * Locks which may be taken from interrupt context must be handled
424 * very carefully; you must spl to the highest IPL where the lock
425 * is needed before acquiring the lock.
426 *
427 * It is also important to avoid deadlock, since certain (very high
428 * priority) interrupts are often needed to keep the system as a whole
429 * from deadlocking, and must not be blocked while you are spinning
430 * waiting for a lower-priority lock.
431 *
432 * In addition, the lock-debugging hooks themselves need to use locks!
433 *
434 * A raw __cpu_simple_lock may be used from interrupts as long as it
435 * is acquired and held at a single IPL.
436 *
437 * A simple_lock (which is a __cpu_simple_lock wrapped with some
438 * debugging hooks) may be used at or below spllock(), which is
439 * typically at or just below splhigh() (i.e. blocks everything
440 * but certain machine-dependent extremely high priority interrupts).
441 *
442 * spinlockmgr spinlocks should be used at or below splsched().
443 *
444 * Some platforms may have interrupts of higher priority than splsched(),
445 * including hard serial interrupts, inter-processor interrupts, and
446 * kernel debugger traps.
447 */
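/*
 * Illustrative sketch (not compiled) of the discipline described above:
 * a simple_lock that is also taken from, say, block I/O interrupt
 * context must be acquired with that interrupt level already blocked.
 * splbio() and the names below are assumptions for illustration only.
 */
#if 0
struct simplelock example_intr_slock;

void
example_touch_intr_shared_state(void)
{
	int s;

	s = splbio();		/* raise the IPL first ... */
	simple_lock(&example_intr_slock);
	/* ... manipulate state shared with the interrupt handler ... */
	simple_unlock(&example_intr_slock);
	splx(s);		/* ... and lower it only after unlocking */
}
#endif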
448
449 /*
450 * XXX XXX kludge around another kludge..
451 *
452 * vfs_shutdown() may be called from interrupt context, either as a result
453 * of a panic, or from the debugger. It proceeds to call
454 * sys_sync(&proc0, ...), pretending it's running on behalf of proc0.
455 *
456 * We would like to make an attempt to sync the filesystems in this case, so
457 * if this happens, we treat attempts to acquire locks specially.
458 * All locks are acquired on behalf of proc0.
459 *
460 * If we've already panicked, we don't block waiting for locks, but
461 * just barge right ahead since we're already going down in flames.
462 */
463
464 /*
465 * Set, change, or release a lock.
466 *
467 * Shared requests increment the shared count. Exclusive requests set the
468 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
469 * accepted shared locks and shared-to-exclusive upgrades to go away.
470 */
471 int
472 #if defined(LOCKDEBUG)
473 _lockmgr(__volatile struct lock *lkp, u_int flags,
474 struct simplelock *interlkp, const char *file, int line)
475 #else
476 lockmgr(__volatile struct lock *lkp, u_int flags,
477 struct simplelock *interlkp)
478 #endif
479 {
480 int error;
481 pid_t pid;
482 int extflags;
483 cpuid_t cpu_id;
484 struct proc *p = curproc;
485 int lock_shutdown_noblock = 0;
486 int s = 0;
487
488 error = 0;
489
490 INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
491 if (flags & LK_INTERLOCK)
492 simple_unlock(interlkp);
493 extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
494
495 #ifdef DIAGNOSTIC /* { */
496 /*
497 * Don't allow spins on sleep locks and don't allow sleeps
498 * on spin locks.
499 */
500 if ((flags ^ lkp->lk_flags) & LK_SPIN)
501 panic("lockmgr: sleep/spin mismatch");
502 #endif /* } */
503
504 if (extflags & LK_SPIN)
505 pid = LK_KERNPROC;
506 else {
507 if (p == NULL) {
508 if (!doing_shutdown) {
509 panic("lockmgr: no context");
510 } else {
511 p = &proc0;
512 if (panicstr && (!(flags & LK_NOWAIT))) {
513 flags |= LK_NOWAIT;
514 lock_shutdown_noblock = 1;
515 }
516 }
517 }
518 pid = p->p_pid;
519 }
520 cpu_id = cpu_number();
521
522 /*
523 * Once a lock has drained, the LK_DRAINING flag is set and an
524 * exclusive lock is returned. The only valid operation thereafter
525 * is a single release of that exclusive lock. This final release
526 * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
527 * further requests of any sort will result in a panic. The bits
528 * selected for these two flags are chosen so that they will be set
529 * in memory that is freed (freed memory is filled with 0xdeadbeef).
530 * The final release is permitted to give a new lease on life to
531 * the lock by specifying LK_REENABLE.
532 */
533 if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
534 #ifdef DIAGNOSTIC /* { */
535 if (lkp->lk_flags & LK_DRAINED)
536 panic("lockmgr: using decommissioned lock");
537 if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
538 WEHOLDIT(lkp, pid, cpu_id) == 0)
539 panic("lockmgr: non-release on draining lock: %d",
540 flags & LK_TYPE_MASK);
541 #endif /* DIAGNOSTIC */ /* } */
542 lkp->lk_flags &= ~LK_DRAINING;
543 if ((flags & LK_REENABLE) == 0)
544 lkp->lk_flags |= LK_DRAINED;
545 }
546
547 switch (flags & LK_TYPE_MASK) {
548
549 case LK_SHARED:
550 if (WEHOLDIT(lkp, pid, cpu_id) == 0) {
551 /*
552 * If just polling, check to see if we will block.
553 */
554 if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
555 (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
556 error = EBUSY;
557 break;
558 }
559 /*
560 * Wait for exclusive locks and upgrades to clear.
561 */
562 ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
563 (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
564 if (error)
565 break;
566 lkp->lk_sharecount++;
567 COUNT(lkp, p, cpu_id, 1);
568 break;
569 }
570 /*
571 * We hold an exclusive lock, so downgrade it to shared.
572 * An alternative would be to fail with EDEADLK.
573 */
574 lkp->lk_sharecount++;
575 COUNT(lkp, p, cpu_id, 1);
576 /* fall into downgrade */
577
578 case LK_DOWNGRADE:
579 if (WEHOLDIT(lkp, pid, cpu_id) == 0 ||
580 lkp->lk_exclusivecount == 0)
581 panic("lockmgr: not holding exclusive lock");
582 lkp->lk_sharecount += lkp->lk_exclusivecount;
583 lkp->lk_exclusivecount = 0;
584 lkp->lk_recurselevel = 0;
585 lkp->lk_flags &= ~LK_HAVE_EXCL;
586 SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
587 #if defined(LOCKDEBUG)
588 lkp->lk_unlock_file = file;
589 lkp->lk_unlock_line = line;
590 #endif
591 DONTHAVEIT(lkp);
592 WAKEUP_WAITER(lkp);
593 break;
594
595 case LK_EXCLUPGRADE:
596 /*
597 * If another process is ahead of us to get an upgrade,
598 * then we want to fail rather than have an intervening
599 * exclusive access.
600 */
601 if (lkp->lk_flags & LK_WANT_UPGRADE) {
602 lkp->lk_sharecount--;
603 COUNT(lkp, p, cpu_id, -1);
604 error = EBUSY;
605 break;
606 }
607 /* fall into normal upgrade */
608
609 case LK_UPGRADE:
610 /*
611 * Upgrade a shared lock to an exclusive one. If another
612 * shared lock has already requested an upgrade to an
613 * exclusive lock, our shared lock is released and an
614 * exclusive lock is requested (which will be granted
615 * after the upgrade). If we return an error, the file
616 * will always be unlocked.
617 */
618 if (WEHOLDIT(lkp, pid, cpu_id) || lkp->lk_sharecount <= 0)
619 panic("lockmgr: upgrade exclusive lock");
620 lkp->lk_sharecount--;
621 COUNT(lkp, p, cpu_id, -1);
622 /*
623 * If we are just polling, check to see if we will block.
624 */
625 if ((extflags & LK_NOWAIT) &&
626 ((lkp->lk_flags & LK_WANT_UPGRADE) ||
627 lkp->lk_sharecount > 1)) {
628 error = EBUSY;
629 break;
630 }
631 if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
632 /*
633 * We are first shared lock to request an upgrade, so
634 * request upgrade and wait for the shared count to
635 * drop to zero, then take exclusive lock.
636 */
637 lkp->lk_flags |= LK_WANT_UPGRADE;
638 ACQUIRE(lkp, error, extflags, 0, lkp->lk_sharecount);
639 lkp->lk_flags &= ~LK_WANT_UPGRADE;
640 if (error)
641 break;
642 lkp->lk_flags |= LK_HAVE_EXCL;
643 SETHOLDER(lkp, pid, cpu_id);
644 #if defined(LOCKDEBUG)
645 lkp->lk_lock_file = file;
646 lkp->lk_lock_line = line;
647 #endif
648 HAVEIT(lkp);
649 if (lkp->lk_exclusivecount != 0)
650 panic("lockmgr: non-zero exclusive count");
651 lkp->lk_exclusivecount = 1;
652 if (extflags & LK_SETRECURSE)
653 lkp->lk_recurselevel = 1;
654 COUNT(lkp, p, cpu_id, 1);
655 break;
656 }
657 /*
658 * Someone else has requested upgrade. Release our shared
659 * lock, awaken upgrade requestor if we are the last shared
660 * lock, then request an exclusive lock.
661 */
662 if (lkp->lk_sharecount == 0)
663 WAKEUP_WAITER(lkp);
664 /* fall into exclusive request */
665
666 case LK_EXCLUSIVE:
667 if (WEHOLDIT(lkp, pid, cpu_id)) {
668 /*
669 * Recursive lock.
670 */
671 if ((extflags & LK_CANRECURSE) == 0 &&
672 lkp->lk_recurselevel == 0) {
673 if (extflags & LK_RECURSEFAIL) {
674 error = EDEADLK;
675 break;
676 } else
677 panic("lockmgr: locking against myself");
678 }
679 lkp->lk_exclusivecount++;
680 if (extflags & LK_SETRECURSE &&
681 lkp->lk_recurselevel == 0)
682 lkp->lk_recurselevel = lkp->lk_exclusivecount;
683 COUNT(lkp, p, cpu_id, 1);
684 break;
685 }
686 /*
687 * If we are just polling, check to see if we will sleep.
688 */
689 if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
690 (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
691 lkp->lk_sharecount != 0)) {
692 error = EBUSY;
693 break;
694 }
695 /*
696 * Try to acquire the want_exclusive flag.
697 */
698 ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
699 (LK_HAVE_EXCL | LK_WANT_EXCL));
700 if (error)
701 break;
702 lkp->lk_flags |= LK_WANT_EXCL;
703 /*
704 * Wait for shared locks and upgrades to finish.
705 */
706 ACQUIRE(lkp, error, extflags, 0, lkp->lk_sharecount != 0 ||
707 (lkp->lk_flags & LK_WANT_UPGRADE));
708 lkp->lk_flags &= ~LK_WANT_EXCL;
709 if (error)
710 break;
711 lkp->lk_flags |= LK_HAVE_EXCL;
712 SETHOLDER(lkp, pid, cpu_id);
713 #if defined(LOCKDEBUG)
714 lkp->lk_lock_file = file;
715 lkp->lk_lock_line = line;
716 #endif
717 HAVEIT(lkp);
718 if (lkp->lk_exclusivecount != 0)
719 panic("lockmgr: non-zero exclusive count");
720 lkp->lk_exclusivecount = 1;
721 if (extflags & LK_SETRECURSE)
722 lkp->lk_recurselevel = 1;
723 COUNT(lkp, p, cpu_id, 1);
724 break;
725
726 case LK_RELEASE:
727 if (lkp->lk_exclusivecount != 0) {
728 if (WEHOLDIT(lkp, pid, cpu_id) == 0) {
729 if (lkp->lk_flags & LK_SPIN) {
730 panic("lockmgr: processor %lu, not "
731 "exclusive lock holder %lu "
732 "unlocking", cpu_id, lkp->lk_cpu);
733 } else {
734 panic("lockmgr: pid %d, not "
735 "exclusive lock holder %d "
736 "unlocking", pid,
737 lkp->lk_lockholder);
738 }
739 }
740 if (lkp->lk_exclusivecount == lkp->lk_recurselevel)
741 lkp->lk_recurselevel = 0;
742 lkp->lk_exclusivecount--;
743 COUNT(lkp, p, cpu_id, -1);
744 if (lkp->lk_exclusivecount == 0) {
745 lkp->lk_flags &= ~LK_HAVE_EXCL;
746 SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
747 #if defined(LOCKDEBUG)
748 lkp->lk_unlock_file = file;
749 lkp->lk_unlock_line = line;
750 #endif
751 DONTHAVEIT(lkp);
752 }
753 } else if (lkp->lk_sharecount != 0) {
754 lkp->lk_sharecount--;
755 COUNT(lkp, p, cpu_id, -1);
756 }
757 #ifdef DIAGNOSTIC
758 else
759 panic("lockmgr: release of unlocked lock!");
760 #endif
761 WAKEUP_WAITER(lkp);
762 break;
763
764 case LK_DRAIN:
765 /*
766 * Check that we do not already hold the lock, as it can
767 * never drain if we do. Unfortunately, we have no way to
768 * check for holding a shared lock, but at least we can
769 * check for an exclusive one.
770 */
771 if (WEHOLDIT(lkp, pid, cpu_id))
772 panic("lockmgr: draining against myself");
773 /*
774 * If we are just polling, check to see if we will sleep.
775 */
776 if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
777 (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
778 lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
779 error = EBUSY;
780 break;
781 }
782 ACQUIRE(lkp, error, extflags, 1,
783 ((lkp->lk_flags &
784 (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
785 lkp->lk_sharecount != 0 ||
786 lkp->lk_waitcount != 0));
787 if (error)
788 break;
789 lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
790 SETHOLDER(lkp, pid, cpu_id);
791 #if defined(LOCKDEBUG)
792 lkp->lk_lock_file = file;
793 lkp->lk_lock_line = line;
794 #endif
795 HAVEIT(lkp);
796 lkp->lk_exclusivecount = 1;
797 /* XXX unlikely that we'd want this */
798 if (extflags & LK_SETRECURSE)
799 lkp->lk_recurselevel = 1;
800 COUNT(lkp, p, cpu_id, 1);
801 break;
802
803 default:
804 INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
805 panic("lockmgr: unknown locktype request %d",
806 flags & LK_TYPE_MASK);
807 /* NOTREACHED */
808 }
809 if ((lkp->lk_flags & (LK_WAITDRAIN|LK_SPIN)) == LK_WAITDRAIN &&
810 ((lkp->lk_flags &
811 (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
812 lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
813 lkp->lk_flags &= ~LK_WAITDRAIN;
814 wakeup((void *)&lkp->lk_flags);
815 }
816 /*
817 * Note that this panic will be a recursive panic, since
818 * we only set lock_shutdown_noblock above if panicstr != NULL.
819 */
820 if (error && lock_shutdown_noblock)
821 panic("lockmgr: deadlock (see previous panic)");
822
823 INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
824 return (error);
825 }
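/*
 * Illustrative sketch (not compiled): typical lockmgr() call sequences.
 * Error handling is minimal and the names are examples only.
 */
#if 0
void
example_lockmgr_usage(struct lock *lkp)
{
	int error;

	/* Shared hold for readers. */
	error = lockmgr(lkp, LK_SHARED, NULL);
	if (error == 0) {
		/* ... read the protected data ... */
		(void) lockmgr(lkp, LK_RELEASE, NULL);
	}

	/* Exclusive hold, polling only: fail with EBUSY rather than sleep. */
	error = lockmgr(lkp, LK_EXCLUSIVE | LK_NOWAIT, NULL);
	if (error == 0) {
		/* ... modify the protected data ... */
		(void) lockmgr(lkp, LK_RELEASE, NULL);
	}

	/* Drain all holds before tearing the object down; see LK_DRAIN above. */
	if (lockmgr(lkp, LK_DRAIN, NULL) == 0)
		(void) lockmgr(lkp, LK_RELEASE, NULL);
}
#endif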
826
827 /*
828 * For a recursive spinlock held one or more times by the current CPU,
829 * release all N locks, and return N.
830 * Intended for use in mi_switch() shortly before context switching.
831 */
832
833 int
834 #if defined(LOCKDEBUG)
835 _spinlock_release_all(__volatile struct lock *lkp, const char *file, int line)
836 #else
837 spinlock_release_all(__volatile struct lock *lkp)
838 #endif
839 {
840 int s, count;
841 cpuid_t cpu_id;
842
843 KASSERT(lkp->lk_flags & LK_SPIN);
844
845 INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);
846
847 cpu_id = cpu_number();
848 count = lkp->lk_exclusivecount;
849
850 if (count != 0) {
851 #ifdef DIAGNOSTIC
852 if (WEHOLDIT(lkp, 0, cpu_id) == 0) {
853 panic("spinlock_release_all: processor %lu, not "
854 "exclusive lock holder %lu "
855 "unlocking", (long)cpu_id, lkp->lk_cpu);
856 }
857 #endif
858 lkp->lk_recurselevel = 0;
859 lkp->lk_exclusivecount = 0;
860 COUNT_CPU(cpu_id, -count);
861 lkp->lk_flags &= ~LK_HAVE_EXCL;
862 SETHOLDER(lkp, LK_NOPROC, LK_NOCPU);
863 #if defined(LOCKDEBUG)
864 lkp->lk_unlock_file = file;
865 lkp->lk_unlock_line = line;
866 #endif
867 DONTHAVEIT(lkp);
868 }
869 #ifdef DIAGNOSTIC
870 else if (lkp->lk_sharecount != 0)
871 panic("spinlock_release_all: release of shared lock!");
872 else
873 panic("spinlock_release_all: release of unlocked lock!");
874 #endif
875 INTERLOCK_RELEASE(lkp, LK_SPIN, s);
876
877 return (count);
878 }
879
880 /*
881 * For a spinlock previously released with spinlock_release_all(),
882 * re-acquire all N holds on the current CPU.
883 * Intended for use in mi_switch() right after resuming execution.
884 */
885
886 void
887 #if defined(LOCKDEBUG)
888 _spinlock_acquire_count(__volatile struct lock *lkp, int count,
889 const char *file, int line)
890 #else
891 spinlock_acquire_count(__volatile struct lock *lkp, int count)
892 #endif
893 {
894 int s, error;
895 cpuid_t cpu_id;
896
897 KASSERT(lkp->lk_flags & LK_SPIN);
898
899 INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);
900
901 cpu_id = cpu_number();
902
903 #ifdef DIAGNOSTIC
904 if (WEHOLDIT(lkp, LK_NOPROC, cpu_id))
905 panic("spinlock_acquire_count: processor %lu already holds lock", (long)cpu_id);
906 #endif
907 /*
908 * Try to acquire the want_exclusive flag.
909 */
910 ACQUIRE(lkp, error, LK_SPIN, 0, lkp->lk_flags &
911 (LK_HAVE_EXCL | LK_WANT_EXCL));
912 lkp->lk_flags |= LK_WANT_EXCL;
913 /*
914 * Wait for shared locks and upgrades to finish.
915 */
916 ACQUIRE(lkp, error, LK_SPIN, 0, lkp->lk_sharecount != 0 ||
917 (lkp->lk_flags & LK_WANT_UPGRADE));
918 lkp->lk_flags &= ~LK_WANT_EXCL;
919 lkp->lk_flags |= LK_HAVE_EXCL;
920 SETHOLDER(lkp, LK_NOPROC, cpu_id);
921 #if defined(LOCKDEBUG)
922 lkp->lk_lock_file = file;
923 lkp->lk_lock_line = line;
924 #endif
925 HAVEIT(lkp);
926 if (lkp->lk_exclusivecount != 0)
927 panic("lockmgr: non-zero exclusive count");
928 lkp->lk_exclusivecount = count;
929 lkp->lk_recurselevel = 1;
930 COUNT_CPU(cpu_id, count);
931
932 INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
933 }
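/*
 * Illustrative sketch (not compiled): the intended pairing of the two
 * routines above around a context switch in mi_switch().  Scheduler
 * details are omitted.
 */
#if 0
void
example_switch_with_spinlock(__volatile struct lock *lkp)
{
	int hold_count;

	/* Caller holds lkp (possibly recursively); drop every hold. */
	hold_count = spinlock_release_all(lkp);

	/* ... the context switch happens here ... */

	/* Take the same number of holds back once running again. */
	spinlock_acquire_count(lkp, hold_count);
}
#endif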
934
935
936
937 /*
938 * Print out information about the state of a lock.  Used by VOP_PRINT
939 * routines to display the status of contained locks.
940 */
941 void
942 lockmgr_printinfo(__volatile struct lock *lkp)
943 {
944
945 if (lkp->lk_sharecount)
946 printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
947 lkp->lk_sharecount);
948 else if (lkp->lk_flags & LK_HAVE_EXCL) {
949 printf(" lock type %s: EXCL (count %d) by ",
950 lkp->lk_wmesg, lkp->lk_exclusivecount);
951 if (lkp->lk_flags & LK_SPIN)
952 printf("processor %lu", lkp->lk_cpu);
953 else
954 printf("pid %d", lkp->lk_lockholder);
955 } else
956 printf(" not locked");
957 if ((lkp->lk_flags & LK_SPIN) == 0 && lkp->lk_waitcount > 0)
958 printf(" with %d pending", lkp->lk_waitcount);
959 }
960
961 #if defined(LOCKDEBUG) /* { */
962 TAILQ_HEAD(, simplelock) simplelock_list =
963 TAILQ_HEAD_INITIALIZER(simplelock_list);
964
965 #if defined(MULTIPROCESSOR) /* { */
966 struct simplelock simplelock_list_slock = SIMPLELOCK_INITIALIZER;
967
968 #define SLOCK_LIST_LOCK() \
969 __cpu_simple_lock(&simplelock_list_slock.lock_data)
970
971 #define SLOCK_LIST_UNLOCK() \
972 __cpu_simple_unlock(&simplelock_list_slock.lock_data)
973
974 #define SLOCK_COUNT(x) \
975 curcpu()->ci_simple_locks += (x)
976 #else
977 u_long simple_locks;
978
979 #define SLOCK_LIST_LOCK() /* nothing */
980
981 #define SLOCK_LIST_UNLOCK() /* nothing */
982
983 #define SLOCK_COUNT(x) simple_locks += (x)
984 #endif /* MULTIPROCESSOR */ /* } */
985
986 #ifdef MULTIPROCESSOR
987 #define SLOCK_MP() lock_printf("on cpu %ld\n", \
988 (u_long) cpu_number())
989 #else
990 #define SLOCK_MP() /* nothing */
991 #endif
992
993 #define SLOCK_WHERE(str, alp, id, l) \
994 do { \
995 lock_printf("\n"); \
996 lock_printf(str); \
997 lock_printf("lock: %p, currently at: %s:%d\n", (alp), (id), (l)); \
998 SLOCK_MP(); \
999 if ((alp)->lock_file != NULL) \
1000 lock_printf("last locked: %s:%d\n", (alp)->lock_file, \
1001 (alp)->lock_line); \
1002 if ((alp)->unlock_file != NULL) \
1003 lock_printf("last unlocked: %s:%d\n", (alp)->unlock_file, \
1004 (alp)->unlock_line); \
1005 SLOCK_TRACE() \
1006 SLOCK_DEBUGGER(); \
1007 } while (/*CONSTCOND*/0)
1008
1009 /*
1010 * Simple lock functions so that the debugger can see from whence
1011 * they are being called.
1012 */
1013 void
1014 simple_lock_init(struct simplelock *alp)
1015 {
1016
1017 #if defined(MULTIPROCESSOR) /* { */
1018 __cpu_simple_lock_init(&alp->lock_data);
1019 #else
1020 alp->lock_data = __SIMPLELOCK_UNLOCKED;
1021 #endif /* } */
1022 alp->lock_file = NULL;
1023 alp->lock_line = 0;
1024 alp->unlock_file = NULL;
1025 alp->unlock_line = 0;
1026 alp->lock_holder = LK_NOCPU;
1027 }
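/*
 * Illustrative sketch (not compiled): basic simple_lock usage.  In a
 * LOCKDEBUG kernel the simple_lock()/simple_unlock() macros expand to
 * the instrumented functions in this file; the names below are examples
 * only.
 */
#if 0
struct simplelock example_slock;

void
example_simple_lock_usage(void)
{

	simple_lock_init(&example_slock);

	simple_lock(&example_slock);
	/* ... short critical section; never sleep while holding it ... */
	simple_unlock(&example_slock);
}
#endif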
1028
1029 void
1030 _simple_lock(__volatile struct simplelock *alp, const char *id, int l)
1031 {
1032 cpuid_t cpu_id = cpu_number();
1033 int s;
1034
1035 s = spllock();
1036
1037 /*
1038 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
1039 * don't take any action, and just fall into the normal spin case.
1040 */
1041 if (alp->lock_data == __SIMPLELOCK_LOCKED) {
1042 #if defined(MULTIPROCESSOR) /* { */
1043 if (alp->lock_holder == cpu_id) {
1044 SLOCK_WHERE("simple_lock: locking against myself\n",
1045 alp, id, l);
1046 goto out;
1047 }
1048 #else
1049 SLOCK_WHERE("simple_lock: lock held\n", alp, id, l);
1050 goto out;
1051 #endif /* MULTIPROCESSOR */ /* } */
1052 }
1053
1054 #if defined(MULTIPROCESSOR) /* { */
1055 /* Acquire the lock before modifying any fields. */
1056 __cpu_simple_lock(&alp->lock_data);
1057 #else
1058 alp->lock_data = __SIMPLELOCK_LOCKED;
1059 #endif /* } */
1060
1061 if (alp->lock_holder != LK_NOCPU) {
1062 SLOCK_WHERE("simple_lock: uninitialized lock\n",
1063 alp, id, l);
1064 }
1065 alp->lock_file = id;
1066 alp->lock_line = l;
1067 alp->lock_holder = cpu_id;
1068
1069 SLOCK_LIST_LOCK();
1070 /* XXX Cast away volatile */
1071 TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
1072 SLOCK_LIST_UNLOCK();
1073
1074 SLOCK_COUNT(1);
1075
1076 out:
1077 splx(s);
1078 }
1079
1080 int
1081 _simple_lock_held(__volatile struct simplelock *alp)
1082 {
1083 #if defined(MULTIPROCESSOR) || defined(DIAGNOSTIC)
1084 cpuid_t cpu_id = cpu_number();
1085 #endif
1086 int s, locked = 0;
1087
1088 s = spllock();
1089
1090 #if defined(MULTIPROCESSOR)
1091 if (__cpu_simple_lock_try(&alp->lock_data) == 0)
1092 locked = (alp->lock_holder == cpu_id);
1093 else
1094 __cpu_simple_unlock(&alp->lock_data);
1095 #else
1096 if (alp->lock_data == __SIMPLELOCK_LOCKED) {
1097 locked = 1;
1098 KASSERT(alp->lock_holder == cpu_id);
1099 }
1100 #endif
1101
1102 splx(s);
1103
1104 return (locked);
1105 }
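/*
 * Illustrative sketch (not compiled): simple_lock_held() is mainly
 * useful in assertions, as simple_lock_only_held() does below with
 * LOCK_ASSERT().
 */
#if 0
void
example_assert_lock_held(__volatile struct simplelock *alp)
{

	LOCK_ASSERT(simple_lock_held(alp));
	/* ... code that requires alp to be held ... */
}
#endif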
1106
1107 int
1108 _simple_lock_try(__volatile struct simplelock *alp, const char *id, int l)
1109 {
1110 cpuid_t cpu_id = cpu_number();
1111 int s, rv = 0;
1112
1113 s = spllock();
1114
1115 /*
1116 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
1117 * don't take any action.
1118 */
1119 #if defined(MULTIPROCESSOR) /* { */
1120 if ((rv = __cpu_simple_lock_try(&alp->lock_data)) == 0) {
1121 if (alp->lock_holder == cpu_id)
1122 SLOCK_WHERE("simple_lock_try: locking against myself\n",
1123 alp, id, l);
1124 goto out;
1125 }
1126 #else
1127 if (alp->lock_data == __SIMPLELOCK_LOCKED) {
1128 SLOCK_WHERE("simple_lock_try: lock held\n", alp, id, l);
1129 goto out;
1130 }
1131 alp->lock_data = __SIMPLELOCK_LOCKED;
1132 #endif /* MULTIPROCESSOR */ /* } */
1133
1134 /*
1135 * At this point, we have acquired the lock.
1136 */
1137
1138 rv = 1;
1139
1140 alp->lock_file = id;
1141 alp->lock_line = l;
1142 alp->lock_holder = cpu_id;
1143
1144 SLOCK_LIST_LOCK();
1145 /* XXX Cast away volatile. */
1146 TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
1147 SLOCK_LIST_UNLOCK();
1148
1149 SLOCK_COUNT(1);
1150
1151 out:
1152 splx(s);
1153 return (rv);
1154 }
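/*
 * Illustrative sketch (not compiled): the try-lock pattern.  If the
 * lock cannot be taken immediately, back off instead of spinning.
 */
#if 0
int
example_try_lock(__volatile struct simplelock *alp)
{

	if (simple_lock_try(alp) == 0)
		return (0);	/* contended; caller retries later */
	/* ... critical section ... */
	simple_unlock(alp);
	return (1);
}
#endif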
1155
1156 void
1157 _simple_unlock(__volatile struct simplelock *alp, const char *id, int l)
1158 {
1159 int s;
1160
1161 s = spllock();
1162
1163 /*
1164 * MULTIPROCESSOR case: This is `safe' because we think we hold
1165 * the lock, and if we don't, we don't take any action.
1166 */
1167 if (alp->lock_data == __SIMPLELOCK_UNLOCKED) {
1168 SLOCK_WHERE("simple_unlock: lock not held\n",
1169 alp, id, l);
1170 goto out;
1171 }
1172
1173 SLOCK_LIST_LOCK();
1174 TAILQ_REMOVE(&simplelock_list, alp, list);
1175 SLOCK_LIST_UNLOCK();
1176
1177 SLOCK_COUNT(-1);
1178
1179 alp->list.tqe_next = NULL; /* sanity */
1180 alp->list.tqe_prev = NULL; /* sanity */
1181
1182 alp->unlock_file = id;
1183 alp->unlock_line = l;
1184
1185 #if defined(MULTIPROCESSOR) /* { */
1186 alp->lock_holder = LK_NOCPU;
1187 /* Now that we've modified all fields, release the lock. */
1188 __cpu_simple_unlock(&alp->lock_data);
1189 #else
1190 alp->lock_data = __SIMPLELOCK_UNLOCKED;
1191 KASSERT(alp->lock_holder == cpu_number());
1192 alp->lock_holder = LK_NOCPU;
1193 #endif /* } */
1194
1195 out:
1196 splx(s);
1197 }
1198
1199 void
1200 simple_lock_dump(void)
1201 {
1202 struct simplelock *alp;
1203 int s;
1204
1205 s = spllock();
1206 SLOCK_LIST_LOCK();
1207 lock_printf("all simple locks:\n");
1208 TAILQ_FOREACH(alp, &simplelock_list, list) {
1209 lock_printf("%p CPU %lu %s:%d\n", alp, alp->lock_holder,
1210 alp->lock_file, alp->lock_line);
1211 }
1212 SLOCK_LIST_UNLOCK();
1213 splx(s);
1214 }
1215
1216 void
1217 simple_lock_freecheck(void *start, void *end)
1218 {
1219 struct simplelock *alp;
1220 int s;
1221
1222 s = spllock();
1223 SLOCK_LIST_LOCK();
1224 TAILQ_FOREACH(alp, &simplelock_list, list) {
1225 if ((void *)alp >= start && (void *)alp < end) {
1226 lock_printf("freeing simple_lock %p CPU %lu %s:%d\n",
1227 alp, alp->lock_holder, alp->lock_file,
1228 alp->lock_line);
1229 SLOCK_DEBUGGER();
1230 }
1231 }
1232 SLOCK_LIST_UNLOCK();
1233 splx(s);
1234 }
1235
1236 /*
1237 * We must be holding exactly one lock: the sched_lock.
1238 */
1239
1240 void
1241 simple_lock_switchcheck(void)
1242 {
1243
1244 simple_lock_only_held(&sched_lock, "switching");
1245 }
1246
1247 void
1248 simple_lock_only_held(volatile struct simplelock *lp, const char *where)
1249 {
1250 struct simplelock *alp;
1251 cpuid_t cpu_id = cpu_number();
1252 int s;
1253
1254 if (lp) {
1255 LOCK_ASSERT(simple_lock_held(lp));
1256 }
1257 s = spllock();
1258 SLOCK_LIST_LOCK();
1259 TAILQ_FOREACH(alp, &simplelock_list, list) {
1260 if (alp == lp)
1261 continue;
1262 if (alp->lock_holder == cpu_id)
1263 break;
1264 }
1265 SLOCK_LIST_UNLOCK();
1266 splx(s);
1267
1268 if (alp != NULL) {
1269 lock_printf("\n%s with held simple_lock %p "
1270 "CPU %lu %s:%d\n",
1271 where, alp, alp->lock_holder, alp->lock_file,
1272 alp->lock_line);
1273 SLOCK_TRACE();
1274 SLOCK_DEBUGGER();
1275 }
1276 }
1277 #endif /* LOCKDEBUG */ /* } */
1278
1279 #if defined(MULTIPROCESSOR)
1280 /*
1281 * Functions for manipulating the kernel_lock. We put them here
1282 * so that they show up in profiles.
1283 */
1284
1285 struct lock kernel_lock;
1286
1287 void
1288 _kernel_lock_init(void)
1289 {
1290
1291 spinlockinit(&kernel_lock, "klock", 0);
1292 }
1293
1294 /*
1295 * Acquire/release the kernel lock. Intended for use in the scheduler
1296 * and the lower half of the kernel.
1297 */
1298 void
1299 _kernel_lock(int flag)
1300 {
1301
1302 SCHED_ASSERT_UNLOCKED();
1303 spinlockmgr(&kernel_lock, flag, 0);
1304 }
1305
1306 void
1307 _kernel_unlock(void)
1308 {
1309
1310 spinlockmgr(&kernel_lock, LK_RELEASE, 0);
1311 }
1312
1313 /*
1314 * Acquire/release the kernel_lock on behalf of a process. Intended for
1315 * use in the top half of the kernel.
1316 */
1317 void
1318 _kernel_proc_lock(struct proc *p)
1319 {
1320
1321 SCHED_ASSERT_UNLOCKED();
1322 spinlockmgr(&kernel_lock, LK_EXCLUSIVE, 0);
1323 p->p_flag |= P_BIGLOCK;
1324 }
1325
1326 void
1327 _kernel_proc_unlock(struct proc *p)
1328 {
1329
1330 p->p_flag &= ~P_BIGLOCK;
1331 spinlockmgr(&kernel_lock, LK_RELEASE, 0);
1332 }
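/*
 * Illustrative sketch (not compiled): how the per-process big-lock
 * routines above are typically wrapped around top-half work done on
 * behalf of curproc.
 */
#if 0
void
example_biglock_section(void)
{
	struct proc *p = curproc;

	_kernel_proc_lock(p);
	/* ... top-half work that still needs the big lock ... */
	_kernel_proc_unlock(p);
}
#endif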
1333 #endif /* MULTIPROCESSOR */
1334