     1 /* $NetBSD: kern_lock.c,v 1.112 2007/04/14 06:59:25 perseant Exp $ */
2
3 /*-
4 * Copyright (c) 1999, 2000, 2006 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center, and by Andrew Doran.
10 *
11 * This code is derived from software contributed to The NetBSD Foundation
12 * by Ross Harvey.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions
16 * are met:
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in the
21 * documentation and/or other materials provided with the distribution.
22 * 3. All advertising materials mentioning features or use of this software
23 * must display the following acknowledgement:
24 * This product includes software developed by the NetBSD
25 * Foundation, Inc. and its contributors.
26 * 4. Neither the name of The NetBSD Foundation nor the names of its
27 * contributors may be used to endorse or promote products derived
28 * from this software without specific prior written permission.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
31 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
32 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
33 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
34 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
35 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
36 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
37 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
38 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
39 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
40 * POSSIBILITY OF SUCH DAMAGE.
41 */
42
43 /*
44 * Copyright (c) 1995
45 * The Regents of the University of California. All rights reserved.
46 *
47 * This code contains ideas from software contributed to Berkeley by
48 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
49 * System project at Carnegie-Mellon University.
50 *
51 * Redistribution and use in source and binary forms, with or without
52 * modification, are permitted provided that the following conditions
53 * are met:
54 * 1. Redistributions of source code must retain the above copyright
55 * notice, this list of conditions and the following disclaimer.
56 * 2. Redistributions in binary form must reproduce the above copyright
57 * notice, this list of conditions and the following disclaimer in the
58 * documentation and/or other materials provided with the distribution.
59 * 3. Neither the name of the University nor the names of its contributors
60 * may be used to endorse or promote products derived from this software
61 * without specific prior written permission.
62 *
63 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
64 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
65 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
66 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
67 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
68 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
69 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
70 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
71 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
72 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
73 * SUCH DAMAGE.
74 *
75 * @(#)kern_lock.c 8.18 (Berkeley) 5/21/95
76 */
77
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.112 2007/04/14 06:59:25 perseant Exp $");
80
81 #include "opt_multiprocessor.h"
82 #include "opt_ddb.h"
83
84 #define __MUTEX_PRIVATE
85
86 #include <sys/param.h>
87 #include <sys/proc.h>
88 #include <sys/lock.h>
89 #include <sys/systm.h>
90 #include <sys/lockdebug.h>
91
92 #include <machine/cpu.h>
93 #include <machine/stdarg.h>
94
95 #include <dev/lockstat.h>
96
97 #if defined(LOCKDEBUG)
98 #include <sys/syslog.h>
99 /*
   100  * note that stdarg.h and the ansi style va_start macro are used for both
101 * ansi and traditional c compiles.
102 * XXX: this requires that stdarg.h define: va_alist and va_dcl
103 */
104 #include <machine/stdarg.h>
105
106 void lock_printf(const char *fmt, ...)
107 __attribute__((__format__(__printf__,1,2)));
108
109 static int acquire(volatile struct lock **, int *, int, int, int, uintptr_t);
110
111 int lock_debug_syslog = 0; /* defaults to printf, but can be patched */
112
113 #ifdef DDB
114 #include <ddb/ddbvar.h>
115 #include <machine/db_machdep.h>
116 #include <ddb/db_command.h>
117 #include <ddb/db_interface.h>
118 #endif
119 #endif /* defined(LOCKDEBUG) */
120
121 #if defined(MULTIPROCESSOR)
122 /*
123 * IPL_BIGLOCK: block IPLs which need to grab kernel_mutex.
124 * XXX IPL_VM or IPL_AUDIO should be enough.
125 */
126 #if !defined(__HAVE_SPLBIGLOCK)
127 #define splbiglock splclock
128 #endif
129 int kernel_lock_id;
130 #endif
131
132 __cpu_simple_lock_t kernel_lock;
133
134 /*
135 * Locking primitives implementation.
136 * Locks provide shared/exclusive synchronization.
137 */
138
139 #if defined(LOCKDEBUG) || defined(DIAGNOSTIC) /* { */
140 #if defined(MULTIPROCESSOR) /* { */
141 #define COUNT_CPU(cpu_id, x) \
142 curcpu()->ci_spin_locks += (x)
143 #else
144 u_long spin_locks;
145 #define COUNT_CPU(cpu_id, x) spin_locks += (x)
146 #endif /* MULTIPROCESSOR */ /* } */
147
148 #define COUNT(lkp, l, cpu_id, x) \
149 do { \
150 if ((lkp)->lk_flags & LK_SPIN) \
151 COUNT_CPU((cpu_id), (x)); \
152 else \
153 (l)->l_locks += (x); \
154 } while (/*CONSTCOND*/0)
155 #else
156 #define COUNT(lkp, p, cpu_id, x)
157 #define COUNT_CPU(cpu_id, x)
158 #endif /* LOCKDEBUG || DIAGNOSTIC */ /* } */
159
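/*
 * The interlock protects the lock's internal fields.  For spin locks the
 * IPL is first raised to spllock() (and restored again on release) so the
 * interlock is never held at an IPL lower than the one it may be taken
 * from.
 */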
160 #define INTERLOCK_ACQUIRE(lkp, flags, s) \
161 do { \
162 if ((flags) & LK_SPIN) \
163 s = spllock(); \
164 simple_lock(&(lkp)->lk_interlock); \
165 } while (/*CONSTCOND*/ 0)
166
167 #define INTERLOCK_RELEASE(lkp, flags, s) \
168 do { \
169 simple_unlock(&(lkp)->lk_interlock); \
170 if ((flags) & LK_SPIN) \
171 splx(s); \
172 } while (/*CONSTCOND*/ 0)
173
174 #ifdef DDB /* { */
175 #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
176 int simple_lock_debugger = 1; /* more serious on MP */
177 #else
178 int simple_lock_debugger = 0;
179 #endif
180 #define SLOCK_DEBUGGER() if (simple_lock_debugger && db_onpanic) Debugger()
181 #define SLOCK_TRACE() \
182 db_stack_trace_print((db_expr_t)__builtin_frame_address(0), \
183 true, 65535, "", lock_printf);
184 #else
185 #define SLOCK_DEBUGGER() /* nothing */
186 #define SLOCK_TRACE() /* nothing */
187 #endif /* } */
188
189 #if defined(LOCKDEBUG)
190 #if defined(DDB)
191 #define SPINLOCK_SPINCHECK_DEBUGGER if (db_onpanic) Debugger()
192 #else
193 #define SPINLOCK_SPINCHECK_DEBUGGER /* nothing */
194 #endif
195
196 #define SPINLOCK_SPINCHECK_DECL \
197 /* 32-bits of count -- wrap constitutes a "spinout" */ \
198 uint32_t __spinc = 0
199
200 #define SPINLOCK_SPINCHECK \
201 do { \
202 if (++__spinc == 0) { \
203 lock_printf("LK_SPIN spinout, excl %d, share %d\n", \
204 lkp->lk_exclusivecount, lkp->lk_sharecount); \
205 if (lkp->lk_exclusivecount) \
206 lock_printf("held by CPU %lu\n", \
207 (u_long) lkp->lk_cpu); \
208 if (lkp->lk_lock_file) \
209 lock_printf("last locked at %s:%d\n", \
210 lkp->lk_lock_file, lkp->lk_lock_line); \
211 if (lkp->lk_unlock_file) \
212 lock_printf("last unlocked at %s:%d\n", \
213 lkp->lk_unlock_file, lkp->lk_unlock_line); \
214 SLOCK_TRACE(); \
215 SPINLOCK_SPINCHECK_DEBUGGER; \
216 } \
217 } while (/*CONSTCOND*/ 0)
218 #else
219 #define SPINLOCK_SPINCHECK_DECL /* nothing */
220 #define SPINLOCK_SPINCHECK /* nothing */
   221 #endif /* LOCKDEBUG */
222
223 #define RETURN_ADDRESS ((uintptr_t)__builtin_return_address(0))
224
225 /*
226 * Acquire a resource.
227 */
228 static int
229 acquire(volatile struct lock **lkpp, int *s, int extflags,
230 int drain, int wanted, uintptr_t ra)
231 {
232 int error;
233 volatile struct lock *lkp = *lkpp;
234 LOCKSTAT_TIMER(slptime);
235 LOCKSTAT_FLAG(lsflag);
236
237 KASSERT(drain || (wanted & LK_WAIT_NONZERO) == 0);
238
239 if (extflags & LK_SPIN) {
240 int interlocked;
241
242 SPINLOCK_SPINCHECK_DECL;
243
244 if (!drain) {
245 lkp->lk_waitcount++;
246 lkp->lk_flags |= LK_WAIT_NONZERO;
247 }
248 for (interlocked = 1;;) {
249 SPINLOCK_SPINCHECK;
250 if ((lkp->lk_flags & wanted) != 0) {
251 if (interlocked) {
252 INTERLOCK_RELEASE(lkp, LK_SPIN, *s);
253 interlocked = 0;
254 }
255 SPINLOCK_SPIN_HOOK;
256 } else if (interlocked) {
257 break;
258 } else {
259 INTERLOCK_ACQUIRE(lkp, LK_SPIN, *s);
260 interlocked = 1;
261 }
262 }
263 if (!drain) {
264 lkp->lk_waitcount--;
265 if (lkp->lk_waitcount == 0)
266 lkp->lk_flags &= ~LK_WAIT_NONZERO;
267 }
268 KASSERT((lkp->lk_flags & wanted) == 0);
269 error = 0; /* sanity */
270 } else {
271 LOCKSTAT_ENTER(lsflag);
272
273 for (error = 0; (lkp->lk_flags & wanted) != 0; ) {
274 if (drain)
275 lkp->lk_flags |= LK_WAITDRAIN;
276 else {
277 lkp->lk_waitcount++;
278 lkp->lk_flags |= LK_WAIT_NONZERO;
279 }
280 /* XXX Cast away volatile. */
281 LOCKSTAT_START_TIMER(lsflag, slptime);
282 error = ltsleep(drain ?
283 (volatile const void *)&lkp->lk_flags :
284 (volatile const void *)lkp, lkp->lk_prio,
285 lkp->lk_wmesg, lkp->lk_timo, &lkp->lk_interlock);
286 LOCKSTAT_STOP_TIMER(lsflag, slptime);
287 LOCKSTAT_EVENT_RA(lsflag, (void *)(uintptr_t)lkp,
288 LB_LOCKMGR | LB_SLEEP1, 1, slptime, ra);
289 if (!drain) {
290 lkp->lk_waitcount--;
291 if (lkp->lk_waitcount == 0)
292 lkp->lk_flags &= ~LK_WAIT_NONZERO;
293 }
294 if (error)
295 break;
296 if (extflags & LK_SLEEPFAIL) {
297 error = ENOLCK;
298 break;
299 }
300 if (lkp->lk_newlock != NULL) {
301 simple_lock(&lkp->lk_newlock->lk_interlock);
302 simple_unlock(&lkp->lk_interlock);
303 if (lkp->lk_waitcount == 0)
304 wakeup(&lkp->lk_newlock);
305 *lkpp = lkp = lkp->lk_newlock;
306 }
307 }
308
309 LOCKSTAT_EXIT(lsflag);
310 }
311
312 return error;
313 }
314
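/*
 * Ownership bookkeeping: a spin lock is owned by a CPU (lk_cpu), while a
 * sleep lock is owned by a process/LWP pair (lk_lockholder, lk_locklwp).
 * SETHOLDER records the owner, WEHOLDIT tests whether the caller is the
 * owner, and WAKEUP_WAITER wakes sleepers on a sleep lock that has waiters.
 */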
315 #define SETHOLDER(lkp, pid, lid, cpu_id) \
316 do { \
317 if ((lkp)->lk_flags & LK_SPIN) \
318 (lkp)->lk_cpu = cpu_id; \
319 else { \
320 (lkp)->lk_lockholder = pid; \
321 (lkp)->lk_locklwp = lid; \
322 } \
323 } while (/*CONSTCOND*/0)
324
325 #define WEHOLDIT(lkp, pid, lid, cpu_id) \
326 (((lkp)->lk_flags & LK_SPIN) != 0 ? \
327 ((lkp)->lk_cpu == (cpu_id)) : \
328 ((lkp)->lk_lockholder == (pid) && (lkp)->lk_locklwp == (lid)))
329
330 #define WAKEUP_WAITER(lkp) \
331 do { \
332 if (((lkp)->lk_flags & (LK_SPIN | LK_WAIT_NONZERO)) == \
333 LK_WAIT_NONZERO) { \
334 wakeup((lkp)); \
335 } \
336 } while (/*CONSTCOND*/0)
337
338 #if defined(LOCKDEBUG) /* { */
339 #if defined(MULTIPROCESSOR) /* { */
340 struct simplelock spinlock_list_slock = SIMPLELOCK_INITIALIZER;
341
342 #define SPINLOCK_LIST_LOCK() \
343 __cpu_simple_lock(&spinlock_list_slock.lock_data)
344
345 #define SPINLOCK_LIST_UNLOCK() \
346 __cpu_simple_unlock(&spinlock_list_slock.lock_data)
347 #else
348 #define SPINLOCK_LIST_LOCK() /* nothing */
349
350 #define SPINLOCK_LIST_UNLOCK() /* nothing */
351 #endif /* MULTIPROCESSOR */ /* } */
352
353 _TAILQ_HEAD(, struct lock, volatile) spinlock_list =
354 TAILQ_HEAD_INITIALIZER(spinlock_list);
355
356 #define HAVEIT(lkp) \
357 do { \
358 if ((lkp)->lk_flags & LK_SPIN) { \
359 int sp = spllock(); \
360 SPINLOCK_LIST_LOCK(); \
361 TAILQ_INSERT_TAIL(&spinlock_list, (lkp), lk_list); \
362 SPINLOCK_LIST_UNLOCK(); \
363 splx(sp); \
364 } \
365 } while (/*CONSTCOND*/0)
366
367 #define DONTHAVEIT(lkp) \
368 do { \
369 if ((lkp)->lk_flags & LK_SPIN) { \
370 int sp = spllock(); \
371 SPINLOCK_LIST_LOCK(); \
372 TAILQ_REMOVE(&spinlock_list, (lkp), lk_list); \
373 SPINLOCK_LIST_UNLOCK(); \
374 splx(sp); \
375 } \
376 } while (/*CONSTCOND*/0)
377 #else
378 #define HAVEIT(lkp) /* nothing */
379
380 #define DONTHAVEIT(lkp) /* nothing */
381 #endif /* LOCKDEBUG */ /* } */
382
383 #if defined(LOCKDEBUG)
384 /*
385 * Lock debug printing routine; can be configured to print to console
386 * or log to syslog.
387 */
388 void
389 lock_printf(const char *fmt, ...)
390 {
391 char b[150];
392 va_list ap;
393
394 va_start(ap, fmt);
395 if (lock_debug_syslog)
396 vlog(LOG_DEBUG, fmt, ap);
397 else {
398 vsnprintf(b, sizeof(b), fmt, ap);
399 printf_nolog("%s", b);
400 }
401 va_end(ap);
402 }
403 #endif /* LOCKDEBUG */
404
405 static void
406 lockpanic(volatile struct lock *lkp, const char *fmt, ...)
407 {
408 char s[150], b[150];
409 #ifdef LOCKDEBUG
410 static const char *locktype[] = {
411 "*0*", "shared", "exclusive", "upgrade", "exclupgrade",
412 "downgrade", "release", "drain", "exclother", "*9*",
413 "*10*", "*11*", "*12*", "*13*", "*14*", "*15*"
414 };
415 #endif
416
417 va_list ap;
418 va_start(ap, fmt);
419 vsnprintf(s, sizeof(s), fmt, ap);
420 va_end(ap);
421 bitmask_snprintf(lkp->lk_flags, __LK_FLAG_BITS, b, sizeof(b));
422 panic("%s ("
423 #ifdef LOCKDEBUG
424 "type %s "
425 #endif
426 "flags %s, sharecount %d, exclusivecount %d, "
427 "recurselevel %d, waitcount %d, wmesg %s"
428 #ifdef LOCKDEBUG
429 ", lock_file %s, unlock_file %s, lock_line %d, unlock_line %d"
430 #endif
431 ")\n",
432 s,
433 #ifdef LOCKDEBUG
434 locktype[lkp->lk_flags & LK_TYPE_MASK],
435 #endif
436 b, lkp->lk_sharecount, lkp->lk_exclusivecount,
437 lkp->lk_recurselevel, lkp->lk_waitcount, lkp->lk_wmesg
438 #ifdef LOCKDEBUG
439 , lkp->lk_lock_file, lkp->lk_unlock_file, lkp->lk_lock_line,
440 lkp->lk_unlock_line
441 #endif
442 );
443 }
444
445 /*
446 * Transfer any waiting processes from one lock to another.
447 */
448 void
449 transferlockers(struct lock *from, struct lock *to)
450 {
451
452 KASSERT(from != to);
453 KASSERT((from->lk_flags & LK_WAITDRAIN) == 0);
454 if (from->lk_waitcount == 0)
455 return;
456 from->lk_newlock = to;
457 wakeup((void *)from);
458 tsleep((void *)&from->lk_newlock, from->lk_prio, "lkxfer", 0);
459 from->lk_newlock = NULL;
460 from->lk_flags &= ~(LK_WANT_EXCL | LK_WANT_UPGRADE);
461 KASSERT(from->lk_waitcount == 0);
462 }
463
464
465 /*
466 * Initialize a lock; required before use.
467 */
468 void
469 lockinit(struct lock *lkp, pri_t prio, const char *wmesg, int timo, int flags)
470 {
471
472 memset(lkp, 0, sizeof(struct lock));
473 simple_lock_init(&lkp->lk_interlock);
474 lkp->lk_flags = flags & LK_EXTFLG_MASK;
475 if (flags & LK_SPIN)
476 lkp->lk_cpu = LK_NOCPU;
477 else {
478 lkp->lk_lockholder = LK_NOPROC;
479 lkp->lk_newlock = NULL;
480 lkp->lk_prio = prio;
481 lkp->lk_timo = timo;
482 }
483 lkp->lk_wmesg = wmesg; /* just a name for spin locks */
484 #if defined(LOCKDEBUG)
485 lkp->lk_lock_file = NULL;
486 lkp->lk_unlock_file = NULL;
487 #endif
488 }
489
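/*
 * Illustrative sketch (not part of the original source): a typical sleep
 * lock is set up with lockinit() and then taken and released via
 * lockmgr().  The lock name, priority and flags below are placeholders
 * only.
 *
 *	struct lock examplelock;
 *
 *	lockinit(&examplelock, PRIBIO, "example", 0, 0);
 *	...
 *	if (lockmgr(&examplelock, LK_EXCLUSIVE | LK_NOWAIT, NULL) == 0) {
 *		... modify the protected object ...
 *		lockmgr(&examplelock, LK_RELEASE, NULL);
 *	}
 */
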
490 /*
491 * Determine the status of a lock.
492 */
493 int
494 lockstatus(struct lock *lkp)
495 {
496 int s = 0; /* XXX: gcc */
497 int lock_type = 0;
498 struct lwp *l = curlwp; /* XXX */
499 pid_t pid;
500 lwpid_t lid;
501 cpuid_t cpu_num;
502
503 if ((lkp->lk_flags & LK_SPIN) || l == NULL) {
504 cpu_num = cpu_number();
505 pid = LK_KERNPROC;
506 lid = 0;
507 } else {
508 cpu_num = LK_NOCPU;
509 pid = l->l_proc->p_pid;
510 lid = l->l_lid;
511 }
512
513 INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
514 if (lkp->lk_exclusivecount != 0) {
515 if (WEHOLDIT(lkp, pid, lid, cpu_num))
516 lock_type = LK_EXCLUSIVE;
517 else
518 lock_type = LK_EXCLOTHER;
519 } else if (lkp->lk_sharecount != 0)
520 lock_type = LK_SHARED;
521 else if (lkp->lk_flags & (LK_WANT_EXCL | LK_WANT_UPGRADE))
522 lock_type = LK_EXCLOTHER;
523 INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
524 return (lock_type);
525 }
526
527 #if defined(LOCKDEBUG)
528 /*
529 * Make sure no spin locks are held by a CPU that is about
530 * to context switch.
531 */
532 void
533 spinlock_switchcheck(void)
534 {
535 u_long cnt;
536 int s;
537
538 s = spllock();
539 #if defined(MULTIPROCESSOR)
540 cnt = curcpu()->ci_spin_locks;
541 #else
542 cnt = spin_locks;
543 #endif
544 splx(s);
545
546 if (cnt != 0)
547 panic("spinlock_switchcheck: CPU %lu has %lu spin locks",
548 (u_long) cpu_number(), cnt);
549 }
550 #endif /* LOCKDEBUG */
551
552 /*
553 * Locks and IPLs (interrupt priority levels):
554 *
555 * Locks which may be taken from interrupt context must be handled
556 * very carefully; you must spl to the highest IPL where the lock
557 * is needed before acquiring the lock.
558 *
559 * It is also important to avoid deadlock, since certain (very high
560 * priority) interrupts are often needed to keep the system as a whole
561 * from deadlocking, and must not be blocked while you are spinning
562 * waiting for a lower-priority lock.
563 *
564 * In addition, the lock-debugging hooks themselves need to use locks!
565 *
   566  * A raw __cpu_simple_lock may be used from interrupts as long as it
567 * is acquired and held at a single IPL.
568 *
569 * A simple_lock (which is a __cpu_simple_lock wrapped with some
570 * debugging hooks) may be used at or below spllock(), which is
571 * typically at or just below splhigh() (i.e. blocks everything
572 * but certain machine-dependent extremely high priority interrupts).
573 *
574 * spinlockmgr spinlocks should be used at or below splsched().
575 *
576 * Some platforms may have interrupts of higher priority than splsched(),
577 * including hard serial interrupts, inter-processor interrupts, and
578 * kernel debugger traps.
579 */
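
/*
 * Illustrative sketch of the rule above (placeholder names, not part of
 * the original source): a raw __cpu_simple_lock shared with an interrupt
 * handler is taken and released at that handler's (single) IPL.
 *
 *	__cpu_simple_lock_t sc_intr_slock;
 *	int s;
 *
 *	s = splvm();			(the one IPL the lock is used at)
 *	__cpu_simple_lock(&sc_intr_slock);
 *	... touch data shared with the interrupt handler ...
 *	__cpu_simple_unlock(&sc_intr_slock);
 *	splx(s);
 */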
580
581 /*
582 * XXX XXX kludge around another kludge..
583 *
584 * vfs_shutdown() may be called from interrupt context, either as a result
585 * of a panic, or from the debugger. It proceeds to call
   586  * sys_sync(&proc0, ...), pretending it's running on behalf of proc0.
587 *
588 * We would like to make an attempt to sync the filesystems in this case, so
589 * if this happens, we treat attempts to acquire locks specially.
590 * All locks are acquired on behalf of proc0.
591 *
   592  * If we've already panicked, we don't block waiting for locks, but
593 * just barge right ahead since we're already going down in flames.
594 */
595
596 /*
597 * Set, change, or release a lock.
598 *
599 * Shared requests increment the shared count. Exclusive requests set the
600 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
601 * accepted shared locks and shared-to-exclusive upgrades to go away.
602 */
603 int
604 #if defined(LOCKDEBUG)
605 _lockmgr(volatile struct lock *lkp, u_int flags,
606 struct simplelock *interlkp, const char *file, int line)
607 #else
608 lockmgr(volatile struct lock *lkp, u_int flags,
609 struct simplelock *interlkp)
610 #endif
611 {
612 int error;
613 pid_t pid;
614 lwpid_t lid;
615 int extflags;
616 cpuid_t cpu_num;
617 struct lwp *l = curlwp;
618 int lock_shutdown_noblock = 0;
619 int s = 0;
620
621 error = 0;
622
623 /* LK_RETRY is for vn_lock, not for lockmgr. */
624 KASSERT((flags & LK_RETRY) == 0);
625
626 INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
627 if (flags & LK_INTERLOCK)
628 simple_unlock(interlkp);
629 extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
630
631 #ifdef DIAGNOSTIC /* { */
632 /*
633 * Don't allow spins on sleep locks and don't allow sleeps
634 * on spin locks.
635 */
636 if ((flags ^ lkp->lk_flags) & LK_SPIN)
637 lockpanic(lkp, "lockmgr: sleep/spin mismatch");
638 #endif /* } */
639
640 if (extflags & LK_SPIN) {
641 pid = LK_KERNPROC;
642 lid = 0;
643 } else {
644 if (l == NULL) {
645 if (!doing_shutdown) {
646 panic("lockmgr: no context");
647 } else {
648 l = &lwp0;
649 if (panicstr && (!(flags & LK_NOWAIT))) {
650 flags |= LK_NOWAIT;
651 lock_shutdown_noblock = 1;
652 }
653 }
654 }
655 lid = l->l_lid;
656 pid = l->l_proc->p_pid;
657 }
658 cpu_num = cpu_number();
659
660 /*
661 * Once a lock has drained, the LK_DRAINING flag is set and an
662 * exclusive lock is returned. The only valid operation thereafter
663 * is a single release of that exclusive lock. This final release
664 * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
665 * further requests of any sort will result in a panic. The bits
666 * selected for these two flags are chosen so that they will be set
667 * in memory that is freed (freed memory is filled with 0xdeadbeef).
668 * The final release is permitted to give a new lease on life to
669 * the lock by specifying LK_REENABLE.
670 */
671 if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
672 #ifdef DIAGNOSTIC /* { */
673 if (lkp->lk_flags & LK_DRAINED)
674 lockpanic(lkp, "lockmgr: using decommissioned lock");
675 if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
676 WEHOLDIT(lkp, pid, lid, cpu_num) == 0)
677 lockpanic(lkp, "lockmgr: non-release on draining lock: %d",
678 flags & LK_TYPE_MASK);
679 #endif /* DIAGNOSTIC */ /* } */
680 lkp->lk_flags &= ~LK_DRAINING;
681 if ((flags & LK_REENABLE) == 0)
682 lkp->lk_flags |= LK_DRAINED;
683 }
684
685 switch (flags & LK_TYPE_MASK) {
686
687 case LK_SHARED:
688 if (WEHOLDIT(lkp, pid, lid, cpu_num) == 0) {
689 /*
690 * If just polling, check to see if we will block.
691 */
692 if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
693 (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
694 error = EBUSY;
695 break;
696 }
697 /*
698 * Wait for exclusive locks and upgrades to clear.
699 */
700 error = acquire(&lkp, &s, extflags, 0,
701 LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE,
702 RETURN_ADDRESS);
703 if (error)
704 break;
705 lkp->lk_sharecount++;
706 lkp->lk_flags |= LK_SHARE_NONZERO;
707 COUNT(lkp, l, cpu_num, 1);
708 break;
709 }
710 /*
711 * We hold an exclusive lock, so downgrade it to shared.
712 * An alternative would be to fail with EDEADLK.
713 */
714 lkp->lk_sharecount++;
715 lkp->lk_flags |= LK_SHARE_NONZERO;
716 COUNT(lkp, l, cpu_num, 1);
717 /* fall into downgrade */
718
719 case LK_DOWNGRADE:
720 if (WEHOLDIT(lkp, pid, lid, cpu_num) == 0 ||
721 lkp->lk_exclusivecount == 0)
722 lockpanic(lkp, "lockmgr: not holding exclusive lock");
723 lkp->lk_sharecount += lkp->lk_exclusivecount;
724 lkp->lk_flags |= LK_SHARE_NONZERO;
725 lkp->lk_exclusivecount = 0;
726 lkp->lk_recurselevel = 0;
727 lkp->lk_flags &= ~LK_HAVE_EXCL;
728 SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
729 #if defined(LOCKDEBUG)
730 lkp->lk_unlock_file = file;
731 lkp->lk_unlock_line = line;
732 #endif
733 DONTHAVEIT(lkp);
734 WAKEUP_WAITER(lkp);
735 break;
736
737 case LK_EXCLUPGRADE:
738 /*
739 * If another process is ahead of us to get an upgrade,
740 * then we want to fail rather than have an intervening
741 * exclusive access.
742 */
743 if (lkp->lk_flags & LK_WANT_UPGRADE) {
744 lkp->lk_sharecount--;
745 if (lkp->lk_sharecount == 0)
746 lkp->lk_flags &= ~LK_SHARE_NONZERO;
747 COUNT(lkp, l, cpu_num, -1);
748 error = EBUSY;
749 break;
750 }
751 /* fall into normal upgrade */
752
753 case LK_UPGRADE:
754 /*
755 * Upgrade a shared lock to an exclusive one. If another
756 * shared lock has already requested an upgrade to an
757 * exclusive lock, our shared lock is released and an
758 * exclusive lock is requested (which will be granted
   759 		 * after the upgrade). If we return an error, the lock
760 * will always be unlocked.
761 */
762 if (WEHOLDIT(lkp, pid, lid, cpu_num) || lkp->lk_sharecount <= 0)
763 lockpanic(lkp, "lockmgr: upgrade exclusive lock");
764 lkp->lk_sharecount--;
765 if (lkp->lk_sharecount == 0)
766 lkp->lk_flags &= ~LK_SHARE_NONZERO;
767 COUNT(lkp, l, cpu_num, -1);
768 /*
769 * If we are just polling, check to see if we will block.
770 */
771 if ((extflags & LK_NOWAIT) &&
772 ((lkp->lk_flags & LK_WANT_UPGRADE) ||
773 lkp->lk_sharecount > 1)) {
774 error = EBUSY;
775 break;
776 }
777 if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
778 /*
   779 			 * We are the first shared lock to request an upgrade, so
780 * request upgrade and wait for the shared count to
781 * drop to zero, then take exclusive lock.
782 */
783 lkp->lk_flags |= LK_WANT_UPGRADE;
784 error = acquire(&lkp, &s, extflags, 0, LK_SHARE_NONZERO,
785 RETURN_ADDRESS);
786 lkp->lk_flags &= ~LK_WANT_UPGRADE;
787 if (error) {
788 WAKEUP_WAITER(lkp);
789 break;
790 }
791 lkp->lk_flags |= LK_HAVE_EXCL;
792 SETHOLDER(lkp, pid, lid, cpu_num);
793 #if defined(LOCKDEBUG)
794 lkp->lk_lock_file = file;
795 lkp->lk_lock_line = line;
796 #endif
797 HAVEIT(lkp);
798 if (lkp->lk_exclusivecount != 0)
799 lockpanic(lkp, "lockmgr: non-zero exclusive count");
800 lkp->lk_exclusivecount = 1;
801 if (extflags & LK_SETRECURSE)
802 lkp->lk_recurselevel = 1;
803 COUNT(lkp, l, cpu_num, 1);
804 break;
805 }
806 /*
807 * Someone else has requested upgrade. Release our shared
808 * lock, awaken upgrade requestor if we are the last shared
809 * lock, then request an exclusive lock.
810 */
811 if (lkp->lk_sharecount == 0)
812 WAKEUP_WAITER(lkp);
813 /* fall into exclusive request */
814
815 case LK_EXCLUSIVE:
816 if (WEHOLDIT(lkp, pid, lid, cpu_num)) {
817 /*
818 * Recursive lock.
819 */
820 if ((extflags & LK_CANRECURSE) == 0 &&
821 lkp->lk_recurselevel == 0) {
822 if (extflags & LK_RECURSEFAIL) {
823 error = EDEADLK;
824 break;
825 } else
826 lockpanic(lkp, "lockmgr: locking against myself");
827 }
828 lkp->lk_exclusivecount++;
829 if (extflags & LK_SETRECURSE &&
830 lkp->lk_recurselevel == 0)
831 lkp->lk_recurselevel = lkp->lk_exclusivecount;
832 COUNT(lkp, l, cpu_num, 1);
833 break;
834 }
835 /*
836 * If we are just polling, check to see if we will sleep.
837 */
838 if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
839 (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
840 LK_SHARE_NONZERO))) {
841 error = EBUSY;
842 break;
843 }
844 /*
845 * Try to acquire the want_exclusive flag.
846 */
847 error = acquire(&lkp, &s, extflags, 0,
848 LK_HAVE_EXCL | LK_WANT_EXCL, RETURN_ADDRESS);
849 if (error)
850 break;
851 lkp->lk_flags |= LK_WANT_EXCL;
852 /*
853 * Wait for shared locks and upgrades to finish.
854 */
855 error = acquire(&lkp, &s, extflags, 0,
856 LK_HAVE_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO,
857 RETURN_ADDRESS);
858 lkp->lk_flags &= ~LK_WANT_EXCL;
859 if (error) {
860 WAKEUP_WAITER(lkp);
861 break;
862 }
863 lkp->lk_flags |= LK_HAVE_EXCL;
864 SETHOLDER(lkp, pid, lid, cpu_num);
865 #if defined(LOCKDEBUG)
866 lkp->lk_lock_file = file;
867 lkp->lk_lock_line = line;
868 #endif
869 HAVEIT(lkp);
870 if (lkp->lk_exclusivecount != 0)
871 lockpanic(lkp, "lockmgr: non-zero exclusive count");
872 lkp->lk_exclusivecount = 1;
873 if (extflags & LK_SETRECURSE)
874 lkp->lk_recurselevel = 1;
875 COUNT(lkp, l, cpu_num, 1);
876 break;
877
878 case LK_RELEASE:
879 if (lkp->lk_exclusivecount != 0) {
880 if (WEHOLDIT(lkp, pid, lid, cpu_num) == 0) {
881 if (lkp->lk_flags & LK_SPIN) {
882 lockpanic(lkp,
883 "lockmgr: processor %lu, not "
884 "exclusive lock holder %lu "
885 "unlocking", cpu_num, lkp->lk_cpu);
886 } else {
887 lockpanic(lkp, "lockmgr: pid %d.%d, not "
888 "exclusive lock holder %d.%d "
889 "unlocking", pid, lid,
890 lkp->lk_lockholder,
891 lkp->lk_locklwp);
892 }
893 }
894 if (lkp->lk_exclusivecount == lkp->lk_recurselevel)
895 lkp->lk_recurselevel = 0;
896 lkp->lk_exclusivecount--;
897 COUNT(lkp, l, cpu_num, -1);
898 if (lkp->lk_exclusivecount == 0) {
899 lkp->lk_flags &= ~LK_HAVE_EXCL;
900 SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
901 #if defined(LOCKDEBUG)
902 lkp->lk_unlock_file = file;
903 lkp->lk_unlock_line = line;
904 #endif
905 DONTHAVEIT(lkp);
906 }
907 } else if (lkp->lk_sharecount != 0) {
908 lkp->lk_sharecount--;
909 if (lkp->lk_sharecount == 0)
910 lkp->lk_flags &= ~LK_SHARE_NONZERO;
911 COUNT(lkp, l, cpu_num, -1);
912 }
913 #ifdef DIAGNOSTIC
914 else
915 lockpanic(lkp, "lockmgr: release of unlocked lock!");
916 #endif
917 WAKEUP_WAITER(lkp);
918 break;
919
920 case LK_DRAIN:
921 /*
922 * Check that we do not already hold the lock, as it can
923 * never drain if we do. Unfortunately, we have no way to
924 * check for holding a shared lock, but at least we can
925 * check for an exclusive one.
926 */
927 if (WEHOLDIT(lkp, pid, lid, cpu_num))
928 lockpanic(lkp, "lockmgr: draining against myself");
929 /*
930 * If we are just polling, check to see if we will sleep.
931 */
932 if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
933 (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
934 LK_SHARE_NONZERO | LK_WAIT_NONZERO))) {
935 error = EBUSY;
936 break;
937 }
938 error = acquire(&lkp, &s, extflags, 1,
939 LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
940 LK_SHARE_NONZERO | LK_WAIT_NONZERO,
941 RETURN_ADDRESS);
942 if (error)
943 break;
944 lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
945 SETHOLDER(lkp, pid, lid, cpu_num);
946 #if defined(LOCKDEBUG)
947 lkp->lk_lock_file = file;
948 lkp->lk_lock_line = line;
949 #endif
950 HAVEIT(lkp);
951 lkp->lk_exclusivecount = 1;
952 /* XXX unlikely that we'd want this */
953 if (extflags & LK_SETRECURSE)
954 lkp->lk_recurselevel = 1;
955 COUNT(lkp, l, cpu_num, 1);
956 break;
957
958 default:
959 INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
960 lockpanic(lkp, "lockmgr: unknown locktype request %d",
961 flags & LK_TYPE_MASK);
962 /* NOTREACHED */
963 }
964 if ((lkp->lk_flags & (LK_WAITDRAIN|LK_SPIN)) == LK_WAITDRAIN &&
965 ((lkp->lk_flags &
966 (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
967 LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0)) {
968 lkp->lk_flags &= ~LK_WAITDRAIN;
969 wakeup(&lkp->lk_flags);
970 }
971 /*
972 * Note that this panic will be a recursive panic, since
973 * we only set lock_shutdown_noblock above if panicstr != NULL.
974 */
975 if (error && lock_shutdown_noblock)
976 lockpanic(lkp, "lockmgr: deadlock (see previous panic)");
977
978 INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
979 return (error);
980 }
981
982 /*
983 * For a recursive spinlock held one or more times by the current CPU,
984 * release all N locks, and return N.
985 * Intended for use in mi_switch() shortly before context switching.
986 */
987
988 int
989 #if defined(LOCKDEBUG)
990 _spinlock_release_all(volatile struct lock *lkp, const char *file, int line)
991 #else
992 spinlock_release_all(volatile struct lock *lkp)
993 #endif
994 {
995 int s, count;
996 cpuid_t cpu_num;
997
998 KASSERT(lkp->lk_flags & LK_SPIN);
999
1000 INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);
1001
1002 cpu_num = cpu_number();
1003 count = lkp->lk_exclusivecount;
1004
1005 if (count != 0) {
1006 #ifdef DIAGNOSTIC
1007 if (WEHOLDIT(lkp, 0, 0, cpu_num) == 0) {
1008 lockpanic(lkp, "spinlock_release_all: processor %lu, not "
1009 "exclusive lock holder %lu "
1010 "unlocking", (long)cpu_num, lkp->lk_cpu);
1011 }
1012 #endif
1013 lkp->lk_recurselevel = 0;
1014 lkp->lk_exclusivecount = 0;
1015 COUNT_CPU(cpu_num, -count);
1016 lkp->lk_flags &= ~LK_HAVE_EXCL;
1017 SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
1018 #if defined(LOCKDEBUG)
1019 lkp->lk_unlock_file = file;
1020 lkp->lk_unlock_line = line;
1021 #endif
1022 DONTHAVEIT(lkp);
1023 }
1024 #ifdef DIAGNOSTIC
1025 else if (lkp->lk_sharecount != 0)
1026 lockpanic(lkp, "spinlock_release_all: release of shared lock!");
1027 else
1028 lockpanic(lkp, "spinlock_release_all: release of unlocked lock!");
1029 #endif
1030 INTERLOCK_RELEASE(lkp, LK_SPIN, s);
1031
1032 return (count);
1033 }
1034
1035 /*
  1036  * Reacquire a recursive spinlock on behalf of the current CPU, restoring
  1037  * all N holds previously released by spinlock_release_all().
1038 * Intended for use in mi_switch() right after resuming execution.
1039 */
1040
1041 void
1042 #if defined(LOCKDEBUG)
1043 _spinlock_acquire_count(volatile struct lock *lkp, int count,
1044 const char *file, int line)
1045 #else
1046 spinlock_acquire_count(volatile struct lock *lkp, int count)
1047 #endif
1048 {
1049 int s, error;
1050 cpuid_t cpu_num;
1051
1052 KASSERT(lkp->lk_flags & LK_SPIN);
1053
1054 INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);
1055
1056 cpu_num = cpu_number();
1057
1058 #ifdef DIAGNOSTIC
1059 if (WEHOLDIT(lkp, LK_NOPROC, 0, cpu_num))
1060 lockpanic(lkp, "spinlock_acquire_count: processor %lu already holds lock", (long)cpu_num);
1061 #endif
1062 /*
1063 * Try to acquire the want_exclusive flag.
1064 */
1065 error = acquire(&lkp, &s, LK_SPIN, 0, LK_HAVE_EXCL | LK_WANT_EXCL,
1066 RETURN_ADDRESS);
1067 lkp->lk_flags |= LK_WANT_EXCL;
1068 /*
1069 * Wait for shared locks and upgrades to finish.
1070 */
1071 error = acquire(&lkp, &s, LK_SPIN, 0,
1072 LK_HAVE_EXCL | LK_SHARE_NONZERO | LK_WANT_UPGRADE,
1073 RETURN_ADDRESS);
1074 lkp->lk_flags &= ~LK_WANT_EXCL;
1075 lkp->lk_flags |= LK_HAVE_EXCL;
1076 SETHOLDER(lkp, LK_NOPROC, 0, cpu_num);
1077 #if defined(LOCKDEBUG)
1078 lkp->lk_lock_file = file;
1079 lkp->lk_lock_line = line;
1080 #endif
1081 HAVEIT(lkp);
1082 if (lkp->lk_exclusivecount != 0)
1083 lockpanic(lkp, "lockmgr: non-zero exclusive count");
1084 lkp->lk_exclusivecount = count;
1085 lkp->lk_recurselevel = 1;
1086 COUNT_CPU(cpu_num, count);
1087
1088 INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
1089 }
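
/*
 * The two routines above are intended to be used as a pair around a
 * context switch, roughly as follows (placeholder lock name, illustrative
 * only):
 *
 *	count = spinlock_release_all(&examplelock);
 *	... switch to another LWP and later resume ...
 *	spinlock_acquire_count(&examplelock, count);
 *
 * so that after resuming the CPU again holds the spinlock the same number
 * of times as before the switch.
 */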
1090
1091
1092
1093 /*
  1094  * Print out information about the state of a lock. Used by VOP_PRINT
  1095  * routines to display the status of contained locks.
1096 */
1097 void
1098 lockmgr_printinfo(volatile struct lock *lkp)
1099 {
1100
1101 if (lkp->lk_sharecount)
1102 printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
1103 lkp->lk_sharecount);
1104 else if (lkp->lk_flags & LK_HAVE_EXCL) {
1105 printf(" lock type %s: EXCL (count %d) by ",
1106 lkp->lk_wmesg, lkp->lk_exclusivecount);
1107 if (lkp->lk_flags & LK_SPIN)
1108 printf("processor %lu", lkp->lk_cpu);
1109 else
1110 printf("pid %d.%d", lkp->lk_lockholder,
1111 lkp->lk_locklwp);
1112 } else
1113 printf(" not locked");
1114 if ((lkp->lk_flags & LK_SPIN) == 0 && lkp->lk_waitcount > 0)
1115 printf(" with %d pending", lkp->lk_waitcount);
1116 }
1117
1118 #if defined(LOCKDEBUG) /* { */
1119 _TAILQ_HEAD(, struct simplelock, volatile) simplelock_list =
1120 TAILQ_HEAD_INITIALIZER(simplelock_list);
1121
1122 #if defined(MULTIPROCESSOR) /* { */
1123 struct simplelock simplelock_list_slock = SIMPLELOCK_INITIALIZER;
1124
1125 #define SLOCK_LIST_LOCK() \
1126 __cpu_simple_lock(&simplelock_list_slock.lock_data)
1127
1128 #define SLOCK_LIST_UNLOCK() \
1129 __cpu_simple_unlock(&simplelock_list_slock.lock_data)
1130
1131 #define SLOCK_COUNT(x) \
1132 curcpu()->ci_simple_locks += (x)
1133 #else
1134 u_long simple_locks;
1135
1136 #define SLOCK_LIST_LOCK() /* nothing */
1137
1138 #define SLOCK_LIST_UNLOCK() /* nothing */
1139
1140 #define SLOCK_COUNT(x) simple_locks += (x)
1141 #endif /* MULTIPROCESSOR */ /* } */
1142
1143 #ifdef MULTIPROCESSOR
  1144 #define	SLOCK_MP()	lock_printf("on CPU %lu\n", 			\
1145 (u_long) cpu_number())
1146 #else
1147 #define SLOCK_MP() /* nothing */
1148 #endif
1149
1150 #define SLOCK_WHERE(str, alp, id, l) \
1151 do { \
1152 lock_printf("\n"); \
1153 lock_printf(str); \
1154 lock_printf("lock: %p, currently at: %s:%d\n", (alp), (id), (l)); \
1155 SLOCK_MP(); \
1156 if ((alp)->lock_file != NULL) \
1157 lock_printf("last locked: %s:%d\n", (alp)->lock_file, \
1158 (alp)->lock_line); \
1159 if ((alp)->unlock_file != NULL) \
1160 lock_printf("last unlocked: %s:%d\n", (alp)->unlock_file, \
1161 (alp)->unlock_line); \
1162 SLOCK_TRACE() \
1163 SLOCK_DEBUGGER(); \
1164 } while (/*CONSTCOND*/0)
1165
1166 /*
1167 * Simple lock functions so that the debugger can see from whence
1168 * they are being called.
1169 */
1170 void
1171 simple_lock_init(volatile struct simplelock *alp)
1172 {
1173
1174 #if defined(MULTIPROCESSOR) /* { */
1175 __cpu_simple_lock_init(&alp->lock_data);
1176 #else
1177 alp->lock_data = __SIMPLELOCK_UNLOCKED;
1178 #endif /* } */
1179 alp->lock_file = NULL;
1180 alp->lock_line = 0;
1181 alp->unlock_file = NULL;
1182 alp->unlock_line = 0;
1183 alp->lock_holder = LK_NOCPU;
1184 }
1185
1186 void
1187 _simple_lock(volatile struct simplelock *alp, const char *id, int l)
1188 {
1189 cpuid_t cpu_num = cpu_number();
1190 int s;
1191
1192 s = spllock();
1193
1194 /*
1195 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
1196 * don't take any action, and just fall into the normal spin case.
1197 */
1198 if (alp->lock_data == __SIMPLELOCK_LOCKED) {
1199 #if defined(MULTIPROCESSOR) /* { */
1200 if (alp->lock_holder == cpu_num) {
1201 SLOCK_WHERE("simple_lock: locking against myself\n",
1202 alp, id, l);
1203 goto out;
1204 }
1205 #else
1206 SLOCK_WHERE("simple_lock: lock held\n", alp, id, l);
1207 goto out;
1208 #endif /* MULTIPROCESSOR */ /* } */
1209 }
1210
1211 #if defined(MULTIPROCESSOR) /* { */
1212 /* Acquire the lock before modifying any fields. */
1213 splx(s);
1214 __cpu_simple_lock(&alp->lock_data);
1215 s = spllock();
1216 #else
1217 alp->lock_data = __SIMPLELOCK_LOCKED;
1218 #endif /* } */
1219
1220 if (alp->lock_holder != LK_NOCPU) {
1221 SLOCK_WHERE("simple_lock: uninitialized lock\n",
1222 alp, id, l);
1223 }
1224 alp->lock_file = id;
1225 alp->lock_line = l;
1226 alp->lock_holder = cpu_num;
1227
1228 SLOCK_LIST_LOCK();
1229 TAILQ_INSERT_TAIL(&simplelock_list, alp, list);
1230 SLOCK_LIST_UNLOCK();
1231
1232 SLOCK_COUNT(1);
1233
1234 out:
1235 splx(s);
1236 }
1237
1238 int
1239 _simple_lock_held(volatile struct simplelock *alp)
1240 {
1241 #if defined(MULTIPROCESSOR) || defined(DIAGNOSTIC)
1242 cpuid_t cpu_num = cpu_number();
1243 #endif
1244 int s, locked = 0;
1245
1246 s = spllock();
1247
1248 #if defined(MULTIPROCESSOR)
1249 if (__cpu_simple_lock_try(&alp->lock_data) == 0)
1250 locked = (alp->lock_holder == cpu_num);
1251 else
1252 __cpu_simple_unlock(&alp->lock_data);
1253 #else
1254 if (alp->lock_data == __SIMPLELOCK_LOCKED) {
1255 locked = 1;
1256 KASSERT(alp->lock_holder == cpu_num);
1257 }
1258 #endif
1259
1260 splx(s);
1261
1262 return (locked);
1263 }
1264
1265 int
1266 _simple_lock_try(volatile struct simplelock *alp, const char *id, int l)
1267 {
1268 cpuid_t cpu_num = cpu_number();
1269 int s, rv = 0;
1270
1271 s = spllock();
1272
1273 /*
1274 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
1275 * don't take any action.
1276 */
1277 #if defined(MULTIPROCESSOR) /* { */
1278 if ((rv = __cpu_simple_lock_try(&alp->lock_data)) == 0) {
1279 if (alp->lock_holder == cpu_num)
1280 SLOCK_WHERE("simple_lock_try: locking against myself\n",
1281 alp, id, l);
1282 goto out;
1283 }
1284 #else
1285 if (alp->lock_data == __SIMPLELOCK_LOCKED) {
1286 SLOCK_WHERE("simple_lock_try: lock held\n", alp, id, l);
1287 goto out;
1288 }
1289 alp->lock_data = __SIMPLELOCK_LOCKED;
1290 #endif /* MULTIPROCESSOR */ /* } */
1291
1292 /*
1293 * At this point, we have acquired the lock.
1294 */
1295
1296 rv = 1;
1297
1298 alp->lock_file = id;
1299 alp->lock_line = l;
1300 alp->lock_holder = cpu_num;
1301
1302 SLOCK_LIST_LOCK();
1303 TAILQ_INSERT_TAIL(&simplelock_list, alp, list);
1304 SLOCK_LIST_UNLOCK();
1305
1306 SLOCK_COUNT(1);
1307
1308 out:
1309 splx(s);
1310 return (rv);
1311 }
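
/*
 * Illustrative use of the try variant (placeholder names, not from the
 * original source): callers that must not spin can poll the lock and fall
 * back to other work if it is busy.
 *
 *	if (simple_lock_try(&example_slock)) {
 *		... short critical section ...
 *		simple_unlock(&example_slock);
 *	} else {
 *		... lock busy, try again later ...
 *	}
 */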
1312
1313 void
1314 _simple_unlock(volatile struct simplelock *alp, const char *id, int l)
1315 {
1316 int s;
1317
1318 s = spllock();
1319
1320 /*
1321 * MULTIPROCESSOR case: This is `safe' because we think we hold
1322 * the lock, and if we don't, we don't take any action.
1323 */
1324 if (alp->lock_data == __SIMPLELOCK_UNLOCKED) {
1325 SLOCK_WHERE("simple_unlock: lock not held\n",
1326 alp, id, l);
1327 goto out;
1328 }
1329
1330 SLOCK_LIST_LOCK();
1331 TAILQ_REMOVE(&simplelock_list, alp, list);
1332 SLOCK_LIST_UNLOCK();
1333
1334 SLOCK_COUNT(-1);
1335
1336 alp->list.tqe_next = NULL; /* sanity */
1337 alp->list.tqe_prev = NULL; /* sanity */
1338
1339 alp->unlock_file = id;
1340 alp->unlock_line = l;
1341
1342 #if defined(MULTIPROCESSOR) /* { */
1343 alp->lock_holder = LK_NOCPU;
1344 /* Now that we've modified all fields, release the lock. */
1345 __cpu_simple_unlock(&alp->lock_data);
1346 #else
1347 alp->lock_data = __SIMPLELOCK_UNLOCKED;
1348 KASSERT(alp->lock_holder == cpu_number());
1349 alp->lock_holder = LK_NOCPU;
1350 #endif /* } */
1351
1352 out:
1353 splx(s);
1354 }
1355
1356 void
1357 simple_lock_dump(void)
1358 {
1359 volatile struct simplelock *alp;
1360 int s;
1361
1362 s = spllock();
1363 SLOCK_LIST_LOCK();
1364 lock_printf("all simple locks:\n");
1365 TAILQ_FOREACH(alp, &simplelock_list, list) {
1366 lock_printf("%p CPU %lu %s:%d\n", alp, alp->lock_holder,
1367 alp->lock_file, alp->lock_line);
1368 }
1369 SLOCK_LIST_UNLOCK();
1370 splx(s);
1371 }
1372
1373 void
1374 simple_lock_freecheck(void *start, void *end)
1375 {
1376 volatile struct simplelock *alp;
1377 int s;
1378
1379 s = spllock();
1380 SLOCK_LIST_LOCK();
1381 TAILQ_FOREACH(alp, &simplelock_list, list) {
1382 if ((volatile void *)alp >= start &&
1383 (volatile void *)alp < end) {
1384 lock_printf("freeing simple_lock %p CPU %lu %s:%d\n",
1385 alp, alp->lock_holder, alp->lock_file,
1386 alp->lock_line);
1387 SLOCK_DEBUGGER();
1388 }
1389 }
1390 SLOCK_LIST_UNLOCK();
1391 splx(s);
1392 }
1393
1394 /*
  1395  * We must not be holding any simple locks when context switching.
1396 */
1397
1398 void
1399 simple_lock_switchcheck(void)
1400 {
1401
1402 simple_lock_only_held(NULL, "switching");
1403 }
1404
1405 /*
1406 * Drop into the debugger if lp isn't the only lock held.
1407 * lp may be NULL.
1408 */
1409 void
1410 simple_lock_only_held(volatile struct simplelock *lp, const char *where)
1411 {
1412 volatile struct simplelock *alp;
1413 cpuid_t cpu_num = cpu_number();
1414 int s;
1415
1416 if (lp) {
1417 LOCK_ASSERT(simple_lock_held(lp));
1418 }
1419 s = spllock();
1420 SLOCK_LIST_LOCK();
1421 TAILQ_FOREACH(alp, &simplelock_list, list) {
1422 if (alp == lp)
1423 continue;
1424 if (alp->lock_holder == cpu_num)
1425 break;
1426 }
1427 SLOCK_LIST_UNLOCK();
1428 splx(s);
1429
1430 if (alp != NULL) {
1431 lock_printf("\n%s with held simple_lock %p "
1432 "CPU %lu %s:%d\n",
1433 where, alp, alp->lock_holder, alp->lock_file,
1434 alp->lock_line);
1435 SLOCK_TRACE();
1436 SLOCK_DEBUGGER();
1437 }
1438 }
1439
1440 /*
1441 * Set to 1 by simple_lock_assert_*().
1442 * Can be cleared from ddb to avoid a panic.
1443 */
1444 int slock_assert_will_panic;
1445
1446 /*
1447 * If the lock isn't held, print a traceback, optionally drop into the
1448 * debugger, then panic.
  1449  * The panic can be avoided by clearing slock_assert_will_panic from the
1450 * debugger.
1451 */
1452 void
1453 _simple_lock_assert_locked(volatile struct simplelock *alp,
1454 const char *lockname, const char *id, int l)
1455 {
1456 if (simple_lock_held(alp) == 0) {
1457 slock_assert_will_panic = 1;
1458 lock_printf("%s lock not held\n", lockname);
1459 SLOCK_WHERE("lock not held", alp, id, l);
1460 if (slock_assert_will_panic)
1461 panic("%s: not locked", lockname);
1462 }
1463 }
1464
1465 void
1466 _simple_lock_assert_unlocked(volatile struct simplelock *alp,
1467 const char *lockname, const char *id, int l)
1468 {
1469 if (simple_lock_held(alp)) {
1470 slock_assert_will_panic = 1;
1471 lock_printf("%s lock held\n", lockname);
1472 SLOCK_WHERE("lock held", alp, id, l);
1473 if (slock_assert_will_panic)
1474 panic("%s: locked", lockname);
1475 }
1476 }
1477
1478 void
1479 assert_sleepable(struct simplelock *interlock, const char *msg)
1480 {
1481
1482 if (curlwp == NULL) {
1483 panic("assert_sleepable: NULL curlwp");
1484 }
1485 simple_lock_only_held(interlock, msg);
1486 }
1487
1488 #endif /* LOCKDEBUG */ /* } */
1489
1490 #if defined(MULTIPROCESSOR)
1491
1492 /*
1493 * Functions for manipulating the kernel_lock. We put them here
1494 * so that they show up in profiles.
1495 */
1496
1497 #define _KERNEL_LOCK_ABORT(msg) \
1498 LOCKDEBUG_ABORT(kernel_lock_id, &kernel_lock, &_kernel_lock_ops, \
1499 __FUNCTION__, msg)
1500
1501 #ifdef LOCKDEBUG
1502 #define _KERNEL_LOCK_ASSERT(cond) \
1503 do { \
1504 if (!(cond)) \
1505 _KERNEL_LOCK_ABORT("assertion failed: " #cond); \
1506 } while (/* CONSTCOND */ 0)
1507 #else
1508 #define _KERNEL_LOCK_ASSERT(cond) /* nothing */
1509 #endif
1510
1511 void _kernel_lock_dump(volatile void *);
1512
1513 lockops_t _kernel_lock_ops = {
1514 "Kernel lock",
1515 0,
1516 _kernel_lock_dump
1517 };
1518
1519 /*
1520 * Initialize the kernel lock.
1521 */
1522 void
1523 _kernel_lock_init(void)
1524 {
1525
1526 __cpu_simple_lock_init(&kernel_lock);
1527 kernel_lock_id = LOCKDEBUG_ALLOC(&kernel_lock, &_kernel_lock_ops);
1528 }
1529
1530 /*
1531 * Print debugging information about the kernel lock.
1532 */
1533 void
1534 _kernel_lock_dump(volatile void *junk)
1535 {
1536 struct cpu_info *ci = curcpu();
1537
1538 (void)junk;
1539
1540 printf_nolog("curcpu holds : %18d wanted by: %#018lx\n",
1541 ci->ci_biglock_count, (long)ci->ci_biglock_wanted);
1542 }
1543
1544 /*
1545 * Acquire 'nlocks' holds on the kernel lock. If 'l' is non-null, the
1546 * acquisition is from process context.
1547 */
1548 void
1549 _kernel_lock(int nlocks, struct lwp *l)
1550 {
1551 struct cpu_info *ci = curcpu();
1552 LOCKSTAT_TIMER(spintime);
1553 LOCKSTAT_FLAG(lsflag);
1554 struct lwp *owant;
1555 #ifdef LOCKDEBUG
1556 u_int spins;
1557 #endif
1558 int s;
1559
1560 (void)l;
1561
1562 if (nlocks == 0)
1563 return;
1564 _KERNEL_LOCK_ASSERT(nlocks > 0);
1565
1566 s = splbiglock();
1567
1568 if (ci->ci_biglock_count != 0) {
1569 _KERNEL_LOCK_ASSERT(kernel_lock == __SIMPLELOCK_LOCKED);
1570 ci->ci_biglock_count += nlocks;
1571 splx(s);
1572 return;
1573 }
1574
1575 LOCKDEBUG_WANTLOCK(kernel_lock_id,
1576 (uintptr_t)__builtin_return_address(0), 0);
1577
1578 if (__cpu_simple_lock_try(&kernel_lock)) {
1579 ci->ci_biglock_count = nlocks;
1580 LOCKDEBUG_LOCKED(kernel_lock_id,
1581 (uintptr_t)__builtin_return_address(0), 0);
1582 splx(s);
1583 return;
1584 }
1585
1586 LOCKSTAT_ENTER(lsflag);
1587 LOCKSTAT_START_TIMER(lsflag, spintime);
1588
1589 /*
1590 * Before setting ci_biglock_wanted we must post a store
1591 * fence (see kern_mutex.c). This is accomplished by the
1592 * __cpu_simple_lock_try() above.
1593 */
1594 owant = ci->ci_biglock_wanted;
1595 ci->ci_biglock_wanted = curlwp; /* XXXAD */
1596
1597 #ifdef LOCKDEBUG
1598 spins = 0;
1599 #endif
1600
1601 do {
1602 while (kernel_lock == __SIMPLELOCK_LOCKED) {
1603 #ifdef LOCKDEBUG
1604 if (SPINLOCK_SPINOUT(spins))
1605 _KERNEL_LOCK_ABORT("spinout");
1606 #endif
1607 splx(s);
1608 SPINLOCK_SPIN_HOOK;
1609 (void)splbiglock();
1610 }
1611 } while (!__cpu_simple_lock_try(&kernel_lock));
1612
1613 ci->ci_biglock_wanted = owant;
1614 ci->ci_biglock_count += nlocks;
1615 LOCKSTAT_STOP_TIMER(lsflag, spintime);
1616 LOCKDEBUG_LOCKED(kernel_lock_id,
1617 (uintptr_t)__builtin_return_address(0), 0);
1618 splx(s);
1619
1620 /*
1621 * Again, another store fence is required (see kern_mutex.c).
1622 */
1623 mb_write();
1624 if (owant == NULL) {
1625 LOCKSTAT_EVENT(lsflag, &kernel_lock, LB_KERNEL_LOCK | LB_SPIN,
1626 1, spintime);
1627 }
1628 LOCKSTAT_EXIT(lsflag);
1629 }
1630
1631 /*
  1632  * Release 'nlocks' holds on the kernel lock.  If 'nlocks' is zero, release
  1633  * all holds; if it is -1, release the single remaining hold.  If 'l' is
        * non-null, the release is from process context.
1634 */
1635 void
1636 _kernel_unlock(int nlocks, struct lwp *l, int *countp)
1637 {
1638 struct cpu_info *ci = curcpu();
1639 u_int olocks;
1640 int s;
1641
1642 (void)l;
1643
1644 _KERNEL_LOCK_ASSERT(nlocks < 2);
1645
1646 olocks = ci->ci_biglock_count;
1647
1648 if (olocks == 0) {
1649 _KERNEL_LOCK_ASSERT(nlocks <= 0);
1650 if (countp != NULL)
1651 *countp = 0;
1652 return;
1653 }
1654
1655 _KERNEL_LOCK_ASSERT(kernel_lock == __SIMPLELOCK_LOCKED);
1656
1657 if (nlocks == 0)
1658 nlocks = olocks;
1659 else if (nlocks == -1) {
1660 nlocks = 1;
1661 _KERNEL_LOCK_ASSERT(olocks == 1);
1662 }
1663
1664 s = splbiglock();
1665 if ((ci->ci_biglock_count -= nlocks) == 0) {
1666 LOCKDEBUG_UNLOCKED(kernel_lock_id,
1667 (uintptr_t)__builtin_return_address(0), 0);
1668 __cpu_simple_unlock(&kernel_lock);
1669 }
1670 splx(s);
1671
1672 if (countp != NULL)
1673 *countp = olocks;
1674 }
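
/*
 * Sketch of the usual save/restore pattern around code that must not hold
 * the kernel lock (variable names are illustrative only):
 *
 *	int biglocks;
 *
 *	_kernel_unlock(0, l, &biglocks);	releases every hold
 *	... run without the kernel lock ...
 *	_kernel_lock(biglocks, l);		takes the same number back
 */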
1675
1676 #if defined(DEBUG)
1677 /*
1678 * Assert that the kernel lock is held.
1679 */
1680 void
1681 _kernel_lock_assert_locked(void)
1682 {
1683
1684 if (kernel_lock != __SIMPLELOCK_LOCKED ||
1685 curcpu()->ci_biglock_count == 0)
1686 _KERNEL_LOCK_ABORT("not locked");
1687 }
1688
1689 void
  1690 _kernel_lock_assert_unlocked(void)
1691 {
1692
1693 if (curcpu()->ci_biglock_count != 0)
1694 _KERNEL_LOCK_ABORT("locked");
1695 }
1696 #endif
1697
  1698 #endif	/* MULTIPROCESSOR */
1699