/*	$NetBSD: kern_lock.c,v 1.119 2007/09/10 11:34:10 skrll Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and by Andrew Doran.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Ross Harvey.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.119 2007/09/10 11:34:10 skrll Exp $");

#include "opt_multiprocessor.h"
#include "opt_ddb.h"

#define	__MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/systm.h>
#include <sys/lockdebug.h>

#include <machine/cpu.h>
#include <machine/stdarg.h>

#include <dev/lockstat.h>

#if defined(LOCKDEBUG)
#include <sys/syslog.h>
/*
 * Note that stdarg.h and the ANSI-style va_start macro are used for both
 * ANSI and traditional C compiles.
 * XXX: this requires that stdarg.h define: va_alist and va_dcl
 */
#include <machine/stdarg.h>

void	lock_printf(const char *fmt, ...)
    __attribute__((__format__(__printf__,1,2)));

static int acquire(volatile struct lock **, int *, int, int, int, uintptr_t);

int	lock_debug_syslog = 0;	/* defaults to printf, but can be patched */

#ifdef DDB
#include <ddb/ddbvar.h>
#include <machine/db_machdep.h>
#include <ddb/db_command.h>
#include <ddb/db_interface.h>
#endif
#endif /* defined(LOCKDEBUG) */

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#if defined(LOCKDEBUG) || defined(DIAGNOSTIC) /* { */
#if defined(MULTIPROCESSOR) /* { */
#define	COUNT_CPU(cpu_id, x)						\
	curcpu()->ci_spin_locks += (x)
#else
u_long	spin_locks;
#define	COUNT_CPU(cpu_id, x)	spin_locks += (x)
#endif /* MULTIPROCESSOR */ /* } */

#define	COUNT(lkp, l, cpu_id, x)					\
do {									\
	if ((lkp)->lk_flags & LK_SPIN)					\
		COUNT_CPU((cpu_id), (x));				\
	else								\
		(l)->l_locks += (x);					\
} while (/*CONSTCOND*/0)
#else
#define	COUNT(lkp, p, cpu_id, x)
#define	COUNT_CPU(cpu_id, x)
#endif /* LOCKDEBUG || DIAGNOSTIC */ /* } */

#define	INTERLOCK_ACQUIRE(lkp, flags, s)				\
do {									\
	if ((flags) & LK_SPIN)						\
		s = splhigh();						\
	simple_lock(&(lkp)->lk_interlock);				\
} while (/*CONSTCOND*/ 0)

#define	INTERLOCK_RELEASE(lkp, flags, s)				\
do {									\
	simple_unlock(&(lkp)->lk_interlock);				\
	if ((flags) & LK_SPIN)						\
		splx(s);						\
} while (/*CONSTCOND*/ 0)

#ifdef DDB /* { */
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
int	simple_lock_debugger = 1;	/* more serious on MP */
#else
int	simple_lock_debugger = 0;
#endif
#define	SLOCK_DEBUGGER()	if (simple_lock_debugger && db_onpanic) Debugger()
#define	SLOCK_TRACE()							\
	db_stack_trace_print((db_expr_t)__builtin_frame_address(0),	\
	    true, 65535, "", lock_printf);
#else
#define	SLOCK_DEBUGGER()	/* nothing */
#define	SLOCK_TRACE()		/* nothing */
#endif /* } */

#if defined(LOCKDEBUG)
#if defined(DDB)
#define	SPINLOCK_SPINCHECK_DEBUGGER	if (db_onpanic) Debugger()
#else
#define	SPINLOCK_SPINCHECK_DEBUGGER	/* nothing */
#endif

#define	SPINLOCK_SPINCHECK_DECL						\
	/* 32-bits of count -- wrap constitutes a "spinout" */		\
	uint32_t __spinc = 0

#define	SPINLOCK_SPINCHECK						\
do {									\
	if (++__spinc == 0) {						\
		lock_printf("LK_SPIN spinout, excl %d, share %d\n",	\
		    lkp->lk_exclusivecount, lkp->lk_sharecount);	\
		if (lkp->lk_exclusivecount)				\
			lock_printf("held by CPU %lu\n",		\
			    (u_long) lkp->lk_cpu);			\
		if (lkp->lk_lock_file)					\
			lock_printf("last locked at %s:%d\n",		\
			    lkp->lk_lock_file, lkp->lk_lock_line);	\
		if (lkp->lk_unlock_file)				\
			lock_printf("last unlocked at %s:%d\n",		\
			    lkp->lk_unlock_file, lkp->lk_unlock_line);	\
		SLOCK_TRACE();						\
		SPINLOCK_SPINCHECK_DEBUGGER;				\
	}								\
} while (/*CONSTCOND*/ 0)
#else
#define	SPINLOCK_SPINCHECK_DECL		/* nothing */
#define	SPINLOCK_SPINCHECK		/* nothing */
#endif /* LOCKDEBUG */

#define	RETURN_ADDRESS	((uintptr_t)__builtin_return_address(0))

/*
 * Acquire a resource.
 */
static int
acquire(volatile struct lock **lkpp, int *s, int extflags,
    int drain, int wanted, uintptr_t ra)
{
	int error;
	volatile struct lock *lkp = *lkpp;
	LOCKSTAT_TIMER(slptime);
	LOCKSTAT_FLAG(lsflag);

	KASSERT(drain || (wanted & LK_WAIT_NONZERO) == 0);

	if (extflags & LK_SPIN) {
		int interlocked;

		SPINLOCK_SPINCHECK_DECL;

		if (!drain) {
			lkp->lk_waitcount++;
			lkp->lk_flags |= LK_WAIT_NONZERO;
		}
		for (interlocked = 1;;) {
			SPINLOCK_SPINCHECK;
			if ((lkp->lk_flags & wanted) != 0) {
				if (interlocked) {
					INTERLOCK_RELEASE(lkp, LK_SPIN, *s);
					interlocked = 0;
				}
				SPINLOCK_SPIN_HOOK;
			} else if (interlocked) {
				break;
			} else {
				INTERLOCK_ACQUIRE(lkp, LK_SPIN, *s);
				interlocked = 1;
			}
		}
		if (!drain) {
			lkp->lk_waitcount--;
			if (lkp->lk_waitcount == 0)
				lkp->lk_flags &= ~LK_WAIT_NONZERO;
		}
		KASSERT((lkp->lk_flags & wanted) == 0);
		error = 0;	/* sanity */
	} else {
		LOCKSTAT_ENTER(lsflag);

		for (error = 0; (lkp->lk_flags & wanted) != 0; ) {
			if (drain)
				lkp->lk_flags |= LK_WAITDRAIN;
			else {
				lkp->lk_waitcount++;
				lkp->lk_flags |= LK_WAIT_NONZERO;
			}
			/* XXX Cast away volatile. */
			LOCKSTAT_START_TIMER(lsflag, slptime);
			error = ltsleep(drain ?
			    (volatile const void *)&lkp->lk_flags :
			    (volatile const void *)lkp, lkp->lk_prio,
			    lkp->lk_wmesg, lkp->lk_timo, &lkp->lk_interlock);
			LOCKSTAT_STOP_TIMER(lsflag, slptime);
			LOCKSTAT_EVENT_RA(lsflag, (void *)(uintptr_t)lkp,
			    LB_LOCKMGR | LB_SLEEP1, 1, slptime, ra);
			if (!drain) {
				lkp->lk_waitcount--;
				if (lkp->lk_waitcount == 0)
					lkp->lk_flags &= ~LK_WAIT_NONZERO;
			}
			if (error)
				break;
			if (extflags & LK_SLEEPFAIL) {
				error = ENOLCK;
				break;
			}
			if (lkp->lk_newlock != NULL) {
				simple_lock(&lkp->lk_newlock->lk_interlock);
				simple_unlock(&lkp->lk_interlock);
				if (lkp->lk_waitcount == 0)
					wakeup(&lkp->lk_newlock);
				*lkpp = lkp = lkp->lk_newlock;
			}
		}

		LOCKSTAT_EXIT(lsflag);
	}

	return error;
}

#define	SETHOLDER(lkp, pid, lid, cpu_id)				\
do {									\
	if ((lkp)->lk_flags & LK_SPIN)					\
		(lkp)->lk_cpu = cpu_id;					\
	else {								\
		(lkp)->lk_lockholder = pid;				\
		(lkp)->lk_locklwp = lid;				\
	}								\
} while (/*CONSTCOND*/0)

#define	WEHOLDIT(lkp, pid, lid, cpu_id)					\
	(((lkp)->lk_flags & LK_SPIN) != 0 ?				\
	 ((lkp)->lk_cpu == (cpu_id)) :					\
	 ((lkp)->lk_lockholder == (pid) && (lkp)->lk_locklwp == (lid)))

#define	WAKEUP_WAITER(lkp)						\
do {									\
	if (((lkp)->lk_flags & (LK_SPIN | LK_WAIT_NONZERO)) ==		\
	    LK_WAIT_NONZERO) {						\
		wakeup((lkp));						\
	}								\
} while (/*CONSTCOND*/0)

#if defined(LOCKDEBUG) /* { */
#if defined(MULTIPROCESSOR) /* { */
struct simplelock spinlock_list_slock = SIMPLELOCK_INITIALIZER;

#define	SPINLOCK_LIST_LOCK()						\
	__cpu_simple_lock(&spinlock_list_slock.lock_data)

#define	SPINLOCK_LIST_UNLOCK()						\
	__cpu_simple_unlock(&spinlock_list_slock.lock_data)
#else
#define	SPINLOCK_LIST_LOCK()	/* nothing */

#define	SPINLOCK_LIST_UNLOCK()	/* nothing */
#endif /* MULTIPROCESSOR */ /* } */

_TAILQ_HEAD(, struct lock, volatile) spinlock_list =
    TAILQ_HEAD_INITIALIZER(spinlock_list);

#define	HAVEIT(lkp)							\
do {									\
	if ((lkp)->lk_flags & LK_SPIN) {				\
		int sp = splhigh();					\
		SPINLOCK_LIST_LOCK();					\
		TAILQ_INSERT_TAIL(&spinlock_list, (lkp), lk_list);	\
		SPINLOCK_LIST_UNLOCK();					\
		splx(sp);						\
	}								\
} while (/*CONSTCOND*/0)

#define	DONTHAVEIT(lkp)							\
do {									\
	if ((lkp)->lk_flags & LK_SPIN) {				\
		int sp = splhigh();					\
		SPINLOCK_LIST_LOCK();					\
		TAILQ_REMOVE(&spinlock_list, (lkp), lk_list);		\
		SPINLOCK_LIST_UNLOCK();					\
		splx(sp);						\
	}								\
} while (/*CONSTCOND*/0)
#else
#define	HAVEIT(lkp)		/* nothing */

#define	DONTHAVEIT(lkp)		/* nothing */
#endif /* LOCKDEBUG */ /* } */

#if defined(LOCKDEBUG)
/*
 * Lock debug printing routine; can be configured to print to console
 * or log to syslog.
 */
void
lock_printf(const char *fmt, ...)
{
	char b[150];
	va_list ap;

	va_start(ap, fmt);
	if (lock_debug_syslog)
		vlog(LOG_DEBUG, fmt, ap);
	else {
		vsnprintf(b, sizeof(b), fmt, ap);
		printf_nolog("%s", b);
	}
	va_end(ap);
}
#endif /* LOCKDEBUG */

static void
lockpanic(volatile struct lock *lkp, const char *fmt, ...)
{
	char s[150], b[150];
#ifdef LOCKDEBUG
	static const char *locktype[] = {
		"*0*", "shared", "exclusive", "upgrade", "exclupgrade",
		"downgrade", "release", "drain", "exclother", "*9*",
		"*10*", "*11*", "*12*", "*13*", "*14*", "*15*"
	};
#endif
	va_list ap;
	va_start(ap, fmt);
	vsnprintf(s, sizeof(s), fmt, ap);
	va_end(ap);
	bitmask_snprintf(lkp->lk_flags, __LK_FLAG_BITS, b, sizeof(b));
	panic("%s ("
#ifdef LOCKDEBUG
	    "type %s "
#endif
	    "flags %s, sharecount %d, exclusivecount %d, "
	    "recurselevel %d, waitcount %d, wmesg %s"
#ifdef LOCKDEBUG
	    ", lock_file %s, unlock_file %s, lock_line %d, unlock_line %d"
#endif
	    ")\n",
	    s,
#ifdef LOCKDEBUG
	    locktype[lkp->lk_flags & LK_TYPE_MASK],
#endif
	    b, lkp->lk_sharecount, lkp->lk_exclusivecount,
	    lkp->lk_recurselevel, lkp->lk_waitcount, lkp->lk_wmesg
#ifdef LOCKDEBUG
	    , lkp->lk_lock_file, lkp->lk_unlock_file, lkp->lk_lock_line,
	    lkp->lk_unlock_line
#endif
	    );
}

/*
 * Transfer any waiting processes from one lock to another.
 */
void
transferlockers(struct lock *from, struct lock *to)
{

	KASSERT(from != to);
	KASSERT((from->lk_flags & LK_WAITDRAIN) == 0);
	if (from->lk_waitcount == 0)
		return;
	from->lk_newlock = to;
	wakeup((void *)from);
	tsleep((void *)&from->lk_newlock, from->lk_prio, "lkxfer", 0);
	from->lk_newlock = NULL;
	from->lk_flags &= ~(LK_WANT_EXCL | LK_WANT_UPGRADE);
	KASSERT(from->lk_waitcount == 0);
}
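
/*
 * A minimal usage sketch (not taken from a real caller; `oldlk' and
 * `newlk' are hypothetical locks owned by the caller): when one lock
 * structure is being replaced by another, any sleepers on the old lock
 * can be moved to the new one before the old lock is drained and freed.
 *
 *	struct lock oldlk, newlk;
 *
 *	lockinit(&newlk, PVFS, "newlk", 0, 0);
 *	transferlockers(&oldlk, &newlk);
 *	... from here on, waiters sleep on (and are woken via) &newlk ...
 */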

/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, pri_t prio, const char *wmesg, int timo, int flags)
{

	memset(lkp, 0, sizeof(struct lock));
	simple_lock_init(&lkp->lk_interlock);
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	if (flags & LK_SPIN)
		lkp->lk_cpu = LK_NOCPU;
	else {
		lkp->lk_lockholder = LK_NOPROC;
		lkp->lk_newlock = NULL;
		lkp->lk_prio = prio;
		lkp->lk_timo = timo;
	}
	lkp->lk_wmesg = wmesg;	/* just a name for spin locks */
#if defined(LOCKDEBUG)
	lkp->lk_lock_file = NULL;
	lkp->lk_unlock_file = NULL;
#endif
}
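
/*
 * A minimal usage sketch for a sleep lock (no interlock, error handling
 * elided; `exlock' is a hypothetical name, PVFS a typical sleep priority):
 *
 *	struct lock exlock;
 *	int error;
 *
 *	lockinit(&exlock, PVFS, "exlock", 0, 0);
 *
 *	error = lockmgr(&exlock, LK_EXCLUSIVE, NULL);
 *	if (error == 0) {
 *		... exclusive access to the protected object ...
 *		(void) lockmgr(&exlock, LK_RELEASE, NULL);
 *	}
 */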

/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp)
{
	int s = 0;	/* XXX: gcc */
	int lock_type = 0;
	struct lwp *l = curlwp;	/* XXX */
	pid_t pid;
	lwpid_t lid;
	cpuid_t cpu_num;

	if ((lkp->lk_flags & LK_SPIN) || l == NULL) {
		cpu_num = cpu_number();
		pid = LK_KERNPROC;
		lid = 0;
	} else {
		cpu_num = LK_NOCPU;
		pid = l->l_proc->p_pid;
		lid = l->l_lid;
	}

	INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
	if (lkp->lk_exclusivecount != 0) {
		if (WEHOLDIT(lkp, pid, lid, cpu_num))
			lock_type = LK_EXCLUSIVE;
		else
			lock_type = LK_EXCLOTHER;
	} else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	else if (lkp->lk_flags & (LK_WANT_EXCL | LK_WANT_UPGRADE))
		lock_type = LK_EXCLOTHER;
	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
	return (lock_type);
}
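
/*
 * A typical (sketched) use is a "caller must hold this exclusively"
 * check in diagnostic code; `exlock' is the hypothetical lock from
 * the sketch above:
 *
 *	KASSERT(lockstatus(&exlock) == LK_EXCLUSIVE);
 */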

#if defined(LOCKDEBUG)
/*
 * Make sure no spin locks are held by a CPU that is about
 * to context switch.
 */
void
spinlock_switchcheck(void)
{
	u_long cnt;
	int s;

	if (panicstr != NULL)
		return;

	s = splhigh();
#if defined(MULTIPROCESSOR)
	cnt = curcpu()->ci_spin_locks;
#else
	cnt = spin_locks;
#endif
	splx(s);

	if (cnt != 0)
		panic("spinlock_switchcheck: CPU %lu has %lu spin locks",
		    (u_long) cpu_number(), cnt);
}
#endif /* LOCKDEBUG */

/*
 * Locks and IPLs (interrupt priority levels):
 *
 * Locks which may be taken from interrupt context must be handled
 * very carefully; you must spl to the highest IPL where the lock
 * is needed before acquiring the lock.
 *
 * It is also important to avoid deadlock, since certain (very high
 * priority) interrupts are often needed to keep the system as a whole
 * from deadlocking, and must not be blocked while you are spinning
 * waiting for a lower-priority lock.
 *
 * In addition, the lock-debugging hooks themselves need to use locks!
 *
 * A raw __cpu_simple_lock may be used from interrupts as long as it
 * is acquired and held at a single IPL.
 */
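
/*
 * A sketch of the pattern described above, for a hypothetical lock
 * `foo_slock' shared between thread and interrupt context at IPL_BIO
 * (the interrupt is blocked first, then the lock is taken):
 *
 *	static __cpu_simple_lock_t foo_slock;
 *
 *	void
 *	foo_touch(void)
 *	{
 *		int s;
 *
 *		s = splbio();
 *		__cpu_simple_lock(&foo_slock);
 *		... manipulate state shared with the interrupt handler ...
 *		__cpu_simple_unlock(&foo_slock);
 *		splx(s);
 *	}
 */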

/*
 * XXX XXX kludge around another kludge..
 *
 * vfs_shutdown() may be called from interrupt context, either as a result
 * of a panic, or from the debugger.  It proceeds to call
 * sys_sync(&proc0, ...), pretending it's running on behalf of proc0.
 *
 * We would like to make an attempt to sync the filesystems in this case, so
 * if this happens, we treat attempts to acquire locks specially.
 * All locks are acquired on behalf of proc0.
 *
 * If we've already panicked, we don't block waiting for locks, but
 * just barge right ahead since we're already going down in flames.
 */

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
#if defined(LOCKDEBUG)
_lockmgr(volatile struct lock *lkp, u_int flags,
    struct simplelock *interlkp, const char *file, int line)
#else
lockmgr(volatile struct lock *lkp, u_int flags,
    struct simplelock *interlkp)
#endif
{
	int error;
	pid_t pid;
	lwpid_t lid;
	int extflags;
	cpuid_t cpu_num;
	struct lwp *l = curlwp;
	int lock_shutdown_noblock = 0;
	int s = 0;

	error = 0;

	/* LK_RETRY is for vn_lock, not for lockmgr. */
	KASSERT((flags & LK_RETRY) == 0);

	INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
	if (flags & LK_INTERLOCK)
		simple_unlock(interlkp);
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

#ifdef DIAGNOSTIC /* { */
	/*
	 * Don't allow spins on sleep locks and don't allow sleeps
	 * on spin locks.
	 */
	if ((flags ^ lkp->lk_flags) & LK_SPIN)
		lockpanic(lkp, "lockmgr: sleep/spin mismatch");
#endif /* } */

	if (extflags & LK_SPIN) {
		pid = LK_KERNPROC;
		lid = 0;
	} else {
		if (l == NULL) {
			if (!doing_shutdown) {
				panic("lockmgr: no context");
			} else {
				l = &lwp0;
				if (panicstr && (!(flags & LK_NOWAIT))) {
					flags |= LK_NOWAIT;
					lock_shutdown_noblock = 1;
				}
			}
		}
		lid = l->l_lid;
		pid = l->l_proc->p_pid;
	}
	cpu_num = cpu_number();

	/*
	 * Once a lock has drained, the LK_DRAINING flag is set and an
	 * exclusive lock is returned. The only valid operation thereafter
	 * is a single release of that exclusive lock. This final release
	 * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
	 * further requests of any sort will result in a panic. The bits
	 * selected for these two flags are chosen so that they will be set
	 * in memory that is freed (freed memory is filled with 0xdeadbeef).
	 * The final release is permitted to give a new lease on life to
	 * the lock by specifying LK_REENABLE.
	 */
	if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
#ifdef DIAGNOSTIC /* { */
		if (lkp->lk_flags & LK_DRAINED)
			lockpanic(lkp, "lockmgr: using decommissioned lock");
		if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
		    WEHOLDIT(lkp, pid, lid, cpu_num) == 0)
			lockpanic(lkp, "lockmgr: non-release on draining lock: %d",
			    flags & LK_TYPE_MASK);
#endif /* DIAGNOSTIC */ /* } */
		lkp->lk_flags &= ~LK_DRAINING;
		if ((flags & LK_REENABLE) == 0)
			lkp->lk_flags |= LK_DRAINED;
	}

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (WEHOLDIT(lkp, pid, lid, cpu_num) == 0) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
				error = EBUSY;
				break;
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			error = acquire(&lkp, &s, extflags, 0,
			    LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE,
			    RETURN_ADDRESS);
			if (error)
				break;
			lkp->lk_sharecount++;
			lkp->lk_flags |= LK_SHARE_NONZERO;
			COUNT(lkp, l, cpu_num, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;
		lkp->lk_flags |= LK_SHARE_NONZERO;
		COUNT(lkp, l, cpu_num, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (WEHOLDIT(lkp, pid, lid, cpu_num) == 0 ||
		    lkp->lk_exclusivecount == 0)
			lockpanic(lkp, "lockmgr: not holding exclusive lock");
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_flags |= LK_SHARE_NONZERO;
		lkp->lk_exclusivecount = 0;
		lkp->lk_recurselevel = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
#if defined(LOCKDEBUG)
		lkp->lk_unlock_file = file;
		lkp->lk_unlock_line = line;
#endif
		DONTHAVEIT(lkp);
		WAKEUP_WAITER(lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			lkp->lk_sharecount--;
			if (lkp->lk_sharecount == 0)
				lkp->lk_flags &= ~LK_SHARE_NONZERO;
			COUNT(lkp, l, cpu_num, -1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the file
		 * will always be unlocked.
		 */
		if (WEHOLDIT(lkp, pid, lid, cpu_num) || lkp->lk_sharecount <= 0)
			lockpanic(lkp, "lockmgr: upgrade exclusive lock");
		lkp->lk_sharecount--;
		if (lkp->lk_sharecount == 0)
			lkp->lk_flags &= ~LK_SHARE_NONZERO;
		COUNT(lkp, l, cpu_num, -1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		     lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are first shared lock to request an upgrade, so
			 * request upgrade and wait for the shared count to
			 * drop to zero, then take exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			error = acquire(&lkp, &s, extflags, 0, LK_SHARE_NONZERO,
			    RETURN_ADDRESS);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;
			if (error) {
				WAKEUP_WAITER(lkp);
				break;
			}
			lkp->lk_flags |= LK_HAVE_EXCL;
			SETHOLDER(lkp, pid, lid, cpu_num);
#if defined(LOCKDEBUG)
			lkp->lk_lock_file = file;
			lkp->lk_lock_line = line;
#endif
			HAVEIT(lkp);
			if (lkp->lk_exclusivecount != 0)
				lockpanic(lkp, "lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
			if (extflags & LK_SETRECURSE)
				lkp->lk_recurselevel = 1;
			COUNT(lkp, l, cpu_num, 1);
			break;
		}
		/*
		 * Someone else has requested upgrade. Release our shared
		 * lock, awaken upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if (lkp->lk_sharecount == 0)
			WAKEUP_WAITER(lkp);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (WEHOLDIT(lkp, pid, lid, cpu_num)) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0 &&
			    lkp->lk_recurselevel == 0) {
				if (extflags & LK_RECURSEFAIL) {
					error = EDEADLK;
					break;
				} else
					lockpanic(lkp, "lockmgr: locking against myself");
			}
			lkp->lk_exclusivecount++;
			if (extflags & LK_SETRECURSE &&
			    lkp->lk_recurselevel == 0)
				lkp->lk_recurselevel = lkp->lk_exclusivecount;
			COUNT(lkp, l, cpu_num, 1);
			break;
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
		     LK_SHARE_NONZERO))) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		error = acquire(&lkp, &s, extflags, 0,
		    LK_HAVE_EXCL | LK_WANT_EXCL, RETURN_ADDRESS);
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		error = acquire(&lkp, &s, extflags, 0,
		    LK_HAVE_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO,
		    RETURN_ADDRESS);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error) {
			WAKEUP_WAITER(lkp);
			break;
		}
		lkp->lk_flags |= LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, lid, cpu_num);
#if defined(LOCKDEBUG)
		lkp->lk_lock_file = file;
		lkp->lk_lock_line = line;
#endif
		HAVEIT(lkp);
		if (lkp->lk_exclusivecount != 0)
			lockpanic(lkp, "lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		if (extflags & LK_SETRECURSE)
			lkp->lk_recurselevel = 1;
		COUNT(lkp, l, cpu_num, 1);
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (WEHOLDIT(lkp, pid, lid, cpu_num) == 0) {
				if (lkp->lk_flags & LK_SPIN) {
					lockpanic(lkp,
					    "lockmgr: processor %lu, not "
					    "exclusive lock holder %lu "
					    "unlocking", cpu_num, lkp->lk_cpu);
				} else {
					lockpanic(lkp, "lockmgr: pid %d.%d, not "
					    "exclusive lock holder %d.%d "
					    "unlocking", pid, lid,
					    lkp->lk_lockholder,
					    lkp->lk_locklwp);
				}
			}
			if (lkp->lk_exclusivecount == lkp->lk_recurselevel)
				lkp->lk_recurselevel = 0;
			lkp->lk_exclusivecount--;
			COUNT(lkp, l, cpu_num, -1);
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
#if defined(LOCKDEBUG)
				lkp->lk_unlock_file = file;
				lkp->lk_unlock_line = line;
#endif
				DONTHAVEIT(lkp);
			}
		} else if (lkp->lk_sharecount != 0) {
			lkp->lk_sharecount--;
			if (lkp->lk_sharecount == 0)
				lkp->lk_flags &= ~LK_SHARE_NONZERO;
			COUNT(lkp, l, cpu_num, -1);
		}
#ifdef DIAGNOSTIC
		else
			lockpanic(lkp, "lockmgr: release of unlocked lock!");
#endif
		WAKEUP_WAITER(lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do. Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (WEHOLDIT(lkp, pid, lid, cpu_num))
			lockpanic(lkp, "lockmgr: draining against myself");
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
		     LK_SHARE_NONZERO | LK_WAIT_NONZERO))) {
			error = EBUSY;
			break;
		}
		error = acquire(&lkp, &s, extflags, 1,
		    LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
		    LK_SHARE_NONZERO | LK_WAIT_NONZERO,
		    RETURN_ADDRESS);
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		if ((extflags & LK_RESURRECT) == 0)
			lkp->lk_flags |= LK_DRAINING;
		SETHOLDER(lkp, pid, lid, cpu_num);
#if defined(LOCKDEBUG)
		lkp->lk_lock_file = file;
		lkp->lk_lock_line = line;
#endif
		HAVEIT(lkp);
		lkp->lk_exclusivecount = 1;
		/* XXX unlikely that we'd want this */
		if (extflags & LK_SETRECURSE)
			lkp->lk_recurselevel = 1;
		COUNT(lkp, l, cpu_num, 1);
		break;

	default:
		INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
		lockpanic(lkp, "lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & (LK_WAITDRAIN|LK_SPIN)) == LK_WAITDRAIN &&
	    ((lkp->lk_flags &
	      (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
	       LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0)) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup(&lkp->lk_flags);
	}
	/*
	 * Note that this panic will be a recursive panic, since
	 * we only set lock_shutdown_noblock above if panicstr != NULL.
	 */
	if (error && lock_shutdown_noblock)
		lockpanic(lkp, "lockmgr: deadlock (see previous panic)");

	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
	return (error);
}
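
/*
 * A sketch of the shared/upgrade/downgrade transitions handled above
 * (`shlock' is a hypothetical sleep lock; note that when LK_UPGRADE
 * fails, the shared hold is already gone, per the LK_UPGRADE comment):
 *
 *	(void) lockmgr(&shlock, LK_SHARED, NULL);
 *	...
 *	if (lockmgr(&shlock, LK_UPGRADE, NULL) == 0) {
 *		... exclusive access ...
 *		(void) lockmgr(&shlock, LK_DOWNGRADE, NULL);
 *		(void) lockmgr(&shlock, LK_RELEASE, NULL);
 *	}
 */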

/*
 * For a recursive spinlock held one or more times by the current CPU,
 * release all N locks, and return N.
 * Intended for use in mi_switch() shortly before context switching.
 */

int
#if defined(LOCKDEBUG)
_spinlock_release_all(volatile struct lock *lkp, const char *file, int line)
#else
spinlock_release_all(volatile struct lock *lkp)
#endif
{
	int s, count;
	cpuid_t cpu_num;

	KASSERT(lkp->lk_flags & LK_SPIN);

	INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);

	cpu_num = cpu_number();
	count = lkp->lk_exclusivecount;

	if (count != 0) {
#ifdef DIAGNOSTIC
		if (WEHOLDIT(lkp, 0, 0, cpu_num) == 0) {
			lockpanic(lkp, "spinlock_release_all: processor %lu, not "
			    "exclusive lock holder %lu "
			    "unlocking", (long)cpu_num, lkp->lk_cpu);
		}
#endif
		lkp->lk_recurselevel = 0;
		lkp->lk_exclusivecount = 0;
		COUNT_CPU(cpu_num, -count);
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
#if defined(LOCKDEBUG)
		lkp->lk_unlock_file = file;
		lkp->lk_unlock_line = line;
#endif
		DONTHAVEIT(lkp);
	}
#ifdef DIAGNOSTIC
	else if (lkp->lk_sharecount != 0)
		lockpanic(lkp, "spinlock_release_all: release of shared lock!");
	else
		lockpanic(lkp, "spinlock_release_all: release of unlocked lock!");
#endif
	INTERLOCK_RELEASE(lkp, LK_SPIN, s);

	return (count);
}

/*
 * For a recursive spinlock that was released with spinlock_release_all(),
 * re-acquire it on the current CPU to a depth of 'count' holds.
 * Intended for use in mi_switch() right after resuming execution.
 */

void
#if defined(LOCKDEBUG)
_spinlock_acquire_count(volatile struct lock *lkp, int count,
    const char *file, int line)
#else
spinlock_acquire_count(volatile struct lock *lkp, int count)
#endif
{
	int s, error;
	cpuid_t cpu_num;

	KASSERT(lkp->lk_flags & LK_SPIN);

	INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);

	cpu_num = cpu_number();

#ifdef DIAGNOSTIC
	if (WEHOLDIT(lkp, LK_NOPROC, 0, cpu_num))
		lockpanic(lkp, "spinlock_acquire_count: processor %lu "
		    "already holds lock", (long)cpu_num);
#endif
	/*
	 * Try to acquire the want_exclusive flag.
	 */
	error = acquire(&lkp, &s, LK_SPIN, 0, LK_HAVE_EXCL | LK_WANT_EXCL,
	    RETURN_ADDRESS);
	lkp->lk_flags |= LK_WANT_EXCL;
	/*
	 * Wait for shared locks and upgrades to finish.
	 */
	error = acquire(&lkp, &s, LK_SPIN, 0,
	    LK_HAVE_EXCL | LK_SHARE_NONZERO | LK_WANT_UPGRADE,
	    RETURN_ADDRESS);
	lkp->lk_flags &= ~LK_WANT_EXCL;
	lkp->lk_flags |= LK_HAVE_EXCL;
	SETHOLDER(lkp, LK_NOPROC, 0, cpu_num);
#if defined(LOCKDEBUG)
	lkp->lk_lock_file = file;
	lkp->lk_lock_line = line;
#endif
	HAVEIT(lkp);
	if (lkp->lk_exclusivecount != 0)
		lockpanic(lkp, "lockmgr: non-zero exclusive count");
	lkp->lk_exclusivecount = count;
	lkp->lk_recurselevel = 1;
	COUNT_CPU(cpu_num, count);

	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
}
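
/*
 * The two routines above pair up across a context switch; a sketch of
 * the intended (mi_switch-style) usage, assuming `lkp' is a recursive
 * spin lock currently held by this CPU:
 *
 *	int count;
 *
 *	count = spinlock_release_all(lkp);
 *	... block, run another LWP, and eventually resume here ...
 *	spinlock_acquire_count(lkp, count);
 */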

/*
 * Print out information about the state of a lock.  Used by VOP_PRINT
 * routines to display status of contained locks.
 */
void
lockmgr_printinfo(volatile struct lock *lkp)
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL) {
		printf(" lock type %s: EXCL (count %d) by ",
		    lkp->lk_wmesg, lkp->lk_exclusivecount);
		if (lkp->lk_flags & LK_SPIN)
			printf("processor %lu", lkp->lk_cpu);
		else
			printf("pid %d.%d", lkp->lk_lockholder,
			    lkp->lk_locklwp);
	} else
		printf(" not locked");
	if ((lkp->lk_flags & LK_SPIN) == 0 && lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}

#if defined(LOCKDEBUG) /* { */
_TAILQ_HEAD(, struct simplelock, volatile) simplelock_list =
    TAILQ_HEAD_INITIALIZER(simplelock_list);

#if defined(MULTIPROCESSOR) /* { */
struct simplelock simplelock_list_slock = SIMPLELOCK_INITIALIZER;

#define	SLOCK_LIST_LOCK()						\
	__cpu_simple_lock(&simplelock_list_slock.lock_data)

#define	SLOCK_LIST_UNLOCK()						\
	__cpu_simple_unlock(&simplelock_list_slock.lock_data)

#define	SLOCK_COUNT(x)							\
	curcpu()->ci_simple_locks += (x)
#else
u_long simple_locks;

#define	SLOCK_LIST_LOCK()	/* nothing */

#define	SLOCK_LIST_UNLOCK()	/* nothing */

#define	SLOCK_COUNT(x)		simple_locks += (x)
#endif /* MULTIPROCESSOR */ /* } */

#ifdef MULTIPROCESSOR
#define	SLOCK_MP()		lock_printf("on CPU %ld\n",		\
				    (u_long) cpu_number())
#else
#define	SLOCK_MP()		/* nothing */
#endif

#define	SLOCK_WHERE(str, alp, id, l)					\
do {									\
	lock_printf("\n");						\
	lock_printf(str);						\
	lock_printf("lock: %p, currently at: %s:%d\n", (alp), (id), (l)); \
	SLOCK_MP();							\
	if ((alp)->lock_file != NULL)					\
		lock_printf("last locked: %s:%d\n", (alp)->lock_file,	\
		    (alp)->lock_line);					\
	if ((alp)->unlock_file != NULL)					\
		lock_printf("last unlocked: %s:%d\n", (alp)->unlock_file, \
		    (alp)->unlock_line);				\
	SLOCK_TRACE()							\
	SLOCK_DEBUGGER();						\
} while (/*CONSTCOND*/0)

/*
 * Simple lock functions so that the debugger can see from whence
 * they are being called.
 */
void
simple_lock_init(volatile struct simplelock *alp)
{

#if defined(MULTIPROCESSOR) /* { */
	__cpu_simple_lock_init(&alp->lock_data);
#else
	__cpu_simple_lock_clear(&alp->lock_data);
#endif /* } */
	alp->lock_file = NULL;
	alp->lock_line = 0;
	alp->unlock_file = NULL;
	alp->unlock_line = 0;
	alp->lock_holder = LK_NOCPU;
}

void
_simple_lock(volatile struct simplelock *alp, const char *id, int l)
{
	cpuid_t cpu_num = cpu_number();
	int s;

	s = splhigh();

	/*
	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
	 * don't take any action, and just fall into the normal spin case.
	 */
	if (__SIMPLELOCK_LOCKED_P(&alp->lock_data)) {
#if defined(MULTIPROCESSOR) /* { */
		if (alp->lock_holder == cpu_num) {
			SLOCK_WHERE("simple_lock: locking against myself\n",
			    alp, id, l);
			goto out;
		}
#else
		SLOCK_WHERE("simple_lock: lock held\n", alp, id, l);
		goto out;
#endif /* MULTIPROCESSOR */ /* } */
	}

#if defined(MULTIPROCESSOR) /* { */
	/* Acquire the lock before modifying any fields. */
	splx(s);
	__cpu_simple_lock(&alp->lock_data);
	s = splhigh();
#else
	__cpu_simple_lock_set(&alp->lock_data);
#endif /* } */

	if (alp->lock_holder != LK_NOCPU) {
		SLOCK_WHERE("simple_lock: uninitialized lock\n",
		    alp, id, l);
	}
	alp->lock_file = id;
	alp->lock_line = l;
	alp->lock_holder = cpu_num;

	SLOCK_LIST_LOCK();
	TAILQ_INSERT_TAIL(&simplelock_list, alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(1);

 out:
	splx(s);
}

int
_simple_lock_held(volatile struct simplelock *alp)
{
#if defined(MULTIPROCESSOR) || defined(DIAGNOSTIC)
	cpuid_t cpu_num = cpu_number();
#endif
	int s, locked = 0;

	s = splhigh();

#if defined(MULTIPROCESSOR)
	if (__cpu_simple_lock_try(&alp->lock_data) == 0)
		locked = (alp->lock_holder == cpu_num);
	else
		__cpu_simple_unlock(&alp->lock_data);
#else
	if (__SIMPLELOCK_LOCKED_P(&alp->lock_data)) {
		locked = 1;
		KASSERT(alp->lock_holder == cpu_num);
	}
#endif

	splx(s);

	return (locked);
}

int
_simple_lock_try(volatile struct simplelock *alp, const char *id, int l)
{
	cpuid_t cpu_num = cpu_number();
	int s, rv = 0;

	s = splhigh();

	/*
	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
	 * don't take any action.
	 */
#if defined(MULTIPROCESSOR) /* { */
	if ((rv = __cpu_simple_lock_try(&alp->lock_data)) == 0) {
		if (alp->lock_holder == cpu_num)
			SLOCK_WHERE("simple_lock_try: locking against myself\n",
			    alp, id, l);
		goto out;
	}
#else
	if (__SIMPLELOCK_LOCKED_P(&alp->lock_data)) {
		SLOCK_WHERE("simple_lock_try: lock held\n", alp, id, l);
		goto out;
	}
	__cpu_simple_lock_set(&alp->lock_data);
#endif /* MULTIPROCESSOR */ /* } */

	/*
	 * At this point, we have acquired the lock.
	 */

	rv = 1;

	alp->lock_file = id;
	alp->lock_line = l;
	alp->lock_holder = cpu_num;

	SLOCK_LIST_LOCK();
	TAILQ_INSERT_TAIL(&simplelock_list, alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(1);

 out:
	splx(s);
	return (rv);
}

void
_simple_unlock(volatile struct simplelock *alp, const char *id, int l)
{
	int s;

	s = splhigh();

	/*
	 * MULTIPROCESSOR case: This is `safe' because we think we hold
	 * the lock, and if we don't, we don't take any action.
	 */
	if (__SIMPLELOCK_UNLOCKED_P(&alp->lock_data)) {
		SLOCK_WHERE("simple_unlock: lock not held\n",
		    alp, id, l);
		goto out;
	}

	SLOCK_LIST_LOCK();
	TAILQ_REMOVE(&simplelock_list, alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(-1);

	alp->list.tqe_next = NULL;	/* sanity */
	alp->list.tqe_prev = NULL;	/* sanity */

	alp->unlock_file = id;
	alp->unlock_line = l;

#if defined(MULTIPROCESSOR) /* { */
	alp->lock_holder = LK_NOCPU;
	/* Now that we've modified all fields, release the lock. */
	__cpu_simple_unlock(&alp->lock_data);
#else
	__cpu_simple_lock_clear(&alp->lock_data);
	KASSERT(alp->lock_holder == cpu_number());
	alp->lock_holder = LK_NOCPU;
#endif /* } */

 out:
	splx(s);
}
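
/*
 * A minimal usage sketch for the simple_lock family.  Callers use the
 * simple_lock(), simple_lock_try() and simple_unlock() wrappers from
 * <sys/lock.h>, which pass __FILE__/__LINE__ to the functions above
 * under LOCKDEBUG; `foo_interlock' is a hypothetical lock:
 *
 *	static struct simplelock foo_interlock;
 *
 *	simple_lock_init(&foo_interlock);
 *	...
 *	simple_lock(&foo_interlock);
 *	... short critical section ...
 *	simple_unlock(&foo_interlock);
 */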

void
simple_lock_dump(void)
{
	volatile struct simplelock *alp;
	int s;

	s = splhigh();
	SLOCK_LIST_LOCK();
	lock_printf("all simple locks:\n");
	TAILQ_FOREACH(alp, &simplelock_list, list) {
		lock_printf("%p CPU %lu %s:%d\n", alp, alp->lock_holder,
		    alp->lock_file, alp->lock_line);
	}
	SLOCK_LIST_UNLOCK();
	splx(s);
}

void
simple_lock_freecheck(void *start, void *end)
{
	volatile struct simplelock *alp;
	int s;

	s = splhigh();
	SLOCK_LIST_LOCK();
	TAILQ_FOREACH(alp, &simplelock_list, list) {
		if ((volatile void *)alp >= start &&
		    (volatile void *)alp < end) {
			lock_printf("freeing simple_lock %p CPU %lu %s:%d\n",
			    alp, alp->lock_holder, alp->lock_file,
			    alp->lock_line);
			SLOCK_DEBUGGER();
		}
	}
	SLOCK_LIST_UNLOCK();
	splx(s);
}

/*
 * We must be holding exactly one lock: the spc_lock.
 */

void
simple_lock_switchcheck(void)
{

	simple_lock_only_held(NULL, "switching");
}

/*
 * Drop into the debugger if lp isn't the only lock held.
 * lp may be NULL.
 */
void
simple_lock_only_held(volatile struct simplelock *lp, const char *where)
{
	volatile struct simplelock *alp;
	cpuid_t cpu_num = cpu_number();
	int s;

	if (lp) {
		LOCK_ASSERT(simple_lock_held(lp));
	}
	s = splhigh();
	SLOCK_LIST_LOCK();
	TAILQ_FOREACH(alp, &simplelock_list, list) {
		if (alp == lp)
			continue;
		if (alp->lock_holder == cpu_num)
			break;
	}
	SLOCK_LIST_UNLOCK();
	splx(s);

	if (alp != NULL) {
		lock_printf("\n%s with held simple_lock %p "
		    "CPU %lu %s:%d\n",
		    where, alp, alp->lock_holder, alp->lock_file,
		    alp->lock_line);
		SLOCK_TRACE();
		SLOCK_DEBUGGER();
	}
}

/*
 * Set to 1 by simple_lock_assert_*().
 * Can be cleared from ddb to avoid a panic.
 */
int slock_assert_will_panic;

/*
 * If the lock isn't held, print a traceback, optionally drop into the
 * debugger, then panic.
 * The panic can be avoided by clearing slock_assert_will_panic from the
 * debugger.
 */
void
_simple_lock_assert_locked(volatile struct simplelock *alp,
    const char *lockname, const char *id, int l)
{
	if (simple_lock_held(alp) == 0) {
		slock_assert_will_panic = 1;
		lock_printf("%s lock not held\n", lockname);
		SLOCK_WHERE("lock not held", alp, id, l);
		if (slock_assert_will_panic && panicstr == NULL)
			panic("%s: not locked", lockname);
	}
}

void
_simple_lock_assert_unlocked(volatile struct simplelock *alp,
    const char *lockname, const char *id, int l)
{
	if (simple_lock_held(alp)) {
		slock_assert_will_panic = 1;
		lock_printf("%s lock held\n", lockname);
		SLOCK_WHERE("lock held", alp, id, l);
		if (slock_assert_will_panic && panicstr == NULL)
			panic("%s: locked", lockname);
	}
}

void
assert_sleepable(struct simplelock *interlock, const char *msg)
{

	if (panicstr != NULL)
		return;
	if (CURCPU_IDLE_P()) {
		panic("assert_sleepable: idle");
	}
	simple_lock_only_held(interlock, msg);
}

#endif /* LOCKDEBUG */ /* } */

int kernel_lock_id;
__cpu_simple_lock_t kernel_lock;

#if defined(MULTIPROCESSOR)

/*
 * Functions for manipulating the kernel_lock. We put them here
 * so that they show up in profiles.
 */

#define	_KERNEL_LOCK_ABORT(msg)						\
	LOCKDEBUG_ABORT(kernel_lock_id, &kernel_lock, &_kernel_lock_ops, \
	    __FUNCTION__, msg)

#ifdef LOCKDEBUG
#define	_KERNEL_LOCK_ASSERT(cond)					\
do {									\
	if (!(cond))							\
		_KERNEL_LOCK_ABORT("assertion failed: " #cond);		\
} while (/* CONSTCOND */ 0)
#else
#define	_KERNEL_LOCK_ASSERT(cond)	/* nothing */
#endif

void	_kernel_lock_dump(volatile void *);

lockops_t _kernel_lock_ops = {
	"Kernel lock",
	0,
	_kernel_lock_dump
};

/*
 * Initialize the kernel lock.
 */
void
_kernel_lock_init(void)
{

	__cpu_simple_lock_init(&kernel_lock);
	kernel_lock_id = LOCKDEBUG_ALLOC(&kernel_lock, &_kernel_lock_ops);
}

/*
 * Print debugging information about the kernel lock.
 */
void
_kernel_lock_dump(volatile void *junk)
{
	struct cpu_info *ci = curcpu();

	(void)junk;

	printf_nolog("curcpu holds : %18d wanted by: %#018lx\n",
	    ci->ci_biglock_count, (long)ci->ci_biglock_wanted);
}

/*
 * Acquire 'nlocks' holds on the kernel lock. If 'l' is non-null, the
 * acquisition is from process context.
 */
void
_kernel_lock(int nlocks, struct lwp *l)
{
	struct cpu_info *ci = curcpu();
	LOCKSTAT_TIMER(spintime);
	LOCKSTAT_FLAG(lsflag);
	struct lwp *owant;
#ifdef LOCKDEBUG
	u_int spins;
#endif
	int s;

	(void)l;

	if (nlocks == 0)
		return;
	_KERNEL_LOCK_ASSERT(nlocks > 0);

	s = splsched();	/* XXX splvm() */

	if (ci->ci_biglock_count != 0) {
		_KERNEL_LOCK_ASSERT(__SIMPLELOCK_LOCKED_P(&kernel_lock));
		ci->ci_biglock_count += nlocks;
		splx(s);
		return;
	}

	LOCKDEBUG_WANTLOCK(kernel_lock_id,
	    (uintptr_t)__builtin_return_address(0), 0);

	if (__cpu_simple_lock_try(&kernel_lock)) {
		ci->ci_biglock_count = nlocks;
		LOCKDEBUG_LOCKED(kernel_lock_id,
		    (uintptr_t)__builtin_return_address(0), 0);
		splx(s);
		return;
	}

	LOCKSTAT_ENTER(lsflag);
	LOCKSTAT_START_TIMER(lsflag, spintime);

	/*
	 * Before setting ci_biglock_wanted we must post a store
	 * fence (see kern_mutex.c). This is accomplished by the
	 * __cpu_simple_lock_try() above.
	 */
	owant = ci->ci_biglock_wanted;
	ci->ci_biglock_wanted = curlwp;	/* XXXAD */

#ifdef LOCKDEBUG
	spins = 0;
#endif

	do {
		while (__SIMPLELOCK_LOCKED_P(&kernel_lock)) {
#ifdef LOCKDEBUG
			if (SPINLOCK_SPINOUT(spins))
				_KERNEL_LOCK_ABORT("spinout");
#endif
			splx(s);
			SPINLOCK_SPIN_HOOK;
			(void)splsched();	/* XXX splvm() */
		}
	} while (!__cpu_simple_lock_try(&kernel_lock));

	ci->ci_biglock_wanted = owant;
	ci->ci_biglock_count += nlocks;
	LOCKSTAT_STOP_TIMER(lsflag, spintime);
	LOCKDEBUG_LOCKED(kernel_lock_id,
	    (uintptr_t)__builtin_return_address(0), 0);
	splx(s);

	/*
	 * Again, another store fence is required (see kern_mutex.c).
	 */
	mb_write();
	if (owant == NULL) {
		LOCKSTAT_EVENT(lsflag, &kernel_lock, LB_KERNEL_LOCK | LB_SPIN,
		    1, spintime);
	}
	LOCKSTAT_EXIT(lsflag);
}

/*
 * Release 'nlocks' holds on the kernel lock. If 'nlocks' is zero, release
 * all holds. If 'l' is non-null, the release is from process context.
 */
void
_kernel_unlock(int nlocks, struct lwp *l, int *countp)
{
	struct cpu_info *ci = curcpu();
	u_int olocks;
	int s;

	(void)l;

	_KERNEL_LOCK_ASSERT(nlocks < 2);

	olocks = ci->ci_biglock_count;

	if (olocks == 0) {
		_KERNEL_LOCK_ASSERT(nlocks <= 0);
		if (countp != NULL)
			*countp = 0;
		return;
	}

	_KERNEL_LOCK_ASSERT(__SIMPLELOCK_LOCKED_P(&kernel_lock));

	if (nlocks == 0)
		nlocks = olocks;
	else if (nlocks == -1) {
		nlocks = 1;
		_KERNEL_LOCK_ASSERT(olocks == 1);
	}

	s = splsched();	/* XXX splvm() */
	if ((ci->ci_biglock_count -= nlocks) == 0) {
		LOCKDEBUG_UNLOCKED(kernel_lock_id,
		    (uintptr_t)__builtin_return_address(0), 0);
		__cpu_simple_unlock(&kernel_lock);
	}
	splx(s);

	if (countp != NULL)
		*countp = olocks;
}
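
/*
 * A sketch of typical use.  Callers normally go through KERNEL_LOCK()
 * and KERNEL_UNLOCK_ONE() style wrappers (defined in <sys/lock.h>, not
 * in this file), which expand to _kernel_lock() and _kernel_unlock();
 * the wrapper names here describe the usual interface, not something
 * this file itself provides:
 *
 *	KERNEL_LOCK(1, curlwp);
 *	... code that still relies on the big lock ...
 *	KERNEL_UNLOCK_ONE(curlwp);
 */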

#if defined(DEBUG)
/*
 * Assert that the kernel lock is held.
 */
void
_kernel_lock_assert_locked(void)
{

	if (!__SIMPLELOCK_LOCKED_P(&kernel_lock) ||
	    curcpu()->ci_biglock_count == 0)
		_KERNEL_LOCK_ABORT("not locked");
}

void
_kernel_lock_assert_unlocked(void)
{

	if (curcpu()->ci_biglock_count != 0)
		_KERNEL_LOCK_ABORT("locked");
}
#endif

#endif /* MULTIPROCESSOR */