/*	$NetBSD: kern_lock.c,v 1.51.2.12 2002/10/18 02:44:52 nathanw Exp $	*/

/*-
 * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Ross Harvey.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.51.2.12 2002/10/18 02:44:52 nathanw Exp $");

#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/systm.h>
#include <machine/cpu.h>

#if defined(LOCKDEBUG)
#include <sys/syslog.h>
/*
 * Note that stdarg.h and the ANSI style va_start macro are used for both
 * ANSI and traditional C compiles.
 * XXX: this requires that stdarg.h define: va_alist and va_dcl
 */
#include <machine/stdarg.h>

void	lock_printf(const char *fmt, ...)
    __attribute__((__format__(__printf__,1,2)));

int	lock_debug_syslog = 0;	/* defaults to printf, but can be patched */

#ifdef DDB
#include <ddb/ddbvar.h>
#include <machine/db_machdep.h>
#include <ddb/db_command.h>
#include <ddb/db_interface.h>
#endif
#endif

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#if defined(LOCKDEBUG) || defined(DIAGNOSTIC) /* { */
#if defined(MULTIPROCESSOR) /* { */
#define	COUNT_CPU(cpu_id, x)						\
	curcpu()->ci_spin_locks += (x)
#else
u_long	spin_locks;
#define	COUNT_CPU(cpu_id, x)	spin_locks += (x)
#endif /* MULTIPROCESSOR */ /* } */

#define	COUNT(lkp, l, cpu_id, x)					\
do {									\
	if ((lkp)->lk_flags & LK_SPIN)					\
		COUNT_CPU((cpu_id), (x));				\
	else								\
		(l)->l_locks += (x);					\
} while (/*CONSTCOND*/0)
#else
#define COUNT(lkp, l, cpu_id, x)
#define COUNT_CPU(cpu_id, x)
#endif /* LOCKDEBUG || DIAGNOSTIC */ /* } */

#ifndef SPINLOCK_SPIN_HOOK		/* from <machine/lock.h> */
#define	SPINLOCK_SPIN_HOOK		/* nothing */
#endif

#define	INTERLOCK_ACQUIRE(lkp, flags, s)				\
do {									\
	if ((flags) & LK_SPIN)						\
		s = splsched();						\
	simple_lock(&(lkp)->lk_interlock);				\
} while (0)

#define	INTERLOCK_RELEASE(lkp, flags, s)				\
do {									\
	simple_unlock(&(lkp)->lk_interlock);				\
	if ((flags) & LK_SPIN)						\
		splx(s);						\
} while (0)

#ifdef DDB /* { */
#ifdef MULTIPROCESSOR
int simple_lock_debugger = 1;	/* more serious on MP */
#else
int simple_lock_debugger = 0;
#endif
#define	SLOCK_DEBUGGER()	if (simple_lock_debugger) Debugger()
#define	SLOCK_TRACE()							\
	db_stack_trace_print((db_expr_t)__builtin_frame_address(0),	\
	    TRUE, 65535, "", printf);
#else
#define	SLOCK_DEBUGGER()	/* nothing */
#define	SLOCK_TRACE()		/* nothing */
#endif /* } */

#if defined(LOCKDEBUG)
#if defined(DDB)
#define	SPINLOCK_SPINCHECK_DEBUGGER	Debugger()
#else
#define	SPINLOCK_SPINCHECK_DEBUGGER	/* nothing */
#endif

#define	SPINLOCK_SPINCHECK_DECL						\
	/* 32-bits of count -- wrap constitutes a "spinout" */		\
	uint32_t __spinc = 0

#define	SPINLOCK_SPINCHECK						\
do {									\
	if (++__spinc == 0) {						\
		printf("LK_SPIN spinout, excl %d, share %d\n",		\
		    lkp->lk_exclusivecount, lkp->lk_sharecount);	\
		if (lkp->lk_exclusivecount)				\
			printf("held by CPU %lu\n",			\
			    (u_long) lkp->lk_cpu);			\
		if (lkp->lk_lock_file)					\
			printf("last locked at %s:%d\n",		\
			    lkp->lk_lock_file, lkp->lk_lock_line);	\
		if (lkp->lk_unlock_file)				\
			printf("last unlocked at %s:%d\n",		\
			    lkp->lk_unlock_file, lkp->lk_unlock_line);	\
		SLOCK_TRACE();						\
		SPINLOCK_SPINCHECK_DEBUGGER;				\
	}								\
} while (0)
#else
#define	SPINLOCK_SPINCHECK_DECL			/* nothing */
#define	SPINLOCK_SPINCHECK			/* nothing */
#endif /* LOCKDEBUG */

/*
 * Acquire a resource; spin (LK_SPIN) or sleep until the `wanted'
 * condition clears.  `drain' selects the LK_DRAIN protocol, which
 * sleeps on the lock's flags word instead of on the lock itself.
 */
#define ACQUIRE(lkp, error, extflags, drain, wanted)			\
	if ((extflags) & LK_SPIN) {					\
		int interlocked;					\
		SPINLOCK_SPINCHECK_DECL;				\
									\
		if ((drain) == 0)					\
			(lkp)->lk_waitcount++;				\
		for (interlocked = 1;;) {				\
			SPINLOCK_SPINCHECK;				\
			if (wanted) {					\
				if (interlocked) {			\
					INTERLOCK_RELEASE((lkp),	\
					    LK_SPIN, s);		\
					interlocked = 0;		\
				}					\
				SPINLOCK_SPIN_HOOK;			\
			} else if (interlocked) {			\
				break;					\
			} else {					\
				INTERLOCK_ACQUIRE((lkp), LK_SPIN, s);	\
				interlocked = 1;			\
			}						\
		}							\
		if ((drain) == 0)					\
			(lkp)->lk_waitcount--;				\
		KASSERT((wanted) == 0);					\
		error = 0;	/* sanity */				\
	} else {							\
		for (error = 0; wanted; ) {				\
			if ((drain))					\
				(lkp)->lk_flags |= LK_WAITDRAIN;	\
			else						\
				(lkp)->lk_waitcount++;			\
			/* XXX Cast away volatile. */			\
			error = ltsleep((drain) ?			\
			    (void *)&(lkp)->lk_flags :			\
			    (void *)(lkp), (lkp)->lk_prio,		\
			    (lkp)->lk_wmesg, (lkp)->lk_timo,		\
			    &(lkp)->lk_interlock);			\
			if ((drain) == 0)				\
				(lkp)->lk_waitcount--;			\
			if (error)					\
				break;					\
			if ((extflags) & LK_SLEEPFAIL) {		\
				error = ENOLCK;				\
				break;					\
			}						\
		}							\
	}

#define	SETHOLDER(lkp, pid, lid, cpu_id)				\
do {									\
	if ((lkp)->lk_flags & LK_SPIN)					\
		(lkp)->lk_cpu = cpu_id;					\
	else {								\
		(lkp)->lk_lockholder = pid;				\
		(lkp)->lk_locklwp = lid;				\
	}								\
} while (/*CONSTCOND*/0)

#define	WEHOLDIT(lkp, pid, lid, cpu_id)					\
	(((lkp)->lk_flags & LK_SPIN) != 0 ?				\
	 ((lkp)->lk_cpu == (cpu_id)) :					\
	 ((lkp)->lk_lockholder == (pid) && (lkp)->lk_locklwp == (lid)))

#define	WAKEUP_WAITER(lkp)						\
do {									\
	if (((lkp)->lk_flags & LK_SPIN) == 0 && (lkp)->lk_waitcount) {	\
		/* XXX Cast away volatile. */				\
		wakeup((void *)(lkp));					\
	}								\
} while (/*CONSTCOND*/0)

#if defined(LOCKDEBUG) /* { */
#if defined(MULTIPROCESSOR) /* { */
struct simplelock spinlock_list_slock = SIMPLELOCK_INITIALIZER;

#define	SPINLOCK_LIST_LOCK()						\
	__cpu_simple_lock(&spinlock_list_slock.lock_data)

#define	SPINLOCK_LIST_UNLOCK()						\
	__cpu_simple_unlock(&spinlock_list_slock.lock_data)
#else
#define	SPINLOCK_LIST_LOCK()	/* nothing */

#define	SPINLOCK_LIST_UNLOCK()	/* nothing */
#endif /* MULTIPROCESSOR */ /* } */

TAILQ_HEAD(, lock) spinlock_list =
    TAILQ_HEAD_INITIALIZER(spinlock_list);

#define	HAVEIT(lkp)							\
do {									\
	if ((lkp)->lk_flags & LK_SPIN) {				\
		int s = spllock();					\
		SPINLOCK_LIST_LOCK();					\
		/* XXX Cast away volatile. */				\
		TAILQ_INSERT_TAIL(&spinlock_list, (struct lock *)(lkp),	\
		    lk_list);						\
		SPINLOCK_LIST_UNLOCK();					\
		splx(s);						\
	}								\
} while (/*CONSTCOND*/0)

#define	DONTHAVEIT(lkp)							\
do {									\
	if ((lkp)->lk_flags & LK_SPIN) {				\
		int s = spllock();					\
		SPINLOCK_LIST_LOCK();					\
		/* XXX Cast away volatile. */				\
		TAILQ_REMOVE(&spinlock_list, (struct lock *)(lkp),	\
		    lk_list);						\
		SPINLOCK_LIST_UNLOCK();					\
		splx(s);						\
	}								\
} while (/*CONSTCOND*/0)
#else
#define	HAVEIT(lkp)		/* nothing */

#define	DONTHAVEIT(lkp)		/* nothing */
#endif /* LOCKDEBUG */ /* } */

#if defined(LOCKDEBUG)
/*
 * Lock debug printing routine; can be configured to print to console
 * or log to syslog.
 */
void
lock_printf(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	if (lock_debug_syslog)
		vlog(LOG_DEBUG, fmt, ap);
	else
		vprintf(fmt, ap);
	va_end(ap);
}
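
/*
 * Illustrative note (the exact debugger syntax is an assumption and
 * varies by port): since lock_debug_syslog is an ordinary patchable
 * variable, output can be redirected to syslog at runtime, e.g. from
 * DDB with something like:
 *
 *	db> write lock_debug_syslog 1
 */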
#endif /* LOCKDEBUG */

/*
 * Initialize a lock; required before use.
 */
void
lockinit(struct lock *lkp, int prio, const char *wmesg, int timo, int flags)
{

	memset(lkp, 0, sizeof(struct lock));
	simple_lock_init(&lkp->lk_interlock);
	lkp->lk_flags = flags & LK_EXTFLG_MASK;
	if (flags & LK_SPIN)
		lkp->lk_cpu = LK_NOCPU;
	else {
		lkp->lk_lockholder = LK_NOPROC;
		lkp->lk_prio = prio;
		lkp->lk_timo = timo;
	}
	lkp->lk_wmesg = wmesg;	/* just a name for spin locks */
#if defined(LOCKDEBUG)
	lkp->lk_lock_file = NULL;
	lkp->lk_unlock_file = NULL;
#endif
}
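
/*
 * Usage sketch (illustrative only; `ex_sleeplock', `ex_spinlock' and
 * the priority chosen are hypothetical, not part of this file):
 *
 *	struct lock ex_sleeplock, ex_spinlock;
 *
 *	lockinit(&ex_sleeplock, PZERO, "exsleep", 0, 0);
 *	spinlockinit(&ex_spinlock, "exspin", 0);
 *
 * spinlockinit() (see <sys/lock.h>) is expected to supply LK_SPIN and
 * zero prio/timo, which, as noted above, are meaningless for spin locks.
 */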

/*
 * Determine the status of a lock.
 */
int
lockstatus(struct lock *lkp)
{
	int s, lock_type = 0;

	INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
	if (lkp->lk_exclusivecount != 0)
		lock_type = LK_EXCLUSIVE;
	else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
	return (lock_type);
}

#if defined(LOCKDEBUG) || defined(DIAGNOSTIC)
/*
 * Make sure no spin locks are held by a CPU that is about
 * to context switch.
 */
void
spinlock_switchcheck(void)
{
	u_long cnt;
	int s;

	s = spllock();
#if defined(MULTIPROCESSOR)
	cnt = curcpu()->ci_spin_locks;
#else
	cnt = spin_locks;
#endif
	splx(s);

	if (cnt != 0)
		panic("spinlock_switchcheck: CPU %lu has %lu spin locks",
		    (u_long) cpu_number(), cnt);
}
#endif /* LOCKDEBUG || DIAGNOSTIC */

/*
 * Locks and IPLs (interrupt priority levels):
 *
 * Locks which may be taken from interrupt context must be handled
 * very carefully; you must spl to the highest IPL where the lock
 * is needed before acquiring the lock.
 *
 * It is also important to avoid deadlock, since certain (very high
 * priority) interrupts are often needed to keep the system as a whole
 * from deadlocking, and must not be blocked while you are spinning
 * waiting for a lower-priority lock.
 *
 * In addition, the lock-debugging hooks themselves need to use locks!
 *
 * A raw __cpu_simple_lock may be used from interrupts as long as it
 * is acquired and held at a single IPL.
 *
 * A simple_lock (which is a __cpu_simple_lock wrapped with some
 * debugging hooks) may be used at or below spllock(), which is
 * typically at or just below splhigh() (i.e. blocks everything
 * but certain machine-dependent extremely high priority interrupts).
 *
 * spinlockmgr spinlocks should be used at or below splsched().
 *
 * Some platforms may have interrupts of higher priority than splsched(),
 * including hard serial interrupts, inter-processor interrupts, and
 * kernel debugger traps.
 */
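
/*
 * For example (an illustrative sketch; `sc_slock' and the choice of
 * splbio() stand in for whatever a real driver uses), code sharing
 * state with a block I/O interrupt handler raises the IPL before
 * taking the lock:
 *
 *	s = splbio();
 *	simple_lock(&sc->sc_slock);
 *	... manipulate the shared state ...
 *	simple_unlock(&sc->sc_slock);
 *	splx(s);
 */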

/*
 * XXX XXX kludge around another kludge..
 *
 * vfs_shutdown() may be called from interrupt context, either as a result
 * of a panic, or from the debugger.  It proceeds to call
 * sys_sync(&proc0, ...), pretending it's running on behalf of proc0.
 *
 * We would like to make an attempt to sync the filesystems in this case,
 * so if this happens, we treat attempts to acquire locks specially.
 * All locks are acquired on behalf of proc0.
 *
 * If we've already panicked, we don't block waiting for locks, but
 * just barge right ahead since we're already going down in flames.
 */

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count. Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
#if defined(LOCKDEBUG)
_lockmgr(__volatile struct lock *lkp, u_int flags,
    struct simplelock *interlkp, const char *file, int line)
#else
lockmgr(__volatile struct lock *lkp, u_int flags,
    struct simplelock *interlkp)
#endif
{
	int error;
	pid_t pid;
	lwpid_t lid;
	int extflags;
	cpuid_t cpu_id;
	struct lwp *l = curlwp;
	int lock_shutdown_noblock = 0;
	int s;

	error = 0;

	INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
	if (flags & LK_INTERLOCK)
		simple_unlock(interlkp);
	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

#ifdef DIAGNOSTIC /* { */
	/*
	 * Don't allow spins on sleep locks and don't allow sleeps
	 * on spin locks.
	 */
	if ((flags ^ lkp->lk_flags) & LK_SPIN)
		panic("lockmgr: sleep/spin mismatch");
#endif /* } */

	if (extflags & LK_SPIN) {
		pid = LK_KERNPROC;
		lid = 0;
	} else {
		if (l == NULL) {
			if (!doing_shutdown) {
				panic("lockmgr: no context");
			} else {
				l = &lwp0;
				if (panicstr && (!(flags & LK_NOWAIT))) {
					flags |= LK_NOWAIT;
					lock_shutdown_noblock = 1;
				}
			}
		}
		lid = l->l_lid;
		pid = l->l_proc->p_pid;
	}
	cpu_id = cpu_number();

	/*
	 * Once a lock has drained, the LK_DRAINING flag is set and an
	 * exclusive lock is returned. The only valid operation thereafter
	 * is a single release of that exclusive lock. This final release
	 * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
	 * further requests of any sort will result in a panic. The bits
	 * selected for these two flags are chosen so that they will be set
	 * in memory that is freed (freed memory is filled with 0xdeadbeef).
	 * The final release is permitted to give a new lease on life to
	 * the lock by specifying LK_REENABLE.
	 */
	if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
#ifdef DIAGNOSTIC /* { */
		if (lkp->lk_flags & LK_DRAINED)
			panic("lockmgr: using decommissioned lock");
		if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
		    WEHOLDIT(lkp, pid, lid, cpu_id) == 0)
			panic("lockmgr: non-release on draining lock: %d",
			    flags & LK_TYPE_MASK);
#endif /* DIAGNOSTIC */ /* } */
		lkp->lk_flags &= ~LK_DRAINING;
		if ((flags & LK_REENABLE) == 0)
			lkp->lk_flags |= LK_DRAINED;
	}

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		if (WEHOLDIT(lkp, pid, lid, cpu_id) == 0) {
			/*
			 * If just polling, check to see if we will block.
			 */
			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
				error = EBUSY;
				break;
			}
			/*
			 * Wait for exclusive locks and upgrades to clear.
			 */
			ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
			if (error)
				break;
			lkp->lk_sharecount++;
			COUNT(lkp, l, cpu_id, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		lkp->lk_sharecount++;
		COUNT(lkp, l, cpu_id, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (WEHOLDIT(lkp, pid, lid, cpu_id) == 0 ||
		    lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		lkp->lk_sharecount += lkp->lk_exclusivecount;
		lkp->lk_exclusivecount = 0;
		lkp->lk_recurselevel = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
#if defined(LOCKDEBUG)
		lkp->lk_unlock_file = file;
		lkp->lk_unlock_line = line;
#endif
		DONTHAVEIT(lkp);
		WAKEUP_WAITER(lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			lkp->lk_sharecount--;
			COUNT(lkp, l, cpu_id, -1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one. If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade). If we return an error, the file
		 * will always be unlocked.
		 */
		if (WEHOLDIT(lkp, pid, lid, cpu_id) || lkp->lk_sharecount <= 0)
			panic("lockmgr: upgrade exclusive lock");
		lkp->lk_sharecount--;
		COUNT(lkp, l, cpu_id, -1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		     lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an
			 * upgrade, so request the upgrade and wait for
			 * the shared count to drop to zero, then take
			 * the exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			ACQUIRE(lkp, error, extflags, 0, lkp->lk_sharecount);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;
			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			SETHOLDER(lkp, pid, lid, cpu_id);
#if defined(LOCKDEBUG)
			lkp->lk_lock_file = file;
			lkp->lk_lock_line = line;
#endif
			HAVEIT(lkp);
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
			if (extflags & LK_SETRECURSE)
				lkp->lk_recurselevel = 1;
			COUNT(lkp, l, cpu_id, 1);
			break;
		}
		/*
		 * Someone else has requested upgrade. Release our shared
		 * lock, awaken upgrade requestor if we are the last shared
		 * lock, then request an exclusive lock.
		 */
		if (lkp->lk_sharecount == 0)
			WAKEUP_WAITER(lkp);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (WEHOLDIT(lkp, pid, lid, cpu_id)) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & LK_CANRECURSE) == 0 &&
			     lkp->lk_recurselevel == 0) {
				if (extflags & LK_RECURSEFAIL) {
					error = EDEADLK;
					break;
				} else
					panic("lockmgr: locking against myself");
			}
			lkp->lk_exclusivecount++;
			if (extflags & LK_SETRECURSE &&
			    lkp->lk_recurselevel == 0)
				lkp->lk_recurselevel = lkp->lk_exclusivecount;
			COUNT(lkp, l, cpu_id, 1);
			break;
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0)) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
		    (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		ACQUIRE(lkp, error, extflags, 0, lkp->lk_sharecount != 0 ||
		       (lkp->lk_flags & LK_WANT_UPGRADE));
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, lid, cpu_id);
#if defined(LOCKDEBUG)
		lkp->lk_lock_file = file;
		lkp->lk_lock_line = line;
#endif
		HAVEIT(lkp);
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
		if (extflags & LK_SETRECURSE)
			lkp->lk_recurselevel = 1;
		COUNT(lkp, l, cpu_id, 1);
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (WEHOLDIT(lkp, pid, lid, cpu_id) == 0) {
				if (lkp->lk_flags & LK_SPIN) {
					panic("lockmgr: processor %lu, not "
					    "exclusive lock holder %lu "
					    "unlocking", cpu_id, lkp->lk_cpu);
				} else {
					panic("lockmgr: pid %d, not "
					    "exclusive lock holder %d "
					    "unlocking", pid,
					    lkp->lk_lockholder);
				}
			}
			if (lkp->lk_exclusivecount == lkp->lk_recurselevel)
				lkp->lk_recurselevel = 0;
			lkp->lk_exclusivecount--;
			COUNT(lkp, l, cpu_id, -1);
			if (lkp->lk_exclusivecount == 0) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
#if defined(LOCKDEBUG)
				lkp->lk_unlock_file = file;
				lkp->lk_unlock_line = line;
#endif
				DONTHAVEIT(lkp);
			}
		} else if (lkp->lk_sharecount != 0) {
			lkp->lk_sharecount--;
			COUNT(lkp, l, cpu_id, -1);
		}
#ifdef DIAGNOSTIC
		else
			panic("lockmgr: release of unlocked lock!");
#endif
		WAKEUP_WAITER(lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do. Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (WEHOLDIT(lkp, pid, lid, cpu_id))
			panic("lockmgr: draining against myself");
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
			error = EBUSY;
			break;
		}
		ACQUIRE(lkp, error, extflags, 1,
		    ((lkp->lk_flags &
		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
		     lkp->lk_sharecount != 0 ||
		     lkp->lk_waitcount != 0));
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		SETHOLDER(lkp, pid, lid, cpu_id);
#if defined(LOCKDEBUG)
		lkp->lk_lock_file = file;
		lkp->lk_lock_line = line;
#endif
		HAVEIT(lkp);
		lkp->lk_exclusivecount = 1;
		/* XXX unlikely that we'd want this */
		if (extflags & LK_SETRECURSE)
			lkp->lk_recurselevel = 1;
		COUNT(lkp, l, cpu_id, 1);
		break;

	default:
		INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & (LK_WAITDRAIN|LK_SPIN)) == LK_WAITDRAIN &&
	    ((lkp->lk_flags &
	      (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
	     lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	/*
	 * Note that this panic will be a recursive panic, since
	 * we only set lock_shutdown_noblock above if panicstr != NULL.
	 */
	if (error && lock_shutdown_noblock)
		panic("lockmgr: deadlock (see previous panic)");

	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
	return (error);
}
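
/*
 * A sketch of the upgrade protocol above (illustrative only; `lk' is
 * a hypothetical lock already set up with lockinit()):
 *
 *	(void) lockmgr(&lk, LK_SHARED, NULL);
 *	...
 *	if (lockmgr(&lk, LK_UPGRADE, NULL) != 0) {
 *		... handle the error; the lock is now unlocked,
 *		... since failed upgrade requests always release it
 *	}
 *
 * LK_EXCLUPGRADE behaves the same way, except that it fails with
 * EBUSY instead of queueing behind another pending upgrade.
 */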

/*
 * For a recursive spinlock held one or more times by the current CPU,
 * release all N locks, and return N.
 * Intended for use in mi_switch() shortly before context switching.
 */

int
#if defined(LOCKDEBUG)
_spinlock_release_all(__volatile struct lock *lkp, const char *file, int line)
#else
spinlock_release_all(__volatile struct lock *lkp)
#endif
{
	int s, count;
	cpuid_t cpu_id;

	KASSERT(lkp->lk_flags & LK_SPIN);

	INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);

	cpu_id = cpu_number();
	count = lkp->lk_exclusivecount;

	if (count != 0) {
#ifdef DIAGNOSTIC
		if (WEHOLDIT(lkp, 0, 0, cpu_id) == 0) {
			panic("spinlock_release_all: processor %lu, not "
			    "exclusive lock holder %lu "
			    "unlocking", (long)cpu_id, lkp->lk_cpu);
		}
#endif
		lkp->lk_recurselevel = 0;
		lkp->lk_exclusivecount = 0;
		COUNT_CPU(cpu_id, -count);
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
#if defined(LOCKDEBUG)
		lkp->lk_unlock_file = file;
		lkp->lk_unlock_line = line;
#endif
		DONTHAVEIT(lkp);
	}
#ifdef DIAGNOSTIC
	else if (lkp->lk_sharecount != 0)
		panic("spinlock_release_all: release of shared lock!");
	else
		panic("spinlock_release_all: release of unlocked lock!");
#endif
	INTERLOCK_RELEASE(lkp, LK_SPIN, s);

	return (count);
}

/*
 * For a recursive spinlock released with spinlock_release_all(),
 * reacquire it on behalf of the current CPU and restore the hold
 * count to `count'.
 * Intended for use in mi_switch() right after resuming execution.
 */

void
#if defined(LOCKDEBUG)
_spinlock_acquire_count(__volatile struct lock *lkp, int count,
    const char *file, int line)
#else
spinlock_acquire_count(__volatile struct lock *lkp, int count)
#endif
{
	int s, error;
	cpuid_t cpu_id;

	KASSERT(lkp->lk_flags & LK_SPIN);

	INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);

	cpu_id = cpu_number();

#ifdef DIAGNOSTIC
	if (WEHOLDIT(lkp, LK_NOPROC, 0, cpu_id))
		panic("spinlock_acquire_count: processor %lu already holds "
		    "lock", (long)cpu_id);
#endif
	/*
	 * Try to acquire the want_exclusive flag.
	 */
	ACQUIRE(lkp, error, LK_SPIN, 0, lkp->lk_flags &
	    (LK_HAVE_EXCL | LK_WANT_EXCL));
	lkp->lk_flags |= LK_WANT_EXCL;
	/*
	 * Wait for shared locks and upgrades to finish.
	 */
	ACQUIRE(lkp, error, LK_SPIN, 0, lkp->lk_sharecount != 0 ||
	    (lkp->lk_flags & LK_WANT_UPGRADE));
	lkp->lk_flags &= ~LK_WANT_EXCL;
	lkp->lk_flags |= LK_HAVE_EXCL;
	SETHOLDER(lkp, LK_NOPROC, 0, cpu_id);
#if defined(LOCKDEBUG)
	lkp->lk_lock_file = file;
	lkp->lk_lock_line = line;
#endif
	HAVEIT(lkp);
	if (lkp->lk_exclusivecount != 0)
		panic("lockmgr: non-zero exclusive count");
	lkp->lk_exclusivecount = count;
	lkp->lk_recurselevel = 1;
	COUNT_CPU(cpu_id, count);

	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
}
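
/*
 * Sketch of how the two routines above pair up around a context
 * switch (mi_switch() is where this actually happens; the surrounding
 * scheduler code is elided):
 *
 *	count = spinlock_release_all(lkp);
 *	... block, switch to another LWP, eventually resume ...
 *	spinlock_acquire_count(lkp, count);
 */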

/*
 * Print out information about the state of a lock.  Used by VOP_PRINT
 * routines to display status of contained locks.
 */
void
lockmgr_printinfo(__volatile struct lock *lkp)
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL) {
		printf(" lock type %s: EXCL (count %d) by ",
		    lkp->lk_wmesg, lkp->lk_exclusivecount);
		if (lkp->lk_flags & LK_SPIN)
			printf("processor %lu", lkp->lk_cpu);
		else
			printf("pid %d.%d", lkp->lk_lockholder,
			    lkp->lk_locklwp);
	} else
		printf(" not locked");
	if ((lkp->lk_flags & LK_SPIN) == 0 && lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}
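
/*
 * For instance, an exclusively held sleep lock with two sleepers
 * would print roughly (a sketch of the formats above, with made-up
 * values):
 *
 *	 lock type vnlock: EXCL (count 1) by pid 123.1 with 2 pending
 */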

#if defined(LOCKDEBUG) /* { */
TAILQ_HEAD(, simplelock) simplelock_list =
    TAILQ_HEAD_INITIALIZER(simplelock_list);

#if defined(MULTIPROCESSOR) /* { */
struct simplelock simplelock_list_slock = SIMPLELOCK_INITIALIZER;

#define	SLOCK_LIST_LOCK()						\
	__cpu_simple_lock(&simplelock_list_slock.lock_data)

#define	SLOCK_LIST_UNLOCK()						\
	__cpu_simple_unlock(&simplelock_list_slock.lock_data)

#define	SLOCK_COUNT(x)							\
	curcpu()->ci_simple_locks += (x)
#else
u_long simple_locks;

#define	SLOCK_LIST_LOCK()	/* nothing */

#define	SLOCK_LIST_UNLOCK()	/* nothing */

#define	SLOCK_COUNT(x)		simple_locks += (x)
#endif /* MULTIPROCESSOR */ /* } */

#ifdef MULTIPROCESSOR
#define SLOCK_MP()		lock_printf("on cpu %ld\n", 		\
				    (u_long) cpu_number())
#else
#define SLOCK_MP()		/* nothing */
#endif

#define	SLOCK_WHERE(str, alp, id, l)					\
do {									\
	lock_printf("\n");						\
	lock_printf(str);						\
	lock_printf("lock: %p, currently at: %s:%d\n", (alp), (id), (l)); \
	SLOCK_MP();							\
	if ((alp)->lock_file != NULL)					\
		lock_printf("last locked: %s:%d\n", (alp)->lock_file,	\
		    (alp)->lock_line);					\
	if ((alp)->unlock_file != NULL)					\
		lock_printf("last unlocked: %s:%d\n", (alp)->unlock_file, \
		    (alp)->unlock_line);				\
	SLOCK_TRACE()							\
	SLOCK_DEBUGGER();						\
} while (/*CONSTCOND*/0)

/*
 * Simple lock functions so that the debugger can see from whence
 * they are being called.
 */
void
simple_lock_init(struct simplelock *alp)
{

#if defined(MULTIPROCESSOR) /* { */
	__cpu_simple_lock_init(&alp->lock_data);
#else
	alp->lock_data = __SIMPLELOCK_UNLOCKED;
#endif /* } */
	alp->lock_file = NULL;
	alp->lock_line = 0;
	alp->unlock_file = NULL;
	alp->unlock_line = 0;
	alp->lock_holder = LK_NOCPU;
}
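
/*
 * Typical use (a sketch; `foo_slock' and `foo_count' are hypothetical).
 * Under LOCKDEBUG, <sys/lock.h> is expected to map simple_lock() and
 * friends onto the _simple_lock() functions below, supplying
 * __FILE__/__LINE__ for the `id'/`l' arguments:
 *
 *	struct simplelock foo_slock = SIMPLELOCK_INITIALIZER;
 *
 *	simple_lock(&foo_slock);
 *	foo_count++;
 *	simple_unlock(&foo_slock);
 */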

void
_simple_lock(__volatile struct simplelock *alp, const char *id, int l)
{
	cpuid_t cpu_id = cpu_number();
	int s;

	s = spllock();

	/*
	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
	 * don't take any action, and just fall into the normal spin case.
	 */
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
#if defined(MULTIPROCESSOR) /* { */
		if (alp->lock_holder == cpu_id) {
			SLOCK_WHERE("simple_lock: locking against myself\n",
			    alp, id, l);
			goto out;
		}
#else
		SLOCK_WHERE("simple_lock: lock held\n", alp, id, l);
		goto out;
#endif /* MULTIPROCESSOR */ /* } */
	}

#if defined(MULTIPROCESSOR) /* { */
	/* Acquire the lock before modifying any fields. */
	__cpu_simple_lock(&alp->lock_data);
#else
	alp->lock_data = __SIMPLELOCK_LOCKED;
#endif /* } */

	if (alp->lock_holder != LK_NOCPU) {
		SLOCK_WHERE("simple_lock: uninitialized lock\n",
		    alp, id, l);
	}
	alp->lock_file = id;
	alp->lock_line = l;
	alp->lock_holder = cpu_id;

	SLOCK_LIST_LOCK();
	/* XXX Cast away volatile */
	TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(1);

 out:
	splx(s);
}

int
_simple_lock_held(__volatile struct simplelock *alp)
{
#if defined(MULTIPROCESSOR) || defined(DIAGNOSTIC)
	cpuid_t cpu_id = cpu_number();
#endif
	int s, locked = 0;

	s = spllock();

#if defined(MULTIPROCESSOR)
	if (__cpu_simple_lock_try(&alp->lock_data) == 0)
		locked = (alp->lock_holder == cpu_id);
	else
		__cpu_simple_unlock(&alp->lock_data);
#else
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
		locked = 1;
		KASSERT(alp->lock_holder == cpu_id);
	}
#endif

	splx(s);

	return (locked);
}

int
_simple_lock_try(__volatile struct simplelock *alp, const char *id, int l)
{
	cpuid_t cpu_id = cpu_number();
	int s, rv = 0;

	s = spllock();

	/*
	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
	 * don't take any action.
	 */
#if defined(MULTIPROCESSOR) /* { */
	if ((rv = __cpu_simple_lock_try(&alp->lock_data)) == 0) {
		if (alp->lock_holder == cpu_id)
			SLOCK_WHERE("simple_lock_try: locking against myself\n",
			    alp, id, l);
		goto out;
	}
#else
	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
		SLOCK_WHERE("simple_lock_try: lock held\n", alp, id, l);
		goto out;
	}
	alp->lock_data = __SIMPLELOCK_LOCKED;
#endif /* MULTIPROCESSOR */ /* } */

	/*
	 * At this point, we have acquired the lock.
	 */

	rv = 1;

	alp->lock_file = id;
	alp->lock_line = l;
	alp->lock_holder = cpu_id;

	SLOCK_LIST_LOCK();
	/* XXX Cast away volatile. */
	TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(1);

 out:
	splx(s);
	return (rv);
}
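
/*
 * simple_lock_try() suits opportunistic paths that must not spin or
 * sleep, e.g. (a sketch with hypothetical names):
 *
 *	if (simple_lock_try(&foo_slock)) {
 *		... fast path ...
 *		simple_unlock(&foo_slock);
 *	} else
 *		... defer the work ...
 */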

void
_simple_unlock(__volatile struct simplelock *alp, const char *id, int l)
{
	int s;

	s = spllock();

	/*
	 * MULTIPROCESSOR case: This is `safe' because we think we hold
	 * the lock, and if we don't, we don't take any action.
	 */
	if (alp->lock_data == __SIMPLELOCK_UNLOCKED) {
		SLOCK_WHERE("simple_unlock: lock not held\n",
		    alp, id, l);
		goto out;
	}

	SLOCK_LIST_LOCK();
	TAILQ_REMOVE(&simplelock_list, alp, list);
	SLOCK_LIST_UNLOCK();

	SLOCK_COUNT(-1);

	alp->list.tqe_next = NULL;	/* sanity */
	alp->list.tqe_prev = NULL;	/* sanity */

	alp->unlock_file = id;
	alp->unlock_line = l;

#if defined(MULTIPROCESSOR) /* { */
	alp->lock_holder = LK_NOCPU;
	/* Now that we've modified all fields, release the lock. */
	__cpu_simple_unlock(&alp->lock_data);
#else
	alp->lock_data = __SIMPLELOCK_UNLOCKED;
	KASSERT(alp->lock_holder == cpu_number());
	alp->lock_holder = LK_NOCPU;
#endif /* } */

 out:
	splx(s);
}

void
simple_lock_dump(void)
{
	struct simplelock *alp;
	int s;

	s = spllock();
	SLOCK_LIST_LOCK();
	lock_printf("all simple locks:\n");
	TAILQ_FOREACH(alp, &simplelock_list, list) {
		lock_printf("%p CPU %lu %s:%d\n", alp, alp->lock_holder,
		    alp->lock_file, alp->lock_line);
	}
	SLOCK_LIST_UNLOCK();
	splx(s);
}

void
simple_lock_freecheck(void *start, void *end)
{
	struct simplelock *alp;
	int s;

	s = spllock();
	SLOCK_LIST_LOCK();
	TAILQ_FOREACH(alp, &simplelock_list, list) {
		if ((void *)alp >= start && (void *)alp < end) {
			lock_printf("freeing simple_lock %p CPU %lu %s:%d\n",
			    alp, alp->lock_holder, alp->lock_file,
			    alp->lock_line);
			SLOCK_DEBUGGER();
		}
	}
	SLOCK_LIST_UNLOCK();
	splx(s);
}
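
/*
 * A memory allocator is expected to call this just before handing a
 * region back to its free pool, e.g. (sketch; `base' and `size' are
 * hypothetical):
 *
 *	simple_lock_freecheck(base, (char *)base + size);
 *
 * so that freeing memory which still contains a held simple lock is
 * caught and reported.
 */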

/*
 * We must be holding exactly one lock: the sched_lock.
 */

void
simple_lock_switchcheck(void)
{

	simple_lock_only_held(&sched_lock, "switching");
}

void
simple_lock_only_held(volatile struct simplelock *lp, const char *where)
{
	struct simplelock *alp;
	cpuid_t cpu_id = cpu_number();
	int s;

	if (lp) {
		LOCK_ASSERT(simple_lock_held(lp));
	}
	s = spllock();
	SLOCK_LIST_LOCK();
	TAILQ_FOREACH(alp, &simplelock_list, list) {
		if (alp == lp)
			continue;
		if (alp->lock_holder == cpu_id)
			break;
	}
	SLOCK_LIST_UNLOCK();
	splx(s);

	if (alp != NULL) {
		lock_printf("\n%s with held simple_lock %p "
		    "CPU %lu %s:%d\n",
		    where, alp, alp->lock_holder, alp->lock_file,
		    alp->lock_line);
		SLOCK_TRACE();
		SLOCK_DEBUGGER();
	}
}
#endif /* LOCKDEBUG */ /* } */

#if defined(MULTIPROCESSOR)
/*
 * Functions for manipulating the kernel_lock.  We put them here
 * so that they show up in profiles.
 */

struct lock kernel_lock;

void
_kernel_lock_init(void)
{

	spinlockinit(&kernel_lock, "klock", 0);
}

/*
 * Acquire/release the kernel lock.  Intended for use in the scheduler
 * and the lower half of the kernel.
 */
void
_kernel_lock(int flag)
{

	SCHED_ASSERT_UNLOCKED();
	spinlockmgr(&kernel_lock, flag, 0);
}

void
_kernel_unlock(void)
{

	spinlockmgr(&kernel_lock, LK_RELEASE, 0);
}

/*
 * Acquire/release the kernel_lock on behalf of a process.  Intended for
 * use in the top half of the kernel.
 */
void
_kernel_proc_lock(struct lwp *l)
{

	SCHED_ASSERT_UNLOCKED();
	spinlockmgr(&kernel_lock, LK_EXCLUSIVE, 0);
	l->l_flag |= P_BIGLOCK;
}

void
_kernel_proc_unlock(struct lwp *l)
{

	l->l_flag &= ~P_BIGLOCK;
	spinlockmgr(&kernel_lock, LK_RELEASE, 0);
}
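
/*
 * Sketch of the intended calling pattern (illustrative; the real call
 * sites live in machine-dependent trap/syscall code and in the
 * scheduler):
 *
 *	_kernel_proc_lock(l);		on kernel entry (top half)
 *	... run top-half code ...
 *	_kernel_proc_unlock(l);		on return to user space
 *
 *	_kernel_lock(LK_EXCLUSIVE);	lower half / scheduler
 *	...
 *	_kernel_unlock();
 */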
#endif /* MULTIPROCESSOR */