kern_lock.c revision 1.72
      1 /*	$NetBSD: kern_lock.c,v 1.72 2003/08/07 16:31:46 agc Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
      9  * NASA Ames Research Center.
     10  *
     11  * This code is derived from software contributed to The NetBSD Foundation
     12  * by Ross Harvey.
     13  *
     14  * Redistribution and use in source and binary forms, with or without
     15  * modification, are permitted provided that the following conditions
     16  * are met:
     17  * 1. Redistributions of source code must retain the above copyright
     18  *    notice, this list of conditions and the following disclaimer.
     19  * 2. Redistributions in binary form must reproduce the above copyright
     20  *    notice, this list of conditions and the following disclaimer in the
     21  *    documentation and/or other materials provided with the distribution.
     22  * 3. All advertising materials mentioning features or use of this software
     23  *    must display the following acknowledgement:
     24  *	This product includes software developed by the NetBSD
     25  *	Foundation, Inc. and its contributors.
     26  * 4. Neither the name of The NetBSD Foundation nor the names of its
     27  *    contributors may be used to endorse or promote products derived
     28  *    from this software without specific prior written permission.
     29  *
     30  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     31  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     32  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     33  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     34  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     35  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     36  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     37  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     38  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     39  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     40  * POSSIBILITY OF SUCH DAMAGE.
     41  */
     42 
     43 /*
     44  * Copyright (c) 1995
     45  *	The Regents of the University of California.  All rights reserved.
     46  *
     47  * This code contains ideas from software contributed to Berkeley by
     48  * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
     49  * System project at Carnegie-Mellon University.
     50  *
     51  * Redistribution and use in source and binary forms, with or without
     52  * modification, are permitted provided that the following conditions
     53  * are met:
     54  * 1. Redistributions of source code must retain the above copyright
     55  *    notice, this list of conditions and the following disclaimer.
     56  * 2. Redistributions in binary form must reproduce the above copyright
     57  *    notice, this list of conditions and the following disclaimer in the
     58  *    documentation and/or other materials provided with the distribution.
     59  * 3. Neither the name of the University nor the names of its contributors
     60  *    may be used to endorse or promote products derived from this software
     61  *    without specific prior written permission.
     62  *
     63  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     64  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     65  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     66  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     67  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     68  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     69  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     70  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     71  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     72  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     73  * SUCH DAMAGE.
     74  *
     75  *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
     76  */
     77 
     78 #include <sys/cdefs.h>
     79 __KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.72 2003/08/07 16:31:46 agc Exp $");
     80 
     81 #include "opt_multiprocessor.h"
     82 #include "opt_lockdebug.h"
     83 #include "opt_ddb.h"
     84 
     85 #include <sys/param.h>
     86 #include <sys/proc.h>
     87 #include <sys/lock.h>
     88 #include <sys/systm.h>
     89 #include <machine/cpu.h>
     90 
     91 #if defined(LOCKDEBUG)
     92 #include <sys/syslog.h>
     93 /*
     94  * Note that stdarg.h and the ANSI-style va_start macro are used for both
     95  * ANSI and traditional C compiles.
     96  * XXX: this requires that stdarg.h define: va_alist and va_dcl
     97  */
     98 #include <machine/stdarg.h>
     99 
    100 void	lock_printf(const char *fmt, ...)
    101     __attribute__((__format__(__printf__,1,2)));
    102 
    103 int	lock_debug_syslog = 0;	/* defaults to printf, but can be patched */
    104 
    105 #ifdef DDB
    106 #include <ddb/ddbvar.h>
    107 #include <machine/db_machdep.h>
    108 #include <ddb/db_command.h>
    109 #include <ddb/db_interface.h>
    110 #endif
    111 #endif
    112 
    113 /*
    114  * Locking primitives implementation.
    115  * Locks provide shared/exclusive synchronization.
    116  */
    117 
    118 #if defined(LOCKDEBUG) || defined(DIAGNOSTIC) /* { */
    119 #if defined(MULTIPROCESSOR) /* { */
    120 #define	COUNT_CPU(cpu_id, x)						\
    121 	curcpu()->ci_spin_locks += (x)
    122 #else
    123 u_long	spin_locks;
    124 #define	COUNT_CPU(cpu_id, x)	spin_locks += (x)
    125 #endif /* MULTIPROCESSOR */ /* } */
    126 
    127 #define	COUNT(lkp, l, cpu_id, x)					\
    128 do {									\
    129 	if ((lkp)->lk_flags & LK_SPIN)					\
    130 		COUNT_CPU((cpu_id), (x));				\
    131 	else								\
    132 		(l)->l_locks += (x);					\
    133 } while (/*CONSTCOND*/0)
    134 #else
    135 #define COUNT(lkp, p, cpu_id, x)
    136 #define COUNT_CPU(cpu_id, x)
    137 #endif /* LOCKDEBUG || DIAGNOSTIC */ /* } */
    138 
    139 #ifndef SPINLOCK_SPIN_HOOK		/* from <machine/lock.h> */
    140 #define	SPINLOCK_SPIN_HOOK		/* nothing */
    141 #endif
    142 
    143 #define	INTERLOCK_ACQUIRE(lkp, flags, s)				\
    144 do {									\
    145 	if ((flags) & LK_SPIN)						\
    146 		s = spllock();						\
    147 	simple_lock(&(lkp)->lk_interlock);				\
    148 } while (/*CONSTCOND*/ 0)
    149 
    150 #define	INTERLOCK_RELEASE(lkp, flags, s)				\
    151 do {									\
    152 	simple_unlock(&(lkp)->lk_interlock);				\
    153 	if ((flags) & LK_SPIN)						\
    154 		splx(s);						\
    155 } while (/*CONSTCOND*/ 0)
    156 
    157 #ifdef DDB /* { */
    158 #ifdef MULTIPROCESSOR
    159 int simple_lock_debugger = 1;	/* more serious on MP */
    160 #else
    161 int simple_lock_debugger = 0;
    162 #endif
    163 #define	SLOCK_DEBUGGER()	if (simple_lock_debugger) Debugger()
    164 #define	SLOCK_TRACE()							\
    165 	db_stack_trace_print((db_expr_t)__builtin_frame_address(0),	\
    166 	    TRUE, 65535, "", lock_printf);
    167 #else
    168 #define	SLOCK_DEBUGGER()	/* nothing */
    169 #define	SLOCK_TRACE()		/* nothing */
    170 #endif /* } */
    171 
    172 #if defined(LOCKDEBUG)
    173 #if defined(DDB)
    174 #define	SPINLOCK_SPINCHECK_DEBUGGER	Debugger()
    175 #else
    176 #define	SPINLOCK_SPINCHECK_DEBUGGER	/* nothing */
    177 #endif
    178 
    179 #define	SPINLOCK_SPINCHECK_DECL						\
    180 	/* 32-bits of count -- wrap constitutes a "spinout" */		\
    181 	uint32_t __spinc = 0
    182 
    183 #define	SPINLOCK_SPINCHECK						\
    184 do {									\
    185 	if (++__spinc == 0) {						\
    186 		lock_printf("LK_SPIN spinout, excl %d, share %d\n",	\
    187 		    lkp->lk_exclusivecount, lkp->lk_sharecount);	\
    188 		if (lkp->lk_exclusivecount)				\
    189 			lock_printf("held by CPU %lu\n",		\
    190 			    (u_long) lkp->lk_cpu);			\
    191 		if (lkp->lk_lock_file)					\
    192 			lock_printf("last locked at %s:%d\n",		\
    193 			    lkp->lk_lock_file, lkp->lk_lock_line);	\
    194 		if (lkp->lk_unlock_file)				\
    195 			lock_printf("last unlocked at %s:%d\n",		\
    196 			    lkp->lk_unlock_file, lkp->lk_unlock_line);	\
    197 		SLOCK_TRACE();						\
    198 		SPINLOCK_SPINCHECK_DEBUGGER;				\
    199 	}								\
    200 } while (/*CONSTCOND*/ 0)
    201 #else
    202 #define	SPINLOCK_SPINCHECK_DECL			/* nothing */
    203 #define	SPINLOCK_SPINCHECK			/* nothing */
    204 #endif /* LOCKDEBUG */
    205 
    206 /*
    207  * Acquire a resource.
    208  */
    209 #define ACQUIRE(lkp, error, extflags, drain, wanted)			\
    210 	if ((extflags) & LK_SPIN) {					\
    211 		int interlocked;					\
    212 		SPINLOCK_SPINCHECK_DECL;				\
    213 									\
    214 		if ((drain) == 0)					\
    215 			(lkp)->lk_waitcount++;				\
    216 		for (interlocked = 1;;) {				\
    217 			SPINLOCK_SPINCHECK;				\
    218 			if (wanted) {					\
    219 				if (interlocked) {			\
    220 					INTERLOCK_RELEASE((lkp),	\
    221 					    LK_SPIN, s);		\
    222 					interlocked = 0;		\
    223 				}					\
    224 				SPINLOCK_SPIN_HOOK;			\
    225 			} else if (interlocked) {			\
    226 				break;					\
    227 			} else {					\
    228 				INTERLOCK_ACQUIRE((lkp), LK_SPIN, s);	\
    229 				interlocked = 1;			\
    230 			}						\
    231 		}							\
    232 		if ((drain) == 0)					\
    233 			(lkp)->lk_waitcount--;				\
    234 		KASSERT((wanted) == 0);					\
    235 		error = 0;	/* sanity */				\
    236 	} else {							\
    237 		for (error = 0; wanted; ) {				\
    238 			if ((drain))					\
    239 				(lkp)->lk_flags |= LK_WAITDRAIN;	\
    240 			else						\
    241 				(lkp)->lk_waitcount++;			\
    242 			/* XXX Cast away volatile. */			\
    243 			error = ltsleep((drain) ?			\
    244 			    (void *)&(lkp)->lk_flags :			\
    245 			    (void *)(lkp), (lkp)->lk_prio,		\
    246 			    (lkp)->lk_wmesg, (lkp)->lk_timo,		\
    247 			    &(lkp)->lk_interlock);			\
    248 			if ((drain) == 0)				\
    249 				(lkp)->lk_waitcount--;			\
    250 			if (error)					\
    251 				break;					\
    252 			if ((extflags) & LK_SLEEPFAIL) {		\
    253 				error = ENOLCK;				\
    254 				break;					\
    255 			}						\
    256 		}							\
    257 	}
    258 
    259 #define	SETHOLDER(lkp, pid, lid, cpu_id)				\
    260 do {									\
    261 	if ((lkp)->lk_flags & LK_SPIN)					\
    262 		(lkp)->lk_cpu = cpu_id;					\
    263 	else {								\
    264 		(lkp)->lk_lockholder = pid;				\
    265 		(lkp)->lk_locklwp = lid;				\
    266 	}								\
    267 } while (/*CONSTCOND*/0)
    268 
    269 #define	WEHOLDIT(lkp, pid, lid, cpu_id)					\
    270 	(((lkp)->lk_flags & LK_SPIN) != 0 ?				\
    271 	 ((lkp)->lk_cpu == (cpu_id)) :					\
    272 	 ((lkp)->lk_lockholder == (pid) && (lkp)->lk_locklwp == (lid)))
    273 
    274 #define	WAKEUP_WAITER(lkp)						\
    275 do {									\
    276 	if (((lkp)->lk_flags & LK_SPIN) == 0 && (lkp)->lk_waitcount) {	\
    277 		/* XXX Cast away volatile. */				\
    278 		wakeup((void *)(lkp));					\
    279 	}								\
    280 } while (/*CONSTCOND*/0)
    281 
    282 #if defined(LOCKDEBUG) /* { */
    283 #if defined(MULTIPROCESSOR) /* { */
    284 struct simplelock spinlock_list_slock = SIMPLELOCK_INITIALIZER;
    285 
    286 #define	SPINLOCK_LIST_LOCK()						\
    287 	__cpu_simple_lock(&spinlock_list_slock.lock_data)
    288 
    289 #define	SPINLOCK_LIST_UNLOCK()						\
    290 	__cpu_simple_unlock(&spinlock_list_slock.lock_data)
    291 #else
    292 #define	SPINLOCK_LIST_LOCK()	/* nothing */
    293 
    294 #define	SPINLOCK_LIST_UNLOCK()	/* nothing */
    295 #endif /* MULTIPROCESSOR */ /* } */
    296 
    297 TAILQ_HEAD(, lock) spinlock_list =
    298     TAILQ_HEAD_INITIALIZER(spinlock_list);
    299 
    300 #define	HAVEIT(lkp)							\
    301 do {									\
    302 	if ((lkp)->lk_flags & LK_SPIN) {				\
    303 		int s = spllock();					\
    304 		SPINLOCK_LIST_LOCK();					\
    305 		/* XXX Cast away volatile. */				\
    306 		TAILQ_INSERT_TAIL(&spinlock_list, (struct lock *)(lkp),	\
    307 		    lk_list);						\
    308 		SPINLOCK_LIST_UNLOCK();					\
    309 		splx(s);						\
    310 	}								\
    311 } while (/*CONSTCOND*/0)
    312 
    313 #define	DONTHAVEIT(lkp)							\
    314 do {									\
    315 	if ((lkp)->lk_flags & LK_SPIN) {				\
    316 		int s = spllock();					\
    317 		SPINLOCK_LIST_LOCK();					\
    318 		/* XXX Cast away volatile. */				\
    319 		TAILQ_REMOVE(&spinlock_list, (struct lock *)(lkp),	\
    320 		    lk_list);						\
    321 		SPINLOCK_LIST_UNLOCK();					\
    322 		splx(s);						\
    323 	}								\
    324 } while (/*CONSTCOND*/0)
    325 #else
    326 #define	HAVEIT(lkp)		/* nothing */
    327 
    328 #define	DONTHAVEIT(lkp)		/* nothing */
    329 #endif /* LOCKDEBUG */ /* } */
    330 
    331 #if defined(LOCKDEBUG)
    332 /*
    333  * Lock debug printing routine; can be configured to print to console
    334  * or log to syslog.
    335  */
    336 void
    337 lock_printf(const char *fmt, ...)
    338 {
    339 	char b[150];
    340 	va_list ap;
    341 
    342 	va_start(ap, fmt);
    343 	if (lock_debug_syslog)
    344 		vlog(LOG_DEBUG, fmt, ap);
    345 	else {
    346 		vsnprintf(b, sizeof(b), fmt, ap);
    347 		printf_nolog("%s", b);
    348 	}
    349 	va_end(ap);
    350 }
    351 #endif /* LOCKDEBUG */
    352 
    353 /*
    354  * Initialize a lock; required before use.
    355  */
    356 void
    357 lockinit(struct lock *lkp, int prio, const char *wmesg, int timo, int flags)
    358 {
    359 
    360 	memset(lkp, 0, sizeof(struct lock));
    361 	simple_lock_init(&lkp->lk_interlock);
    362 	lkp->lk_flags = flags & LK_EXTFLG_MASK;
    363 	if (flags & LK_SPIN)
    364 		lkp->lk_cpu = LK_NOCPU;
    365 	else {
    366 		lkp->lk_lockholder = LK_NOPROC;
    367 		lkp->lk_prio = prio;
    368 		lkp->lk_timo = timo;
    369 	}
    370 	lkp->lk_wmesg = wmesg;	/* just a name for spin locks */
    371 #if defined(LOCKDEBUG)
    372 	lkp->lk_lock_file = NULL;
    373 	lkp->lk_unlock_file = NULL;
    374 #endif
    375 }
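
/*
 * Usage sketch (illustrative only, not compiled as part of this file):
 * a sleep lock is initialized once with lockinit() and then taken and
 * released through lockmgr().  The lock sc_lock, the wait message
 * "foolk" and the PRIBIO priority below are hypothetical examples.
 *
 *	struct lock sc_lock;
 *
 *	lockinit(&sc_lock, PRIBIO, "foolk", 0, 0);
 *	(void) lockmgr(&sc_lock, LK_EXCLUSIVE, NULL);
 *	... modify the data protected by sc_lock ...
 *	(void) lockmgr(&sc_lock, LK_RELEASE, NULL);
 */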
    376 
    377 /*
    378  * Determine the status of a lock.
    379  */
    380 int
    381 lockstatus(struct lock *lkp)
    382 {
    383 	int s = 0, lock_type = 0;
    384 
    385 	INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
    386 	if (lkp->lk_exclusivecount != 0)
    387 		lock_type = LK_EXCLUSIVE;
    388 	else if (lkp->lk_sharecount != 0)
    389 		lock_type = LK_SHARED;
    390 	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
    391 	return (lock_type);
    392 }
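
/*
 * Sketch (illustrative; sc_lock is a hypothetical lock): lockstatus() is
 * typically used in assertions, for instance to check that a lock is held
 * exclusively before touching the data it protects:
 *
 *	KASSERT(lockstatus(&sc_lock) == LK_EXCLUSIVE);
 */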
    393 
    394 #if defined(LOCKDEBUG) || defined(DIAGNOSTIC)
    395 /*
    396  * Make sure no spin locks are held by a CPU that is about
    397  * to context switch.
    398  */
    399 void
    400 spinlock_switchcheck(void)
    401 {
    402 	u_long cnt;
    403 	int s;
    404 
    405 	s = spllock();
    406 #if defined(MULTIPROCESSOR)
    407 	cnt = curcpu()->ci_spin_locks;
    408 #else
    409 	cnt = spin_locks;
    410 #endif
    411 	splx(s);
    412 
    413 	if (cnt != 0)
    414 		panic("spinlock_switchcheck: CPU %lu has %lu spin locks",
    415 		    (u_long) cpu_number(), cnt);
    416 }
    417 #endif /* LOCKDEBUG || DIAGNOSTIC */
    418 
    419 /*
    420  * Locks and IPLs (interrupt priority levels):
    421  *
    422  * Locks which may be taken from interrupt context must be handled
    423  * very carefully; you must spl to the highest IPL where the lock
    424  * is needed before acquiring the lock.
    425  *
    426  * It is also important to avoid deadlock, since certain (very high
    427  * priority) interrupts are often needed to keep the system as a whole
    428  * from deadlocking, and must not be blocked while you are spinning
    429  * waiting for a lower-priority lock.
    430  *
    431  * In addition, the lock-debugging hooks themselves need to use locks!
    432  *
    433  * A raw __cpu_simple_lock may be used from interrupts as long as it
    434  * is acquired and held at a single IPL.
    435  *
    436  * A simple_lock (which is a __cpu_simple_lock wrapped with some
    437  * debugging hooks) may be used at or below spllock(), which is
    438  * typically at or just below splhigh() (i.e. blocks everything
    439  * but certain machine-dependent extremely high priority interrupts).
    440  *
    441  * spinlockmgr spinlocks should be used at or below splsched().
    442  *
    443  * Some platforms may have interrupts of higher priority than splsched(),
    444  * including hard serial interrupts, inter-processor interrupts, and
    445  * kernel debugger traps.
    446  */
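
/*
 * Sketch of the rule above (illustrative, hypothetical names): code that
 * shares a simple lock with, say, a network interrupt handler must raise
 * the IPL before taking the lock, so the handler cannot interrupt the
 * holder on the same CPU:
 *
 *	int s;
 *
 *	s = splnet();
 *	simple_lock(&foo_slock);
 *	... touch data also used from the network interrupt handler ...
 *	simple_unlock(&foo_slock);
 *	splx(s);
 */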
    447 
    448 /*
    449  * XXX XXX kludge around another kludge..
    450  *
    451  * vfs_shutdown() may be called from interrupt context, either as a result
    452  * of a panic, or from the debugger.  It proceeds to call
    453  * sys_sync(&proc0, ...), pretending it's running on behalf of proc0.
    454  *
    455  * We would like to make an attempt to sync the filesystems in this case, so
    456  * if this happens, we treat attempts to acquire locks specially.
    457  * All locks are acquired on behalf of proc0.
    458  *
    459  * If we've already panicked, we don't block waiting for locks, but
    460  * just barge right ahead since we're already going down in flames.
    461  */
    462 
    463 /*
    464  * Set, change, or release a lock.
    465  *
    466  * Shared requests increment the shared count. Exclusive requests set the
    467  * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
    468  * accepted shared locks and shared-to-exclusive upgrades to go away.
    469  */
    470 int
    471 #if defined(LOCKDEBUG)
    472 _lockmgr(__volatile struct lock *lkp, u_int flags,
    473     struct simplelock *interlkp, const char *file, int line)
    474 #else
    475 lockmgr(__volatile struct lock *lkp, u_int flags,
    476     struct simplelock *interlkp)
    477 #endif
    478 {
    479 	int error;
    480 	pid_t pid;
    481 	lwpid_t lid;
    482 	int extflags;
    483 	cpuid_t cpu_id;
    484 	struct lwp *l = curlwp;
    485 	int lock_shutdown_noblock = 0;
    486 	int s = 0;
    487 
    488 	error = 0;
    489 
    490 	INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
    491 	if (flags & LK_INTERLOCK)
    492 		simple_unlock(interlkp);
    493 	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
    494 
    495 #ifdef DIAGNOSTIC /* { */
    496 	/*
    497 	 * Don't allow spins on sleep locks and don't allow sleeps
    498 	 * on spin locks.
    499 	 */
    500 	if ((flags ^ lkp->lk_flags) & LK_SPIN)
    501 		panic("lockmgr: sleep/spin mismatch");
    502 #endif /* } */
    503 
    504 	if (extflags & LK_SPIN) {
    505 		pid = LK_KERNPROC;
    506 		lid = 0;
    507 	} else {
    508 		if (l == NULL) {
    509 			if (!doing_shutdown) {
    510 				panic("lockmgr: no context");
    511 			} else {
    512 				l = &lwp0;
    513 				if (panicstr && (!(flags & LK_NOWAIT))) {
    514 					flags |= LK_NOWAIT;
    515 					lock_shutdown_noblock = 1;
    516 				}
    517 			}
    518 		}
    519 		lid = l->l_lid;
    520 		pid = l->l_proc->p_pid;
    521 	}
    522 	cpu_id = cpu_number();
    523 
    524 	/*
    525 	 * Once a lock has drained, the LK_DRAINING flag is set and an
    526 	 * exclusive lock is returned. The only valid operation thereafter
    527 	 * is a single release of that exclusive lock. This final release
    528 	 * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
    529 	 * further requests of any sort will result in a panic. The bits
    530 	 * selected for these two flags are chosen so that they will be set
    531 	 * in memory that is freed (freed memory is filled with 0xdeadbeef).
    532 	 * The final release is permitted to give a new lease on life to
    533 	 * the lock by specifying LK_REENABLE.
    534 	 */
    535 	if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
    536 #ifdef DIAGNOSTIC /* { */
    537 		if (lkp->lk_flags & LK_DRAINED)
    538 			panic("lockmgr: using decommissioned lock");
    539 		if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
    540 		    WEHOLDIT(lkp, pid, lid, cpu_id) == 0)
    541 			panic("lockmgr: non-release on draining lock: %d",
    542 			    flags & LK_TYPE_MASK);
    543 #endif /* DIAGNOSTIC */ /* } */
    544 		lkp->lk_flags &= ~LK_DRAINING;
    545 		if ((flags & LK_REENABLE) == 0)
    546 			lkp->lk_flags |= LK_DRAINED;
    547 	}
    548 
    549 	switch (flags & LK_TYPE_MASK) {
    550 
    551 	case LK_SHARED:
    552 		if (WEHOLDIT(lkp, pid, lid, cpu_id) == 0) {
    553 			/*
    554 			 * If just polling, check to see if we will block.
    555 			 */
    556 			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
    557 			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
    558 				error = EBUSY;
    559 				break;
    560 			}
    561 			/*
    562 			 * Wait for exclusive locks and upgrades to clear.
    563 			 */
    564 			ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
    565 			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
    566 			if (error)
    567 				break;
    568 			lkp->lk_sharecount++;
    569 			COUNT(lkp, l, cpu_id, 1);
    570 			break;
    571 		}
    572 		/*
    573 		 * We hold an exclusive lock, so downgrade it to shared.
    574 		 * An alternative would be to fail with EDEADLK.
    575 		 */
    576 		lkp->lk_sharecount++;
    577 		COUNT(lkp, l, cpu_id, 1);
    578 		/* fall into downgrade */
    579 
    580 	case LK_DOWNGRADE:
    581 		if (WEHOLDIT(lkp, pid, lid, cpu_id) == 0 ||
    582 		    lkp->lk_exclusivecount == 0)
    583 			panic("lockmgr: not holding exclusive lock");
    584 		lkp->lk_sharecount += lkp->lk_exclusivecount;
    585 		lkp->lk_exclusivecount = 0;
    586 		lkp->lk_recurselevel = 0;
    587 		lkp->lk_flags &= ~LK_HAVE_EXCL;
    588 		SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
    589 #if defined(LOCKDEBUG)
    590 		lkp->lk_unlock_file = file;
    591 		lkp->lk_unlock_line = line;
    592 #endif
    593 		DONTHAVEIT(lkp);
    594 		WAKEUP_WAITER(lkp);
    595 		break;
    596 
    597 	case LK_EXCLUPGRADE:
    598 		/*
    599 		 * If another process is ahead of us to get an upgrade,
    600 		 * then we want to fail rather than have an intervening
    601 		 * exclusive access.
    602 		 */
    603 		if (lkp->lk_flags & LK_WANT_UPGRADE) {
    604 			lkp->lk_sharecount--;
    605 			COUNT(lkp, l, cpu_id, -1);
    606 			error = EBUSY;
    607 			break;
    608 		}
    609 		/* fall into normal upgrade */
    610 
    611 	case LK_UPGRADE:
    612 		/*
    613 		 * Upgrade a shared lock to an exclusive one. If another
    614 		 * shared lock has already requested an upgrade to an
    615 		 * exclusive lock, our shared lock is released and an
    616 		 * exclusive lock is requested (which will be granted
    617 		 * after the upgrade). If we return an error, the file
    618 		 * will always be unlocked.
    619 		 */
    620 		if (WEHOLDIT(lkp, pid, lid, cpu_id) || lkp->lk_sharecount <= 0)
    621 			panic("lockmgr: upgrade exclusive lock");
    622 		lkp->lk_sharecount--;
    623 		COUNT(lkp, l, cpu_id, -1);
    624 		/*
    625 		 * If we are just polling, check to see if we will block.
    626 		 */
    627 		if ((extflags & LK_NOWAIT) &&
    628 		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
    629 		     lkp->lk_sharecount > 1)) {
    630 			error = EBUSY;
    631 			break;
    632 		}
    633 		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
    634 			/*
    635 			 * We are the first shared lock holder to request an
    636 			 * upgrade, so request the upgrade and wait for the shared
    637 			 * count to drop to zero, then take the exclusive lock.
    638 			 */
    639 			lkp->lk_flags |= LK_WANT_UPGRADE;
    640 			ACQUIRE(lkp, error, extflags, 0, lkp->lk_sharecount);
    641 			lkp->lk_flags &= ~LK_WANT_UPGRADE;
    642 			if (error)
    643 				break;
    644 			lkp->lk_flags |= LK_HAVE_EXCL;
    645 			SETHOLDER(lkp, pid, lid, cpu_id);
    646 #if defined(LOCKDEBUG)
    647 			lkp->lk_lock_file = file;
    648 			lkp->lk_lock_line = line;
    649 #endif
    650 			HAVEIT(lkp);
    651 			if (lkp->lk_exclusivecount != 0)
    652 				panic("lockmgr: non-zero exclusive count");
    653 			lkp->lk_exclusivecount = 1;
    654 			if (extflags & LK_SETRECURSE)
    655 				lkp->lk_recurselevel = 1;
    656 			COUNT(lkp, l, cpu_id, 1);
    657 			break;
    658 		}
    659 		/*
    660 		 * Someone else has requested upgrade. Release our shared
    661 		 * lock, awaken upgrade requestor if we are the last shared
    662 		 * lock, then request an exclusive lock.
    663 		 */
    664 		if (lkp->lk_sharecount == 0)
    665 			WAKEUP_WAITER(lkp);
    666 		/* fall into exclusive request */
    667 
    668 	case LK_EXCLUSIVE:
    669 		if (WEHOLDIT(lkp, pid, lid, cpu_id)) {
    670 			/*
    671 			 * Recursive lock.
    672 			 */
    673 			if ((extflags & LK_CANRECURSE) == 0 &&
    674 			     lkp->lk_recurselevel == 0) {
    675 				if (extflags & LK_RECURSEFAIL) {
    676 					error = EDEADLK;
    677 					break;
    678 				} else
    679 					panic("lockmgr: locking against myself");
    680 			}
    681 			lkp->lk_exclusivecount++;
    682 			if (extflags & LK_SETRECURSE &&
    683 			    lkp->lk_recurselevel == 0)
    684 				lkp->lk_recurselevel = lkp->lk_exclusivecount;
    685 			COUNT(lkp, l, cpu_id, 1);
    686 			break;
    687 		}
    688 		/*
    689 		 * If we are just polling, check to see if we will sleep.
    690 		 */
    691 		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
    692 		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
    693 		     lkp->lk_sharecount != 0)) {
    694 			error = EBUSY;
    695 			break;
    696 		}
    697 		/*
    698 		 * Try to acquire the want_exclusive flag.
    699 		 */
    700 		ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
    701 		    (LK_HAVE_EXCL | LK_WANT_EXCL));
    702 		if (error)
    703 			break;
    704 		lkp->lk_flags |= LK_WANT_EXCL;
    705 		/*
    706 		 * Wait for shared locks and upgrades to finish.
    707 		 */
    708 		ACQUIRE(lkp, error, extflags, 0, lkp->lk_sharecount != 0 ||
    709 		       (lkp->lk_flags & LK_WANT_UPGRADE));
    710 		lkp->lk_flags &= ~LK_WANT_EXCL;
    711 		if (error)
    712 			break;
    713 		lkp->lk_flags |= LK_HAVE_EXCL;
    714 		SETHOLDER(lkp, pid, lid, cpu_id);
    715 #if defined(LOCKDEBUG)
    716 		lkp->lk_lock_file = file;
    717 		lkp->lk_lock_line = line;
    718 #endif
    719 		HAVEIT(lkp);
    720 		if (lkp->lk_exclusivecount != 0)
    721 			panic("lockmgr: non-zero exclusive count");
    722 		lkp->lk_exclusivecount = 1;
    723 		if (extflags & LK_SETRECURSE)
    724 			lkp->lk_recurselevel = 1;
    725 		COUNT(lkp, l, cpu_id, 1);
    726 		break;
    727 
    728 	case LK_RELEASE:
    729 		if (lkp->lk_exclusivecount != 0) {
    730 			if (WEHOLDIT(lkp, pid, lid, cpu_id) == 0) {
    731 				if (lkp->lk_flags & LK_SPIN) {
    732 					panic("lockmgr: processor %lu, not "
    733 					    "exclusive lock holder %lu "
    734 					    "unlocking", cpu_id, lkp->lk_cpu);
    735 				} else {
    736 					panic("lockmgr: pid %d, not "
    737 					    "exclusive lock holder %d "
    738 					    "unlocking", pid,
    739 					    lkp->lk_lockholder);
    740 				}
    741 			}
    742 			if (lkp->lk_exclusivecount == lkp->lk_recurselevel)
    743 				lkp->lk_recurselevel = 0;
    744 			lkp->lk_exclusivecount--;
    745 			COUNT(lkp, l, cpu_id, -1);
    746 			if (lkp->lk_exclusivecount == 0) {
    747 				lkp->lk_flags &= ~LK_HAVE_EXCL;
    748 				SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
    749 #if defined(LOCKDEBUG)
    750 				lkp->lk_unlock_file = file;
    751 				lkp->lk_unlock_line = line;
    752 #endif
    753 				DONTHAVEIT(lkp);
    754 			}
    755 		} else if (lkp->lk_sharecount != 0) {
    756 			lkp->lk_sharecount--;
    757 			COUNT(lkp, l, cpu_id, -1);
    758 		}
    759 #ifdef DIAGNOSTIC
    760 		else
    761 			panic("lockmgr: release of unlocked lock!");
    762 #endif
    763 		WAKEUP_WAITER(lkp);
    764 		break;
    765 
    766 	case LK_DRAIN:
    767 		/*
    768 		 * Check that we do not already hold the lock, as it can
    769 		 * never drain if we do. Unfortunately, we have no way to
    770 		 * check for holding a shared lock, but at least we can
    771 		 * check for an exclusive one.
    772 		 */
    773 		if (WEHOLDIT(lkp, pid, lid, cpu_id))
    774 			panic("lockmgr: draining against myself");
    775 		/*
    776 		 * If we are just polling, check to see if we will sleep.
    777 		 */
    778 		if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
    779 		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
    780 		     lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
    781 			error = EBUSY;
    782 			break;
    783 		}
    784 		ACQUIRE(lkp, error, extflags, 1,
    785 		    ((lkp->lk_flags &
    786 		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
    787 		     lkp->lk_sharecount != 0 ||
    788 		     lkp->lk_waitcount != 0));
    789 		if (error)
    790 			break;
    791 		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
    792 		SETHOLDER(lkp, pid, lid, cpu_id);
    793 #if defined(LOCKDEBUG)
    794 		lkp->lk_lock_file = file;
    795 		lkp->lk_lock_line = line;
    796 #endif
    797 		HAVEIT(lkp);
    798 		lkp->lk_exclusivecount = 1;
    799 		/* XXX unlikely that we'd want this */
    800 		if (extflags & LK_SETRECURSE)
    801 			lkp->lk_recurselevel = 1;
    802 		COUNT(lkp, l, cpu_id, 1);
    803 		break;
    804 
    805 	default:
    806 		INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
    807 		panic("lockmgr: unknown locktype request %d",
    808 		    flags & LK_TYPE_MASK);
    809 		/* NOTREACHED */
    810 	}
    811 	if ((lkp->lk_flags & (LK_WAITDRAIN|LK_SPIN)) == LK_WAITDRAIN &&
    812 	    ((lkp->lk_flags &
    813 	      (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
    814 	     lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
    815 		lkp->lk_flags &= ~LK_WAITDRAIN;
    816 		wakeup((void *)&lkp->lk_flags);
    817 	}
    818 	/*
    819 	 * Note that this panic will be a recursive panic, since
    820 	 * we only set lock_shutdown_noblock above if panicstr != NULL.
    821 	 */
    822 	if (error && lock_shutdown_noblock)
    823 		panic("lockmgr: deadlock (see previous panic)");
    824 
    825 	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
    826 	return (error);
    827 }
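
/*
 * Usage sketch for the request types above (illustrative only; vp_lock
 * and the surrounding error handling are hypothetical): readers take a
 * shared lock, writers an exclusive one, and LK_NOWAIT turns a blocking
 * request into a poll that may fail with EBUSY.
 *
 *	error = lockmgr(&vp_lock, LK_SHARED, NULL);
 *	... read the protected data ...
 *	(void) lockmgr(&vp_lock, LK_RELEASE, NULL);
 *
 *	if (lockmgr(&vp_lock, LK_EXCLUSIVE | LK_NOWAIT, NULL) == EBUSY)
 *		... come back later instead of sleeping ...
 */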
    828 
    829 /*
    830  * For a recursive spinlock held one or more times by the current CPU,
    831  * release all N locks, and return N.
    832  * Intended for use in mi_switch() shortly before context switching.
    833  */
    834 
    835 int
    836 #if defined(LOCKDEBUG)
    837 _spinlock_release_all(__volatile struct lock *lkp, const char *file, int line)
    838 #else
    839 spinlock_release_all(__volatile struct lock *lkp)
    840 #endif
    841 {
    842 	int s, count;
    843 	cpuid_t cpu_id;
    844 
    845 	KASSERT(lkp->lk_flags & LK_SPIN);
    846 
    847 	INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);
    848 
    849 	cpu_id = cpu_number();
    850 	count = lkp->lk_exclusivecount;
    851 
    852 	if (count != 0) {
    853 #ifdef DIAGNOSTIC
    854 		if (WEHOLDIT(lkp, 0, 0, cpu_id) == 0) {
    855 			panic("spinlock_release_all: processor %lu, not "
    856 			    "exclusive lock holder %lu "
    857 			    "unlocking", (long)cpu_id, lkp->lk_cpu);
    858 		}
    859 #endif
    860 		lkp->lk_recurselevel = 0;
    861 		lkp->lk_exclusivecount = 0;
    862 		COUNT_CPU(cpu_id, -count);
    863 		lkp->lk_flags &= ~LK_HAVE_EXCL;
    864 		SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
    865 #if defined(LOCKDEBUG)
    866 		lkp->lk_unlock_file = file;
    867 		lkp->lk_unlock_line = line;
    868 #endif
    869 		DONTHAVEIT(lkp);
    870 	}
    871 #ifdef DIAGNOSTIC
    872 	else if (lkp->lk_sharecount != 0)
    873 		panic("spinlock_release_all: release of shared lock!");
    874 	else
    875 		panic("spinlock_release_all: release of unlocked lock!");
    876 #endif
    877 	INTERLOCK_RELEASE(lkp, LK_SPIN, s);
    878 
    879 	return (count);
    880 }
    881 
    882 /*
    883  * For a recursive spinlock, re-acquire `count' exclusive references
    884  * on behalf of the current CPU.
    885  * Intended for use in mi_switch() right after resuming execution.
    886  */
    887 
    888 void
    889 #if defined(LOCKDEBUG)
    890 _spinlock_acquire_count(__volatile struct lock *lkp, int count,
    891     const char *file, int line)
    892 #else
    893 spinlock_acquire_count(__volatile struct lock *lkp, int count)
    894 #endif
    895 {
    896 	int s, error;
    897 	cpuid_t cpu_id;
    898 
    899 	KASSERT(lkp->lk_flags & LK_SPIN);
    900 
    901 	INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);
    902 
    903 	cpu_id = cpu_number();
    904 
    905 #ifdef DIAGNOSTIC
    906 	if (WEHOLDIT(lkp, LK_NOPROC, 0, cpu_id))
    907 		panic("spinlock_acquire_count: processor %lu already holds lock", (long)cpu_id);
    908 #endif
    909 	/*
    910 	 * Try to acquire the want_exclusive flag.
    911 	 */
    912 	ACQUIRE(lkp, error, LK_SPIN, 0, lkp->lk_flags &
    913 	    (LK_HAVE_EXCL | LK_WANT_EXCL));
    914 	lkp->lk_flags |= LK_WANT_EXCL;
    915 	/*
    916 	 * Wait for shared locks and upgrades to finish.
    917 	 */
    918 	ACQUIRE(lkp, error, LK_SPIN, 0, lkp->lk_sharecount != 0 ||
    919 	    (lkp->lk_flags & LK_WANT_UPGRADE));
    920 	lkp->lk_flags &= ~LK_WANT_EXCL;
    921 	lkp->lk_flags |= LK_HAVE_EXCL;
    922 	SETHOLDER(lkp, LK_NOPROC, 0, cpu_id);
    923 #if defined(LOCKDEBUG)
    924 	lkp->lk_lock_file = file;
    925 	lkp->lk_lock_line = line;
    926 #endif
    927 	HAVEIT(lkp);
    928 	if (lkp->lk_exclusivecount != 0)
    929 		panic("spinlock_acquire_count: non-zero exclusive count");
    930 	lkp->lk_exclusivecount = count;
    931 	lkp->lk_recurselevel = 1;
    932 	COUNT_CPU(cpu_id, count);
    933 
    934 	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
    935 }
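
/*
 * Sketch of the intended pairing of the two routines above (illustrative):
 * mi_switch()-style code drops every recursive reference before switching
 * away and re-acquires the same number once it resumes.
 *
 *	int count;
 *
 *	count = spinlock_release_all(&kernel_lock);
 *	... block / context switch ...
 *	spinlock_acquire_count(&kernel_lock, count);
 */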
    936 
    937 
    938 
    939 /*
    940  * Print out information about the state of a lock.  Used by VOP_PRINT
    941  * routines to display the status of contained locks.
    942  */
    943 void
    944 lockmgr_printinfo(__volatile struct lock *lkp)
    945 {
    946 
    947 	if (lkp->lk_sharecount)
    948 		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
    949 		    lkp->lk_sharecount);
    950 	else if (lkp->lk_flags & LK_HAVE_EXCL) {
    951 		printf(" lock type %s: EXCL (count %d) by ",
    952 		    lkp->lk_wmesg, lkp->lk_exclusivecount);
    953 		if (lkp->lk_flags & LK_SPIN)
    954 			printf("processor %lu", lkp->lk_cpu);
    955 		else
    956 			printf("pid %d.%d", lkp->lk_lockholder,
    957 			    lkp->lk_locklwp);
    958 	} else
    959 		printf(" not locked");
    960 	if ((lkp->lk_flags & LK_SPIN) == 0 && lkp->lk_waitcount > 0)
    961 		printf(" with %d pending", lkp->lk_waitcount);
    962 }
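
/*
 * Example of the output produced by the printf() calls above (the lock
 * name and the counts are hypothetical values):
 *
 *	 lock type vnlock: EXCL (count 1) by pid 123.1 with 2 pending
 */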
    963 
    964 #if defined(LOCKDEBUG) /* { */
    965 TAILQ_HEAD(, simplelock) simplelock_list =
    966     TAILQ_HEAD_INITIALIZER(simplelock_list);
    967 
    968 #if defined(MULTIPROCESSOR) /* { */
    969 struct simplelock simplelock_list_slock = SIMPLELOCK_INITIALIZER;
    970 
    971 #define	SLOCK_LIST_LOCK()						\
    972 	__cpu_simple_lock(&simplelock_list_slock.lock_data)
    973 
    974 #define	SLOCK_LIST_UNLOCK()						\
    975 	__cpu_simple_unlock(&simplelock_list_slock.lock_data)
    976 
    977 #define	SLOCK_COUNT(x)							\
    978 	curcpu()->ci_simple_locks += (x)
    979 #else
    980 u_long simple_locks;
    981 
    982 #define	SLOCK_LIST_LOCK()	/* nothing */
    983 
    984 #define	SLOCK_LIST_UNLOCK()	/* nothing */
    985 
    986 #define	SLOCK_COUNT(x)		simple_locks += (x)
    987 #endif /* MULTIPROCESSOR */ /* } */
    988 
    989 #ifdef MULTIPROCESSOR
    990 #define SLOCK_MP()		lock_printf("on cpu %ld\n", 		\
    991 				    (u_long) cpu_number())
    992 #else
    993 #define SLOCK_MP()		/* nothing */
    994 #endif
    995 
    996 #define	SLOCK_WHERE(str, alp, id, l)					\
    997 do {									\
    998 	lock_printf("\n");						\
    999 	lock_printf(str);						\
   1000 	lock_printf("lock: %p, currently at: %s:%d\n", (alp), (id), (l)); \
   1001 	SLOCK_MP();							\
   1002 	if ((alp)->lock_file != NULL)					\
   1003 		lock_printf("last locked: %s:%d\n", (alp)->lock_file,	\
   1004 		    (alp)->lock_line);					\
   1005 	if ((alp)->unlock_file != NULL)					\
   1006 		lock_printf("last unlocked: %s:%d\n", (alp)->unlock_file, \
   1007 		    (alp)->unlock_line);				\
   1008 	SLOCK_TRACE()							\
   1009 	SLOCK_DEBUGGER();						\
   1010 } while (/*CONSTCOND*/0)
   1011 
   1012 /*
   1013  * Simple lock functions so that the debugger can see from whence
   1014  * they are being called.
   1015  */
   1016 void
   1017 simple_lock_init(struct simplelock *alp)
   1018 {
   1019 
   1020 #if defined(MULTIPROCESSOR) /* { */
   1021 	__cpu_simple_lock_init(&alp->lock_data);
   1022 #else
   1023 	alp->lock_data = __SIMPLELOCK_UNLOCKED;
   1024 #endif /* } */
   1025 	alp->lock_file = NULL;
   1026 	alp->lock_line = 0;
   1027 	alp->unlock_file = NULL;
   1028 	alp->unlock_line = 0;
   1029 	alp->lock_holder = LK_NOCPU;
   1030 }
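
/*
 * Usage sketch (illustrative; foo_slock is hypothetical): a simple lock
 * is initialized once and then used through the simple_lock() and
 * simple_unlock() macros, which expand to the _simple_lock() and
 * _simple_unlock() debugging functions below when the kernel is built
 * with LOCKDEBUG.
 *
 *	struct simplelock foo_slock;
 *
 *	simple_lock_init(&foo_slock);
 *	simple_lock(&foo_slock);
 *	... short critical section, no sleeping ...
 *	simple_unlock(&foo_slock);
 */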
   1031 
   1032 void
   1033 _simple_lock(__volatile struct simplelock *alp, const char *id, int l)
   1034 {
   1035 	cpuid_t cpu_id = cpu_number();
   1036 	int s;
   1037 
   1038 	s = spllock();
   1039 
   1040 	/*
   1041 	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
   1042 	 * don't take any action, and just fall into the normal spin case.
   1043 	 */
   1044 	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
   1045 #if defined(MULTIPROCESSOR) /* { */
   1046 		if (alp->lock_holder == cpu_id) {
   1047 			SLOCK_WHERE("simple_lock: locking against myself\n",
   1048 			    alp, id, l);
   1049 			goto out;
   1050 		}
   1051 #else
   1052 		SLOCK_WHERE("simple_lock: lock held\n", alp, id, l);
   1053 		goto out;
   1054 #endif /* MULTIPROCESSOR */ /* } */
   1055 	}
   1056 
   1057 #if defined(MULTIPROCESSOR) /* { */
   1058 	/* Acquire the lock before modifying any fields. */
   1059 	splx(s);
   1060 	__cpu_simple_lock(&alp->lock_data);
   1061 	s = spllock();
   1062 #else
   1063 	alp->lock_data = __SIMPLELOCK_LOCKED;
   1064 #endif /* } */
   1065 
   1066 	if (alp->lock_holder != LK_NOCPU) {
   1067 		SLOCK_WHERE("simple_lock: uninitialized lock\n",
   1068 		    alp, id, l);
   1069 	}
   1070 	alp->lock_file = id;
   1071 	alp->lock_line = l;
   1072 	alp->lock_holder = cpu_id;
   1073 
   1074 	SLOCK_LIST_LOCK();
   1075 	/* XXX Cast away volatile */
   1076 	TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
   1077 	SLOCK_LIST_UNLOCK();
   1078 
   1079 	SLOCK_COUNT(1);
   1080 
   1081  out:
   1082 	splx(s);
   1083 }
   1084 
   1085 int
   1086 _simple_lock_held(__volatile struct simplelock *alp)
   1087 {
   1088 #if defined(MULTIPROCESSOR) || defined(DIAGNOSTIC)
   1089 	cpuid_t cpu_id = cpu_number();
   1090 #endif
   1091 	int s, locked = 0;
   1092 
   1093 	s = spllock();
   1094 
   1095 #if defined(MULTIPROCESSOR)
   1096 	if (__cpu_simple_lock_try(&alp->lock_data) == 0)
   1097 		locked = (alp->lock_holder == cpu_id);
   1098 	else
   1099 		__cpu_simple_unlock(&alp->lock_data);
   1100 #else
   1101 	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
   1102 		locked = 1;
   1103 		KASSERT(alp->lock_holder == cpu_id);
   1104 	}
   1105 #endif
   1106 
   1107 	splx(s);
   1108 
   1109 	return (locked);
   1110 }
   1111 
   1112 int
   1113 _simple_lock_try(__volatile struct simplelock *alp, const char *id, int l)
   1114 {
   1115 	cpuid_t cpu_id = cpu_number();
   1116 	int s, rv = 0;
   1117 
   1118 	s = spllock();
   1119 
   1120 	/*
   1121 	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
   1122 	 * don't take any action.
   1123 	 */
   1124 #if defined(MULTIPROCESSOR) /* { */
   1125 	if ((rv = __cpu_simple_lock_try(&alp->lock_data)) == 0) {
   1126 		if (alp->lock_holder == cpu_id)
   1127 			SLOCK_WHERE("simple_lock_try: locking against myself\n",
   1128 			    alp, id, l);
   1129 		goto out;
   1130 	}
   1131 #else
   1132 	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
   1133 		SLOCK_WHERE("simple_lock_try: lock held\n", alp, id, l);
   1134 		goto out;
   1135 	}
   1136 	alp->lock_data = __SIMPLELOCK_LOCKED;
   1137 #endif /* MULTIPROCESSOR */ /* } */
   1138 
   1139 	/*
   1140 	 * At this point, we have acquired the lock.
   1141 	 */
   1142 
   1143 	rv = 1;
   1144 
   1145 	alp->lock_file = id;
   1146 	alp->lock_line = l;
   1147 	alp->lock_holder = cpu_id;
   1148 
   1149 	SLOCK_LIST_LOCK();
   1150 	/* XXX Cast away volatile. */
   1151 	TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
   1152 	SLOCK_LIST_UNLOCK();
   1153 
   1154 	SLOCK_COUNT(1);
   1155 
   1156  out:
   1157 	splx(s);
   1158 	return (rv);
   1159 }
   1160 
   1161 void
   1162 _simple_unlock(__volatile struct simplelock *alp, const char *id, int l)
   1163 {
   1164 	int s;
   1165 
   1166 	s = spllock();
   1167 
   1168 	/*
   1169 	 * MULTIPROCESSOR case: This is `safe' because we think we hold
   1170 	 * the lock, and if we don't, we don't take any action.
   1171 	 */
   1172 	if (alp->lock_data == __SIMPLELOCK_UNLOCKED) {
   1173 		SLOCK_WHERE("simple_unlock: lock not held\n",
   1174 		    alp, id, l);
   1175 		goto out;
   1176 	}
   1177 
   1178 	SLOCK_LIST_LOCK();
   1179 	TAILQ_REMOVE(&simplelock_list, alp, list);
   1180 	SLOCK_LIST_UNLOCK();
   1181 
   1182 	SLOCK_COUNT(-1);
   1183 
   1184 	alp->list.tqe_next = NULL;	/* sanity */
   1185 	alp->list.tqe_prev = NULL;	/* sanity */
   1186 
   1187 	alp->unlock_file = id;
   1188 	alp->unlock_line = l;
   1189 
   1190 #if defined(MULTIPROCESSOR) /* { */
   1191 	alp->lock_holder = LK_NOCPU;
   1192 	/* Now that we've modified all fields, release the lock. */
   1193 	__cpu_simple_unlock(&alp->lock_data);
   1194 #else
   1195 	alp->lock_data = __SIMPLELOCK_UNLOCKED;
   1196 	KASSERT(alp->lock_holder == cpu_number());
   1197 	alp->lock_holder = LK_NOCPU;
   1198 #endif /* } */
   1199 
   1200  out:
   1201 	splx(s);
   1202 }
   1203 
   1204 void
   1205 simple_lock_dump(void)
   1206 {
   1207 	struct simplelock *alp;
   1208 	int s;
   1209 
   1210 	s = spllock();
   1211 	SLOCK_LIST_LOCK();
   1212 	lock_printf("all simple locks:\n");
   1213 	TAILQ_FOREACH(alp, &simplelock_list, list) {
   1214 		lock_printf("%p CPU %lu %s:%d\n", alp, alp->lock_holder,
   1215 		    alp->lock_file, alp->lock_line);
   1216 	}
   1217 	SLOCK_LIST_UNLOCK();
   1218 	splx(s);
   1219 }
   1220 
   1221 void
   1222 simple_lock_freecheck(void *start, void *end)
   1223 {
   1224 	struct simplelock *alp;
   1225 	int s;
   1226 
   1227 	s = spllock();
   1228 	SLOCK_LIST_LOCK();
   1229 	TAILQ_FOREACH(alp, &simplelock_list, list) {
   1230 		if ((void *)alp >= start && (void *)alp < end) {
   1231 			lock_printf("freeing simple_lock %p CPU %lu %s:%d\n",
   1232 			    alp, alp->lock_holder, alp->lock_file,
   1233 			    alp->lock_line);
   1234 			SLOCK_DEBUGGER();
   1235 		}
   1236 	}
   1237 	SLOCK_LIST_UNLOCK();
   1238 	splx(s);
   1239 }
   1240 
   1241 /*
   1242  * We must be holding exactly one lock: the sched_lock.
   1243  */
   1244 
   1245 void
   1246 simple_lock_switchcheck(void)
   1247 {
   1248 
   1249 	simple_lock_only_held(&sched_lock, "switching");
   1250 }
   1251 
   1252 void
   1253 simple_lock_only_held(volatile struct simplelock *lp, const char *where)
   1254 {
   1255 	struct simplelock *alp;
   1256 	cpuid_t cpu_id = cpu_number();
   1257 	int s;
   1258 
   1259 	if (lp) {
   1260 		LOCK_ASSERT(simple_lock_held(lp));
   1261 	}
   1262 	s = spllock();
   1263 	SLOCK_LIST_LOCK();
   1264 	TAILQ_FOREACH(alp, &simplelock_list, list) {
   1265 		if (alp == lp)
   1266 			continue;
   1267 		if (alp->lock_holder == cpu_id)
   1268 			break;
   1269 	}
   1270 	SLOCK_LIST_UNLOCK();
   1271 	splx(s);
   1272 
   1273 	if (alp != NULL) {
   1274 		lock_printf("\n%s with held simple_lock %p "
   1275 		    "CPU %lu %s:%d\n",
   1276 		    where, alp, alp->lock_holder, alp->lock_file,
   1277 		    alp->lock_line);
   1278 		SLOCK_TRACE();
   1279 		SLOCK_DEBUGGER();
   1280 	}
   1281 }
   1282 #endif /* LOCKDEBUG */ /* } */
   1283 
   1284 #if defined(MULTIPROCESSOR)
   1285 /*
   1286  * Functions for manipulating the kernel_lock.  We put them here
   1287  * so that they show up in profiles.
   1288  */
   1289 
   1290 struct lock kernel_lock;
   1291 
   1292 void
   1293 _kernel_lock_init(void)
   1294 {
   1295 
   1296 	spinlockinit(&kernel_lock, "klock", 0);
   1297 }
   1298 
   1299 /*
   1300  * Acquire/release the kernel lock.  Intended for use in the scheduler
   1301  * and the lower half of the kernel.
   1302  */
   1303 void
   1304 _kernel_lock(int flag)
   1305 {
   1306 
   1307 	SCHED_ASSERT_UNLOCKED();
   1308 	spinlockmgr(&kernel_lock, flag, 0);
   1309 }
   1310 
   1311 void
   1312 _kernel_unlock(void)
   1313 {
   1314 
   1315 	spinlockmgr(&kernel_lock, LK_RELEASE, 0);
   1316 }
   1317 
   1318 /*
   1319  * Acquire/release the kernel_lock on behalf of a process.  Intended for
   1320  * use in the top half of the kernel.
   1321  */
   1322 void
   1323 _kernel_proc_lock(struct lwp *l)
   1324 {
   1325 
   1326 	SCHED_ASSERT_UNLOCKED();
   1327 	spinlockmgr(&kernel_lock, LK_EXCLUSIVE, 0);
   1328 	l->l_flag |= L_BIGLOCK;
   1329 }
   1330 
   1331 void
   1332 _kernel_proc_unlock(struct lwp *l)
   1333 {
   1334 
   1335 	l->l_flag &= ~L_BIGLOCK;
   1336 	spinlockmgr(&kernel_lock, LK_RELEASE, 0);
   1337 }
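
/*
 * Usage sketch (illustrative; l is the current LWP): the top half brackets
 * MP-unsafe work with the per-process variants above, while the scheduler
 * and lower half use _kernel_lock()/_kernel_unlock() directly.
 *
 *	_kernel_proc_lock(l);
 *	... run code that still assumes the big lock ...
 *	_kernel_proc_unlock(l);
 */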
   1338 #endif /* MULTIPROCESSOR */
   1339