kern_lock.c revision 1.79
      1 /*	$NetBSD: kern_lock.c,v 1.79 2004/05/30 20:49:04 yamt Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
      9  * NASA Ames Research Center.
     10  *
     11  * This code is derived from software contributed to The NetBSD Foundation
     12  * by Ross Harvey.
     13  *
     14  * Redistribution and use in source and binary forms, with or without
     15  * modification, are permitted provided that the following conditions
     16  * are met:
     17  * 1. Redistributions of source code must retain the above copyright
     18  *    notice, this list of conditions and the following disclaimer.
     19  * 2. Redistributions in binary form must reproduce the above copyright
     20  *    notice, this list of conditions and the following disclaimer in the
     21  *    documentation and/or other materials provided with the distribution.
     22  * 3. All advertising materials mentioning features or use of this software
     23  *    must display the following acknowledgement:
     24  *	This product includes software developed by the NetBSD
     25  *	Foundation, Inc. and its contributors.
     26  * 4. Neither the name of The NetBSD Foundation nor the names of its
     27  *    contributors may be used to endorse or promote products derived
     28  *    from this software without specific prior written permission.
     29  *
     30  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     31  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     32  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     33  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     34  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     35  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     36  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     37  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     38  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     39  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     40  * POSSIBILITY OF SUCH DAMAGE.
     41  */
     42 
     43 /*
     44  * Copyright (c) 1995
     45  *	The Regents of the University of California.  All rights reserved.
     46  *
     47  * This code contains ideas from software contributed to Berkeley by
     48  * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
     49  * System project at Carnegie-Mellon University.
     50  *
     51  * Redistribution and use in source and binary forms, with or without
     52  * modification, are permitted provided that the following conditions
     53  * are met:
     54  * 1. Redistributions of source code must retain the above copyright
     55  *    notice, this list of conditions and the following disclaimer.
     56  * 2. Redistributions in binary form must reproduce the above copyright
     57  *    notice, this list of conditions and the following disclaimer in the
     58  *    documentation and/or other materials provided with the distribution.
     59  * 3. Neither the name of the University nor the names of its contributors
     60  *    may be used to endorse or promote products derived from this software
     61  *    without specific prior written permission.
     62  *
     63  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     64  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     65  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     66  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     67  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     68  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     69  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     70  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     71  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     72  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     73  * SUCH DAMAGE.
     74  *
     75  *	@(#)kern_lock.c	8.18 (Berkeley) 5/21/95
     76  */
     77 
     78 #include <sys/cdefs.h>
     79 __KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.79 2004/05/30 20:49:04 yamt Exp $");
     80 
     81 #include "opt_multiprocessor.h"
     82 #include "opt_lockdebug.h"
     83 #include "opt_ddb.h"
     84 
     85 #include <sys/param.h>
     86 #include <sys/proc.h>
     87 #include <sys/lock.h>
     88 #include <sys/systm.h>
     89 #include <machine/cpu.h>
     90 
     91 #if defined(LOCKDEBUG)
     92 #include <sys/syslog.h>
     93 /*
      94  * Note that stdarg.h and the ANSI-style va_start macro are used for both
      95  * ANSI and traditional C compiles.
      96  * XXX: this requires that stdarg.h define va_alist and va_dcl.
     97  */
     98 #include <machine/stdarg.h>
     99 
    100 void	lock_printf(const char *fmt, ...)
    101     __attribute__((__format__(__printf__,1,2)));
    102 
    103 static int acquire(__volatile struct lock **, int *, int, int, int);
    104 
    105 int	lock_debug_syslog = 0;	/* defaults to printf, but can be patched */
    106 
    107 #ifdef DDB
    108 #include <ddb/ddbvar.h>
    109 #include <machine/db_machdep.h>
    110 #include <ddb/db_command.h>
    111 #include <ddb/db_interface.h>
    112 #endif
    113 #endif
    114 
    115 /*
    116  * Locking primitives implementation.
    117  * Locks provide shared/exclusive synchronization.
    118  */
    119 
    120 #if defined(LOCKDEBUG) || defined(DIAGNOSTIC) /* { */
    121 #if defined(MULTIPROCESSOR) /* { */
    122 #define	COUNT_CPU(cpu_id, x)						\
    123 	curcpu()->ci_spin_locks += (x)
    124 #else
    125 u_long	spin_locks;
    126 #define	COUNT_CPU(cpu_id, x)	spin_locks += (x)
    127 #endif /* MULTIPROCESSOR */ /* } */
    128 
    129 #define	COUNT(lkp, l, cpu_id, x)					\
    130 do {									\
    131 	if ((lkp)->lk_flags & LK_SPIN)					\
    132 		COUNT_CPU((cpu_id), (x));				\
    133 	else								\
    134 		(l)->l_locks += (x);					\
    135 } while (/*CONSTCOND*/0)
    136 #else
    137 #define COUNT(lkp, p, cpu_id, x)
    138 #define COUNT_CPU(cpu_id, x)
    139 #endif /* LOCKDEBUG || DIAGNOSTIC */ /* } */
    140 
    141 #ifndef SPINLOCK_SPIN_HOOK		/* from <machine/lock.h> */
    142 #define	SPINLOCK_SPIN_HOOK		/* nothing */
    143 #endif
    144 
    145 #define	INTERLOCK_ACQUIRE(lkp, flags, s)				\
    146 do {									\
    147 	if ((flags) & LK_SPIN)						\
    148 		s = spllock();						\
    149 	simple_lock(&(lkp)->lk_interlock);				\
    150 } while (/*CONSTCOND*/ 0)
    151 
    152 #define	INTERLOCK_RELEASE(lkp, flags, s)				\
    153 do {									\
    154 	simple_unlock(&(lkp)->lk_interlock);				\
    155 	if ((flags) & LK_SPIN)						\
    156 		splx(s);						\
    157 } while (/*CONSTCOND*/ 0)
    158 
    159 #ifdef DDB /* { */
    160 #ifdef MULTIPROCESSOR
    161 int simple_lock_debugger = 1;	/* more serious on MP */
    162 #else
    163 int simple_lock_debugger = 0;
    164 #endif
    165 #define	SLOCK_DEBUGGER()	if (simple_lock_debugger) Debugger()
    166 #define	SLOCK_TRACE()							\
    167 	db_stack_trace_print((db_expr_t)__builtin_frame_address(0),	\
    168 	    TRUE, 65535, "", lock_printf);
    169 #else
    170 #define	SLOCK_DEBUGGER()	/* nothing */
    171 #define	SLOCK_TRACE()		/* nothing */
    172 #endif /* } */
    173 
    174 #if defined(LOCKDEBUG)
    175 #if defined(DDB)
    176 #define	SPINLOCK_SPINCHECK_DEBUGGER	Debugger()
    177 #else
    178 #define	SPINLOCK_SPINCHECK_DEBUGGER	/* nothing */
    179 #endif
    180 
    181 #define	SPINLOCK_SPINCHECK_DECL						\
    182 	/* 32-bits of count -- wrap constitutes a "spinout" */		\
    183 	uint32_t __spinc = 0
    184 
    185 #define	SPINLOCK_SPINCHECK						\
    186 do {									\
    187 	if (++__spinc == 0) {						\
    188 		lock_printf("LK_SPIN spinout, excl %d, share %d\n",	\
    189 		    lkp->lk_exclusivecount, lkp->lk_sharecount);	\
    190 		if (lkp->lk_exclusivecount)				\
    191 			lock_printf("held by CPU %lu\n",		\
    192 			    (u_long) lkp->lk_cpu);			\
    193 		if (lkp->lk_lock_file)					\
    194 			lock_printf("last locked at %s:%d\n",		\
    195 			    lkp->lk_lock_file, lkp->lk_lock_line);	\
    196 		if (lkp->lk_unlock_file)				\
    197 			lock_printf("last unlocked at %s:%d\n",		\
    198 			    lkp->lk_unlock_file, lkp->lk_unlock_line);	\
    199 		SLOCK_TRACE();						\
    200 		SPINLOCK_SPINCHECK_DEBUGGER;				\
    201 	}								\
    202 } while (/*CONSTCOND*/ 0)
    203 #else
    204 #define	SPINLOCK_SPINCHECK_DECL			/* nothing */
    205 #define	SPINLOCK_SPINCHECK			/* nothing */
     206 #endif /* LOCKDEBUG */
    207 
    208 /*
    209  * Acquire a resource.
    210  */
    211 static int
    212 acquire(__volatile struct lock **lkpp, int *s, int extflags,
    213     int drain, int wanted)
    214 {
    215 	int error;
    216 	__volatile struct lock *lkp = *lkpp;
    217 
    218 	KASSERT(drain || (wanted & LK_WAIT_NONZERO) == 0);
    219 
    220 	if (extflags & LK_SPIN) {
    221 		int interlocked;
    222 
    223 		SPINLOCK_SPINCHECK_DECL;
    224 
    225 		if (!drain) {
    226 			lkp->lk_waitcount++;
    227 			lkp->lk_flags |= LK_WAIT_NONZERO;
    228 		}
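		/*
		 * Spin until none of the wanted bits are set.  The
		 * interlock is dropped while spinning so that the lock
		 * holder can make progress, and re-acquired before the
		 * wanted bits are tested again.
		 */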
    229 		for (interlocked = 1;;) {
    230 			SPINLOCK_SPINCHECK;
    231 			if ((lkp->lk_flags & wanted) != 0) {
    232 				if (interlocked) {
    233 					INTERLOCK_RELEASE(lkp, LK_SPIN, *s);
    234 					interlocked = 0;
    235 				}
    236 				SPINLOCK_SPIN_HOOK;
    237 			} else if (interlocked) {
    238 				break;
    239 			} else {
    240 				INTERLOCK_ACQUIRE(lkp, LK_SPIN, *s);
    241 				interlocked = 1;
    242 			}
    243 		}
    244 		if (!drain) {
    245 			lkp->lk_waitcount--;
    246 			if (lkp->lk_waitcount == 0)
    247 				lkp->lk_flags &= ~LK_WAIT_NONZERO;
    248 		}
    249 		KASSERT((lkp->lk_flags & wanted) == 0);
    250 		error = 0;	/* sanity */
    251 	} else {
    252 		for (error = 0; (lkp->lk_flags & wanted) != 0; ) {
    253 			if (drain)
    254 				lkp->lk_flags |= LK_WAITDRAIN;
    255 			else {
    256 				lkp->lk_waitcount++;
    257 				lkp->lk_flags |= LK_WAIT_NONZERO;
    258 			}
    259 			/* XXX Cast away volatile. */
    260 			error = ltsleep(drain ?
    261 			    (void *)&lkp->lk_flags :
    262 			    (void *)lkp, lkp->lk_prio,
    263 			    lkp->lk_wmesg, lkp->lk_timo, &lkp->lk_interlock);
    264 			if (!drain) {
    265 				lkp->lk_waitcount--;
    266 				if (lkp->lk_waitcount == 0)
    267 					lkp->lk_flags &= ~LK_WAIT_NONZERO;
    268 			}
    269 			if (error)
    270 				break;
    271 			if (extflags & LK_SLEEPFAIL) {
    272 				error = ENOLCK;
    273 				break;
    274 			}
    275 			if (lkp->lk_newlock != NULL) {
    276 				simple_lock(&lkp->lk_newlock->lk_interlock);
    277 				simple_unlock(&lkp->lk_interlock);
    278 				if (lkp->lk_waitcount == 0)
    279 					wakeup((void *)&lkp->lk_newlock);
    280 				*lkpp = lkp = lkp->lk_newlock;
    281 			}
    282 		}
    283 	}
    284 
    285 	return error;
    286 }
    287 
    288 #define	SETHOLDER(lkp, pid, lid, cpu_id)				\
    289 do {									\
    290 	if ((lkp)->lk_flags & LK_SPIN)					\
    291 		(lkp)->lk_cpu = cpu_id;					\
    292 	else {								\
    293 		(lkp)->lk_lockholder = pid;				\
    294 		(lkp)->lk_locklwp = lid;				\
    295 	}								\
    296 } while (/*CONSTCOND*/0)
    297 
    298 #define	WEHOLDIT(lkp, pid, lid, cpu_id)					\
    299 	(((lkp)->lk_flags & LK_SPIN) != 0 ?				\
    300 	 ((lkp)->lk_cpu == (cpu_id)) :					\
    301 	 ((lkp)->lk_lockholder == (pid) && (lkp)->lk_locklwp == (lid)))
    302 
    303 #define	WAKEUP_WAITER(lkp)						\
    304 do {									\
    305 	if (((lkp)->lk_flags & (LK_SPIN | LK_WAIT_NONZERO)) ==		\
    306 	    LK_WAIT_NONZERO) {						\
    307 		/* XXX Cast away volatile. */				\
    308 		wakeup((void *)(lkp));					\
    309 	}								\
    310 } while (/*CONSTCOND*/0)
    311 
    312 #if defined(LOCKDEBUG) /* { */
    313 #if defined(MULTIPROCESSOR) /* { */
    314 struct simplelock spinlock_list_slock = SIMPLELOCK_INITIALIZER;
    315 
    316 #define	SPINLOCK_LIST_LOCK()						\
    317 	__cpu_simple_lock(&spinlock_list_slock.lock_data)
    318 
    319 #define	SPINLOCK_LIST_UNLOCK()						\
    320 	__cpu_simple_unlock(&spinlock_list_slock.lock_data)
    321 #else
    322 #define	SPINLOCK_LIST_LOCK()	/* nothing */
    323 
    324 #define	SPINLOCK_LIST_UNLOCK()	/* nothing */
    325 #endif /* MULTIPROCESSOR */ /* } */
    326 
    327 TAILQ_HEAD(, lock) spinlock_list =
    328     TAILQ_HEAD_INITIALIZER(spinlock_list);
    329 
    330 #define	HAVEIT(lkp)							\
    331 do {									\
    332 	if ((lkp)->lk_flags & LK_SPIN) {				\
    333 		int s = spllock();					\
    334 		SPINLOCK_LIST_LOCK();					\
    335 		/* XXX Cast away volatile. */				\
    336 		TAILQ_INSERT_TAIL(&spinlock_list, (struct lock *)(lkp),	\
    337 		    lk_list);						\
    338 		SPINLOCK_LIST_UNLOCK();					\
    339 		splx(s);						\
    340 	}								\
    341 } while (/*CONSTCOND*/0)
    342 
    343 #define	DONTHAVEIT(lkp)							\
    344 do {									\
    345 	if ((lkp)->lk_flags & LK_SPIN) {				\
    346 		int s = spllock();					\
    347 		SPINLOCK_LIST_LOCK();					\
    348 		/* XXX Cast away volatile. */				\
    349 		TAILQ_REMOVE(&spinlock_list, (struct lock *)(lkp),	\
    350 		    lk_list);						\
    351 		SPINLOCK_LIST_UNLOCK();					\
    352 		splx(s);						\
    353 	}								\
    354 } while (/*CONSTCOND*/0)
    355 #else
    356 #define	HAVEIT(lkp)		/* nothing */
    357 
    358 #define	DONTHAVEIT(lkp)		/* nothing */
    359 #endif /* LOCKDEBUG */ /* } */
    360 
    361 #if defined(LOCKDEBUG)
    362 /*
    363  * Lock debug printing routine; can be configured to print to console
    364  * or log to syslog.
    365  */
    366 void
    367 lock_printf(const char *fmt, ...)
    368 {
    369 	char b[150];
    370 	va_list ap;
    371 
    372 	va_start(ap, fmt);
    373 	if (lock_debug_syslog)
    374 		vlog(LOG_DEBUG, fmt, ap);
    375 	else {
    376 		vsnprintf(b, sizeof(b), fmt, ap);
    377 		printf_nolog("%s", b);
    378 	}
    379 	va_end(ap);
    380 }
    381 #endif /* LOCKDEBUG */
    382 
    383 /*
    384  * Transfer any waiting processes from one lock to another.
    385  */
    386 void
    387 transferlockers(struct lock *from, struct lock *to)
    388 {
    389 
    390 	KASSERT(from != to);
    391 	KASSERT((from->lk_flags & LK_WAITDRAIN) == 0);
    392 	if (from->lk_waitcount == 0)
    393 		return;
    394 	from->lk_newlock = to;
    395 	wakeup((void *)from);
    396 	tsleep((void *)&from->lk_newlock, from->lk_prio, "lkxfer", 0);
    397 	from->lk_newlock = NULL;
    398 	from->lk_flags &= ~(LK_WANT_EXCL | LK_WANT_UPGRADE);
    399 	KASSERT(from->lk_waitcount == 0);
    400 }
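
/*
 * A hedged sketch of the call shape for transferlockers(); "old_lk",
 * "new_lk" and example_replace_lock() are hypothetical, and the real
 * callers impose their own locking context.  Not compiled.
 */
#if 0
static void
example_replace_lock(struct lock *old_lk, struct lock *new_lk)
{

	lockinit(new_lk, PVFS, "newlk", 0, 0);
	transferlockers(old_lk, new_lk);
	/* LWPs that were asleep on old_lk now contend for new_lk. */
}
#endif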
    401 
    402 
    403 /*
    404  * Initialize a lock; required before use.
    405  */
    406 void
    407 lockinit(struct lock *lkp, int prio, const char *wmesg, int timo, int flags)
    408 {
    409 
    410 	memset(lkp, 0, sizeof(struct lock));
    411 	simple_lock_init(&lkp->lk_interlock);
    412 	lkp->lk_flags = flags & LK_EXTFLG_MASK;
    413 	if (flags & LK_SPIN)
    414 		lkp->lk_cpu = LK_NOCPU;
    415 	else {
    416 		lkp->lk_lockholder = LK_NOPROC;
    417 		lkp->lk_newlock = NULL;
    418 		lkp->lk_prio = prio;
    419 		lkp->lk_timo = timo;
    420 	}
    421 	lkp->lk_wmesg = wmesg;	/* just a name for spin locks */
    422 #if defined(LOCKDEBUG)
    423 	lkp->lk_lock_file = NULL;
    424 	lkp->lk_unlock_file = NULL;
    425 #endif
    426 }
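
/*
 * A minimal initialization sketch, assuming made-up lock names
 * ("example_lk", "example_slk") and wait messages.  Not compiled.
 */
#if 0
struct lock example_lk;		/* sleep lock */
struct lock example_slk;	/* spin lock */

static void
example_lock_setup(void)
{

	/* Sleepers wait at priority PRIBIO, with no timeout. */
	lockinit(&example_lk, PRIBIO, "exlock", 0, 0);
	/* LK_SPIN selects spinning; prio and timo are ignored. */
	lockinit(&example_slk, 0, "exspin", 0, LK_SPIN);
}
#endif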
    427 
    428 /*
    429  * Determine the status of a lock.
    430  */
    431 int
    432 lockstatus(struct lock *lkp)
    433 {
    434 	int s = 0; /* XXX: gcc */
    435 	int lock_type = 0;
    436 	struct lwp *l = curlwp; /* XXX */
    437 	pid_t pid;
    438 	lwpid_t lid;
    439 	cpuid_t cpu_id;
    440 
    441 	if ((lkp->lk_flags & LK_SPIN) || l == NULL) {
    442 		cpu_id = cpu_number();
    443 		pid = LK_KERNPROC;
    444 		lid = 0;
    445 	} else {
    446 		cpu_id = LK_NOCPU;
    447 		pid = l->l_proc->p_pid;
    448 		lid = l->l_lid;
    449 	}
    450 
    451 	INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
    452 	if (lkp->lk_exclusivecount != 0) {
    453 		if (WEHOLDIT(lkp, pid, lid, cpu_id))
    454 			lock_type = LK_EXCLUSIVE;
    455 		else
    456 			lock_type = LK_EXCLOTHER;
    457 	} else if (lkp->lk_sharecount != 0)
    458 		lock_type = LK_SHARED;
    459 	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
    460 	return (lock_type);
    461 }
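
/*
 * A small sketch of using the lockstatus() return value; the helper
 * name is made up.  Not compiled.
 */
#if 0
static void
example_assert_excl(struct lock *lkp)
{

	if (lockstatus(lkp) != LK_EXCLUSIVE)
		panic("example: lock not held exclusively");
}
#endif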
    462 
    463 #if defined(LOCKDEBUG) || defined(DIAGNOSTIC)
    464 /*
    465  * Make sure no spin locks are held by a CPU that is about
    466  * to context switch.
    467  */
    468 void
    469 spinlock_switchcheck(void)
    470 {
    471 	u_long cnt;
    472 	int s;
    473 
    474 	s = spllock();
    475 #if defined(MULTIPROCESSOR)
    476 	cnt = curcpu()->ci_spin_locks;
    477 #else
    478 	cnt = spin_locks;
    479 #endif
    480 	splx(s);
    481 
    482 	if (cnt != 0)
    483 		panic("spinlock_switchcheck: CPU %lu has %lu spin locks",
    484 		    (u_long) cpu_number(), cnt);
    485 }
    486 #endif /* LOCKDEBUG || DIAGNOSTIC */
    487 
    488 /*
    489  * Locks and IPLs (interrupt priority levels):
    490  *
    491  * Locks which may be taken from interrupt context must be handled
    492  * very carefully; you must spl to the highest IPL where the lock
    493  * is needed before acquiring the lock.
    494  *
    495  * It is also important to avoid deadlock, since certain (very high
    496  * priority) interrupts are often needed to keep the system as a whole
    497  * from deadlocking, and must not be blocked while you are spinning
    498  * waiting for a lower-priority lock.
    499  *
    500  * In addition, the lock-debugging hooks themselves need to use locks!
    501  *
     502  * A raw __cpu_simple_lock may be used from interrupts as long as it
    503  * is acquired and held at a single IPL.
    504  *
    505  * A simple_lock (which is a __cpu_simple_lock wrapped with some
    506  * debugging hooks) may be used at or below spllock(), which is
    507  * typically at or just below splhigh() (i.e. blocks everything
    508  * but certain machine-dependent extremely high priority interrupts).
    509  *
    510  * spinlockmgr spinlocks should be used at or below splsched().
    511  *
    512  * Some platforms may have interrupts of higher priority than splsched(),
    513  * including hard serial interrupts, inter-processor interrupts, and
    514  * kernel debugger traps.
    515  */
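
/*
 * A sketch of the raw __cpu_simple_lock discipline described above,
 * assuming splvm() is the highest IPL at which the (hypothetical)
 * lock is used.  Not compiled.
 */
#if 0
static __cpu_simple_lock_t example_intr_lock;

static void
example_touch_intr_state(void)
{
	int s;

	s = splvm();	/* block all users of the lock first */
	__cpu_simple_lock(&example_intr_lock);
	/* ... manipulate state shared with the interrupt handler ... */
	__cpu_simple_unlock(&example_intr_lock);
	splx(s);
}
#endif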
    516 
    517 /*
    518  * XXX XXX kludge around another kludge..
    519  *
    520  * vfs_shutdown() may be called from interrupt context, either as a result
    521  * of a panic, or from the debugger.   It proceeds to call
     522  * sys_sync(&proc0, ...), pretending it's running on behalf of proc0.
    523  *
    524  * We would like to make an attempt to sync the filesystems in this case, so
    525  * if this happens, we treat attempts to acquire locks specially.
    526  * All locks are acquired on behalf of proc0.
    527  *
     528  * If we've already panicked, we don't block waiting for locks, but
    529  * just barge right ahead since we're already going down in flames.
    530  */
    531 
    532 /*
    533  * Set, change, or release a lock.
    534  *
    535  * Shared requests increment the shared count. Exclusive requests set the
    536  * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
    537  * accepted shared locks and shared-to-exclusive upgrades to go away.
    538  */
    539 int
    540 #if defined(LOCKDEBUG)
    541 _lockmgr(__volatile struct lock *lkp, u_int flags,
    542     struct simplelock *interlkp, const char *file, int line)
    543 #else
    544 lockmgr(__volatile struct lock *lkp, u_int flags,
    545     struct simplelock *interlkp)
    546 #endif
    547 {
    548 	int error;
    549 	pid_t pid;
    550 	lwpid_t lid;
    551 	int extflags;
    552 	cpuid_t cpu_id;
    553 	struct lwp *l = curlwp;
    554 	int lock_shutdown_noblock = 0;
    555 	int s = 0;
    556 
    557 	error = 0;
    558 
    559 	KASSERT((flags & LK_RETRY) == 0);
    560 
    561 	INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
    562 	if (flags & LK_INTERLOCK)
    563 		simple_unlock(interlkp);
    564 	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
    565 
    566 #ifdef DIAGNOSTIC /* { */
    567 	/*
    568 	 * Don't allow spins on sleep locks and don't allow sleeps
    569 	 * on spin locks.
    570 	 */
    571 	if ((flags ^ lkp->lk_flags) & LK_SPIN)
    572 		panic("lockmgr: sleep/spin mismatch");
    573 #endif /* } */
    574 
    575 	if (extflags & LK_SPIN) {
    576 		pid = LK_KERNPROC;
    577 		lid = 0;
    578 	} else {
    579 		if (l == NULL) {
    580 			if (!doing_shutdown) {
    581 				panic("lockmgr: no context");
    582 			} else {
    583 				l = &lwp0;
    584 				if (panicstr && (!(flags & LK_NOWAIT))) {
    585 					flags |= LK_NOWAIT;
    586 					lock_shutdown_noblock = 1;
    587 				}
    588 			}
    589 		}
    590 		lid = l->l_lid;
    591 		pid = l->l_proc->p_pid;
    592 	}
    593 	cpu_id = cpu_number();
    594 
    595 	/*
    596 	 * Once a lock has drained, the LK_DRAINING flag is set and an
    597 	 * exclusive lock is returned. The only valid operation thereafter
    598 	 * is a single release of that exclusive lock. This final release
    599 	 * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
    600 	 * further requests of any sort will result in a panic. The bits
    601 	 * selected for these two flags are chosen so that they will be set
    602 	 * in memory that is freed (freed memory is filled with 0xdeadbeef).
    603 	 * The final release is permitted to give a new lease on life to
    604 	 * the lock by specifying LK_REENABLE.
    605 	 */
    606 	if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
    607 #ifdef DIAGNOSTIC /* { */
    608 		if (lkp->lk_flags & LK_DRAINED)
    609 			panic("lockmgr: using decommissioned lock");
    610 		if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
    611 		    WEHOLDIT(lkp, pid, lid, cpu_id) == 0)
    612 			panic("lockmgr: non-release on draining lock: %d",
    613 			    flags & LK_TYPE_MASK);
    614 #endif /* DIAGNOSTIC */ /* } */
    615 		lkp->lk_flags &= ~LK_DRAINING;
    616 		if ((flags & LK_REENABLE) == 0)
    617 			lkp->lk_flags |= LK_DRAINED;
    618 	}
    619 
    620 	switch (flags & LK_TYPE_MASK) {
    621 
    622 	case LK_SHARED:
    623 		if (WEHOLDIT(lkp, pid, lid, cpu_id) == 0) {
    624 			/*
    625 			 * If just polling, check to see if we will block.
    626 			 */
    627 			if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
    628 			    (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
    629 				error = EBUSY;
    630 				break;
    631 			}
    632 			/*
    633 			 * Wait for exclusive locks and upgrades to clear.
    634 			 */
    635 			error = acquire(&lkp, &s, extflags, 0,
    636 			    LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE);
    637 			if (error)
    638 				break;
    639 			lkp->lk_sharecount++;
    640 			lkp->lk_flags |= LK_SHARE_NONZERO;
    641 			COUNT(lkp, l, cpu_id, 1);
    642 			break;
    643 		}
    644 		/*
    645 		 * We hold an exclusive lock, so downgrade it to shared.
    646 		 * An alternative would be to fail with EDEADLK.
    647 		 */
    648 		lkp->lk_sharecount++;
    649 		lkp->lk_flags |= LK_SHARE_NONZERO;
    650 		COUNT(lkp, l, cpu_id, 1);
    651 		/* fall into downgrade */
    652 
    653 	case LK_DOWNGRADE:
    654 		if (WEHOLDIT(lkp, pid, lid, cpu_id) == 0 ||
    655 		    lkp->lk_exclusivecount == 0)
    656 			panic("lockmgr: not holding exclusive lock");
    657 		lkp->lk_sharecount += lkp->lk_exclusivecount;
    658 		lkp->lk_flags |= LK_SHARE_NONZERO;
    659 		lkp->lk_exclusivecount = 0;
    660 		lkp->lk_recurselevel = 0;
    661 		lkp->lk_flags &= ~LK_HAVE_EXCL;
    662 		SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
    663 #if defined(LOCKDEBUG)
    664 		lkp->lk_unlock_file = file;
    665 		lkp->lk_unlock_line = line;
    666 #endif
    667 		DONTHAVEIT(lkp);
    668 		WAKEUP_WAITER(lkp);
    669 		break;
    670 
    671 	case LK_EXCLUPGRADE:
    672 		/*
    673 		 * If another process is ahead of us to get an upgrade,
    674 		 * then we want to fail rather than have an intervening
    675 		 * exclusive access.
    676 		 */
    677 		if (lkp->lk_flags & LK_WANT_UPGRADE) {
    678 			lkp->lk_sharecount--;
    679 			if (lkp->lk_sharecount == 0)
    680 				lkp->lk_flags &= ~LK_SHARE_NONZERO;
    681 			COUNT(lkp, l, cpu_id, -1);
    682 			error = EBUSY;
    683 			break;
    684 		}
    685 		/* fall into normal upgrade */
    686 
    687 	case LK_UPGRADE:
    688 		/*
    689 		 * Upgrade a shared lock to an exclusive one. If another
    690 		 * shared lock has already requested an upgrade to an
    691 		 * exclusive lock, our shared lock is released and an
    692 		 * exclusive lock is requested (which will be granted
    693 		 * after the upgrade). If we return an error, the file
    694 		 * will always be unlocked.
    695 		 */
    696 		if (WEHOLDIT(lkp, pid, lid, cpu_id) || lkp->lk_sharecount <= 0)
    697 			panic("lockmgr: upgrade exclusive lock");
    698 		lkp->lk_sharecount--;
    699 		if (lkp->lk_sharecount == 0)
    700 			lkp->lk_flags &= ~LK_SHARE_NONZERO;
    701 		COUNT(lkp, l, cpu_id, -1);
    702 		/*
    703 		 * If we are just polling, check to see if we will block.
    704 		 */
    705 		if ((extflags & LK_NOWAIT) &&
    706 		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
    707 		     lkp->lk_sharecount > 1)) {
    708 			error = EBUSY;
    709 			break;
    710 		}
    711 		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
    712 			/*
     713 			 * We are the first shared lock to request an upgrade, so
    714 			 * request upgrade and wait for the shared count to
    715 			 * drop to zero, then take exclusive lock.
    716 			 */
    717 			lkp->lk_flags |= LK_WANT_UPGRADE;
    718 			error = acquire(&lkp, &s, extflags, 0, LK_SHARE_NONZERO);
    719 			lkp->lk_flags &= ~LK_WANT_UPGRADE;
    720 			if (error)
    721 				break;
    722 			lkp->lk_flags |= LK_HAVE_EXCL;
    723 			SETHOLDER(lkp, pid, lid, cpu_id);
    724 #if defined(LOCKDEBUG)
    725 			lkp->lk_lock_file = file;
    726 			lkp->lk_lock_line = line;
    727 #endif
    728 			HAVEIT(lkp);
    729 			if (lkp->lk_exclusivecount != 0)
    730 				panic("lockmgr: non-zero exclusive count");
    731 			lkp->lk_exclusivecount = 1;
    732 			if (extflags & LK_SETRECURSE)
    733 				lkp->lk_recurselevel = 1;
    734 			COUNT(lkp, l, cpu_id, 1);
    735 			break;
    736 		}
    737 		/*
     738 		 * Someone else has requested an upgrade.  Release our shared
     739 		 * lock, awaken the upgrade requestor if we are the last shared
     740 		 * lock, then request an exclusive lock.
    741 		 */
    742 		if (lkp->lk_sharecount == 0)
    743 			WAKEUP_WAITER(lkp);
    744 		/* fall into exclusive request */
    745 
    746 	case LK_EXCLUSIVE:
    747 		if (WEHOLDIT(lkp, pid, lid, cpu_id)) {
    748 			/*
    749 			 * Recursive lock.
    750 			 */
    751 			if ((extflags & LK_CANRECURSE) == 0 &&
    752 			     lkp->lk_recurselevel == 0) {
    753 				if (extflags & LK_RECURSEFAIL) {
    754 					error = EDEADLK;
    755 					break;
    756 				} else
    757 					panic("lockmgr: locking against myself");
    758 			}
    759 			lkp->lk_exclusivecount++;
    760 			if (extflags & LK_SETRECURSE &&
    761 			    lkp->lk_recurselevel == 0)
    762 				lkp->lk_recurselevel = lkp->lk_exclusivecount;
    763 			COUNT(lkp, l, cpu_id, 1);
    764 			break;
    765 		}
    766 		/*
    767 		 * If we are just polling, check to see if we will sleep.
    768 		 */
    769 		if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
    770 		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
    771 		     LK_SHARE_NONZERO))) {
    772 			error = EBUSY;
    773 			break;
    774 		}
    775 		/*
    776 		 * Try to acquire the want_exclusive flag.
    777 		 */
    778 		error = acquire(&lkp, &s, extflags, 0,
    779 		    LK_HAVE_EXCL | LK_WANT_EXCL);
    780 		if (error)
    781 			break;
    782 		lkp->lk_flags |= LK_WANT_EXCL;
    783 		/*
    784 		 * Wait for shared locks and upgrades to finish.
    785 		 */
    786 		error = acquire(&lkp, &s, extflags, 0,
    787 		    LK_WANT_UPGRADE | LK_SHARE_NONZERO);
    788 		lkp->lk_flags &= ~LK_WANT_EXCL;
    789 		if (error)
    790 			break;
    791 		lkp->lk_flags |= LK_HAVE_EXCL;
    792 		SETHOLDER(lkp, pid, lid, cpu_id);
    793 #if defined(LOCKDEBUG)
    794 		lkp->lk_lock_file = file;
    795 		lkp->lk_lock_line = line;
    796 #endif
    797 		HAVEIT(lkp);
    798 		if (lkp->lk_exclusivecount != 0)
    799 			panic("lockmgr: non-zero exclusive count");
    800 		lkp->lk_exclusivecount = 1;
    801 		if (extflags & LK_SETRECURSE)
    802 			lkp->lk_recurselevel = 1;
    803 		COUNT(lkp, l, cpu_id, 1);
    804 		break;
    805 
    806 	case LK_RELEASE:
    807 		if (lkp->lk_exclusivecount != 0) {
    808 			if (WEHOLDIT(lkp, pid, lid, cpu_id) == 0) {
    809 				if (lkp->lk_flags & LK_SPIN) {
    810 					panic("lockmgr: processor %lu, not "
    811 					    "exclusive lock holder %lu "
    812 					    "unlocking", cpu_id, lkp->lk_cpu);
    813 				} else {
    814 					panic("lockmgr: pid %d, not "
    815 					    "exclusive lock holder %d "
    816 					    "unlocking", pid,
    817 					    lkp->lk_lockholder);
    818 				}
    819 			}
    820 			if (lkp->lk_exclusivecount == lkp->lk_recurselevel)
    821 				lkp->lk_recurselevel = 0;
    822 			lkp->lk_exclusivecount--;
    823 			COUNT(lkp, l, cpu_id, -1);
    824 			if (lkp->lk_exclusivecount == 0) {
    825 				lkp->lk_flags &= ~LK_HAVE_EXCL;
    826 				SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
    827 #if defined(LOCKDEBUG)
    828 				lkp->lk_unlock_file = file;
    829 				lkp->lk_unlock_line = line;
    830 #endif
    831 				DONTHAVEIT(lkp);
    832 			}
    833 		} else if (lkp->lk_sharecount != 0) {
    834 			lkp->lk_sharecount--;
    835 			if (lkp->lk_sharecount == 0)
    836 				lkp->lk_flags &= ~LK_SHARE_NONZERO;
    837 			COUNT(lkp, l, cpu_id, -1);
    838 		}
    839 #ifdef DIAGNOSTIC
    840 		else
    841 			panic("lockmgr: release of unlocked lock!");
    842 #endif
    843 		WAKEUP_WAITER(lkp);
    844 		break;
    845 
    846 	case LK_DRAIN:
    847 		/*
    848 		 * Check that we do not already hold the lock, as it can
    849 		 * never drain if we do. Unfortunately, we have no way to
    850 		 * check for holding a shared lock, but at least we can
    851 		 * check for an exclusive one.
    852 		 */
    853 		if (WEHOLDIT(lkp, pid, lid, cpu_id))
    854 			panic("lockmgr: draining against myself");
    855 		/*
    856 		 * If we are just polling, check to see if we will sleep.
    857 		 */
    858 		if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
    859 		     (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
    860 		     LK_SHARE_NONZERO | LK_WAIT_NONZERO))) {
    861 			error = EBUSY;
    862 			break;
    863 		}
    864 		error = acquire(&lkp, &s, extflags, 1,
    865 		    LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
    866 		    LK_SHARE_NONZERO | LK_WAIT_NONZERO);
    867 		if (error)
    868 			break;
    869 		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
    870 		SETHOLDER(lkp, pid, lid, cpu_id);
    871 #if defined(LOCKDEBUG)
    872 		lkp->lk_lock_file = file;
    873 		lkp->lk_lock_line = line;
    874 #endif
    875 		HAVEIT(lkp);
    876 		lkp->lk_exclusivecount = 1;
    877 		/* XXX unlikely that we'd want this */
    878 		if (extflags & LK_SETRECURSE)
    879 			lkp->lk_recurselevel = 1;
    880 		COUNT(lkp, l, cpu_id, 1);
    881 		break;
    882 
    883 	default:
    884 		INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
    885 		panic("lockmgr: unknown locktype request %d",
    886 		    flags & LK_TYPE_MASK);
    887 		/* NOTREACHED */
    888 	}
    889 	if ((lkp->lk_flags & (LK_WAITDRAIN|LK_SPIN)) == LK_WAITDRAIN &&
    890 	    ((lkp->lk_flags &
    891 	      (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
    892 	      LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0)) {
    893 		lkp->lk_flags &= ~LK_WAITDRAIN;
    894 		wakeup((void *)&lkp->lk_flags);
    895 	}
    896 	/*
    897 	 * Note that this panic will be a recursive panic, since
    898 	 * we only set lock_shutdown_noblock above if panicstr != NULL.
    899 	 */
    900 	if (error && lock_shutdown_noblock)
    901 		panic("lockmgr: deadlock (see previous panic)");
    902 
    903 	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
    904 	return (error);
    905 }
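
/*
 * A hedged sketch of common lockmgr() call shapes, reusing the
 * hypothetical "example_lk" sleep lock from the lockinit() sketch.
 * Not compiled.
 */
#if 0
static void
example_lockmgr_usage(void)
{
	int error;

	/* Reader: take and drop a shared hold. */
	error = lockmgr(&example_lk, LK_SHARED, NULL);
	if (error == 0) {
		/* ... read the protected data ... */
		(void) lockmgr(&example_lk, LK_RELEASE, NULL);
	}

	/* Writer: exclusive hold, failing instead of sleeping. */
	error = lockmgr(&example_lk, LK_EXCLUSIVE | LK_NOWAIT, NULL);
	if (error == 0) {
		/* ... modify the protected data ... */
		(void) lockmgr(&example_lk, LK_RELEASE, NULL);
	}
}
#endif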
    906 
    907 /*
    908  * For a recursive spinlock held one or more times by the current CPU,
    909  * release all N locks, and return N.
    910  * Intended for use in mi_switch() shortly before context switching.
    911  */
    912 
    913 int
    914 #if defined(LOCKDEBUG)
    915 _spinlock_release_all(__volatile struct lock *lkp, const char *file, int line)
    916 #else
    917 spinlock_release_all(__volatile struct lock *lkp)
    918 #endif
    919 {
    920 	int s, count;
    921 	cpuid_t cpu_id;
    922 
    923 	KASSERT(lkp->lk_flags & LK_SPIN);
    924 
    925 	INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);
    926 
    927 	cpu_id = cpu_number();
    928 	count = lkp->lk_exclusivecount;
    929 
    930 	if (count != 0) {
    931 #ifdef DIAGNOSTIC
    932 		if (WEHOLDIT(lkp, 0, 0, cpu_id) == 0) {
    933 			panic("spinlock_release_all: processor %lu, not "
    934 			    "exclusive lock holder %lu "
    935 			    "unlocking", (long)cpu_id, lkp->lk_cpu);
    936 		}
    937 #endif
    938 		lkp->lk_recurselevel = 0;
    939 		lkp->lk_exclusivecount = 0;
    940 		COUNT_CPU(cpu_id, -count);
    941 		lkp->lk_flags &= ~LK_HAVE_EXCL;
    942 		SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
    943 #if defined(LOCKDEBUG)
    944 		lkp->lk_unlock_file = file;
    945 		lkp->lk_unlock_line = line;
    946 #endif
    947 		DONTHAVEIT(lkp);
    948 	}
    949 #ifdef DIAGNOSTIC
    950 	else if (lkp->lk_sharecount != 0)
    951 		panic("spinlock_release_all: release of shared lock!");
    952 	else
    953 		panic("spinlock_release_all: release of unlocked lock!");
    954 #endif
    955 	INTERLOCK_RELEASE(lkp, LK_SPIN, s);
    956 
    957 	return (count);
    958 }
    959 
    960 /*
     961  * For a recursive spinlock released via spinlock_release_all(),
     962  * re-acquire all N holds on behalf of the current CPU.
    963  * Intended for use in mi_switch() right after resuming execution.
    964  */
    965 
    966 void
    967 #if defined(LOCKDEBUG)
    968 _spinlock_acquire_count(__volatile struct lock *lkp, int count,
    969     const char *file, int line)
    970 #else
    971 spinlock_acquire_count(__volatile struct lock *lkp, int count)
    972 #endif
    973 {
    974 	int s, error;
    975 	cpuid_t cpu_id;
    976 
    977 	KASSERT(lkp->lk_flags & LK_SPIN);
    978 
    979 	INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);
    980 
    981 	cpu_id = cpu_number();
    982 
    983 #ifdef DIAGNOSTIC
    984 	if (WEHOLDIT(lkp, LK_NOPROC, 0, cpu_id))
    985 		panic("spinlock_acquire_count: processor %lu already holds lock", (long)cpu_id);
    986 #endif
    987 	/*
    988 	 * Try to acquire the want_exclusive flag.
    989 	 */
    990 	error = acquire(&lkp, &s, LK_SPIN, 0, LK_HAVE_EXCL | LK_WANT_EXCL);
    991 	lkp->lk_flags |= LK_WANT_EXCL;
    992 	/*
    993 	 * Wait for shared locks and upgrades to finish.
    994 	 */
    995 	error = acquire(&lkp, &s, LK_SPIN, 0,
    996 	    LK_SHARE_NONZERO | LK_WANT_UPGRADE);
    997 	lkp->lk_flags &= ~LK_WANT_EXCL;
    998 	lkp->lk_flags |= LK_HAVE_EXCL;
    999 	SETHOLDER(lkp, LK_NOPROC, 0, cpu_id);
   1000 #if defined(LOCKDEBUG)
   1001 	lkp->lk_lock_file = file;
   1002 	lkp->lk_lock_line = line;
   1003 #endif
   1004 	HAVEIT(lkp);
   1005 	if (lkp->lk_exclusivecount != 0)
   1006 		panic("lockmgr: non-zero exclusive count");
   1007 	lkp->lk_exclusivecount = count;
   1008 	lkp->lk_recurselevel = 1;
   1009 	COUNT_CPU(cpu_id, count);
   1010 
   1011 	INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
   1012 }
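
/*
 * A sketch of the intended pairing around a context switch, roughly
 * what mi_switch() does with a recursive spin lock; the helper name
 * is made up.  Not compiled.
 */
#if 0
static void
example_switch_pairing(struct lock *lkp)
{
	int count;

	count = spinlock_release_all(lkp);	/* drop all N holds */
	/* ... context switch; other LWPs run here ... */
	spinlock_acquire_count(lkp, count);	/* re-take the N holds */
}
#endif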
   1013 
   1014 
   1015 
   1016 /*
    1017  * Print out information about the state of a lock. Used by VOP_PRINT
    1018  * routines to display the status of contained locks.
   1019  */
   1020 void
   1021 lockmgr_printinfo(__volatile struct lock *lkp)
   1022 {
   1023 
   1024 	if (lkp->lk_sharecount)
   1025 		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
   1026 		    lkp->lk_sharecount);
   1027 	else if (lkp->lk_flags & LK_HAVE_EXCL) {
   1028 		printf(" lock type %s: EXCL (count %d) by ",
   1029 		    lkp->lk_wmesg, lkp->lk_exclusivecount);
   1030 		if (lkp->lk_flags & LK_SPIN)
   1031 			printf("processor %lu", lkp->lk_cpu);
   1032 		else
   1033 			printf("pid %d.%d", lkp->lk_lockholder,
   1034 			    lkp->lk_locklwp);
   1035 	} else
   1036 		printf(" not locked");
   1037 	if ((lkp->lk_flags & LK_SPIN) == 0 && lkp->lk_waitcount > 0)
   1038 		printf(" with %d pending", lkp->lk_waitcount);
   1039 }
   1040 
   1041 #if defined(LOCKDEBUG) /* { */
   1042 TAILQ_HEAD(, simplelock) simplelock_list =
   1043     TAILQ_HEAD_INITIALIZER(simplelock_list);
   1044 
   1045 #if defined(MULTIPROCESSOR) /* { */
   1046 struct simplelock simplelock_list_slock = SIMPLELOCK_INITIALIZER;
   1047 
   1048 #define	SLOCK_LIST_LOCK()						\
   1049 	__cpu_simple_lock(&simplelock_list_slock.lock_data)
   1050 
   1051 #define	SLOCK_LIST_UNLOCK()						\
   1052 	__cpu_simple_unlock(&simplelock_list_slock.lock_data)
   1053 
   1054 #define	SLOCK_COUNT(x)							\
   1055 	curcpu()->ci_simple_locks += (x)
   1056 #else
   1057 u_long simple_locks;
   1058 
   1059 #define	SLOCK_LIST_LOCK()	/* nothing */
   1060 
   1061 #define	SLOCK_LIST_UNLOCK()	/* nothing */
   1062 
   1063 #define	SLOCK_COUNT(x)		simple_locks += (x)
   1064 #endif /* MULTIPROCESSOR */ /* } */
   1065 
   1066 #ifdef MULTIPROCESSOR
    1067 #define SLOCK_MP()		lock_printf("on CPU %lu\n", 		\
   1068 				    (u_long) cpu_number())
   1069 #else
   1070 #define SLOCK_MP()		/* nothing */
   1071 #endif
   1072 
   1073 #define	SLOCK_WHERE(str, alp, id, l)					\
   1074 do {									\
   1075 	lock_printf("\n");						\
   1076 	lock_printf(str);						\
   1077 	lock_printf("lock: %p, currently at: %s:%d\n", (alp), (id), (l)); \
   1078 	SLOCK_MP();							\
   1079 	if ((alp)->lock_file != NULL)					\
   1080 		lock_printf("last locked: %s:%d\n", (alp)->lock_file,	\
   1081 		    (alp)->lock_line);					\
   1082 	if ((alp)->unlock_file != NULL)					\
   1083 		lock_printf("last unlocked: %s:%d\n", (alp)->unlock_file, \
   1084 		    (alp)->unlock_line);				\
   1085 	SLOCK_TRACE()							\
   1086 	SLOCK_DEBUGGER();						\
   1087 } while (/*CONSTCOND*/0)
   1088 
   1089 /*
   1090  * Simple lock functions so that the debugger can see from whence
   1091  * they are being called.
   1092  */
   1093 void
   1094 simple_lock_init(struct simplelock *alp)
   1095 {
   1096 
   1097 #if defined(MULTIPROCESSOR) /* { */
   1098 	__cpu_simple_lock_init(&alp->lock_data);
   1099 #else
   1100 	alp->lock_data = __SIMPLELOCK_UNLOCKED;
   1101 #endif /* } */
   1102 	alp->lock_file = NULL;
   1103 	alp->lock_line = 0;
   1104 	alp->unlock_file = NULL;
   1105 	alp->unlock_line = 0;
   1106 	alp->lock_holder = LK_NOCPU;
   1107 }
   1108 
   1109 void
   1110 _simple_lock(__volatile struct simplelock *alp, const char *id, int l)
   1111 {
   1112 	cpuid_t cpu_id = cpu_number();
   1113 	int s;
   1114 
   1115 	s = spllock();
   1116 
   1117 	/*
   1118 	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
   1119 	 * don't take any action, and just fall into the normal spin case.
   1120 	 */
   1121 	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
   1122 #if defined(MULTIPROCESSOR) /* { */
   1123 		if (alp->lock_holder == cpu_id) {
   1124 			SLOCK_WHERE("simple_lock: locking against myself\n",
   1125 			    alp, id, l);
   1126 			goto out;
   1127 		}
   1128 #else
   1129 		SLOCK_WHERE("simple_lock: lock held\n", alp, id, l);
   1130 		goto out;
   1131 #endif /* MULTIPROCESSOR */ /* } */
   1132 	}
   1133 
   1134 #if defined(MULTIPROCESSOR) /* { */
   1135 	/* Acquire the lock before modifying any fields. */
   1136 	splx(s);
   1137 	__cpu_simple_lock(&alp->lock_data);
   1138 	s = spllock();
   1139 #else
   1140 	alp->lock_data = __SIMPLELOCK_LOCKED;
   1141 #endif /* } */
   1142 
   1143 	if (alp->lock_holder != LK_NOCPU) {
   1144 		SLOCK_WHERE("simple_lock: uninitialized lock\n",
   1145 		    alp, id, l);
   1146 	}
   1147 	alp->lock_file = id;
   1148 	alp->lock_line = l;
   1149 	alp->lock_holder = cpu_id;
   1150 
   1151 	SLOCK_LIST_LOCK();
   1152 	/* XXX Cast away volatile */
   1153 	TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
   1154 	SLOCK_LIST_UNLOCK();
   1155 
   1156 	SLOCK_COUNT(1);
   1157 
   1158  out:
   1159 	splx(s);
   1160 }
   1161 
   1162 int
   1163 _simple_lock_held(__volatile struct simplelock *alp)
   1164 {
   1165 #if defined(MULTIPROCESSOR) || defined(DIAGNOSTIC)
   1166 	cpuid_t cpu_id = cpu_number();
   1167 #endif
   1168 	int s, locked = 0;
   1169 
   1170 	s = spllock();
   1171 
   1172 #if defined(MULTIPROCESSOR)
   1173 	if (__cpu_simple_lock_try(&alp->lock_data) == 0)
   1174 		locked = (alp->lock_holder == cpu_id);
   1175 	else
   1176 		__cpu_simple_unlock(&alp->lock_data);
   1177 #else
   1178 	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
   1179 		locked = 1;
   1180 		KASSERT(alp->lock_holder == cpu_id);
   1181 	}
   1182 #endif
   1183 
   1184 	splx(s);
   1185 
   1186 	return (locked);
   1187 }
   1188 
   1189 int
   1190 _simple_lock_try(__volatile struct simplelock *alp, const char *id, int l)
   1191 {
   1192 	cpuid_t cpu_id = cpu_number();
   1193 	int s, rv = 0;
   1194 
   1195 	s = spllock();
   1196 
   1197 	/*
   1198 	 * MULTIPROCESSOR case: This is `safe' since if it's not us, we
   1199 	 * don't take any action.
   1200 	 */
   1201 #if defined(MULTIPROCESSOR) /* { */
   1202 	if ((rv = __cpu_simple_lock_try(&alp->lock_data)) == 0) {
   1203 		if (alp->lock_holder == cpu_id)
   1204 			SLOCK_WHERE("simple_lock_try: locking against myself\n",
   1205 			    alp, id, l);
   1206 		goto out;
   1207 	}
   1208 #else
   1209 	if (alp->lock_data == __SIMPLELOCK_LOCKED) {
   1210 		SLOCK_WHERE("simple_lock_try: lock held\n", alp, id, l);
   1211 		goto out;
   1212 	}
   1213 	alp->lock_data = __SIMPLELOCK_LOCKED;
   1214 #endif /* MULTIPROCESSOR */ /* } */
   1215 
   1216 	/*
   1217 	 * At this point, we have acquired the lock.
   1218 	 */
   1219 
   1220 	rv = 1;
   1221 
   1222 	alp->lock_file = id;
   1223 	alp->lock_line = l;
   1224 	alp->lock_holder = cpu_id;
   1225 
   1226 	SLOCK_LIST_LOCK();
   1227 	/* XXX Cast away volatile. */
   1228 	TAILQ_INSERT_TAIL(&simplelock_list, (struct simplelock *)alp, list);
   1229 	SLOCK_LIST_UNLOCK();
   1230 
   1231 	SLOCK_COUNT(1);
   1232 
   1233  out:
   1234 	splx(s);
   1235 	return (rv);
   1236 }
   1237 
   1238 void
   1239 _simple_unlock(__volatile struct simplelock *alp, const char *id, int l)
   1240 {
   1241 	int s;
   1242 
   1243 	s = spllock();
   1244 
   1245 	/*
   1246 	 * MULTIPROCESSOR case: This is `safe' because we think we hold
   1247 	 * the lock, and if we don't, we don't take any action.
   1248 	 */
   1249 	if (alp->lock_data == __SIMPLELOCK_UNLOCKED) {
   1250 		SLOCK_WHERE("simple_unlock: lock not held\n",
   1251 		    alp, id, l);
   1252 		goto out;
   1253 	}
   1254 
   1255 	SLOCK_LIST_LOCK();
   1256 	TAILQ_REMOVE(&simplelock_list, alp, list);
   1257 	SLOCK_LIST_UNLOCK();
   1258 
   1259 	SLOCK_COUNT(-1);
   1260 
   1261 	alp->list.tqe_next = NULL;	/* sanity */
   1262 	alp->list.tqe_prev = NULL;	/* sanity */
   1263 
   1264 	alp->unlock_file = id;
   1265 	alp->unlock_line = l;
   1266 
   1267 #if defined(MULTIPROCESSOR) /* { */
   1268 	alp->lock_holder = LK_NOCPU;
   1269 	/* Now that we've modified all fields, release the lock. */
   1270 	__cpu_simple_unlock(&alp->lock_data);
   1271 #else
   1272 	alp->lock_data = __SIMPLELOCK_UNLOCKED;
   1273 	KASSERT(alp->lock_holder == cpu_number());
   1274 	alp->lock_holder = LK_NOCPU;
   1275 #endif /* } */
   1276 
   1277  out:
   1278 	splx(s);
   1279 }
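
/*
 * A minimal usage sketch: under LOCKDEBUG the simple_lock() and
 * simple_unlock() macros in <sys/lock.h> call the _simple_lock() and
 * _simple_unlock() functions above with __FILE__/__LINE__.  The lock
 * name is made up.  Not compiled.
 */
#if 0
static struct simplelock example_interlock = SIMPLELOCK_INITIALIZER;

static void
example_interlock_usage(void)
{

	simple_lock(&example_interlock);
	/* ... short critical section; sleeping is not allowed ... */
	simple_unlock(&example_interlock);
}
#endif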
   1280 
   1281 void
   1282 simple_lock_dump(void)
   1283 {
   1284 	struct simplelock *alp;
   1285 	int s;
   1286 
   1287 	s = spllock();
   1288 	SLOCK_LIST_LOCK();
   1289 	lock_printf("all simple locks:\n");
   1290 	TAILQ_FOREACH(alp, &simplelock_list, list) {
   1291 		lock_printf("%p CPU %lu %s:%d\n", alp, alp->lock_holder,
   1292 		    alp->lock_file, alp->lock_line);
   1293 	}
   1294 	SLOCK_LIST_UNLOCK();
   1295 	splx(s);
   1296 }
   1297 
   1298 void
   1299 simple_lock_freecheck(void *start, void *end)
   1300 {
   1301 	struct simplelock *alp;
   1302 	int s;
   1303 
   1304 	s = spllock();
   1305 	SLOCK_LIST_LOCK();
   1306 	TAILQ_FOREACH(alp, &simplelock_list, list) {
   1307 		if ((void *)alp >= start && (void *)alp < end) {
   1308 			lock_printf("freeing simple_lock %p CPU %lu %s:%d\n",
   1309 			    alp, alp->lock_holder, alp->lock_file,
   1310 			    alp->lock_line);
   1311 			SLOCK_DEBUGGER();
   1312 		}
   1313 	}
   1314 	SLOCK_LIST_UNLOCK();
   1315 	splx(s);
   1316 }
   1317 
   1318 /*
   1319  * We must be holding exactly one lock: the sched_lock.
   1320  */
   1321 
   1322 void
   1323 simple_lock_switchcheck(void)
   1324 {
   1325 
   1326 	simple_lock_only_held(&sched_lock, "switching");
   1327 }
   1328 
   1329 void
   1330 simple_lock_only_held(volatile struct simplelock *lp, const char *where)
   1331 {
   1332 	struct simplelock *alp;
   1333 	cpuid_t cpu_id = cpu_number();
   1334 	int s;
   1335 
   1336 	if (lp) {
   1337 		LOCK_ASSERT(simple_lock_held(lp));
   1338 	}
   1339 	s = spllock();
   1340 	SLOCK_LIST_LOCK();
   1341 	TAILQ_FOREACH(alp, &simplelock_list, list) {
   1342 		if (alp == lp)
   1343 			continue;
   1344 		if (alp->lock_holder == cpu_id)
   1345 			break;
   1346 	}
   1347 	SLOCK_LIST_UNLOCK();
   1348 	splx(s);
   1349 
   1350 	if (alp != NULL) {
   1351 		lock_printf("\n%s with held simple_lock %p "
   1352 		    "CPU %lu %s:%d\n",
   1353 		    where, alp, alp->lock_holder, alp->lock_file,
   1354 		    alp->lock_line);
   1355 		SLOCK_TRACE();
   1356 		SLOCK_DEBUGGER();
   1357 	}
   1358 }
   1359 #endif /* LOCKDEBUG */ /* } */
   1360 
   1361 #if defined(MULTIPROCESSOR)
   1362 /*
   1363  * Functions for manipulating the kernel_lock.  We put them here
   1364  * so that they show up in profiles.
   1365  */
   1366 
   1367 struct lock kernel_lock;
   1368 
   1369 void
   1370 _kernel_lock_init(void)
   1371 {
   1372 
   1373 	spinlockinit(&kernel_lock, "klock", 0);
   1374 }
   1375 
   1376 /*
   1377  * Acquire/release the kernel lock.  Intended for use in the scheduler
   1378  * and the lower half of the kernel.
   1379  */
   1380 void
   1381 _kernel_lock(int flag)
   1382 {
   1383 
   1384 	SCHED_ASSERT_UNLOCKED();
   1385 	spinlockmgr(&kernel_lock, flag, 0);
   1386 }
   1387 
   1388 void
   1389 _kernel_unlock(void)
   1390 {
   1391 
   1392 	spinlockmgr(&kernel_lock, LK_RELEASE, 0);
   1393 }
   1394 
   1395 /*
   1396  * Acquire/release the kernel_lock on behalf of a process.  Intended for
   1397  * use in the top half of the kernel.
   1398  */
   1399 void
   1400 _kernel_proc_lock(struct lwp *l)
   1401 {
   1402 
   1403 	SCHED_ASSERT_UNLOCKED();
   1404 	spinlockmgr(&kernel_lock, LK_EXCLUSIVE, 0);
   1405 }
   1406 
   1407 void
   1408 _kernel_proc_unlock(struct lwp *l)
   1409 {
   1410 
   1411 	spinlockmgr(&kernel_lock, LK_RELEASE, 0);
   1412 }
   1413 
   1414 int
    1415 _kernel_lock_release_all(void)
   1416 {
   1417 	int hold_count;
   1418 
   1419 	if (lockstatus(&kernel_lock) == LK_EXCLUSIVE)
   1420 		hold_count = spinlock_release_all(&kernel_lock);
   1421 	else
   1422 		hold_count = 0;
   1423 
   1424 	return hold_count;
   1425 }
   1426 
   1427 void
   1428 _kernel_lock_acquire_count(int hold_count)
   1429 {
   1430 
   1431 	if (hold_count != 0)
   1432 		spinlock_acquire_count(&kernel_lock, hold_count);
   1433 }
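
/*
 * A sketch of the top-half kernel_lock entry points; the helper name
 * is made up.  Not compiled.
 */
#if 0
static void
example_kernel_lock_usage(struct lwp *l)
{

	_kernel_proc_lock(l);		/* exclusive, may spin */
	/* ... run MP-unsafe kernel code ... */
	_kernel_proc_unlock(l);
}
#endif
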
   1434 #endif /* MULTIPROCESSOR */
   1435