/*	$NetBSD: kern_mutex.c,v 1.21 2007/11/04 17:26:02 pooka Exp $	*/

/*-
 * Copyright (c) 2002, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Kernel mutex implementation, modeled after those found in Solaris,
 * a description of which can be found in:
 *
 *	Solaris Internals: Core Kernel Architecture, Jim Mauro and
 *	    Richard McDougall.
 */

#define	__MUTEX_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_mutex.c,v 1.21 2007/11/04 17:26:02 pooka Exp $");

#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/sched.h>
#include <sys/sleepq.h>
#include <sys/systm.h>
#include <sys/lockdebug.h>
#include <sys/kernel.h>

#include <dev/lockstat.h>

#include <sys/intr.h>

/*
 * When not running a debug kernel, spin mutexes are not much
 * more than an splraiseipl() and splx() pair.
 */

#if defined(DIAGNOSTIC) || defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
#define	FULL
#endif
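
/*
 * To illustrate the claim above: on a kernel built without FULL, a
 * spin mutex critical section costs little more than the SPL pair
 * sketched below.  The block is illustrative only and not compiled;
 * the function name and the IPL_VM level are made up.
 */
#if 0
static void
example_spl_pair(void)
{
	int s;

	s = splraiseipl(makeiplcookie(IPL_VM));	/* mutex_spin_enter() */
	/* ... critical section ... */
	splx(s);				/* mutex_spin_exit() */
}
#endif	/* illustration */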

/*
 * Debugging support.
 */

#define	MUTEX_WANTLOCK(mtx)					\
    LOCKDEBUG_WANTLOCK(MUTEX_GETID(mtx),			\
        (uintptr_t)__builtin_return_address(0), 0)
#define	MUTEX_LOCKED(mtx)					\
    LOCKDEBUG_LOCKED(MUTEX_GETID(mtx),				\
        (uintptr_t)__builtin_return_address(0), 0)
#define	MUTEX_UNLOCKED(mtx)					\
    LOCKDEBUG_UNLOCKED(MUTEX_GETID(mtx),			\
        (uintptr_t)__builtin_return_address(0), 0)
#define	MUTEX_ABORT(mtx, msg)					\
    mutex_abort(mtx, __func__, msg)

#if defined(LOCKDEBUG)

#define	MUTEX_DASSERT(mtx, cond)				\
do {								\
	if (!(cond))						\
		MUTEX_ABORT(mtx, "assertion failed: " #cond);	\
} while (/* CONSTCOND */ 0)

#else	/* LOCKDEBUG */

#define	MUTEX_DASSERT(mtx, cond)	/* nothing */

#endif /* LOCKDEBUG */

#if defined(DIAGNOSTIC)

#define	MUTEX_ASSERT(mtx, cond)					\
do {								\
	if (!(cond))						\
		MUTEX_ABORT(mtx, "assertion failed: " #cond);	\
} while (/* CONSTCOND */ 0)

#else	/* DIAGNOSTIC */

#define	MUTEX_ASSERT(mtx, cond)	/* nothing */

#endif	/* DIAGNOSTIC */

/*
 * Spin mutex SPL save / restore.
 */
#ifndef MUTEX_COUNT_BIAS
#define	MUTEX_COUNT_BIAS	0
#endif

#define	MUTEX_SPIN_SPLRAISE(mtx)					\
do {									\
	struct cpu_info *x__ci = curcpu();				\
	int x__cnt, s;							\
	x__cnt = x__ci->ci_mtx_count--;					\
	s = splraiseipl(mtx->mtx_ipl);					\
	if (x__cnt == MUTEX_COUNT_BIAS)					\
		x__ci->ci_mtx_oldspl = (s);				\
} while (/* CONSTCOND */ 0)

#define	MUTEX_SPIN_SPLRESTORE(mtx)					\
do {									\
	struct cpu_info *x__ci = curcpu();				\
	int s = x__ci->ci_mtx_oldspl;					\
	__insn_barrier();						\
	if (++(x__ci->ci_mtx_count) == MUTEX_COUNT_BIAS)		\
		splx(s);						\
} while (/* CONSTCOND */ 0)
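
/*
 * How the per-CPU count makes SPL handling nest (illustrative only,
 * not compiled; assumes MUTEX_COUNT_BIAS of zero).  Only the
 * outermost acquisition saves the old SPL in ci_mtx_oldspl, and only
 * the matching outermost release calls splx().  The function name and
 * the two spin mutexes are made up.
 */
#if 0
static kmutex_t example_lock_a, example_lock_b;

static void
example_nesting(void)
{

	mutex_spin_enter(&example_lock_a); /* ci_mtx_count 0 -> -1, save SPL */
	mutex_spin_enter(&example_lock_b); /* ci_mtx_count -1 -> -2, no save */
	/* ... */
	mutex_spin_exit(&example_lock_b);  /* ci_mtx_count -2 -> -1, no splx() */
	mutex_spin_exit(&example_lock_a);  /* ci_mtx_count -1 -> 0, splx() */
}
#endif	/* illustration */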

/*
 * Architectures that provide 'simple' mutexes supply a CAS function
 * that is either MP-safe, or does not need to be MP-safe.  Adaptive
 * mutexes on these architectures do not require an additional
 * interlock.
 */

#ifdef __HAVE_SIMPLE_MUTEXES

#define	MUTEX_OWNER(owner)						\
	(owner & MUTEX_THREAD)
#define	MUTEX_OWNED(owner)						\
	(owner != 0)
#define	MUTEX_HAS_WAITERS(mtx)						\
	(((int)(mtx)->mtx_owner & MUTEX_BIT_WAITERS) != 0)

#define	MUTEX_INITIALIZE_ADAPTIVE(mtx, id)				\
do {									\
	(mtx)->mtx_id = (id);						\
} while (/* CONSTCOND */ 0)

#define	MUTEX_INITIALIZE_SPIN(mtx, id, ipl)				\
do {									\
	(mtx)->mtx_owner = MUTEX_BIT_SPIN;				\
	(mtx)->mtx_ipl = makeiplcookie((ipl));				\
	(mtx)->mtx_id = (id);						\
	__cpu_simple_lock_init(&(mtx)->mtx_lock);			\
} while (/* CONSTCOND */ 0)

#define	MUTEX_DESTROY(mtx)						\
do {									\
	(mtx)->mtx_owner = MUTEX_THREAD;				\
	(mtx)->mtx_id = -1;						\
} while (/* CONSTCOND */ 0)

#define	MUTEX_SPIN_P(mtx)		\
    (((mtx)->mtx_owner & MUTEX_BIT_SPIN) != 0)
#define	MUTEX_ADAPTIVE_P(mtx)		\
    (((mtx)->mtx_owner & MUTEX_BIT_SPIN) == 0)

#define	MUTEX_GETID(mtx)		((mtx)->mtx_id)

static inline int
MUTEX_ACQUIRE(kmutex_t *mtx, uintptr_t curthread)
{
	int rv;
	rv = MUTEX_CAS(&mtx->mtx_owner, 0UL, curthread);
	MUTEX_RECEIVE(mtx);
	return rv;
}

static inline int
MUTEX_SET_WAITERS(kmutex_t *mtx, uintptr_t owner)
{
	int rv;
	rv = MUTEX_CAS(&mtx->mtx_owner, owner, owner | MUTEX_BIT_WAITERS);
	MUTEX_RECEIVE(mtx);
	return rv;
}

static inline void
MUTEX_RELEASE(kmutex_t *mtx)
{
	MUTEX_GIVE(mtx);
	mtx->mtx_owner = 0;
}

static inline void
MUTEX_CLEAR_WAITERS(kmutex_t *mtx)
{
	/* nothing */
}
#endif	/* __HAVE_SIMPLE_MUTEXES */
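
/*
 * For reference, a port's MUTEX_CAS might be built on a word-sized
 * compare-and-swap that returns the old value, along the lines of the
 * sketch below.  This is a hypothetical example, not compiled; the
 * real definitions live in each port's machine-dependent headers, and
 * "atomic_cas_ulong" stands in for whatever primitive the port has.
 */
#if 0
#define	MUTEX_CAS(p, o, n)						\
	(atomic_cas_ulong((volatile unsigned long *)(p), (o), (n)) == (o))
#endif	/* illustration */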

/*
 * Patch in stubs via strong alias where they are not available.
 */

#if defined(LOCKDEBUG)
#undef	__HAVE_MUTEX_STUBS
#undef	__HAVE_SPIN_MUTEX_STUBS
#endif

#ifndef __HAVE_MUTEX_STUBS
__strong_alias(mutex_enter,mutex_vector_enter);
__strong_alias(mutex_exit,mutex_vector_exit);
#endif

#ifndef __HAVE_SPIN_MUTEX_STUBS
__strong_alias(mutex_spin_enter,mutex_vector_enter);
__strong_alias(mutex_spin_exit,mutex_vector_exit);
#endif

void	mutex_abort(kmutex_t *, const char *, const char *);
void	mutex_dump(volatile void *);
int	mutex_onproc(uintptr_t, struct cpu_info **);
static struct lwp *mutex_owner(wchan_t);

lockops_t mutex_spin_lockops = {
	"Mutex",
	0,
	mutex_dump
};

lockops_t mutex_adaptive_lockops = {
	"Mutex",
	1,
	mutex_dump
};

syncobj_t mutex_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	turnstile_unsleep,
	turnstile_changepri,
	sleepq_lendpri,
	mutex_owner,
};

/*
 * mutex_dump:
 *
 *	Dump the contents of a mutex structure.
 */
void
mutex_dump(volatile void *cookie)
{
	volatile kmutex_t *mtx = cookie;

	printf_nolog("owner field  : %#018lx wait/spin: %16d/%d\n",
	    (long)MUTEX_OWNER(mtx->mtx_owner), MUTEX_HAS_WAITERS(mtx),
	    MUTEX_SPIN_P(mtx));
}

/*
 * mutex_abort:
 *
 *	Dump information about an error and panic the system.  This
 *	generates a lot of machine code in the DIAGNOSTIC case, so
 *	we ask the compiler to not inline it.
 */

#if __GNUC_PREREQ__(3, 0)
__attribute ((noinline)) __attribute ((noreturn))
#endif
void
mutex_abort(kmutex_t *mtx, const char *func, const char *msg)
{

	LOCKDEBUG_ABORT(MUTEX_GETID(mtx), mtx, (MUTEX_SPIN_P(mtx) ?
	    &mutex_spin_lockops : &mutex_adaptive_lockops), func, msg);
	/* NOTREACHED */
}

/*
 * mutex_init:
 *
 *	Initialize a mutex for use.  Note that adaptive mutexes are in
 *	essence spin mutexes that can sleep to avoid deadlock and wasting
 *	CPU time.  We can't easily provide a type of mutex that always
 *	sleeps - see comments in mutex_vector_enter() about releasing
 *	mutexes unlocked.
 */
void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{
	u_int id;

	memset(mtx, 0, sizeof(*mtx));

	switch (type) {
	case MUTEX_ADAPTIVE:
	case MUTEX_DEFAULT:
		KASSERT(ipl == IPL_NONE);
		break;
	case MUTEX_DRIVER:
		type = (ipl == IPL_NONE ? MUTEX_ADAPTIVE : MUTEX_SPIN);
		break;
	default:
		break;
	}

	switch (type) {
	case MUTEX_NODEBUG:
		id = LOCKDEBUG_ALLOC(mtx, NULL,
		    (uintptr_t)__builtin_return_address(0));
		MUTEX_INITIALIZE_SPIN(mtx, id, ipl);
		break;
	case MUTEX_ADAPTIVE:
	case MUTEX_DEFAULT:
		id = LOCKDEBUG_ALLOC(mtx, &mutex_adaptive_lockops,
		    (uintptr_t)__builtin_return_address(0));
		MUTEX_INITIALIZE_ADAPTIVE(mtx, id);
		break;
	case MUTEX_SPIN:
		id = LOCKDEBUG_ALLOC(mtx, &mutex_spin_lockops,
		    (uintptr_t)__builtin_return_address(0));
		MUTEX_INITIALIZE_SPIN(mtx, id, ipl);
		break;
	default:
		panic("mutex_init: impossible type");
		break;
	}
}

/*
 * mutex_destroy:
 *
 *	Tear down a mutex.
 */
void
mutex_destroy(kmutex_t *mtx)
{

	if (MUTEX_ADAPTIVE_P(mtx)) {
		MUTEX_ASSERT(mtx, !MUTEX_OWNED(mtx->mtx_owner) &&
		    !MUTEX_HAS_WAITERS(mtx));
	} else {
		MUTEX_ASSERT(mtx, !__SIMPLELOCK_LOCKED_P(&mtx->mtx_lock));
	}

	LOCKDEBUG_FREE(mtx, MUTEX_GETID(mtx));
	MUTEX_DESTROY(mtx);
}
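
/*
 * The life cycle of a mutex, from a caller's point of view, looks
 * like the sketch below (illustrative only, not compiled; the
 * function name, locks and chosen IPL are made up).
 */
#if 0
static void
example_life_cycle(void)
{
	static kmutex_t sc_lock, sc_intr_lock;

	mutex_init(&sc_lock, MUTEX_DEFAULT, IPL_NONE);	/* adaptive */
	mutex_init(&sc_intr_lock, MUTEX_SPIN, IPL_VM);	/* spin */

	mutex_enter(&sc_lock);
	/* ... critical section; we may sleep while waiting ... */
	mutex_exit(&sc_lock);

	mutex_destroy(&sc_intr_lock);
	mutex_destroy(&sc_lock);
}
#endif	/* illustration */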

/*
 * mutex_onproc:
 *
 *	Return true if an adaptive mutex owner is running on a CPU in the
 *	system.  If the target is waiting on the kernel big lock, then we
 *	must release it.  This is necessary to avoid deadlock.
 *
 *	Note that we can't use the mutex owner field as an LWP pointer.  We
 *	don't have full control over the timing of our execution, and so the
 *	pointer could be completely invalid by the time we dereference it.
 */
#ifdef MULTIPROCESSOR
int
mutex_onproc(uintptr_t owner, struct cpu_info **cip)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct lwp *l;

	if (!MUTEX_OWNED(owner))
		return 0;
	l = (struct lwp *)MUTEX_OWNER(owner);

	/* See if the target is running on a CPU somewhere. */
	if ((ci = *cip) != NULL && ci->ci_curlwp == l)
		goto run;
	for (CPU_INFO_FOREACH(cii, ci))
		if (ci->ci_curlwp == l)
			goto run;

	/* No: it may be safe to block now. */
	*cip = NULL;
	return 0;

 run:
	/* Target is running; do we need to block? */
	*cip = ci;
	return ci->ci_biglock_wanted != l;
}
#endif	/* MULTIPROCESSOR */

/*
 * mutex_vector_enter:
 *
 *	Support routine for mutex_enter() that must handle all cases.  In
 *	the LOCKDEBUG case, mutex_enter() is always aliased here, even if
 *	fast-path stubs are available.  If a mutex_spin_enter() stub is
 *	not available, then it is also aliased directly here.
 */
void
mutex_vector_enter(kmutex_t *mtx)
{
	uintptr_t owner, curthread;
	turnstile_t *ts;
#ifdef MULTIPROCESSOR
	struct cpu_info *ci = NULL;
	u_int count;
#endif
	LOCKSTAT_COUNTER(spincnt);
	LOCKSTAT_COUNTER(slpcnt);
	LOCKSTAT_TIMER(spintime);
	LOCKSTAT_TIMER(slptime);
	LOCKSTAT_FLAG(lsflag);

	/*
	 * Handle spin mutexes.
	 */
	if (MUTEX_SPIN_P(mtx)) {
#if defined(LOCKDEBUG) && defined(MULTIPROCESSOR)
		u_int spins = 0;
#endif
		MUTEX_SPIN_SPLRAISE(mtx);
		MUTEX_WANTLOCK(mtx);
#ifdef FULL
		if (__cpu_simple_lock_try(&mtx->mtx_lock)) {
			MUTEX_LOCKED(mtx);
			return;
		}
#if !defined(MULTIPROCESSOR)
		MUTEX_ABORT(mtx, "locking against myself");
#else /* !MULTIPROCESSOR */

		LOCKSTAT_ENTER(lsflag);
		LOCKSTAT_START_TIMER(lsflag, spintime);
		count = SPINLOCK_BACKOFF_MIN;

		/*
		 * Spin, testing the lock word, and do exponential backoff
		 * to reduce cache line ping-ponging between CPUs.
		 */
		do {
			if (panicstr != NULL)
				break;
			while (__SIMPLELOCK_LOCKED_P(&mtx->mtx_lock)) {
				SPINLOCK_BACKOFF(count);
#ifdef LOCKDEBUG
				if (SPINLOCK_SPINOUT(spins))
					MUTEX_ABORT(mtx, "spinout");
#endif	/* LOCKDEBUG */
			}
		} while (!__cpu_simple_lock_try(&mtx->mtx_lock));

		if (count != SPINLOCK_BACKOFF_MIN) {
			LOCKSTAT_STOP_TIMER(lsflag, spintime);
			LOCKSTAT_EVENT(lsflag, mtx,
			    LB_SPIN_MUTEX | LB_SPIN, 1, spintime);
		}
		LOCKSTAT_EXIT(lsflag);
#endif	/* !MULTIPROCESSOR */
#endif	/* FULL */
		MUTEX_LOCKED(mtx);
		return;
	}

	curthread = (uintptr_t)curlwp;

	MUTEX_DASSERT(mtx, MUTEX_ADAPTIVE_P(mtx));
	MUTEX_ASSERT(mtx, curthread != 0);
	MUTEX_WANTLOCK(mtx);

#ifdef LOCKDEBUG
	if (panicstr == NULL) {
		simple_lock_only_held(NULL, "mutex_enter");
#ifdef MULTIPROCESSOR
		LOCKDEBUG_BARRIER(&kernel_lock, 1);
#else
		LOCKDEBUG_BARRIER(NULL, 1);
#endif
	}
#endif

	LOCKSTAT_ENTER(lsflag);

	/*
	 * Adaptive mutex; spin trying to acquire the mutex.  If we
	 * determine that the owner is not running on a processor,
	 * then we stop spinning, and sleep instead.
	 */
	for (;;) {
		owner = mtx->mtx_owner;
		if (!MUTEX_OWNED(owner)) {
			/*
			 * Mutex owner clear could mean two things:
			 *
			 *	* The mutex has been released.
			 *	* The owner field hasn't been set yet.
			 *
			 * Try to acquire it again.  If that fails,
			 * we'll just loop again.
			 */
			if (MUTEX_ACQUIRE(mtx, curthread))
				break;
			continue;
		}

		if (panicstr != NULL)
			return;
		if (MUTEX_OWNER(owner) == curthread)
			MUTEX_ABORT(mtx, "locking against myself");

#ifdef MULTIPROCESSOR
		/*
		 * Check to see if the owner is running on a processor.
		 * If so, then we should just spin, as the owner will
		 * likely release the lock very soon.
		 */
		if (mutex_onproc(owner, &ci)) {
			LOCKSTAT_START_TIMER(lsflag, spintime);
			count = SPINLOCK_BACKOFF_MIN;
			for (;;) {
				owner = mtx->mtx_owner;
				if (!mutex_onproc(owner, &ci))
					break;
				SPINLOCK_BACKOFF(count);
			}
			LOCKSTAT_STOP_TIMER(lsflag, spintime);
			LOCKSTAT_COUNT(spincnt, 1);
			if (!MUTEX_OWNED(owner))
				continue;
		}
#endif

		ts = turnstile_lookup(mtx);

		/*
		 * Once we have the turnstile chain interlock, mark the
		 * mutex as having waiters.  If that fails, spin again:
		 * chances are that the mutex has been released.
		 */
		if (!MUTEX_SET_WAITERS(mtx, owner)) {
			turnstile_exit(mtx);
			continue;
		}

#ifdef MULTIPROCESSOR
		/*
		 * mutex_exit() is permitted to release the mutex without
		 * any interlocking instructions, and the following can
		 * occur as a result:
		 *
		 *  CPU 1: MUTEX_SET_WAITERS()      CPU 2: mutex_exit()
		 * ---------------------------- ----------------------------
		 *		..		    acquire cache line
		 *		..                   test for waiters
		 *	acquire cache line    <-      lose cache line
		 *	 lock cache line	           ..
		 *     verify mutex is held                ..
		 *	    set waiters  	           ..
		 *	 unlock cache line		   ..
		 *	  lose cache line     ->    acquire cache line
		 *		..	          clear lock word, waiters
		 *	  return success
		 *
		 * There is another race that can occur: a third CPU could
		 * acquire the mutex as soon as it is released.  Since
		 * adaptive mutexes are primarily spin mutexes, this is not
		 * something that we need to worry about too much.  What we
		 * do need to ensure is that the waiters bit gets set.
		 *
		 * To allow the unlocked release, we need to make some
		 * assumptions here:
		 *
		 * o Release is the only non-atomic/unlocked operation
		 *   that can be performed on the mutex.  (It must still
		 *   be atomic on the local CPU, e.g. in case it is
		 *   interrupted or preempted.)
		 *
		 * o At any given time, MUTEX_SET_WAITERS() can only ever
		 *   be in progress on one CPU in the system - guaranteed
		 *   by the turnstile chain lock.
		 *
		 * o No operations other than MUTEX_SET_WAITERS()
		 *   and release can modify a mutex with a non-zero
		 *   owner field.
		 *
		 * o The result of a successful MUTEX_SET_WAITERS() call
		 *   is an unbuffered write that is immediately visible
		 *   to all other processors in the system.
		 *
		 * o If the holding LWP switches away, it posts a store
		 *   fence before changing curlwp, ensuring that any
		 *   overwrite of the mutex waiters flag by mutex_exit()
		 *   completes before the modification of curlwp becomes
		 *   visible to this CPU.
		 *
		 * o mi_switch() posts a store fence before setting curlwp
		 *   and before resuming execution of an LWP.
		 *
		 * o _kernel_lock() posts a store fence before setting
		 *   curcpu()->ci_biglock_wanted, and after clearing it.
		 *   This ensures that any overwrite of the mutex waiters
		 *   flag by mutex_exit() completes before the modification
		 *   of ci_biglock_wanted becomes visible.
		 *
		 * We now post a read memory barrier (after setting the
		 * waiters field) and check the lock holder's status again.
		 * Some of the possible outcomes (not an exhaustive list):
		 *
		 * 1. The onproc check returns true: the holding LWP is
		 *    running again.  The lock may be released soon and
		 *    we should spin.  Importantly, we can't trust the
		 *    value of the waiters flag.
		 *
		 * 2. The onproc check returns false: the holding LWP is
		 *    not running.  We now have the opportunity to check
		 *    if mutex_exit() has blatted the modifications made
		 *    by MUTEX_SET_WAITERS().
		 *
		 * 3. The onproc check returns false: the holding LWP may
		 *    or may not be running.  It has context switched at
		 *    some point during our check.  Again, we have the
		 *    chance to see if the waiters bit is still set or
		 *    has been overwritten.
		 *
		 * 4. The onproc check returns false: the holding LWP is
		 *    running on a CPU, but wants the big lock.  It's OK
		 *    to check the waiters field in this case.
		 *
		 * 5. The has-waiters check fails: the mutex has been
		 *    released, the waiters flag cleared and another LWP
		 *    now owns the mutex.
		 *
		 * 6. The has-waiters check fails: the mutex has been
		 *    released.
		 *
		 * If the waiters bit is not set it's unsafe to go to sleep,
		 * as we might never be awoken.
		 */
		if ((mb_read(), mutex_onproc(owner, &ci)) ||
		    (mb_read(), !MUTEX_HAS_WAITERS(mtx))) {
			turnstile_exit(mtx);
			continue;
		}
#endif	/* MULTIPROCESSOR */

		LOCKSTAT_START_TIMER(lsflag, slptime);

		turnstile_block(ts, TS_WRITER_Q, mtx, &mutex_syncobj);

		LOCKSTAT_STOP_TIMER(lsflag, slptime);
		LOCKSTAT_COUNT(slpcnt, 1);
	}

	LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SLEEP1,
	    slpcnt, slptime);
	LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SPIN,
	    spincnt, spintime);
	LOCKSTAT_EXIT(lsflag);

	MUTEX_DASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
	MUTEX_LOCKED(mtx);
}

/*
 * mutex_vector_exit:
 *
 *	Support routine for mutex_exit() that handles all cases.
 */
void
mutex_vector_exit(kmutex_t *mtx)
{
	turnstile_t *ts;
	uintptr_t curthread;

	if (MUTEX_SPIN_P(mtx)) {
#ifdef FULL
		if (!__SIMPLELOCK_LOCKED_P(&mtx->mtx_lock))
			MUTEX_ABORT(mtx, "exiting unheld spin mutex");
		MUTEX_UNLOCKED(mtx);
		__cpu_simple_unlock(&mtx->mtx_lock);
#endif
		MUTEX_SPIN_SPLRESTORE(mtx);
		return;
	}

	if (__predict_false((uintptr_t)panicstr | cold)) {
		MUTEX_UNLOCKED(mtx);
		MUTEX_RELEASE(mtx);
		return;
	}

	curthread = (uintptr_t)curlwp;
	MUTEX_DASSERT(mtx, curthread != 0);
	MUTEX_ASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
	MUTEX_UNLOCKED(mtx);

#ifdef LOCKDEBUG
	/*
	 * Avoid having to take the turnstile chain lock every time
	 * around.  Raise the priority level to splhigh() in order
	 * to disable preemption and so make the following atomic.
	 */
	{
		int s = splhigh();
		if (!MUTEX_HAS_WAITERS(mtx)) {
			MUTEX_RELEASE(mtx);
			splx(s);
			return;
		}
		splx(s);
	}
#endif

	/*
	 * Get this lock's turnstile.  This gets the interlock on
	 * the sleep queue.  Once we have that, we can clear the
	 * lock.  If there was no turnstile for the lock, there
	 * were no waiters remaining.
	 */
	ts = turnstile_lookup(mtx);

	if (ts == NULL) {
		MUTEX_RELEASE(mtx);
		turnstile_exit(mtx);
	} else {
		MUTEX_RELEASE(mtx);
		turnstile_wakeup(ts, TS_WRITER_Q,
		    TS_WAITERS(ts, TS_WRITER_Q), NULL);
	}
}

#ifndef __HAVE_SIMPLE_MUTEXES
/*
 * mutex_wakeup:
 *
 *	Support routine for mutex_exit() that wakes up all waiters.
 *	We assume that the mutex has been released, but it need not
 *	be.
 */
void
mutex_wakeup(kmutex_t *mtx)
{
	turnstile_t *ts;

	ts = turnstile_lookup(mtx);
	if (ts == NULL) {
		turnstile_exit(mtx);
		return;
	}
	MUTEX_CLEAR_WAITERS(mtx);
	turnstile_wakeup(ts, TS_WRITER_Q, TS_WAITERS(ts, TS_WRITER_Q), NULL);
}
#endif	/* !__HAVE_SIMPLE_MUTEXES */

/*
 * mutex_owned:
 *
 *	Return true if the current LWP (adaptive) or CPU (spin)
 *	holds the mutex.
 */
int
mutex_owned(kmutex_t *mtx)
{

	if (MUTEX_ADAPTIVE_P(mtx))
		return MUTEX_OWNER(mtx->mtx_owner) == (uintptr_t)curlwp;
#ifdef FULL
	return __SIMPLELOCK_LOCKED_P(&mtx->mtx_lock);
#else
	return 1;
#endif
}
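
/*
 * mutex_owned() is intended for diagnostic assertions rather than
 * for making locking decisions, typically (illustrative only, not
 * compiled; "example_lock" is made up):
 */
#if 0
	KASSERT(mutex_owned(&example_lock));
#endif	/* illustration */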

/*
 * mutex_owner:
 *
 *	Return the current owner of an adaptive mutex.  Used for
 *	priority inheritance.
 */
static struct lwp *
mutex_owner(wchan_t obj)
{
	kmutex_t *mtx = (void *)(uintptr_t)obj; /* discard qualifiers */

	MUTEX_ASSERT(mtx, MUTEX_ADAPTIVE_P(mtx));
	return (struct lwp *)MUTEX_OWNER(mtx->mtx_owner);
}

/*
 * mutex_tryenter:
 *
 *	Try to acquire the mutex; return non-zero if we did.
 */
int
mutex_tryenter(kmutex_t *mtx)
{
	uintptr_t curthread;

	/*
	 * Handle spin mutexes.
	 */
	if (MUTEX_SPIN_P(mtx)) {
		MUTEX_SPIN_SPLRAISE(mtx);
#ifdef FULL
		if (__cpu_simple_lock_try(&mtx->mtx_lock)) {
			MUTEX_WANTLOCK(mtx);
			MUTEX_LOCKED(mtx);
			return 1;
		}
		MUTEX_SPIN_SPLRESTORE(mtx);
#else
		MUTEX_WANTLOCK(mtx);
		MUTEX_LOCKED(mtx);
		return 1;
#endif
	} else {
		curthread = (uintptr_t)curlwp;
		MUTEX_ASSERT(mtx, curthread != 0);
		if (MUTEX_ACQUIRE(mtx, curthread)) {
			MUTEX_WANTLOCK(mtx);
			MUTEX_LOCKED(mtx);
			MUTEX_DASSERT(mtx,
			    MUTEX_OWNER(mtx->mtx_owner) == curthread);
			return 1;
		}
	}

	return 0;
}
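
/*
 * A common pattern built on mutex_tryenter(): take the lock
 * opportunistically and fall back to deferring the work rather than
 * blocking (illustrative only, not compiled; "example_lock" and
 * "example_defer_work" are made up).
 */
#if 0
	if (mutex_tryenter(&example_lock)) {
		/* Fast path: we now hold the mutex. */
		mutex_exit(&example_lock);
	} else {
		example_defer_work();
	}
#endif	/* illustration */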

#if defined(__HAVE_SPIN_MUTEX_STUBS) || defined(FULL)
/*
 * mutex_spin_retry:
 *
 *	Support routine for mutex_spin_enter().  Assumes that the caller
 *	has already raised the SPL, and adjusted counters.
 */
void
mutex_spin_retry(kmutex_t *mtx)
{
#ifdef MULTIPROCESSOR
	u_int count;
	LOCKSTAT_TIMER(spintime);
	LOCKSTAT_FLAG(lsflag);
#ifdef LOCKDEBUG
	u_int spins = 0;
#endif	/* LOCKDEBUG */

	MUTEX_WANTLOCK(mtx);

	LOCKSTAT_ENTER(lsflag);
	LOCKSTAT_START_TIMER(lsflag, spintime);
	count = SPINLOCK_BACKOFF_MIN;

	/*
	 * Spin, testing the lock word, and do exponential backoff
	 * to reduce cache line ping-ponging between CPUs.
	 */
	do {
		if (panicstr != NULL)
			break;
		while (__SIMPLELOCK_LOCKED_P(&mtx->mtx_lock)) {
			SPINLOCK_BACKOFF(count);
#ifdef LOCKDEBUG
			if (SPINLOCK_SPINOUT(spins))
				MUTEX_ABORT(mtx, "spinout");
#endif	/* LOCKDEBUG */
		}
	} while (!__cpu_simple_lock_try(&mtx->mtx_lock));

	LOCKSTAT_STOP_TIMER(lsflag, spintime);
	LOCKSTAT_EVENT(lsflag, mtx, LB_SPIN_MUTEX | LB_SPIN, 1, spintime);
	LOCKSTAT_EXIT(lsflag);

	MUTEX_LOCKED(mtx);
#else	/* MULTIPROCESSOR */
	MUTEX_ABORT(mtx, "locking against myself");
#endif	/* MULTIPROCESSOR */
}
#endif	/* defined(__HAVE_SPIN_MUTEX_STUBS) || defined(FULL) */