/*	$NetBSD: pthread_mutex.c,v 1.73 2020/02/01 15:39:56 kamil Exp $	*/

/*-
 * Copyright (c) 2001, 2003, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, by Jason R. Thorpe, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * To track threads waiting for mutexes to be released, we use lockless
 * lists built on atomic operations and memory barriers.
 *
 * A simple spinlock would be faster and make the code easier to
 * follow, but spinlocks are problematic in userspace.  If a thread is
 * preempted by the kernel while holding a spinlock, any other thread
 * attempting to acquire that spinlock will needlessly busy wait.
 *
 * There is no good way to know that the holding thread is no longer
 * running, nor to request a wake-up once it has begun running again.
 * Of more concern, threads in the SCHED_FIFO class do not have a
 * limited time quantum and so could spin forever, preventing the
 * thread holding the spinlock from getting CPU time: it would never
 * be released.
 */

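/*
 * For illustration only (a sketch, not part of the implementation):
 * the kind of naive userspace spinlock the comment above rules out,
 * written with atomic_swap_uint() from atomic_ops(3).  If the holder
 * is preempted while naive_lock_word is set, every contender burns
 * its whole time slice spinning, and a SCHED_FIFO contender can
 * starve the holder indefinitely.
 *
 *	static volatile unsigned int naive_lock_word;
 *
 *	static void
 *	naive_lock(void)
 *	{
 *
 *		while (atomic_swap_uint(&naive_lock_word, 1) != 0)
 *			pthread__smt_pause();	(busy wait)
 *	}
 *
 *	static void
 *	naive_unlock(void)
 *	{
 *
 *		naive_lock_word = 0;
 *	}
 */
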
#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_mutex.c,v 1.73 2020/02/01 15:39:56 kamil Exp $");

#include <sys/types.h>
#include <sys/lwpctl.h>
#include <sys/sched.h>
#include <sys/lock.h>

#include <errno.h>
#include <limits.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#include <stdio.h>

#include "pthread.h"
#include "pthread_int.h"
#include "reentrant.h"

#define	MUTEX_WAITERS_BIT		((uintptr_t)0x01)
#define	MUTEX_RECURSIVE_BIT		((uintptr_t)0x02)
#define	MUTEX_DEFERRED_BIT		((uintptr_t)0x04)
#define	MUTEX_PROTECT_BIT		((uintptr_t)0x08)
#define	MUTEX_THREAD			((uintptr_t)~0x0f)

#define	MUTEX_HAS_WAITERS(x)		((uintptr_t)(x) & MUTEX_WAITERS_BIT)
#define	MUTEX_RECURSIVE(x)		((uintptr_t)(x) & MUTEX_RECURSIVE_BIT)
#define	MUTEX_PROTECT(x)		((uintptr_t)(x) & MUTEX_PROTECT_BIT)
#define	MUTEX_OWNER(x)			((uintptr_t)(x) & MUTEX_THREAD)

#define	MUTEX_GET_TYPE(x)		\
    ((int)(((uintptr_t)(x) & 0x000000ff) >> 0))
#define	MUTEX_SET_TYPE(x, t)		\
    (x) = (void *)(((uintptr_t)(x) & ~0x000000ff) | ((t) << 0))
#define	MUTEX_GET_PROTOCOL(x)		\
    ((int)(((uintptr_t)(x) & 0x0000ff00) >> 8))
#define	MUTEX_SET_PROTOCOL(x, p)	\
    (x) = (void *)(((uintptr_t)(x) & ~0x0000ff00) | ((p) << 8))
#define	MUTEX_GET_CEILING(x)		\
    ((int)(((uintptr_t)(x) & 0x00ff0000) >> 16))
#define	MUTEX_SET_CEILING(x, c)		\
    (x) = (void *)(((uintptr_t)(x) & ~0x00ff0000) | ((c) << 16))

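/*
 * Illustrative sketch (not part of the implementation): the attribute
 * word stored in ptma_private packs three 8-bit fields, with the type
 * in bits 0-7, the protocol in bits 8-15 and the priority ceiling in
 * bits 16-23.  The accessors round-trip as expected; assert() here is
 * from <assert.h>:
 *
 *	void *priv = NULL;
 *
 *	MUTEX_SET_TYPE(priv, PTHREAD_MUTEX_RECURSIVE);
 *	MUTEX_SET_PROTOCOL(priv, PTHREAD_PRIO_PROTECT);
 *	MUTEX_SET_CEILING(priv, 10);
 *
 *	assert(MUTEX_GET_TYPE(priv) == PTHREAD_MUTEX_RECURSIVE);
 *	assert(MUTEX_GET_PROTOCOL(priv) == PTHREAD_PRIO_PROTECT);
 *	assert(MUTEX_GET_CEILING(priv) == 10);
 *
 * The low four bits of ptm_owner, by contrast, carry the MUTEX_*_BIT
 * flags above: MUTEX_THREAD masks them off, which works because
 * pthread_t structures are aligned to at least 16 bytes.
 */
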
#if __GNUC_PREREQ__(3, 0)
#define	NOINLINE		__attribute ((noinline))
#else
#define	NOINLINE		/* nothing */
#endif

static void	pthread__mutex_wakeup(pthread_t, pthread_mutex_t *);
static int	pthread__mutex_lock_slow(pthread_mutex_t *,
    const struct timespec *);
static int	pthread__mutex_unlock_slow(pthread_mutex_t *);
static void	pthread__mutex_pause(void);

int		_pthread_mutex_held_np(pthread_mutex_t *);
pthread_t	_pthread_mutex_owner_np(pthread_mutex_t *);

__weak_alias(pthread_mutex_held_np,_pthread_mutex_held_np)
__weak_alias(pthread_mutex_owner_np,_pthread_mutex_owner_np)

__strong_alias(__libc_mutex_init,pthread_mutex_init)
__strong_alias(__libc_mutex_lock,pthread_mutex_lock)
__strong_alias(__libc_mutex_trylock,pthread_mutex_trylock)
__strong_alias(__libc_mutex_unlock,pthread_mutex_unlock)
__strong_alias(__libc_mutex_destroy,pthread_mutex_destroy)

__strong_alias(__libc_mutexattr_init,pthread_mutexattr_init)
__strong_alias(__libc_mutexattr_destroy,pthread_mutexattr_destroy)
__strong_alias(__libc_mutexattr_settype,pthread_mutexattr_settype)

int
pthread_mutex_init(pthread_mutex_t *ptm, const pthread_mutexattr_t *attr)
{
	uintptr_t type, proto, val, ceil;

	/*
	 * Always initialize the mutex structure: it may be used
	 * later, and the cost of doing so is minimal.
	 */
	if (__predict_false(__uselibcstub))
		return __libc_mutex_init_stub(ptm, attr);

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr == NULL || attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	if (attr == NULL) {
		type = PTHREAD_MUTEX_NORMAL;
		proto = PTHREAD_PRIO_NONE;
		ceil = 0;
	} else {
		val = (uintptr_t)attr->ptma_private;

		type = MUTEX_GET_TYPE(val);
		proto = MUTEX_GET_PROTOCOL(val);
		ceil = MUTEX_GET_CEILING(val);
	}
	switch (type) {
	case PTHREAD_MUTEX_ERRORCHECK:
		__cpu_simple_lock_set(&ptm->ptm_errorcheck);
		ptm->ptm_owner = NULL;
		break;
	case PTHREAD_MUTEX_RECURSIVE:
		__cpu_simple_lock_clear(&ptm->ptm_errorcheck);
		ptm->ptm_owner = (void *)MUTEX_RECURSIVE_BIT;
		break;
	default:
		__cpu_simple_lock_clear(&ptm->ptm_errorcheck);
		ptm->ptm_owner = NULL;
		break;
	}
	switch (proto) {
	case PTHREAD_PRIO_PROTECT:
		val = (uintptr_t)ptm->ptm_owner;
		val |= MUTEX_PROTECT_BIT;
		ptm->ptm_owner = (void *)val;
		break;
	}
	ptm->ptm_magic = _PT_MUTEX_MAGIC;
	ptm->ptm_waiters = NULL;
	ptm->ptm_recursed = 0;
	ptm->ptm_ceiling = (unsigned char)ceil;

	return 0;
}

int
pthread_mutex_destroy(pthread_mutex_t *ptm)
{

	if (__predict_false(__uselibcstub))
		return __libc_mutex_destroy_stub(ptm);

	pthread__error(EINVAL, "Invalid mutex",
	    ptm->ptm_magic == _PT_MUTEX_MAGIC);
	pthread__error(EBUSY, "Destroying locked mutex",
	    MUTEX_OWNER(ptm->ptm_owner) == 0);

	ptm->ptm_magic = _PT_MUTEX_DEAD;
	return 0;
}

int
pthread_mutex_lock(pthread_mutex_t *ptm)
{
	pthread_t self;
	void *val;

	if (__predict_false(__uselibcstub))
		return __libc_mutex_lock_stub(ptm);

	pthread__error(EINVAL, "Invalid mutex",
	    ptm->ptm_magic == _PT_MUTEX_MAGIC);

	self = pthread__self();
	val = atomic_cas_ptr(&ptm->ptm_owner, NULL, self);
	if (__predict_true(val == NULL)) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
		membar_enter();
#endif
		return 0;
	}
	return pthread__mutex_lock_slow(ptm, NULL);
}

int
pthread_mutex_timedlock(pthread_mutex_t *ptm, const struct timespec *ts)
{
	pthread_t self;
	void *val;

	pthread__error(EINVAL, "Invalid mutex",
	    ptm->ptm_magic == _PT_MUTEX_MAGIC);

	self = pthread__self();
	val = atomic_cas_ptr(&ptm->ptm_owner, NULL, self);
	if (__predict_true(val == NULL)) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
		membar_enter();
#endif
		return 0;
	}
	return pthread__mutex_lock_slow(ptm, ts);
}

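/*
 * Usage sketch (not part of the implementation):
 * pthread_mutex_timedlock() takes an absolute CLOCK_REALTIME
 * deadline, which the slow path hands to _lwp_park() with
 * TIMER_ABSTIME.  Here 'mtx' and handle_timeout() are assumed to
 * exist in the caller.
 *
 *	struct timespec deadline;
 *	int error;
 *
 *	clock_gettime(CLOCK_REALTIME, &deadline);
 *	deadline.tv_sec += 5;
 *	error = pthread_mutex_timedlock(&mtx, &deadline);
 *	if (error == ETIMEDOUT)
 *		handle_timeout();
 */
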
/* We want function call overhead. */
NOINLINE static void
pthread__mutex_pause(void)
{

	pthread__smt_pause();
}

/*
 * Spin while the holder is running.  'lwpctl' gives us the true
 * status of the thread.
 */
NOINLINE static void *
pthread__mutex_spin(pthread_mutex_t *ptm, pthread_t owner)
{
	pthread_t thread;
	unsigned int count, i;

	for (count = 2;; owner = ptm->ptm_owner) {
		thread = (pthread_t)MUTEX_OWNER(owner);
		if (thread == NULL)
			break;
		if (thread->pt_lwpctl->lc_curcpu == LWPCTL_CPU_NONE)
			break;
		if (count < 128)
			count += count;
		for (i = count; i != 0; i--)
			pthread__mutex_pause();
	}

	return owner;
}

NOINLINE static bool
pthread__mutex_setwaiters(pthread_t self, pthread_mutex_t *ptm)
{
	void *owner, *next;

	/*
	 * Note that the mutex can become unlocked before we set
	 * the waiters bit.  If that happens it's not safe to sleep
	 * as we may never be awoken: we must remove the current
	 * thread from the waiters list and try again.
	 *
	 * Because we are doing this atomically, we can't remove
	 * one waiter: we must remove all waiters and awaken them,
	 * then sleep in _lwp_park() until we have been awoken.
	 *
	 * Issue a memory barrier to ensure that we are reading
	 * the value of ptm_owner/pt_mutexwait after we have entered
	 * the waiters list (the CAS itself must be atomic).
	 */
	for (owner = ptm->ptm_owner;; owner = next) {
		if (MUTEX_OWNER(owner) == 0) {
			pthread__mutex_wakeup(self, ptm);
			return true;
		}
		if (MUTEX_HAS_WAITERS(owner)) {
			return false;
		}
		next = atomic_cas_ptr(&ptm->ptm_owner, owner,
		    (void *)((uintptr_t)owner | MUTEX_WAITERS_BIT));
	}
}

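/*
 * A worked interleaving of the race handled above (illustrative):
 *
 *	thread A (holder)		thread B (would-be waiter)
 *	-----------------		--------------------------
 *					adds itself to ptm_waiters
 *	unlocks; MUTEX_WAITERS_BIT
 *	is still clear, so it
 *	wakes nobody
 *					tries to CAS MUTEX_WAITERS_BIT
 *					into ptm_owner, observes
 *					MUTEX_OWNER(owner) == 0, so it
 *					wakes all queued waiters
 *					(itself included) and retries,
 *					rather than sleeping forever
 */
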
NOINLINE static int
pthread__mutex_lock_slow(pthread_mutex_t *ptm, const struct timespec *ts)
{
	void *waiters, *new, *owner, *next;
	pthread_t self;
	int serrno;
	int error;

	owner = ptm->ptm_owner;
	self = pthread__self();

	/* Recursive or errorcheck? */
	if (MUTEX_OWNER(owner) == (uintptr_t)self) {
		if (MUTEX_RECURSIVE(owner)) {
			if (ptm->ptm_recursed == INT_MAX)
				return EAGAIN;
			ptm->ptm_recursed++;
			return 0;
		}
		if (__SIMPLELOCK_LOCKED_P(&ptm->ptm_errorcheck))
			return EDEADLK;
	}

	/* priority protect */
	if (MUTEX_PROTECT(owner) && _sched_protect(ptm->ptm_ceiling) == -1) {
		return errno;
	}
	serrno = errno;
	for (;; owner = ptm->ptm_owner) {
		/* Spin while the owner is running. */
		if (MUTEX_OWNER(owner) != (uintptr_t)self)
			owner = pthread__mutex_spin(ptm, owner);

		/* If it has become free, try to acquire it again. */
		if (MUTEX_OWNER(owner) == 0) {
			do {
				new = (void *)
				    ((uintptr_t)self | (uintptr_t)owner);
				next = atomic_cas_ptr(&ptm->ptm_owner, owner,
				    new);
				if (next == owner) {
					errno = serrno;
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
					membar_enter();
#endif
					return 0;
				}
				owner = next;
			} while (MUTEX_OWNER(owner) == 0);
			/*
			 * We have lost the race to acquire the mutex.
			 * The new owner could be running on another
			 * CPU, in which case we should spin and avoid
			 * the overhead of blocking.
			 */
			continue;
		}

		/*
		 * Nope, still held.  Add thread to the list of waiters.
		 * Issue a memory barrier to ensure mutexwait/mutexnext
		 * are visible before we enter the waiters list.
		 */
		self->pt_mutexwait = 1;
		for (waiters = ptm->ptm_waiters;; waiters = next) {
			self->pt_mutexnext = waiters;
			membar_producer();
			next = atomic_cas_ptr(&ptm->ptm_waiters, waiters, self);
			if (next == waiters)
				break;
		}

		/* Set the waiters bit and block. */
		membar_sync();
		if (pthread__mutex_setwaiters(self, ptm)) {
			continue;
		}

		/*
		 * We may have been awoken by the current thread above,
		 * or will be awoken by the current holder of the mutex.
		 * The key requirement is that we must not proceed until
		 * told that we are no longer waiting (via pt_mutexwait
		 * being set to zero).  Otherwise it is unsafe to re-enter
		 * the thread onto the waiters list.
		 */
		membar_sync();
		while (self->pt_mutexwait) {
			error = _lwp_park(CLOCK_REALTIME, TIMER_ABSTIME,
			    __UNCONST(ts), self->pt_unpark,
			    __UNVOLATILE(&ptm->ptm_waiters),
			    __UNVOLATILE(&ptm->ptm_waiters));
			self->pt_unpark = 0;
			if (__predict_true(error != -1)) {
				continue;
			}
			if (errno == ETIMEDOUT && self->pt_mutexwait) {
				/* Remove self from the waiters list. */
				pthread__mutex_wakeup(self, ptm);
				/* priority protect */
				if (MUTEX_PROTECT(owner))
					(void)_sched_protect(-1);
				return ETIMEDOUT;
			}
		}
	}
}

int
pthread_mutex_trylock(pthread_mutex_t *ptm)
{
	pthread_t self;
	void *val, *new, *next;

	if (__predict_false(__uselibcstub))
		return __libc_mutex_trylock_stub(ptm);

	pthread__error(EINVAL, "Invalid mutex",
	    ptm->ptm_magic == _PT_MUTEX_MAGIC);

	self = pthread__self();
	val = atomic_cas_ptr(&ptm->ptm_owner, NULL, self);
	if (__predict_true(val == NULL)) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
		membar_enter();
#endif
		return 0;
	}

	if (MUTEX_RECURSIVE(val)) {
		if (MUTEX_OWNER(val) == 0) {
			new = (void *)((uintptr_t)self | (uintptr_t)val);
			next = atomic_cas_ptr(&ptm->ptm_owner, val, new);
			if (__predict_true(next == val)) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}
		}
		if (MUTEX_OWNER(val) == (uintptr_t)self) {
			if (ptm->ptm_recursed == INT_MAX)
				return EAGAIN;
			ptm->ptm_recursed++;
			return 0;
		}
	}

	return EBUSY;
}

int
pthread_mutex_unlock(pthread_mutex_t *ptm)
{
	pthread_t self;
	void *value;

	if (__predict_false(__uselibcstub))
		return __libc_mutex_unlock_stub(ptm);

	pthread__error(EINVAL, "Invalid mutex",
	    ptm->ptm_magic == _PT_MUTEX_MAGIC);

#ifndef PTHREAD__ATOMIC_IS_MEMBAR
	membar_exit();
#endif
	self = pthread__self();
	value = atomic_cas_ptr(&ptm->ptm_owner, self, NULL);
	if (__predict_true(value == self)) {
		pthread__smt_wake();
		return 0;
	}
	return pthread__mutex_unlock_slow(ptm);
}

NOINLINE static int
pthread__mutex_unlock_slow(pthread_mutex_t *ptm)
{
	pthread_t self, owner, new;
	int weown, error;

	self = pthread__self();
	owner = ptm->ptm_owner;
	weown = (MUTEX_OWNER(owner) == (uintptr_t)self);
	error = 0;

	if (__SIMPLELOCK_LOCKED_P(&ptm->ptm_errorcheck)) {
		if (!weown) {
			error = EPERM;
			new = owner;
		} else {
			new = NULL;
		}
	} else if (MUTEX_RECURSIVE(owner)) {
		if (!weown) {
			error = EPERM;
			new = owner;
		} else if (ptm->ptm_recursed) {
			ptm->ptm_recursed--;
			new = owner;
		} else {
			new = (pthread_t)MUTEX_RECURSIVE_BIT;
		}
	} else {
		pthread__error(EPERM,
		    "Unlocking unlocked mutex", (owner != NULL));
		pthread__error(EPERM,
		    "Unlocking mutex owned by another thread", weown);
		new = NULL;
	}

	/*
	 * Release the mutex.  If there appear to be waiters, then
	 * wake them up.
	 */
	if (new != owner) {
		owner = atomic_swap_ptr(&ptm->ptm_owner, new);
		if (__predict_false(MUTEX_PROTECT(owner))) {
			/* restore elevated priority */
			(void)_sched_protect(-1);
		}
		if (MUTEX_HAS_WAITERS(owner) != 0) {
			pthread__mutex_wakeup(self, ptm);
			return 0;
		}
		error = 0;
	}

	if (self->pt_nwaiters == 1) {
		/*
		 * If the calling thread is about to block, defer
		 * unparking the target until _lwp_park() is called.
		 */
		if (self->pt_willpark && self->pt_unpark == 0) {
			self->pt_unpark = self->pt_waiters[0];
		} else {
			(void)_lwp_unpark(self->pt_waiters[0],
			    __UNVOLATILE(&ptm->ptm_waiters));
		}
	} else if (self->pt_nwaiters > 0) {
		(void)_lwp_unpark_all(self->pt_waiters, self->pt_nwaiters,
		    __UNVOLATILE(&ptm->ptm_waiters));
	}
	self->pt_nwaiters = 0;

	return error;
}

/*
 * pthread__mutex_wakeup: unpark threads waiting for us.
 *
 * Unpark threads on the ptm->ptm_waiters list and self->pt_waiters.
 */
static void
pthread__mutex_wakeup(pthread_t self, pthread_mutex_t *ptm)
{
	pthread_t thread, next;
	ssize_t n, rv;

	/* Take ownership of the current set of waiters. */
	thread = atomic_swap_ptr(&ptm->ptm_waiters, NULL);
	membar_datadep_consumer(); /* for alpha */
	pthread__smt_wake();

	for (;;) {
		/*
		 * Pull waiters from the queue and add to our list.
		 * Use a memory barrier to ensure that we safely
		 * read the value of pt_mutexnext before 'thread'
		 * sees pt_mutexwait being cleared.
		 */
		for (n = self->pt_nwaiters, self->pt_nwaiters = 0;
		    n < pthread__unpark_max && thread != NULL;
		    thread = next) {
			next = thread->pt_mutexnext;
			if (thread != self) {
				self->pt_waiters[n++] = thread->pt_lid;
				membar_sync();
			}
			thread->pt_mutexwait = 0;
			/* No longer safe to touch 'thread'. */
		}

		switch (n) {
		case 0:
			return;
		case 1:
			/*
			 * If the calling thread is about to block,
			 * defer unparking the target until _lwp_park()
			 * is called.
			 */
			if (self->pt_willpark && self->pt_unpark == 0) {
				self->pt_unpark = self->pt_waiters[0];
				return;
			}
			rv = (ssize_t)_lwp_unpark(self->pt_waiters[0],
			    __UNVOLATILE(&ptm->ptm_waiters));
			if (rv != 0 && errno != EALREADY && errno != EINTR &&
			    errno != ESRCH) {
				pthread__errorfunc(__FILE__, __LINE__,
				    __func__, "_lwp_unpark failed");
			}
			return;
		default:
			rv = _lwp_unpark_all(self->pt_waiters, (size_t)n,
			    __UNVOLATILE(&ptm->ptm_waiters));
			if (rv != 0 && errno != EINTR) {
				pthread__errorfunc(__FILE__, __LINE__,
				    __func__, "_lwp_unpark_all failed");
			}
			break;
		}
	}
}

int
pthread_mutexattr_init(pthread_mutexattr_t *attr)
{

	if (__predict_false(__uselibcstub))
		return __libc_mutexattr_init_stub(attr);

	attr->ptma_magic = _PT_MUTEXATTR_MAGIC;
	attr->ptma_private = (void *)PTHREAD_MUTEX_DEFAULT;
	return 0;
}

int
pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{

	if (__predict_false(__uselibcstub))
		return __libc_mutexattr_destroy_stub(attr);

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	attr->ptma_magic = _PT_MUTEXATTR_DEAD;

	return 0;
}

int
pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *typep)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	*typep = MUTEX_GET_TYPE(attr->ptma_private);
	return 0;
}

int
pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{

	if (__predict_false(__uselibcstub))
		return __libc_mutexattr_settype_stub(attr, type);

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	switch (type) {
	case PTHREAD_MUTEX_NORMAL:
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_RECURSIVE:
		MUTEX_SET_TYPE(attr->ptma_private, type);
		return 0;
	default:
		return EINVAL;
	}
}

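/*
 * Usage sketch (not part of the implementation): requesting a
 * recursive mutex through the attribute interface, using only
 * standard POSIX names.
 *
 *	pthread_mutexattr_t ma;
 *	pthread_mutex_t mtx;
 *
 *	pthread_mutexattr_init(&ma);
 *	pthread_mutexattr_settype(&ma, PTHREAD_MUTEX_RECURSIVE);
 *	pthread_mutex_init(&mtx, &ma);
 *	pthread_mutexattr_destroy(&ma);
 *
 * The owner may then relock the mutex; each lock must be balanced by
 * an unlock, up to the INT_MAX recursion limit enforced in
 * pthread__mutex_lock_slow() above.
 */
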
int
pthread_mutexattr_getprotocol(const pthread_mutexattr_t *attr, int *proto)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	*proto = MUTEX_GET_PROTOCOL(attr->ptma_private);
	return 0;
}

int
pthread_mutexattr_setprotocol(pthread_mutexattr_t *attr, int proto)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	switch (proto) {
	case PTHREAD_PRIO_NONE:
	case PTHREAD_PRIO_PROTECT:
		MUTEX_SET_PROTOCOL(attr->ptma_private, proto);
		return 0;
	case PTHREAD_PRIO_INHERIT:
		return ENOTSUP;
	default:
		return EINVAL;
	}
}

int
pthread_mutexattr_getprioceiling(const pthread_mutexattr_t *attr, int *ceil)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	*ceil = MUTEX_GET_CEILING(attr->ptma_private);
	return 0;
}

int
pthread_mutexattr_setprioceiling(pthread_mutexattr_t *attr, int ceil)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	if (ceil & ~0xff)
		return EINVAL;

	MUTEX_SET_CEILING(attr->ptma_private, ceil);
	return 0;
}

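/*
 * Usage sketch (not part of the implementation): a priority-protected
 * mutex.  The ceiling of 10 is an arbitrary example value; it must
 * fit in 8 bits here, and whether it names a meaningful priority is
 * decided by the scheduler via _sched_protect(), not in this file.
 *
 *	pthread_mutexattr_t ma;
 *	pthread_mutex_t mtx;
 *
 *	pthread_mutexattr_init(&ma);
 *	pthread_mutexattr_setprotocol(&ma, PTHREAD_PRIO_PROTECT);
 *	pthread_mutexattr_setprioceiling(&ma, 10);
 *	pthread_mutex_init(&mtx, &ma);
 *	pthread_mutexattr_destroy(&ma);
 */
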
#ifdef _PTHREAD_PSHARED
int
pthread_mutexattr_getpshared(const pthread_mutexattr_t * __restrict attr,
    int * __restrict pshared)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	*pshared = PTHREAD_PROCESS_PRIVATE;
	return 0;
}

int
pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	switch (pshared) {
	case PTHREAD_PROCESS_PRIVATE:
		return 0;
	case PTHREAD_PROCESS_SHARED:
		return ENOSYS;
	}
	return EINVAL;
}
#endif

/*
 * pthread__mutex_deferwake: try to defer unparking threads in self->pt_waiters.
 *
 * In order to avoid unnecessary contention on the interlocking mutex,
 * we defer waking up threads until we unlock the mutex.  The threads will
 * be woken up when the calling thread (self) releases the first mutex with
 * MUTEX_DEFERRED_BIT set.  That will likely be the mutex 'ptm', but it is
 * not a problem even if it isn't.
 */
void
pthread__mutex_deferwake(pthread_t self, pthread_mutex_t *ptm)
{

	if (__predict_false(ptm == NULL ||
	    MUTEX_OWNER(ptm->ptm_owner) != (uintptr_t)self)) {
		(void)_lwp_unpark_all(self->pt_waiters, self->pt_nwaiters,
		    __UNVOLATILE(&ptm->ptm_waiters));
		self->pt_nwaiters = 0;
	} else {
		atomic_or_ulong((volatile unsigned long *)
		    (uintptr_t)&ptm->ptm_owner,
		    (unsigned long)MUTEX_DEFERRED_BIT);
	}
}

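/*
 * Hypothetical caller (illustrative; the real callers live in the
 * condition variable code, which is not part of this file).  A thread
 * that has queued waiter LIDs in self->pt_waiters while owning 'ptm'
 * can defer the wakeups until the mutex is released:
 *
 *	self->pt_waiters[self->pt_nwaiters++] = lid;
 *	pthread__mutex_deferwake(self, ptm);
 *	...
 *	pthread_mutex_unlock(ptm);
 *
 * Setting MUTEX_DEFERRED_BIT makes the fast-path compare-and-swap in
 * pthread_mutex_unlock() fail, diverting into
 * pthread__mutex_unlock_slow(), which issues the pending unparks.
 */
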
int
pthread_mutex_getprioceiling(const pthread_mutex_t *ptm, int *ceil)
{

	pthread__error(EINVAL, "Invalid mutex",
	    ptm->ptm_magic == _PT_MUTEX_MAGIC);

	*ceil = ptm->ptm_ceiling;
	return 0;
}

int
pthread_mutex_setprioceiling(pthread_mutex_t *ptm, int ceil, int *old_ceil)
{
	int error;

	pthread__error(EINVAL, "Invalid mutex",
	    ptm->ptm_magic == _PT_MUTEX_MAGIC);

	error = pthread_mutex_lock(ptm);
	if (error == 0) {
		*old_ceil = ptm->ptm_ceiling;
		/* check range */
		ptm->ptm_ceiling = ceil;
		pthread_mutex_unlock(ptm);
	}
	return error;
}

int
_pthread_mutex_held_np(pthread_mutex_t *ptm)
{

	return MUTEX_OWNER(ptm->ptm_owner) == (uintptr_t)pthread__self();
}

pthread_t
_pthread_mutex_owner_np(pthread_mutex_t *ptm)
{

	return (pthread_t)MUTEX_OWNER(ptm->ptm_owner);
}
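
/*
 * Usage sketch (not part of the implementation): the non-portable
 * *_np helpers are convenient for lock-ownership assertions, using
 * assert() from <assert.h>.  The 'frob' structure and frob_bump()
 * function are hypothetical.
 *
 *	struct frob {
 *		pthread_mutex_t f_lock;
 *		int f_count;
 *	};
 *
 *	void
 *	frob_bump(struct frob *f)
 *	{
 *
 *		assert(pthread_mutex_held_np(&f->f_lock));
 *		f->f_count++;
 *	}
 */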