/*	$NetBSD: pthread_mutex.c,v 1.77 2020/05/16 22:53:37 ad Exp $	*/

/*-
 * Copyright (c) 2001, 2003, 2006, 2007, 2008, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, by Jason R. Thorpe, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * To track threads waiting for mutexes to be released, we use lockless
 * lists built on atomic operations and memory barriers.
 *
 * A simple spinlock would be faster and make the code easier to
 * follow, but spinlocks are problematic in userspace.  If a thread is
 * preempted by the kernel while holding a spinlock, any other thread
 * attempting to acquire that spinlock will needlessly busy wait.
 *
 * There is no good way to know that the holding thread is no longer
 * running, nor to request a wake-up once it has begun running again.
 * Of more concern, threads in the SCHED_FIFO class do not have a
 * limited time quantum and so could spin forever, preventing the
 * thread holding the spinlock from getting CPU time: it would never
 * be released.
 */
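
#if 0
/*
 * Illustrative sketch only, not part of the build: the lockless list
 * push used on ptm_waiters below reduces to a CAS loop of roughly
 * this shape.  'example_node' and 'example_push' are hypothetical
 * names; atomic_cas_ptr() and membar_producer() are the primitives
 * the real code uses.
 */
struct example_node {
	struct example_node *next;
};

static void
example_push(struct example_node **headp, struct example_node *node)
{
	struct example_node *head, *next;

	for (head = *headp;; head = next) {
		node->next = head;
		/* Publish 'next' before making the node reachable. */
		membar_producer();
		next = atomic_cas_ptr(headp, head, node);
		if (next == head)
			break;
	}
}
#endif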

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_mutex.c,v 1.77 2020/05/16 22:53:37 ad Exp $");

#include <sys/types.h>
#include <sys/lwpctl.h>
#include <sys/sched.h>
#include <sys/lock.h>

#include <errno.h>
#include <limits.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#include <stdio.h>

#include "pthread.h"
#include "pthread_int.h"
#include "reentrant.h"

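/*
 * The owner word of a mutex packs the owning thread pointer together
 * with the flag bits below.  This relies on pthread_t values being
 * aligned to at least 16 bytes, leaving the low four bits free: bit 0
 * records that there are waiters, bit 1 that the mutex is recursive,
 * and bit 3 that it uses the PTHREAD_PRIO_PROTECT protocol.
 */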
#define	MUTEX_WAITERS_BIT		((uintptr_t)0x01)
#define	MUTEX_RECURSIVE_BIT		((uintptr_t)0x02)
#define	MUTEX_PROTECT_BIT		((uintptr_t)0x08)
#define	MUTEX_THREAD			((uintptr_t)~0x0f)

#define	MUTEX_HAS_WAITERS(x)		((uintptr_t)(x) & MUTEX_WAITERS_BIT)
#define	MUTEX_RECURSIVE(x)		((uintptr_t)(x) & MUTEX_RECURSIVE_BIT)
#define	MUTEX_PROTECT(x)		((uintptr_t)(x) & MUTEX_PROTECT_BIT)
#define	MUTEX_OWNER(x)			((uintptr_t)(x) & MUTEX_THREAD)

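/*
 * Mutex attributes are packed into the attribute's ptma_private word:
 * the type lives in bits 0-7, the protocol in bits 8-15 and the
 * priority ceiling in bits 16-23, encoded and decoded by the
 * accessors below.
 */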
#define	MUTEX_GET_TYPE(x)		\
    ((int)(((uintptr_t)(x) & 0x000000ff) >> 0))
#define	MUTEX_SET_TYPE(x, t)		\
    (x) = (void *)(((uintptr_t)(x) & ~0x000000ff) | ((t) << 0))
#define	MUTEX_GET_PROTOCOL(x)		\
    ((int)(((uintptr_t)(x) & 0x0000ff00) >> 8))
#define	MUTEX_SET_PROTOCOL(x, p)	\
    (x) = (void *)(((uintptr_t)(x) & ~0x0000ff00) | ((p) << 8))
#define	MUTEX_GET_CEILING(x)		\
    ((int)(((uintptr_t)(x) & 0x00ff0000) >> 16))
#define	MUTEX_SET_CEILING(x, c)		\
    (x) = (void *)(((uintptr_t)(x) & ~0x00ff0000) | ((c) << 16))

#if __GNUC_PREREQ__(3, 0)
#define	NOINLINE		__attribute ((noinline))
#else
#define	NOINLINE		/* nothing */
#endif

static void	pthread__mutex_wakeup(pthread_t, pthread_mutex_t *);
static int	pthread__mutex_lock_slow(pthread_mutex_t *,
    const struct timespec *);
static void	pthread__mutex_pause(void);

int		_pthread_mutex_held_np(pthread_mutex_t *);
pthread_t	_pthread_mutex_owner_np(pthread_mutex_t *);

__weak_alias(pthread_mutex_held_np,_pthread_mutex_held_np)
__weak_alias(pthread_mutex_owner_np,_pthread_mutex_owner_np)

__strong_alias(__libc_mutex_init,pthread_mutex_init)
__strong_alias(__libc_mutex_lock,pthread_mutex_lock)
__strong_alias(__libc_mutex_trylock,pthread_mutex_trylock)
__strong_alias(__libc_mutex_unlock,pthread_mutex_unlock)
__strong_alias(__libc_mutex_destroy,pthread_mutex_destroy)

__strong_alias(__libc_mutexattr_init,pthread_mutexattr_init)
__strong_alias(__libc_mutexattr_destroy,pthread_mutexattr_destroy)
__strong_alias(__libc_mutexattr_settype,pthread_mutexattr_settype)

int
pthread_mutex_init(pthread_mutex_t *ptm, const pthread_mutexattr_t *attr)
{
	uintptr_t type, proto, val, ceil;

#if 0
	/*
	 * Always initialize the mutex structure: it may be used later,
	 * and the cost should be minimal.
	 */
	if (__predict_false(__uselibcstub))
		return __libc_mutex_init_stub(ptm, attr);
#endif

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr == NULL || attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	if (attr == NULL) {
		type = PTHREAD_MUTEX_NORMAL;
		proto = PTHREAD_PRIO_NONE;
		ceil = 0;
	} else {
		val = (uintptr_t)attr->ptma_private;

		type = MUTEX_GET_TYPE(val);
		proto = MUTEX_GET_PROTOCOL(val);
		ceil = MUTEX_GET_CEILING(val);
	}
	switch (type) {
	case PTHREAD_MUTEX_ERRORCHECK:
		__cpu_simple_lock_set(&ptm->ptm_errorcheck);
		ptm->ptm_owner = NULL;
		break;
	case PTHREAD_MUTEX_RECURSIVE:
		__cpu_simple_lock_clear(&ptm->ptm_errorcheck);
		ptm->ptm_owner = (void *)MUTEX_RECURSIVE_BIT;
		break;
	default:
		__cpu_simple_lock_clear(&ptm->ptm_errorcheck);
		ptm->ptm_owner = NULL;
		break;
	}
	switch (proto) {
	case PTHREAD_PRIO_PROTECT:
		val = (uintptr_t)ptm->ptm_owner;
		val |= MUTEX_PROTECT_BIT;
		ptm->ptm_owner = (void *)val;
		break;
	}
	ptm->ptm_magic = _PT_MUTEX_MAGIC;
	ptm->ptm_waiters = NULL;
	ptm->ptm_recursed = 0;
	ptm->ptm_ceiling = (unsigned char)ceil;

	return 0;
}
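
#if 0
/*
 * Illustrative usage sketch, not part of the build: initializing a
 * recursive mutex through the attribute interface handled above.
 * 'example_recursive_init' is a hypothetical name.
 */
static void
example_recursive_init(void)
{
	pthread_mutexattr_t attr;
	pthread_mutex_t m;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
	pthread_mutex_init(&m, &attr);
	pthread_mutexattr_destroy(&attr);
	/* The owner word now starts out as MUTEX_RECURSIVE_BIT. */
}
#endif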

int
pthread_mutex_destroy(pthread_mutex_t *ptm)
{

	if (__predict_false(__uselibcstub))
		return __libc_mutex_destroy_stub(ptm);

	pthread__error(EINVAL, "Invalid mutex",
	    ptm->ptm_magic == _PT_MUTEX_MAGIC);
	pthread__error(EBUSY, "Destroying locked mutex",
	    MUTEX_OWNER(ptm->ptm_owner) == 0);

	ptm->ptm_magic = _PT_MUTEX_DEAD;
	return 0;
}

int
pthread_mutex_lock(pthread_mutex_t *ptm)
{
	pthread_t self;
	void *val;

	if (__predict_false(__uselibcstub))
		return __libc_mutex_lock_stub(ptm);

	pthread__error(EINVAL, "Invalid mutex",
	    ptm->ptm_magic == _PT_MUTEX_MAGIC);

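	/* Fast path: CAS the owner word from NULL (unowned) to self. */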
	self = pthread__self();
	val = atomic_cas_ptr(&ptm->ptm_owner, NULL, self);
	if (__predict_true(val == NULL)) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
		membar_enter();
#endif
		return 0;
	}
	return pthread__mutex_lock_slow(ptm, NULL);
}

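/*
 * Note: the timeout is an absolute time measured against
 * CLOCK_REALTIME; it is handed through to _lwp_park() with
 * TIMER_ABSTIME in pthread__mutex_lock_slow().
 */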
int
pthread_mutex_timedlock(pthread_mutex_t *ptm, const struct timespec *ts)
{
	pthread_t self;
	void *val;

	pthread__error(EINVAL, "Invalid mutex",
	    ptm->ptm_magic == _PT_MUTEX_MAGIC);

	self = pthread__self();
	val = atomic_cas_ptr(&ptm->ptm_owner, NULL, self);
	if (__predict_true(val == NULL)) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
		membar_enter();
#endif
		return 0;
	}
	return pthread__mutex_lock_slow(ptm, ts);
}

/* We want function call overhead. */
NOINLINE static void
pthread__mutex_pause(void)
{

	pthread__smt_pause();
}

/*
 * Spin while the holder is running.  'lwpctl' gives us the true
 * status of the thread.
 */
NOINLINE static void *
pthread__mutex_spin(pthread_mutex_t *ptm, pthread_t owner)
{
	pthread_t thread;
	unsigned int count, i;

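	/*
	 * Exponential backoff: double the pause count each iteration,
	 * capping it at 128.
	 */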
	for (count = 2;; owner = ptm->ptm_owner) {
		thread = (pthread_t)MUTEX_OWNER(owner);
		if (thread == NULL)
			break;
		if (thread->pt_lwpctl->lc_curcpu == LWPCTL_CPU_NONE)
			break;
		if (count < 128)
			count += count;
		for (i = count; i != 0; i--)
			pthread__mutex_pause();
	}

	return owner;
}

NOINLINE static int
pthread__mutex_lock_slow(pthread_mutex_t *ptm, const struct timespec *ts)
{
	void *waiters, *newval, *owner, *next;
	pthread_t self;
	int serrno;
	int error;

	owner = ptm->ptm_owner;
	self = pthread__self();
	serrno = errno;

	pthread__assert(!self->pt_willpark);

	/* Recursive or errorcheck? */
	if (MUTEX_OWNER(owner) == (uintptr_t)self) {
		if (MUTEX_RECURSIVE(owner)) {
			if (ptm->ptm_recursed == INT_MAX)
				return EAGAIN;
			ptm->ptm_recursed++;
			return 0;
		}
		if (__SIMPLELOCK_LOCKED_P(&ptm->ptm_errorcheck))
			return EDEADLK;
	}

	/* priority protect */
	if (MUTEX_PROTECT(owner) && _sched_protect(ptm->ptm_ceiling) == -1) {
		error = errno;
		errno = serrno;
		return error;
	}

	for (;;) {
		/* If it has become free, try to acquire it again. */
		if (MUTEX_OWNER(owner) == 0) {
			newval = (void *)((uintptr_t)self | (uintptr_t)owner);
			next = atomic_cas_ptr(&ptm->ptm_owner, owner, newval);
			if (__predict_false(next != owner)) {
				owner = next;
				continue;
			}
			errno = serrno;
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		} else if (MUTEX_OWNER(owner) != (uintptr_t)self) {
			/* Spin while the owner is running. */
			owner = pthread__mutex_spin(ptm, owner);
			if (MUTEX_OWNER(owner) == 0) {
				continue;
			}
		}

		/*
		 * Nope, still held.  Add thread to the list of waiters.
		 * Issue a memory barrier to ensure mutexwait/mutexnext
		 * are visible before we enter the waiters list.
		 */
		self->pt_mutexwait = 1;
		for (waiters = ptm->ptm_waiters;; waiters = next) {
			self->pt_mutexnext = waiters;
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_producer();
#endif
			next = atomic_cas_ptr(&ptm->ptm_waiters, waiters, self);
			if (next == waiters)
				break;
		}

		/*
		 * Try to set the waiters bit.  If the mutex has become
		 * free since we entered ourselves onto the waiters list,
		 * we need to wake everybody up (including ourselves) and
		 * retry.  It's possible to race with the unlocking thread,
		 * so we may already have been awoken.
		 */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
		membar_sync();
#endif
		next = atomic_cas_ptr(&ptm->ptm_owner, owner,
		    (void *)((uintptr_t)owner | MUTEX_WAITERS_BIT));
		if (next != owner) {
			pthread__mutex_wakeup(self, ptm);
		}

		/*
		 * We must not proceed until told that we are no longer
		 * waiting (via pt_mutexwait being set to zero).  Otherwise
		 * it is unsafe to re-enter the thread onto the waiters
		 * list.
		 */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
		membar_sync();
#endif
		while (self->pt_mutexwait) {
			error = _lwp_park(CLOCK_REALTIME, TIMER_ABSTIME,
			    __UNCONST(ts), self->pt_unpark, NULL, NULL);
			self->pt_unpark = 0;
			if (__predict_true(error != -1)) {
				continue;
			}
			if (errno == ETIMEDOUT && self->pt_mutexwait) {
				/* Remove self from the waiters list. */
				pthread__mutex_wakeup(self, ptm);
				/* priority protect */
				if (MUTEX_PROTECT(owner))
					(void)_sched_protect(-1);
				errno = serrno;
				return ETIMEDOUT;
			}
		}
		owner = ptm->ptm_owner;
	}
}

int
pthread_mutex_trylock(pthread_mutex_t *ptm)
{
	pthread_t self;
	void *val, *new, *next;

	if (__predict_false(__uselibcstub))
		return __libc_mutex_trylock_stub(ptm);

	pthread__error(EINVAL, "Invalid mutex",
	    ptm->ptm_magic == _PT_MUTEX_MAGIC);

	self = pthread__self();
	val = atomic_cas_ptr(&ptm->ptm_owner, NULL, self);
	if (__predict_true(val == NULL)) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
		membar_enter();
#endif
		return 0;
	}

	if (MUTEX_RECURSIVE(val)) {
		if (MUTEX_OWNER(val) == 0) {
			new = (void *)((uintptr_t)self | (uintptr_t)val);
			next = atomic_cas_ptr(&ptm->ptm_owner, val, new);
			if (__predict_true(next == val)) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}
		}
		if (MUTEX_OWNER(val) == (uintptr_t)self) {
			if (ptm->ptm_recursed == INT_MAX)
				return EAGAIN;
			ptm->ptm_recursed++;
			return 0;
		}
	}

	return EBUSY;
}

int
pthread_mutex_unlock(pthread_mutex_t *ptm)
{
	pthread_t self;
	void *val;
	int error;

	if (__predict_false(__uselibcstub))
		return __libc_mutex_unlock_stub(ptm);

	pthread__error(EINVAL, "Invalid mutex",
	    ptm->ptm_magic == _PT_MUTEX_MAGIC);

#ifndef PTHREAD__ATOMIC_IS_MEMBAR
	membar_exit();
#endif
	error = 0;
	self = pthread__self();

	val = atomic_cas_ptr(&ptm->ptm_owner, self, NULL);
	if (__predict_false(val != self)) {
		bool weown = (MUTEX_OWNER(val) == (uintptr_t)self);
		void *newval = val;
		if (__SIMPLELOCK_LOCKED_P(&ptm->ptm_errorcheck)) {
			if (!weown) {
				error = EPERM;
				newval = val;
			} else {
				newval = NULL;
			}
		} else if (MUTEX_RECURSIVE(val)) {
			if (!weown) {
				error = EPERM;
				newval = val;
			} else if (ptm->ptm_recursed) {
				ptm->ptm_recursed--;
				newval = val;
			} else {
				newval = (pthread_t)MUTEX_RECURSIVE_BIT;
			}
		} else {
			pthread__error(EPERM,
			    "Unlocking unlocked mutex", (val != NULL));
			pthread__error(EPERM,
			    "Unlocking mutex owned by another thread", weown);
			newval = NULL;
		}

		/*
		 * Release the mutex.  If there appear to be waiters, then
		 * wake them up.
		 */
		if (newval != val) {
			val = atomic_swap_ptr(&ptm->ptm_owner, newval);
			if (__predict_false(MUTEX_PROTECT(val))) {
				/* restore elevated priority */
				(void)_sched_protect(-1);
			}
		}
	}
	pthread__smt_wake();

	/*
	 * Finally, wake any waiters and return.
	 */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
	membar_enter();
#endif
	if (MUTEX_HAS_WAITERS(val)) {
		pthread__mutex_wakeup(self, ptm);
	} else if (self->pt_nwaiters > 0) {
		pthread__clear_waiters(self);
	}
	return error;
}
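
#if 0
/*
 * Illustrative sketch, not part of the build: the EDEADLK/EPERM paths
 * above are observable with an error-checking mutex from portable
 * code.  'example_errorcheck' is a hypothetical name.
 */
static void
example_errorcheck(void)
{
	pthread_mutexattr_t attr;
	pthread_mutex_t m;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
	pthread_mutex_init(&m, &attr);
	pthread_mutexattr_destroy(&attr);

	pthread_mutex_lock(&m);		/* 0 */
	pthread_mutex_lock(&m);		/* EDEADLK: relock by the owner */
	pthread_mutex_unlock(&m);	/* 0 */
	pthread_mutex_unlock(&m);	/* EPERM: mutex is no longer held */
	pthread_mutex_destroy(&m);
}
#endif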

/*
 * pthread__mutex_wakeup: unpark threads waiting for us
 *
 * Unpark threads on the ptm->ptm_waiters list and self->pt_waiters.
 */

static void
pthread__mutex_wakeup(pthread_t self, pthread_mutex_t *ptm)
{
	pthread_t thread, next;

	/* Take ownership of the current set of waiters. */
	thread = atomic_swap_ptr(&ptm->ptm_waiters, NULL);
	membar_datadep_consumer(); /* for DEC Alpha */
	pthread__smt_wake();

	/*
	 * Pull waiters from the queue and add to our list.  Use a memory
	 * barrier to ensure that we safely read the value of pt_mutexnext
	 * before 'thread' sees pt_mutexwait being cleared.
	 */
	while (thread != NULL) {
		if (self->pt_nwaiters < pthread__unpark_max) {
			next = thread->pt_mutexnext;
			if (thread != self) {
				self->pt_waiters[self->pt_nwaiters++] =
				    thread->pt_lid;
				membar_sync();
			}
			thread->pt_mutexwait = 0;
			/* No longer safe to touch 'thread' */
			thread = next;
			continue;
		}
		pthread__clear_waiters(self);
	}
	if (self->pt_nwaiters > 0) {
		pthread__clear_waiters(self);
	}
}

int
pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
#if 0
	if (__predict_false(__uselibcstub))
		return __libc_mutexattr_init_stub(attr);
#endif

	attr->ptma_magic = _PT_MUTEXATTR_MAGIC;
	attr->ptma_private = (void *)PTHREAD_MUTEX_DEFAULT;
	return 0;
}

int
pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{
	if (__predict_false(__uselibcstub))
		return __libc_mutexattr_destroy_stub(attr);

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	attr->ptma_magic = _PT_MUTEXATTR_DEAD;

	return 0;
}

int
pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *typep)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	*typep = MUTEX_GET_TYPE(attr->ptma_private);
	return 0;
}

int
pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{

	if (__predict_false(__uselibcstub))
		return __libc_mutexattr_settype_stub(attr, type);

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	switch (type) {
	case PTHREAD_MUTEX_NORMAL:
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_RECURSIVE:
		MUTEX_SET_TYPE(attr->ptma_private, type);
		return 0;
	default:
		return EINVAL;
	}
}

int
pthread_mutexattr_getprotocol(const pthread_mutexattr_t *attr, int *proto)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	*proto = MUTEX_GET_PROTOCOL(attr->ptma_private);
	return 0;
}

int
pthread_mutexattr_setprotocol(pthread_mutexattr_t *attr, int proto)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	switch (proto) {
	case PTHREAD_PRIO_NONE:
	case PTHREAD_PRIO_PROTECT:
		MUTEX_SET_PROTOCOL(attr->ptma_private, proto);
		return 0;
	case PTHREAD_PRIO_INHERIT:
		return ENOTSUP;
	default:
		return EINVAL;
	}
}

int
pthread_mutexattr_getprioceiling(const pthread_mutexattr_t *attr, int *ceil)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	*ceil = MUTEX_GET_CEILING(attr->ptma_private);
	return 0;
}

int
pthread_mutexattr_setprioceiling(pthread_mutexattr_t *attr, int ceil)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	if (ceil & ~0xff)
		return EINVAL;

	MUTEX_SET_CEILING(attr->ptma_private, ceil);
	return 0;
}

#ifdef _PTHREAD_PSHARED
int
pthread_mutexattr_getpshared(const pthread_mutexattr_t * __restrict attr,
    int * __restrict pshared)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	*pshared = PTHREAD_PROCESS_PRIVATE;
	return 0;
}

int
pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	switch (pshared) {
	case PTHREAD_PROCESS_PRIVATE:
		return 0;
	case PTHREAD_PROCESS_SHARED:
		return ENOSYS;
	}
	return EINVAL;
}
#endif

/*
 * pthread__mutex_deferwake: try to defer unparking threads in self->pt_waiters
 *
 * In order to avoid unnecessary contention on interlocking mutexes, we try
 * to defer waking up threads until we unlock the mutex.  The threads will
 * be woken up when the calling thread (self) releases a mutex.
 */
void
pthread__mutex_deferwake(pthread_t self, pthread_mutex_t *ptm)
{

	if (__predict_false(ptm == NULL ||
	    MUTEX_OWNER(ptm->ptm_owner) != (uintptr_t)self)) {
		pthread__clear_waiters(self);
	}
}

int
pthread_mutex_getprioceiling(const pthread_mutex_t *ptm, int *ceil)
{

	pthread__error(EINVAL, "Invalid mutex",
	    ptm->ptm_magic == _PT_MUTEX_MAGIC);

	*ceil = ptm->ptm_ceiling;
	return 0;
}

int
pthread_mutex_setprioceiling(pthread_mutex_t *ptm, int ceil, int *old_ceil)
{
	int error;

	pthread__error(EINVAL, "Invalid mutex",
	    ptm->ptm_magic == _PT_MUTEX_MAGIC);

	error = pthread_mutex_lock(ptm);
	if (error == 0) {
		*old_ceil = ptm->ptm_ceiling;
		/* check range */
		ptm->ptm_ceiling = ceil;
		pthread_mutex_unlock(ptm);
	}
	return error;
}

int
_pthread_mutex_held_np(pthread_mutex_t *ptm)
{

	return MUTEX_OWNER(ptm->ptm_owner) == (uintptr_t)pthread__self();
}

pthread_t
_pthread_mutex_owner_np(pthread_mutex_t *ptm)
{

	return (pthread_t)MUTEX_OWNER(ptm->ptm_owner);
}