Lines matching defs:ptm (uses of the pthread_mutex_t *ptm argument in NetBSD's src/lib/libpthread/pthread_mutex.c)

125 pthread_mutex_init(pthread_mutex_t *ptm, const pthread_mutexattr_t *attr)
135 return __libc_mutex_init_stub(ptm, attr);
154 __cpu_simple_lock_set(&ptm->ptm_errorcheck);
155 ptm->ptm_owner = NULL;
158 __cpu_simple_lock_clear(&ptm->ptm_errorcheck);
159 ptm->ptm_owner = (void *)MUTEX_RECURSIVE_BIT;
162 __cpu_simple_lock_clear(&ptm->ptm_errorcheck);
163 ptm->ptm_owner = NULL;
168 val = (uintptr_t)ptm->ptm_owner;
170 ptm->ptm_owner = (void *)val;
174 ptm->ptm_magic = _PT_MUTEX_MAGIC;
175 ptm->ptm_waiters = NULL;
176 ptm->ptm_recursed = 0;
177 ptm->ptm_ceiling = (unsigned char)ceil;
183 pthread_mutex_destroy(pthread_mutex_t *ptm)
187 return __libc_mutex_destroy_stub(ptm);
190 ptm->ptm_magic == _PT_MUTEX_MAGIC);
192 MUTEX_OWNER(ptm->ptm_owner) == 0);
194 ptm->ptm_magic = _PT_MUTEX_DEAD;
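
The init path above picks the mutex flavor from the attribute: PTHREAD_MUTEX_ERRORCHECK sets the ptm_errorcheck simple lock, PTHREAD_MUTEX_RECURSIVE seeds ptm_owner with MUTEX_RECURSIVE_BIT, and a PTHREAD_PRIO_PROTECT protocol ORs MUTEX_PROTECT_BIT into the owner word and records the ceiling; destroy then insists the mutex is unowned before poisoning ptm_magic. A minimal caller-side sketch of those branches through the portable POSIX calls (error checking omitted; nothing here is NetBSD-specific):

#include <pthread.h>

int
main(void)
{
	pthread_mutexattr_t attr;
	pthread_mutex_t m;

	/* Recursive mutex: init stores MUTEX_RECURSIVE_BIT in ptm_owner. */
	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
	pthread_mutex_init(&m, &attr);

	pthread_mutex_lock(&m);
	pthread_mutex_lock(&m);		/* nested lock bumps ptm_recursed */
	pthread_mutex_unlock(&m);
	pthread_mutex_unlock(&m);

	/* destroy asserts the mutex is unowned and poisons ptm_magic. */
	pthread_mutex_destroy(&m);
	pthread_mutexattr_destroy(&attr);
	return 0;
}
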
199 pthread_mutex_lock(pthread_mutex_t *ptm)
205 return __libc_mutex_lock_stub(ptm);
208 ptm->ptm_magic == _PT_MUTEX_MAGIC);
211 val = atomic_cas_ptr(&ptm->ptm_owner, NULL, self);
218 return pthread__mutex_lock_slow(ptm, NULL);
222 pthread_mutex_timedlock(pthread_mutex_t* ptm, const struct timespec *ts)
228 ptm->ptm_magic == _PT_MUTEX_MAGIC);
231 val = atomic_cas_ptr(&ptm->ptm_owner, NULL, self);
238 return pthread__mutex_lock_slow(ptm, ts);
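
pthread_mutex_lock() and pthread_mutex_timedlock() share one fast path: a single atomic_cas_ptr() that installs the calling thread as ptm_owner when the mutex is unowned (NULL), deferring everything else to pthread__mutex_lock_slow() (with the timeout, if any, passed along). A minimal sketch of that pattern in portable C11 atomics; toy_mutex and toy_lock_fast are illustrative names, not the libpthread ones:

#include <stdatomic.h>

/* Toy model of the owner word: NULL means unlocked, otherwise it holds
 * the owning thread (the real code stores the LWP pointer plus flag bits). */
struct toy_mutex {
	_Atomic(void *) owner;
};

static int
toy_lock_fast(struct toy_mutex *m, void *self)
{
	void *expected = NULL;

	/* Uncontended case: one CAS from NULL to self takes the lock. */
	if (atomic_compare_exchange_strong(&m->owner, &expected, self))
		return 0;

	/* Owner already set (contention, recursion, error checking):
	 * this is where pthread__mutex_lock_slow() takes over. */
	return -1;
}
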
254 pthread__mutex_spin(pthread_mutex_t *ptm, pthread_t owner)
259 for (count = 2;; owner = ptm->ptm_owner) {
275 pthread__mutex_lock_slow(pthread_mutex_t *ptm, const struct timespec *ts)
283 owner = ptm->ptm_owner;
292 if (ptm->ptm_recursed == INT_MAX)
294 ptm->ptm_recursed++;
297 if (__SIMPLELOCK_LOCKED_P(&ptm->ptm_errorcheck))
302 if (MUTEX_PROTECT(owner) && _sched_protect(ptm->ptm_ceiling) == -1) {
312 next = atomic_cas_ptr(&ptm->ptm_owner, owner, newval);
324 owner = pthread__mutex_spin(ptm, owner);
335 waiter.next = ptm->ptm_waiters;
340 next = atomic_cas_ptr(&ptm->ptm_waiters, waiter.next, &waiter);
342 owner = ptm->ptm_owner;
355 if (MUTEX_OWNER(ptm->ptm_owner) == 0) {
357 atomic_swap_ptr(&ptm->ptm_waiters, NULL));
371 atomic_swap_ptr(&ptm->ptm_waiters, NULL));
391 owner = ptm->ptm_owner;
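
When the fast path fails, pthread__mutex_lock_slow() handles recursion (bounding ptm_recursed at INT_MAX), errorcheck self-deadlock, and priority protection (_sched_protect() with ptm_ceiling), spins briefly via pthread__mutex_spin() while the owner changes, and finally pushes a waiter record onto the lock-free ptm_waiters list before sleeping; if ownership is dropped in the meantime it swaps the whole list out and wakes it, so no wakeup is lost. The push itself is a plain lock-free LIFO insert; a sketch, assuming an illustrative waiter node (the field names are not the libpthread ones):

#include <stdatomic.h>

struct waiter {
	struct waiter *next;
	/* in the real code: the LWP to wake, park/unpark state, ... */
};

static void
push_waiter(_Atomic(struct waiter *) *head, struct waiter *w)
{
	struct waiter *old;

	/* Lock-free LIFO push: link to the current head, then CAS it in;
	 * retry when another thread changed the head in between. */
	old = atomic_load(head);
	do {
		w->next = old;
	} while (!atomic_compare_exchange_weak(head, &old, w));
}
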
396 pthread_mutex_trylock(pthread_mutex_t *ptm)
402 return __libc_mutex_trylock_stub(ptm);
405 ptm->ptm_magic == _PT_MUTEX_MAGIC);
408 val = atomic_cas_ptr(&ptm->ptm_owner, NULL, self);
419 next = atomic_cas_ptr(&ptm->ptm_owner, val, new);
428 if (ptm->ptm_recursed == INT_MAX)
430 ptm->ptm_recursed++;
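
pthread_mutex_trylock() performs the same owner CAS but never blocks: on contention it returns EBUSY, and for a recursive mutex already held by the caller it only bumps ptm_recursed. A small usage sketch with the default, non-recursive type:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

int
main(void)
{
	if (pthread_mutex_trylock(&m) == 0) {
		/* Got the lock without blocking. */
		pthread_mutex_unlock(&m);
	}

	pthread_mutex_lock(&m);
	/* A second trylock on a non-recursive mutex fails with EBUSY
	 * rather than blocking or recursing. */
	if (pthread_mutex_trylock(&m) == EBUSY)
		printf("already locked\n");
	pthread_mutex_unlock(&m);
	return 0;
}
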
439 pthread_mutex_unlock(pthread_mutex_t *ptm)
446 return __libc_mutex_unlock_stub(ptm);
449 ptm->ptm_magic == _PT_MUTEX_MAGIC);
458 val = atomic_cas_ptr(&ptm->ptm_owner, self, newval);
461 if (__SIMPLELOCK_LOCKED_P(&ptm->ptm_errorcheck)) {
472 } else if (ptm->ptm_recursed) {
473 ptm->ptm_recursed--;
491 val = atomic_swap_ptr(&ptm->ptm_owner, newval);
505 if (MUTEX_OWNER(newval) == 0 && ptm->ptm_waiters != NULL) {
507 atomic_swap_ptr(&ptm->ptm_waiters, NULL));
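
Unlock is the mirror image: clear the errorcheck/recursion bookkeeping (EPERM when an errorcheck mutex is released by a non-owner, ptm_recursed-- for a nested recursive unlock), swap the owner word clear, and, once ownership has really dropped, hand the entire ptm_waiters list to the wakeup code in one atomic swap. A toy release sketch in C11 atomics, continuing the illustrative toy_mutex above (wake_all is a stub standing in for the real unpark logic):

#include <stdatomic.h>

struct waiter {
	struct waiter *next;
};

struct toy_mutex {
	_Atomic(void *) owner;
	_Atomic(struct waiter *) waiters;
};

static void
wake_all(struct waiter *w)
{
	/* Stub: the real code unparks each queued LWP. */
	(void)w;
}

static void
toy_unlock(struct toy_mutex *m)
{
	struct waiter *w;

	/* Drop ownership first ... */
	atomic_store(&m->owner, NULL);

	/* ... then take the whole waiter list in one swap and wake it,
	 * mirroring atomic_swap_ptr(&ptm->ptm_waiters, NULL) above. */
	w = atomic_exchange(&m->waiters, NULL);
	if (w != NULL)
		wake_all(w);
}
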
699 pthread__mutex_deferwake(pthread_t self, pthread_mutex_t *ptm,
706 if (__predict_false(ptm == NULL ||
707 MUTEX_OWNER(ptm->ptm_owner) != (uintptr_t)self)) {
713 if (atomic_cas_ptr(&ptm->ptm_waiters, NULL, head) == NULL) {
723 for (o = ptm->ptm_waiters;; o = n) {
728 n = atomic_cas_ptr(&ptm->ptm_waiters, o, head);
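
pthread__mutex_deferwake() is the hook that lets condition-variable signalling defer wakeups: when the caller still owns the mutex, the chain of woken waiters is spliced onto ptm_waiters so that the eventual pthread_mutex_unlock() does the unparking, instead of waking threads that would immediately block on the mutex again. The splice is the usual lock-free prepend of a ready-made chain; a sketch with illustrative names, assuming head..tail is already linked:

#include <stdatomic.h>

struct waiter {
	struct waiter *next;
};

/* Prepend the chain head..tail onto a lock-free LIFO list. */
static void
splice_waiters(_Atomic(struct waiter *) *list,
    struct waiter *head, struct waiter *tail)
{
	struct waiter *old;

	old = atomic_load(list);
	do {
		tail->next = old;	/* chain the old list after our tail */
	} while (!atomic_compare_exchange_weak(list, &old, head));
}
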
736 pthread_mutex_getprioceiling(const pthread_mutex_t *ptm, int *ceil)
740 ptm->ptm_magic == _PT_MUTEX_MAGIC);
742 *ceil = ptm->ptm_ceiling;
747 pthread_mutex_setprioceiling(pthread_mutex_t *ptm, int ceil, int *old_ceil)
752 ptm->ptm_magic == _PT_MUTEX_MAGIC);
754 error = pthread_mutex_lock(ptm);
756 *old_ceil = ptm->ptm_ceiling;
758 ptm->ptm_ceiling = ceil;
759 pthread_mutex_unlock(ptm);
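
The priority-ceiling pair is straightforward: pthread_mutex_getprioceiling() just reads ptm_ceiling, and pthread_mutex_setprioceiling() locks the mutex, swaps the ceiling, and unlocks, so the change cannot race with a holder. Ceilings only take effect for mutexes created with the PTHREAD_PRIO_PROTECT protocol; a usage sketch (the ceiling values are arbitrary and every call here can fail, so check the return values in real code):

#include <pthread.h>

int
main(void)
{
	pthread_mutexattr_t attr;
	pthread_mutex_t m;
	int old, cur;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_PROTECT);
	pthread_mutexattr_setprioceiling(&attr, 5);
	pthread_mutex_init(&m, &attr);

	pthread_mutex_getprioceiling(&m, &cur);		/* reads ptm_ceiling */
	pthread_mutex_setprioceiling(&m, 10, &old);	/* lock, swap, unlock */

	pthread_mutex_destroy(&m);
	pthread_mutexattr_destroy(&attr);
	return 0;
}
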
765 _pthread_mutex_held_np(pthread_mutex_t *ptm)
768 return MUTEX_OWNER(ptm->ptm_owner) == (uintptr_t)pthread__self();
772 _pthread_mutex_owner_np(pthread_mutex_t *ptm)
775 return (pthread_t)MUTEX_OWNER(ptm->ptm_owner);
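
The two non-portable helpers expose ownership for debugging: _pthread_mutex_held_np() reports whether the calling thread is the owner, and _pthread_mutex_owner_np() returns the owner with the flag bits masked off by MUTEX_OWNER(). A hedged sketch of the obvious use, an ownership assertion; the prototypes are assumed from the definitions above and the helper name must_hold_lock is invented:

#include <assert.h>
#include <pthread.h>

/* NetBSD-specific helpers; prototypes assumed from the definitions above. */
int		_pthread_mutex_held_np(pthread_mutex_t *);
pthread_t	_pthread_mutex_owner_np(pthread_mutex_t *);

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void
must_hold_lock(void)
{
	/* Catch callers that forgot to take the lock; when the assertion
	 * fires, _pthread_mutex_owner_np(&lock) tells you who does hold it. */
	assert(_pthread_mutex_held_np(&lock));

	/* ... access the data protected by lock ... */
}
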