1 1.83 riastrad /* $NetBSD: pthread_mutex.c,v 1.83 2022/04/10 10:38:33 riastradh Exp $ */
2 1.2 thorpej
3 1.2 thorpej /*-
4 1.77 ad  * Copyright (c) 2001, 2003, 2006, 2007, 2008, 2020 The NetBSD Foundation, Inc.
5 1.2 thorpej  * All rights reserved.
6 1.2 thorpej  *
7 1.2 thorpej  * This code is derived from software contributed to The NetBSD Foundation
8 1.27 ad  * by Nathan J. Williams, by Jason R. Thorpe, and by Andrew Doran.
9 1.2 thorpej  *
10 1.2 thorpej  * Redistribution and use in source and binary forms, with or without
11 1.2 thorpej  * modification, are permitted provided that the following conditions
12 1.2 thorpej  * are met:
13 1.2 thorpej  * 1. Redistributions of source code must retain the above copyright
14 1.2 thorpej  *    notice, this list of conditions and the following disclaimer.
15 1.2 thorpej  * 2. Redistributions in binary form must reproduce the above copyright
16 1.2 thorpej  *    notice, this list of conditions and the following disclaimer in the
17 1.2 thorpej  *    documentation and/or other materials provided with the distribution.
18 1.2 thorpej  *
19 1.2 thorpej  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 1.2 thorpej  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 1.2 thorpej  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 1.2 thorpej  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 1.2 thorpej  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 1.2 thorpej  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 1.2 thorpej  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 1.2 thorpej  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 1.2 thorpej  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 1.2 thorpej  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 1.2 thorpej  * POSSIBILITY OF SUCH DAMAGE.
30 1.2 thorpej  */
31 1.2 thorpej
32 1.49 ad /*
33 1.49 ad  * To track threads waiting for mutexes to be released, we use lockless
34 1.49 ad  * lists built on atomic operations and memory barriers.
35 1.49 ad  *
36 1.49 ad  * A simple spinlock would be faster and make the code easier to
37 1.49 ad  * follow, but spinlocks are problematic in userspace.  If a thread is
38 1.49 ad  * preempted by the kernel while holding a spinlock, any other thread
39 1.49 ad  * attempting to acquire that spinlock will needlessly busy wait.
40 1.49 ad  *
41 1.49 ad  * There is no good way to know that the holding thread is no longer
42 1.83 riastrad  * running, nor to request a wake-up once it has begun running again.
43 1.49 ad  * Of more concern, threads in the SCHED_FIFO class do not have a
44 1.49 ad  * limited time quantum and so could spin forever, preventing the
45 1.49 ad  * thread holding the spinlock from getting CPU time: it would never
46 1.49 ad  * be released.
47 1.49 ad  */
48 1.49 ad
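/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * the lock-free LIFO push/drain pattern described in the comment above,
 * reduced to the list manipulation only.  The names waiter_example,
 * waiter_push() and waiter_drain() are invented for illustration; the real
 * code further below also parks/unparks LWPs and issues additional memory
 * barriers.  Guarded by "#if 0" so it is never compiled.
 */
#if 0
#include <sys/atomic.h>
#include <stddef.h>

struct waiter_example {
        struct waiter_example *volatile next;
};

/* Push 'w' onto the head of the list with a compare-and-swap loop. */
static void
waiter_push(struct waiter_example *volatile *head, struct waiter_example *w)
{
        struct waiter_example *old, *prev;

        for (old = *head;; old = prev) {
                w->next = old;
                /* Make the store to w->next visible before publishing 'w'. */
                membar_producer();
                prev = atomic_cas_ptr(head, old, w);
                if (prev == old)
                        break;  /* CAS succeeded: 'w' is the new head. */
        }
}

/* Atomically detach the whole list so the caller can wake every entry. */
static struct waiter_example *
waiter_drain(struct waiter_example *volatile *head)
{
        return atomic_swap_ptr(head, NULL);
}
#endif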
49 1.2 thorpej #include <sys/cdefs.h>
50 1.83 riastrad __RCSID("$NetBSD: pthread_mutex.c,v 1.83 2022/04/10 10:38:33 riastradh Exp $");
51 1.82 riastrad
52 1.82 riastrad /* Need to use libc-private names for atomic operations. */
53 1.82 riastrad #include "../../common/lib/libc/atomic/atomic_op_namespace.h"
54 1.40 ad
55 1.40 ad #include <sys/types.h>
56 1.44 ad #include <sys/lwpctl.h>
57 1.60 christos #include <sys/sched.h>
58 1.51 matt #include <sys/lock.h>
59 1.10 lukem
60 1.2 thorpej #include <errno.h>
61 1.2 thorpej #include <limits.h>
62 1.2 thorpej #include <stdlib.h>
63 1.56 christos #include <time.h>
64 1.6 scw #include <string.h>
65 1.44 ad #include <stdio.h>
66 1.2 thorpej
67 1.2 thorpej #include "pthread.h"
68 1.2 thorpej #include "pthread_int.h"
69 1.56 christos #include "reentrant.h"
70 1.2 thorpej
71 1.44 ad #define MUTEX_RECURSIVE_BIT ((uintptr_t)0x02)
72 1.60 christos #define MUTEX_PROTECT_BIT ((uintptr_t)0x08)
73 1.60 christos #define MUTEX_THREAD ((uintptr_t)~0x0f)
74 1.44 ad
75 1.44 ad #define MUTEX_RECURSIVE(x) ((uintptr_t)(x) & MUTEX_RECURSIVE_BIT)
76 1.60 christos #define MUTEX_PROTECT(x) ((uintptr_t)(x) & MUTEX_PROTECT_BIT)
77 1.44 ad #define MUTEX_OWNER(x) ((uintptr_t)(x) & MUTEX_THREAD)
78 1.44 ad
79 1.60 christos #define MUTEX_GET_TYPE(x) \
80 1.60 christos     ((int)(((uintptr_t)(x) & 0x000000ff) >> 0))
81 1.60 christos #define MUTEX_SET_TYPE(x, t) \
82 1.60 christos     (x) = (void *)(((uintptr_t)(x) & ~0x000000ff) | ((t) << 0))
83 1.60 christos #define MUTEX_GET_PROTOCOL(x) \
84 1.60 christos     ((int)(((uintptr_t)(x) & 0x0000ff00) >> 8))
85 1.60 christos #define MUTEX_SET_PROTOCOL(x, p) \
86 1.60 christos     (x) = (void *)(((uintptr_t)(x) & ~0x0000ff00) | ((p) << 8))
87 1.60 christos #define MUTEX_GET_CEILING(x) \
88 1.60 christos     ((int)(((uintptr_t)(x) & 0x00ff0000) >> 16))
89 1.60 christos #define MUTEX_SET_CEILING(x, c) \
90 1.60 christos     (x) = (void *)(((uintptr_t)(x) & ~0x00ff0000) | ((c) << 16))
91 1.60 christos
92 1.44 ad #if __GNUC_PREREQ__(3, 0)
93 1.44 ad #define NOINLINE __attribute ((noinline))
94 1.44 ad #else
95 1.44 ad #define NOINLINE /* nothing */
96 1.44 ad #endif
97 1.44 ad
98 1.80 ad struct waiter {
99 1.80 ad         struct waiter *volatile next;
100 1.80 ad         lwpid_t volatile lid;
101 1.80 ad };
102 1.80 ad
103 1.80 ad static void pthread__mutex_wakeup(pthread_t, struct pthread__waiter *);
104 1.60 christos static int pthread__mutex_lock_slow(pthread_mutex_t *,
105 1.60 christos     const struct timespec *);
106 1.44 ad static void pthread__mutex_pause(void);
107 1.2 thorpej
108 1.39 ad int _pthread_mutex_held_np(pthread_mutex_t *);
109 1.39 ad pthread_t _pthread_mutex_owner_np(pthread_mutex_t *);
110 1.39 ad
111 1.39 ad __weak_alias(pthread_mutex_held_np,_pthread_mutex_held_np)
112 1.39 ad __weak_alias(pthread_mutex_owner_np,_pthread_mutex_owner_np)
113 1.39 ad
114 1.2 thorpej __strong_alias(__libc_mutex_init,pthread_mutex_init)
115 1.2 thorpej __strong_alias(__libc_mutex_lock,pthread_mutex_lock)
116 1.2 thorpej __strong_alias(__libc_mutex_trylock,pthread_mutex_trylock)
117 1.2 thorpej __strong_alias(__libc_mutex_unlock,pthread_mutex_unlock)
118 1.2 thorpej __strong_alias(__libc_mutex_destroy,pthread_mutex_destroy)
119 1.4 thorpej
120 1.4 thorpej __strong_alias(__libc_mutexattr_init,pthread_mutexattr_init)
121 1.4 thorpej __strong_alias(__libc_mutexattr_destroy,pthread_mutexattr_destroy)
122 1.5 thorpej __strong_alias(__libc_mutexattr_settype,pthread_mutexattr_settype)
123 1.2 thorpej
124 1.2 thorpej int
125 1.44 ad pthread_mutex_init(pthread_mutex_t *ptm, const pthread_mutexattr_t *attr)
126 1.2 thorpej {
127 1.60 christos         uintptr_t type, proto, val, ceil;
128 1.2 thorpej
129 1.76 kamil #if 0
130 1.65 christos         /*
131 1.65 christos          * Always initialize the mutex structure; it may be used later
132 1.65 christos          * and the cost should be minimal.
133 1.65 christos          */
134 1.56 christos         if (__predict_false(__uselibcstub))
135 1.56 christos                 return __libc_mutex_init_stub(ptm, attr);
136 1.76 kamil #endif
137 1.56 christos
138 1.72 kamil         pthread__error(EINVAL, "Invalid mutex attribute",
139 1.72 kamil             attr == NULL || attr->ptma_magic == _PT_MUTEXATTR_MAGIC);
140 1.72 kamil
141 1.60 christos         if (attr == NULL) {
142 1.44 ad                 type = PTHREAD_MUTEX_NORMAL;
143 1.60 christos                 proto = PTHREAD_PRIO_NONE;
144 1.60 christos                 ceil = 0;
145 1.60 christos         } else {
146 1.60 christos                 val = (uintptr_t)attr->ptma_private;
147 1.2 thorpej
148 1.60 christos                 type = MUTEX_GET_TYPE(val);
149 1.60 christos                 proto = MUTEX_GET_PROTOCOL(val);
150 1.60 christos                 ceil = MUTEX_GET_CEILING(val);
151 1.60 christos         }
152 1.44 ad         switch (type) {
153 1.44 ad         case PTHREAD_MUTEX_ERRORCHECK:
154 1.51 matt                 __cpu_simple_lock_set(&ptm->ptm_errorcheck);
155 1.44 ad                 ptm->ptm_owner = NULL;
156 1.44 ad                 break;
157 1.44 ad         case PTHREAD_MUTEX_RECURSIVE:
158 1.51 matt                 __cpu_simple_lock_clear(&ptm->ptm_errorcheck);
159 1.44 ad                 ptm->ptm_owner = (void *)MUTEX_RECURSIVE_BIT;
160 1.44 ad                 break;
161 1.44 ad         default:
162 1.51 matt                 __cpu_simple_lock_clear(&ptm->ptm_errorcheck);
163 1.44 ad                 ptm->ptm_owner = NULL;
164 1.44 ad                 break;
165 1.2 thorpej         }
166 1.60 christos         switch (proto) {
167 1.60 christos         case PTHREAD_PRIO_PROTECT:
168 1.60 christos                 val = (uintptr_t)ptm->ptm_owner;
169 1.60 christos                 val |= MUTEX_PROTECT_BIT;
170 1.60 christos                 ptm->ptm_owner = (void *)val;
171 1.60 christos                 break;
172 1.2 thorpej
173 1.60 christos         }
174 1.44 ad         ptm->ptm_magic = _PT_MUTEX_MAGIC;
175 1.44 ad         ptm->ptm_waiters = NULL;
176 1.45 ad         ptm->ptm_recursed = 0;
177 1.60 christos         ptm->ptm_ceiling = (unsigned char)ceil;
178 1.2 thorpej
179 1.2 thorpej         return 0;
180 1.2 thorpej }
181 1.2 thorpej
182 1.2 thorpej int
183 1.44 ad pthread_mutex_destroy(pthread_mutex_t *ptm)
184 1.2 thorpej {
185 1.2 thorpej
186 1.56 christos         if (__predict_false(__uselibcstub))
187 1.56 christos                 return __libc_mutex_destroy_stub(ptm);
188 1.56 christos
189 1.14 nathanw         pthread__error(EINVAL, "Invalid mutex",
190 1.44 ad             ptm->ptm_magic == _PT_MUTEX_MAGIC);
191 1.14 nathanw         pthread__error(EBUSY, "Destroying locked mutex",
192 1.44 ad             MUTEX_OWNER(ptm->ptm_owner) == 0);
193 1.2 thorpej
194 1.44 ad         ptm->ptm_magic = _PT_MUTEX_DEAD;
195 1.2 thorpej         return 0;
196 1.2 thorpej }
197 1.2 thorpej
198 1.2 thorpej int
199 1.44 ad pthread_mutex_lock(pthread_mutex_t *ptm)
200 1.2 thorpej {
201 1.27 ad         pthread_t self;
202 1.44 ad         void *val;
203 1.2 thorpej
204 1.56 christos         if (__predict_false(__uselibcstub))
205 1.56 christos                 return __libc_mutex_lock_stub(ptm);
206 1.56 christos
207 1.70 kamil         pthread__error(EINVAL, "Invalid mutex",
208 1.70 kamil             ptm->ptm_magic == _PT_MUTEX_MAGIC);
209 1.70 kamil
210 1.27 ad         self = pthread__self();
211 1.44 ad         val = atomic_cas_ptr(&ptm->ptm_owner, NULL, self);
212 1.44 ad         if (__predict_true(val == NULL)) {
213 1.44 ad #ifndef PTHREAD__ATOMIC_IS_MEMBAR
214 1.44 ad                 membar_enter();
215 1.44 ad #endif
216 1.44 ad                 return 0;
217 1.2 thorpej         }
218 1.60 christos         return pthread__mutex_lock_slow(ptm, NULL);
219 1.60 christos }
220 1.60 christos
221 1.60 christos int
222 1.60 christos pthread_mutex_timedlock(pthread_mutex_t* ptm, const struct timespec *ts)
223 1.60 christos {
224 1.60 christos         pthread_t self;
225 1.60 christos         void *val;
226 1.60 christos
227 1.70 kamil         pthread__error(EINVAL, "Invalid mutex",
228 1.70 kamil             ptm->ptm_magic == _PT_MUTEX_MAGIC);
229 1.70 kamil
230 1.60 christos self = pthread__self(); 231 1.60 christos val = atomic_cas_ptr(&ptm->ptm_owner, NULL, self); 232 1.60 christos if (__predict_true(val == NULL)) { 233 1.60 christos #ifndef PTHREAD__ATOMIC_IS_MEMBAR 234 1.60 christos membar_enter(); 235 1.60 christos #endif 236 1.60 christos return 0; 237 1.60 christos } 238 1.60 christos return pthread__mutex_lock_slow(ptm, ts); 239 1.44 ad } 240 1.2 thorpej 241 1.44 ad /* We want function call overhead. */ 242 1.44 ad NOINLINE static void 243 1.44 ad pthread__mutex_pause(void) 244 1.44 ad { 245 1.2 thorpej 246 1.44 ad pthread__smt_pause(); 247 1.2 thorpej } 248 1.2 thorpej 249 1.44 ad /* 250 1.44 ad * Spin while the holder is running. 'lwpctl' gives us the true 251 1.66 ad * status of the thread. 252 1.44 ad */ 253 1.44 ad NOINLINE static void * 254 1.44 ad pthread__mutex_spin(pthread_mutex_t *ptm, pthread_t owner) 255 1.44 ad { 256 1.44 ad pthread_t thread; 257 1.44 ad unsigned int count, i; 258 1.44 ad 259 1.44 ad for (count = 2;; owner = ptm->ptm_owner) { 260 1.44 ad thread = (pthread_t)MUTEX_OWNER(owner); 261 1.44 ad if (thread == NULL) 262 1.44 ad break; 263 1.66 ad if (thread->pt_lwpctl->lc_curcpu == LWPCTL_CPU_NONE) 264 1.44 ad break; 265 1.83 riastrad if (count < 128) 266 1.44 ad count += count; 267 1.44 ad for (i = count; i != 0; i--) 268 1.44 ad pthread__mutex_pause(); 269 1.44 ad } 270 1.2 thorpej 271 1.44 ad return owner; 272 1.44 ad } 273 1.44 ad 274 1.44 ad NOINLINE static int 275 1.60 christos pthread__mutex_lock_slow(pthread_mutex_t *ptm, const struct timespec *ts) 276 1.2 thorpej { 277 1.80 ad void *newval, *owner, *next; 278 1.80 ad struct waiter waiter; 279 1.44 ad pthread_t self; 280 1.57 christos int serrno; 281 1.60 christos int error; 282 1.2 thorpej 283 1.44 ad owner = ptm->ptm_owner; 284 1.44 ad self = pthread__self(); 285 1.77 ad serrno = errno; 286 1.77 ad 287 1.80 ad pthread__assert(self->pt_lid != 0); 288 1.13 nathanw 289 1.44 ad /* Recursive or errorcheck? */ 290 1.44 ad if (MUTEX_OWNER(owner) == (uintptr_t)self) { 291 1.44 ad if (MUTEX_RECURSIVE(owner)) { 292 1.45 ad if (ptm->ptm_recursed == INT_MAX) 293 1.44 ad return EAGAIN; 294 1.45 ad ptm->ptm_recursed++; 295 1.44 ad return 0; 296 1.29 ad } 297 1.51 matt if (__SIMPLELOCK_LOCKED_P(&ptm->ptm_errorcheck)) 298 1.44 ad return EDEADLK; 299 1.44 ad } 300 1.29 ad 301 1.60 christos /* priority protect */ 302 1.60 christos if (MUTEX_PROTECT(owner) && _sched_protect(ptm->ptm_ceiling) == -1) { 303 1.77 ad error = errno; 304 1.77 ad errno = serrno; 305 1.77 ad return error; 306 1.60 christos } 307 1.44 ad 308 1.77 ad for (;;) { 309 1.44 ad /* If it has become free, try to acquire it again. */ 310 1.44 ad if (MUTEX_OWNER(owner) == 0) { 311 1.77 ad newval = (void *)((uintptr_t)self | (uintptr_t)owner); 312 1.77 ad next = atomic_cas_ptr(&ptm->ptm_owner, owner, newval); 313 1.77 ad if (__predict_false(next != owner)) { 314 1.77 ad owner = next; 315 1.77 ad continue; 316 1.77 ad } 317 1.77 ad errno = serrno; 318 1.44 ad #ifndef PTHREAD__ATOMIC_IS_MEMBAR 319 1.77 ad membar_enter(); 320 1.44 ad #endif 321 1.77 ad return 0; 322 1.77 ad } else if (MUTEX_OWNER(owner) != (uintptr_t)self) { 323 1.77 ad /* Spin while the owner is running. */ 324 1.77 ad owner = pthread__mutex_spin(ptm, owner); 325 1.77 ad if (MUTEX_OWNER(owner) == 0) { 326 1.77 ad continue; 327 1.77 ad } 328 1.44 ad } 329 1.21 chs 330 1.2 thorpej /* 331 1.44 ad * Nope, still held. Add thread to the list of waiters. 
332 1.80 ad * Issue a memory barrier to ensure stores to 'waiter' 333 1.80 ad * are visible before we enter the list. 334 1.2 thorpej */ 335 1.80 ad waiter.next = ptm->ptm_waiters; 336 1.80 ad waiter.lid = self->pt_lid; 337 1.77 ad #ifndef PTHREAD__ATOMIC_IS_MEMBAR 338 1.80 ad membar_producer(); 339 1.77 ad #endif 340 1.80 ad next = atomic_cas_ptr(&ptm->ptm_waiters, waiter.next, &waiter); 341 1.80 ad if (next != waiter.next) { 342 1.80 ad owner = ptm->ptm_owner; 343 1.80 ad continue; 344 1.44 ad } 345 1.80 ad 346 1.77 ad /* 347 1.80 ad * If the mutex has become free since entering self onto the 348 1.80 ad * waiters list, need to wake everybody up (including self) 349 1.80 ad * and retry. It's possible to race with an unlocking 350 1.80 ad * thread, so self may have already been awoken. 351 1.77 ad */ 352 1.77 ad #ifndef PTHREAD__ATOMIC_IS_MEMBAR 353 1.80 ad membar_enter(); 354 1.77 ad #endif 355 1.80 ad if (MUTEX_OWNER(ptm->ptm_owner) == 0) { 356 1.80 ad pthread__mutex_wakeup(self, 357 1.80 ad atomic_swap_ptr(&ptm->ptm_waiters, NULL)); 358 1.66 ad } 359 1.21 chs 360 1.29 ad /* 361 1.77 ad * We must not proceed until told that we are no longer 362 1.80 ad * waiting (via waiter.lid being set to zero). Otherwise 363 1.80 ad * it's unsafe to re-enter "waiter" onto the waiters list. 364 1.29 ad */ 365 1.80 ad while (waiter.lid != 0) { 366 1.64 kre error = _lwp_park(CLOCK_REALTIME, TIMER_ABSTIME, 367 1.80 ad __UNCONST(ts), 0, NULL, NULL); 368 1.78 ad if (error < 0 && errno == ETIMEDOUT) { 369 1.78 ad /* Remove self from waiters list */ 370 1.80 ad pthread__mutex_wakeup(self, 371 1.80 ad atomic_swap_ptr(&ptm->ptm_waiters, NULL)); 372 1.79 ad 373 1.79 ad /* 374 1.79 ad * Might have raced with another thread to 375 1.79 ad * do the wakeup. In any case there will be 376 1.79 ad * a wakeup for sure. Eat it and wait for 377 1.80 ad * waiter.lid to clear. 
378 1.79 ad */ 379 1.80 ad while (waiter.lid != 0) { 380 1.80 ad (void)_lwp_park(CLOCK_MONOTONIC, 0, 381 1.80 ad NULL, 0, NULL, NULL); 382 1.80 ad } 383 1.79 ad 384 1.78 ad /* Priority protect */ 385 1.60 christos if (MUTEX_PROTECT(owner)) 386 1.60 christos (void)_sched_protect(-1); 387 1.77 ad errno = serrno; 388 1.60 christos return ETIMEDOUT; 389 1.60 christos } 390 1.80 ad } 391 1.77 ad owner = ptm->ptm_owner; 392 1.2 thorpej } 393 1.2 thorpej } 394 1.2 thorpej 395 1.2 thorpej int 396 1.44 ad pthread_mutex_trylock(pthread_mutex_t *ptm) 397 1.2 thorpej { 398 1.27 ad pthread_t self; 399 1.46 ad void *val, *new, *next; 400 1.2 thorpej 401 1.56 christos if (__predict_false(__uselibcstub)) 402 1.56 christos return __libc_mutex_trylock_stub(ptm); 403 1.56 christos 404 1.70 kamil pthread__error(EINVAL, "Invalid mutex", 405 1.70 kamil ptm->ptm_magic == _PT_MUTEX_MAGIC); 406 1.70 kamil 407 1.27 ad self = pthread__self(); 408 1.44 ad val = atomic_cas_ptr(&ptm->ptm_owner, NULL, self); 409 1.44 ad if (__predict_true(val == NULL)) { 410 1.44 ad #ifndef PTHREAD__ATOMIC_IS_MEMBAR 411 1.44 ad membar_enter(); 412 1.44 ad #endif 413 1.44 ad return 0; 414 1.44 ad } 415 1.27 ad 416 1.46 ad if (MUTEX_RECURSIVE(val)) { 417 1.46 ad if (MUTEX_OWNER(val) == 0) { 418 1.46 ad new = (void *)((uintptr_t)self | (uintptr_t)val); 419 1.46 ad next = atomic_cas_ptr(&ptm->ptm_owner, val, new); 420 1.46 ad if (__predict_true(next == val)) { 421 1.46 ad #ifndef PTHREAD__ATOMIC_IS_MEMBAR 422 1.46 ad membar_enter(); 423 1.46 ad #endif 424 1.46 ad return 0; 425 1.46 ad } 426 1.46 ad } 427 1.46 ad if (MUTEX_OWNER(val) == (uintptr_t)self) { 428 1.46 ad if (ptm->ptm_recursed == INT_MAX) 429 1.46 ad return EAGAIN; 430 1.46 ad ptm->ptm_recursed++; 431 1.46 ad return 0; 432 1.46 ad } 433 1.2 thorpej } 434 1.2 thorpej 435 1.44 ad return EBUSY; 436 1.2 thorpej } 437 1.2 thorpej 438 1.2 thorpej int 439 1.44 ad pthread_mutex_unlock(pthread_mutex_t *ptm) 440 1.2 thorpej { 441 1.27 ad pthread_t self; 442 1.80 ad void *val, *newval; 443 1.77 ad int error; 444 1.44 ad 445 1.56 christos if (__predict_false(__uselibcstub)) 446 1.56 christos return __libc_mutex_unlock_stub(ptm); 447 1.56 christos 448 1.70 kamil pthread__error(EINVAL, "Invalid mutex", 449 1.70 kamil ptm->ptm_magic == _PT_MUTEX_MAGIC); 450 1.70 kamil 451 1.44 ad #ifndef PTHREAD__ATOMIC_IS_MEMBAR 452 1.44 ad membar_exit(); 453 1.44 ad #endif 454 1.77 ad error = 0; 455 1.44 ad self = pthread__self(); 456 1.80 ad newval = NULL; 457 1.44 ad 458 1.80 ad val = atomic_cas_ptr(&ptm->ptm_owner, self, newval); 459 1.77 ad if (__predict_false(val != self)) { 460 1.77 ad bool weown = (MUTEX_OWNER(val) == (uintptr_t)self); 461 1.77 ad if (__SIMPLELOCK_LOCKED_P(&ptm->ptm_errorcheck)) { 462 1.77 ad if (!weown) { 463 1.77 ad error = EPERM; 464 1.77 ad newval = val; 465 1.77 ad } else { 466 1.77 ad newval = NULL; 467 1.77 ad } 468 1.77 ad } else if (MUTEX_RECURSIVE(val)) { 469 1.77 ad if (!weown) { 470 1.77 ad error = EPERM; 471 1.77 ad newval = val; 472 1.77 ad } else if (ptm->ptm_recursed) { 473 1.77 ad ptm->ptm_recursed--; 474 1.77 ad newval = val; 475 1.77 ad } else { 476 1.77 ad newval = (pthread_t)MUTEX_RECURSIVE_BIT; 477 1.77 ad } 478 1.44 ad } else { 479 1.77 ad pthread__error(EPERM, 480 1.77 ad "Unlocking unlocked mutex", (val != NULL)); 481 1.77 ad pthread__error(EPERM, 482 1.77 ad "Unlocking mutex owned by another thread", weown); 483 1.77 ad newval = NULL; 484 1.44 ad } 485 1.77 ad 486 1.77 ad /* 487 1.77 ad * Release the mutex. 
If there appear to be waiters, then 488 1.77 ad * wake them up. 489 1.77 ad */ 490 1.77 ad if (newval != val) { 491 1.77 ad val = atomic_swap_ptr(&ptm->ptm_owner, newval); 492 1.77 ad if (__predict_false(MUTEX_PROTECT(val))) { 493 1.77 ad /* restore elevated priority */ 494 1.77 ad (void)_sched_protect(-1); 495 1.77 ad } 496 1.44 ad } 497 1.44 ad } 498 1.2 thorpej 499 1.2 thorpej /* 500 1.77 ad * Finally, wake any waiters and return. 501 1.2 thorpej */ 502 1.77 ad #ifndef PTHREAD__ATOMIC_IS_MEMBAR 503 1.77 ad membar_enter(); 504 1.77 ad #endif 505 1.80 ad if (MUTEX_OWNER(newval) == 0 && ptm->ptm_waiters != NULL) { 506 1.80 ad pthread__mutex_wakeup(self, 507 1.80 ad atomic_swap_ptr(&ptm->ptm_waiters, NULL)); 508 1.2 thorpej } 509 1.44 ad return error; 510 1.44 ad } 511 1.44 ad 512 1.55 yamt /* 513 1.55 yamt * pthread__mutex_wakeup: unpark threads waiting for us 514 1.55 yamt */ 515 1.55 yamt 516 1.44 ad static void 517 1.80 ad pthread__mutex_wakeup(pthread_t self, struct pthread__waiter *cur) 518 1.44 ad { 519 1.80 ad lwpid_t lids[PTHREAD__UNPARK_MAX]; 520 1.80 ad const size_t mlid = pthread__unpark_max; 521 1.80 ad struct pthread__waiter *next; 522 1.80 ad size_t nlid; 523 1.44 ad 524 1.77 ad /* 525 1.77 ad * Pull waiters from the queue and add to our list. Use a memory 526 1.80 ad * barrier to ensure that we safely read the value of waiter->next 527 1.80 ad * before the awoken thread sees waiter->lid being cleared. 528 1.77 ad */ 529 1.80 ad membar_datadep_consumer(); /* for alpha */ 530 1.80 ad for (nlid = 0; cur != NULL; cur = next) { 531 1.80 ad if (nlid == mlid) { 532 1.80 ad (void)_lwp_unpark_all(lids, nlid, NULL); 533 1.80 ad nlid = 0; 534 1.44 ad } 535 1.80 ad next = cur->next; 536 1.80 ad pthread__assert(cur->lid != 0); 537 1.80 ad lids[nlid++] = cur->lid; 538 1.81 ad membar_exit(); 539 1.80 ad cur->lid = 0; 540 1.80 ad /* No longer safe to touch 'cur' */ 541 1.80 ad } 542 1.80 ad if (nlid == 1) { 543 1.80 ad (void)_lwp_unpark(lids[0], NULL); 544 1.80 ad } else if (nlid > 1) { 545 1.80 ad (void)_lwp_unpark_all(lids, nlid, NULL); 546 1.44 ad } 547 1.2 thorpej } 548 1.55 yamt 549 1.2 thorpej int 550 1.2 thorpej pthread_mutexattr_init(pthread_mutexattr_t *attr) 551 1.2 thorpej { 552 1.76 kamil #if 0 553 1.56 christos if (__predict_false(__uselibcstub)) 554 1.56 christos return __libc_mutexattr_init_stub(attr); 555 1.76 kamil #endif 556 1.2 thorpej 557 1.2 thorpej attr->ptma_magic = _PT_MUTEXATTR_MAGIC; 558 1.44 ad attr->ptma_private = (void *)PTHREAD_MUTEX_DEFAULT; 559 1.2 thorpej return 0; 560 1.2 thorpej } 561 1.2 thorpej 562 1.2 thorpej int 563 1.2 thorpej pthread_mutexattr_destroy(pthread_mutexattr_t *attr) 564 1.2 thorpej { 565 1.56 christos if (__predict_false(__uselibcstub)) 566 1.56 christos return __libc_mutexattr_destroy_stub(attr); 567 1.2 thorpej 568 1.14 nathanw pthread__error(EINVAL, "Invalid mutex attribute", 569 1.14 nathanw attr->ptma_magic == _PT_MUTEXATTR_MAGIC); 570 1.2 thorpej 571 1.69 kamil attr->ptma_magic = _PT_MUTEXATTR_DEAD; 572 1.69 kamil 573 1.2 thorpej return 0; 574 1.2 thorpej } 575 1.2 thorpej 576 1.2 thorpej int 577 1.2 thorpej pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *typep) 578 1.2 thorpej { 579 1.60 christos 580 1.14 nathanw pthread__error(EINVAL, "Invalid mutex attribute", 581 1.14 nathanw attr->ptma_magic == _PT_MUTEXATTR_MAGIC); 582 1.2 thorpej 583 1.60 christos *typep = MUTEX_GET_TYPE(attr->ptma_private); 584 1.2 thorpej return 0; 585 1.2 thorpej } 586 1.2 thorpej 587 1.2 thorpej int 588 1.2 thorpej 
pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type) 589 1.2 thorpej { 590 1.60 christos 591 1.56 christos if (__predict_false(__uselibcstub)) 592 1.56 christos return __libc_mutexattr_settype_stub(attr, type); 593 1.2 thorpej 594 1.14 nathanw pthread__error(EINVAL, "Invalid mutex attribute", 595 1.14 nathanw attr->ptma_magic == _PT_MUTEXATTR_MAGIC); 596 1.13 nathanw 597 1.2 thorpej switch (type) { 598 1.2 thorpej case PTHREAD_MUTEX_NORMAL: 599 1.2 thorpej case PTHREAD_MUTEX_ERRORCHECK: 600 1.2 thorpej case PTHREAD_MUTEX_RECURSIVE: 601 1.60 christos MUTEX_SET_TYPE(attr->ptma_private, type); 602 1.60 christos return 0; 603 1.60 christos default: 604 1.60 christos return EINVAL; 605 1.60 christos } 606 1.60 christos } 607 1.60 christos 608 1.60 christos int 609 1.60 christos pthread_mutexattr_getprotocol(const pthread_mutexattr_t *attr, int*proto) 610 1.60 christos { 611 1.83 riastrad 612 1.60 christos pthread__error(EINVAL, "Invalid mutex attribute", 613 1.60 christos attr->ptma_magic == _PT_MUTEXATTR_MAGIC); 614 1.60 christos 615 1.60 christos *proto = MUTEX_GET_PROTOCOL(attr->ptma_private); 616 1.60 christos return 0; 617 1.60 christos } 618 1.60 christos 619 1.83 riastrad int 620 1.60 christos pthread_mutexattr_setprotocol(pthread_mutexattr_t* attr, int proto) 621 1.60 christos { 622 1.60 christos 623 1.60 christos pthread__error(EINVAL, "Invalid mutex attribute", 624 1.60 christos attr->ptma_magic == _PT_MUTEXATTR_MAGIC); 625 1.60 christos 626 1.60 christos switch (proto) { 627 1.60 christos case PTHREAD_PRIO_NONE: 628 1.60 christos case PTHREAD_PRIO_PROTECT: 629 1.60 christos MUTEX_SET_PROTOCOL(attr->ptma_private, proto); 630 1.44 ad return 0; 631 1.60 christos case PTHREAD_PRIO_INHERIT: 632 1.60 christos return ENOTSUP; 633 1.2 thorpej default: 634 1.2 thorpej return EINVAL; 635 1.2 thorpej } 636 1.2 thorpej } 637 1.2 thorpej 638 1.83 riastrad int 639 1.60 christos pthread_mutexattr_getprioceiling(const pthread_mutexattr_t *attr, int *ceil) 640 1.60 christos { 641 1.83 riastrad 642 1.60 christos pthread__error(EINVAL, "Invalid mutex attribute", 643 1.60 christos attr->ptma_magic == _PT_MUTEXATTR_MAGIC); 644 1.60 christos 645 1.60 christos *ceil = MUTEX_GET_CEILING(attr->ptma_private); 646 1.60 christos return 0; 647 1.60 christos } 648 1.60 christos 649 1.83 riastrad int 650 1.83 riastrad pthread_mutexattr_setprioceiling(pthread_mutexattr_t *attr, int ceil) 651 1.60 christos { 652 1.60 christos 653 1.60 christos pthread__error(EINVAL, "Invalid mutex attribute", 654 1.60 christos attr->ptma_magic == _PT_MUTEXATTR_MAGIC); 655 1.60 christos 656 1.60 christos if (ceil & ~0xff) 657 1.60 christos return EINVAL; 658 1.60 christos 659 1.60 christos MUTEX_SET_CEILING(attr->ptma_private, ceil); 660 1.60 christos return 0; 661 1.60 christos } 662 1.60 christos 663 1.60 christos #ifdef _PTHREAD_PSHARED 664 1.60 christos int 665 1.60 christos pthread_mutexattr_getpshared(const pthread_mutexattr_t * __restrict attr, 666 1.60 christos int * __restrict pshared) 667 1.60 christos { 668 1.60 christos 669 1.70 kamil pthread__error(EINVAL, "Invalid mutex attribute", 670 1.70 kamil attr->ptma_magic == _PT_MUTEXATTR_MAGIC); 671 1.70 kamil 672 1.60 christos *pshared = PTHREAD_PROCESS_PRIVATE; 673 1.60 christos return 0; 674 1.60 christos } 675 1.60 christos 676 1.60 christos int 677 1.60 christos pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared) 678 1.60 christos { 679 1.60 christos 680 1.70 kamil pthread__error(EINVAL, "Invalid mutex attribute", 681 1.70 kamil attr->ptma_magic 
== _PT_MUTEXATTR_MAGIC); 682 1.70 kamil 683 1.60 christos switch(pshared) { 684 1.60 christos case PTHREAD_PROCESS_PRIVATE: 685 1.60 christos return 0; 686 1.60 christos case PTHREAD_PROCESS_SHARED: 687 1.60 christos return ENOSYS; 688 1.60 christos } 689 1.60 christos return EINVAL; 690 1.60 christos } 691 1.60 christos #endif 692 1.60 christos 693 1.55 yamt /* 694 1.77 ad * In order to avoid unnecessary contention on interlocking mutexes, we try 695 1.77 ad * to defer waking up threads until we unlock the mutex. The threads will 696 1.80 ad * be woken up when the calling thread (self) releases the mutex. 697 1.55 yamt */ 698 1.50 ad void 699 1.80 ad pthread__mutex_deferwake(pthread_t self, pthread_mutex_t *ptm, 700 1.80 ad struct pthread__waiter *head) 701 1.33 ad { 702 1.80 ad struct pthread__waiter *tail, *n, *o; 703 1.80 ad 704 1.80 ad pthread__assert(head != NULL); 705 1.33 ad 706 1.50 ad if (__predict_false(ptm == NULL || 707 1.50 ad MUTEX_OWNER(ptm->ptm_owner) != (uintptr_t)self)) { 708 1.80 ad pthread__mutex_wakeup(self, head); 709 1.80 ad return; 710 1.80 ad } 711 1.80 ad 712 1.80 ad /* This is easy if no existing waiters on mutex. */ 713 1.80 ad if (atomic_cas_ptr(&ptm->ptm_waiters, NULL, head) == NULL) { 714 1.80 ad return; 715 1.80 ad } 716 1.80 ad 717 1.80 ad /* Oops need to append. Find the tail of the new queue. */ 718 1.80 ad for (tail = head; tail->next != NULL; tail = tail->next) { 719 1.80 ad /* nothing */ 720 1.80 ad } 721 1.80 ad 722 1.80 ad /* Append atomically. */ 723 1.80 ad for (o = ptm->ptm_waiters;; o = n) { 724 1.80 ad tail->next = o; 725 1.81 ad #ifndef PTHREAD__ATOMIC_IS_MEMBAR 726 1.81 ad membar_producer(); 727 1.81 ad #endif 728 1.80 ad n = atomic_cas_ptr(&ptm->ptm_waiters, o, head); 729 1.80 ad if (__predict_true(n == o)) { 730 1.80 ad break; 731 1.80 ad } 732 1.50 ad } 733 1.33 ad } 734 1.33 ad 735 1.39 ad int 736 1.83 riastrad pthread_mutex_getprioceiling(const pthread_mutex_t *ptm, int *ceil) 737 1.60 christos { 738 1.70 kamil 739 1.70 kamil pthread__error(EINVAL, "Invalid mutex", 740 1.70 kamil ptm->ptm_magic == _PT_MUTEX_MAGIC); 741 1.70 kamil 742 1.62 skrll *ceil = ptm->ptm_ceiling; 743 1.60 christos return 0; 744 1.60 christos } 745 1.60 christos 746 1.60 christos int 747 1.83 riastrad pthread_mutex_setprioceiling(pthread_mutex_t *ptm, int ceil, int *old_ceil) 748 1.60 christos { 749 1.60 christos int error; 750 1.60 christos 751 1.70 kamil pthread__error(EINVAL, "Invalid mutex", 752 1.70 kamil ptm->ptm_magic == _PT_MUTEX_MAGIC); 753 1.70 kamil 754 1.60 christos error = pthread_mutex_lock(ptm); 755 1.60 christos if (error == 0) { 756 1.62 skrll *old_ceil = ptm->ptm_ceiling; 757 1.60 christos /*check range*/ 758 1.62 skrll ptm->ptm_ceiling = ceil; 759 1.60 christos pthread_mutex_unlock(ptm); 760 1.60 christos } 761 1.60 christos return error; 762 1.60 christos } 763 1.60 christos 764 1.60 christos int 765 1.44 ad _pthread_mutex_held_np(pthread_mutex_t *ptm) 766 1.39 ad { 767 1.39 ad 768 1.44 ad return MUTEX_OWNER(ptm->ptm_owner) == (uintptr_t)pthread__self(); 769 1.39 ad } 770 1.39 ad 771 1.39 ad pthread_t 772 1.44 ad _pthread_mutex_owner_np(pthread_mutex_t *ptm) 773 1.39 ad { 774 1.39 ad 775 1.44 ad return (pthread_t)MUTEX_OWNER(ptm->ptm_owner); 776 1.39 ad } 777
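/*
 * Usage example (editorial addition, not part of the original source): a
 * minimal sketch of how the error-check and recursive mutex types set up in
 * pthread_mutex_init() behave from an application's point of view.  The
 * function name example_mutex_types() is invented for illustration, and the
 * block is guarded by "#if 0" so it is never compiled into libpthread.
 */
#if 0
#include <assert.h>
#include <errno.h>
#include <pthread.h>

static void
example_mutex_types(void)
{
        pthread_mutex_t m;
        pthread_mutexattr_t ma;

        /* An error-check mutex refuses to be relocked by its owner ... */
        pthread_mutexattr_init(&ma);
        pthread_mutexattr_settype(&ma, PTHREAD_MUTEX_ERRORCHECK);
        pthread_mutex_init(&m, &ma);
        assert(pthread_mutex_lock(&m) == 0);
        assert(pthread_mutex_lock(&m) == EDEADLK);
        assert(pthread_mutex_unlock(&m) == 0);
        pthread_mutex_destroy(&m);

        /* ... while a recursive mutex counts nested lock/unlock pairs. */
        pthread_mutexattr_settype(&ma, PTHREAD_MUTEX_RECURSIVE);
        pthread_mutex_init(&m, &ma);
        assert(pthread_mutex_lock(&m) == 0);
        assert(pthread_mutex_lock(&m) == 0);
        assert(pthread_mutex_unlock(&m) == 0);
        assert(pthread_mutex_unlock(&m) == 0);
        pthread_mutex_destroy(&m);
        pthread_mutexattr_destroy(&ma);
}
#endif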