/*	$NetBSD: pthread_mutex.c,v 1.81 2020/06/11 18:41:22 ad Exp $	*/

/*-
 * Copyright (c) 2001, 2003, 2006, 2007, 2008, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, by Jason R. Thorpe, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * To track threads waiting for mutexes to be released, we use lockless
 * lists built on atomic operations and memory barriers.
 *
 * A simple spinlock would be faster and make the code easier to
 * follow, but spinlocks are problematic in userspace.  If a thread is
 * preempted by the kernel while holding a spinlock, any other thread
 * attempting to acquire that spinlock will needlessly busy wait.
 *
 * There is no good way to know that the holding thread is no longer
 * running, nor to request a wake-up once it has begun running again.
 * Of more concern, threads in the SCHED_FIFO class do not have a
 * limited time quantum and so could spin forever, preventing the
 * thread holding the spinlock from getting CPU time: it would never
 * be released.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_mutex.c,v 1.81 2020/06/11 18:41:22 ad Exp $");

#include <sys/types.h>
#include <sys/lwpctl.h>
#include <sys/sched.h>
#include <sys/lock.h>

#include <errno.h>
#include <limits.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#include <stdio.h>

#include "pthread.h"
#include "pthread_int.h"
#include "reentrant.h"

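/*
 * The owner word (ptm_owner) packs mutex state into a single pointer:
 * the low four bits hold flags (recursive, priority protect) and the
 * remaining bits hold the owning thread, so pthread_t values are
 * assumed to be aligned to at least 16 bytes.
 */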
#define	MUTEX_RECURSIVE_BIT		((uintptr_t)0x02)
#define	MUTEX_PROTECT_BIT		((uintptr_t)0x08)
#define	MUTEX_THREAD			((uintptr_t)~0x0f)

#define	MUTEX_RECURSIVE(x)	((uintptr_t)(x) & MUTEX_RECURSIVE_BIT)
#define	MUTEX_PROTECT(x)	((uintptr_t)(x) & MUTEX_PROTECT_BIT)
#define	MUTEX_OWNER(x)		((uintptr_t)(x) & MUTEX_THREAD)

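/*
 * Mutex attributes are packed into the ptma_private word one byte at a
 * time: byte 0 holds the type, byte 1 the protocol and byte 2 the
 * priority ceiling.  The macros below extract and update those fields.
 */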
#define	MUTEX_GET_TYPE(x)	\
    ((int)(((uintptr_t)(x) & 0x000000ff) >> 0))
#define	MUTEX_SET_TYPE(x, t)	\
    (x) = (void *)(((uintptr_t)(x) & ~0x000000ff) | ((t) << 0))
#define	MUTEX_GET_PROTOCOL(x)	\
    ((int)(((uintptr_t)(x) & 0x0000ff00) >> 8))
#define	MUTEX_SET_PROTOCOL(x, p)	\
    (x) = (void *)(((uintptr_t)(x) & ~0x0000ff00) | ((p) << 8))
#define	MUTEX_GET_CEILING(x)	\
    ((int)(((uintptr_t)(x) & 0x00ff0000) >> 16))
#define	MUTEX_SET_CEILING(x, c)	\
    (x) = (void *)(((uintptr_t)(x) & ~0x00ff0000) | ((c) << 16))

#if __GNUC_PREREQ__(3, 0)
#define	NOINLINE		__attribute ((noinline))
#else
#define	NOINLINE		/* nothing */
#endif

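/*
 * Record describing a thread waiting for a mutex.  It lives on the
 * waiting thread's stack and is linked into the lockless ptm_waiters
 * list; the waker clears 'lid' to tell the waiter that it is safe to
 * return and reuse the record.
 */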
struct pthread__waiter {
	struct pthread__waiter	*volatile next;
	lwpid_t			volatile lid;
};

static void	pthread__mutex_wakeup(pthread_t, struct pthread__waiter *);
static int	pthread__mutex_lock_slow(pthread_mutex_t *,
    const struct timespec *);
static void	pthread__mutex_pause(void);

int		_pthread_mutex_held_np(pthread_mutex_t *);
pthread_t	_pthread_mutex_owner_np(pthread_mutex_t *);

__weak_alias(pthread_mutex_held_np,_pthread_mutex_held_np)
__weak_alias(pthread_mutex_owner_np,_pthread_mutex_owner_np)

__strong_alias(__libc_mutex_init,pthread_mutex_init)
__strong_alias(__libc_mutex_lock,pthread_mutex_lock)
__strong_alias(__libc_mutex_trylock,pthread_mutex_trylock)
__strong_alias(__libc_mutex_unlock,pthread_mutex_unlock)
__strong_alias(__libc_mutex_destroy,pthread_mutex_destroy)

__strong_alias(__libc_mutexattr_init,pthread_mutexattr_init)
__strong_alias(__libc_mutexattr_destroy,pthread_mutexattr_destroy)
__strong_alias(__libc_mutexattr_settype,pthread_mutexattr_settype)

int
pthread_mutex_init(pthread_mutex_t *ptm, const pthread_mutexattr_t *attr)
{
	uintptr_t type, proto, val, ceil;

#if 0
	/*
	 * Always initialize the mutex structure: it may be used later
	 * and the cost should be minimal.
	 */
	if (__predict_false(__uselibcstub))
		return __libc_mutex_init_stub(ptm, attr);
#endif

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr == NULL || attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	if (attr == NULL) {
		type = PTHREAD_MUTEX_NORMAL;
		proto = PTHREAD_PRIO_NONE;
		ceil = 0;
	} else {
		val = (uintptr_t)attr->ptma_private;

		type = MUTEX_GET_TYPE(val);
		proto = MUTEX_GET_PROTOCOL(val);
		ceil = MUTEX_GET_CEILING(val);
	}
	switch (type) {
	case PTHREAD_MUTEX_ERRORCHECK:
		__cpu_simple_lock_set(&ptm->ptm_errorcheck);
		ptm->ptm_owner = NULL;
		break;
	case PTHREAD_MUTEX_RECURSIVE:
		__cpu_simple_lock_clear(&ptm->ptm_errorcheck);
		ptm->ptm_owner = (void *)MUTEX_RECURSIVE_BIT;
		break;
	default:
		__cpu_simple_lock_clear(&ptm->ptm_errorcheck);
		ptm->ptm_owner = NULL;
		break;
	}
	switch (proto) {
	case PTHREAD_PRIO_PROTECT:
		val = (uintptr_t)ptm->ptm_owner;
		val |= MUTEX_PROTECT_BIT;
		ptm->ptm_owner = (void *)val;
		break;

	}
	ptm->ptm_magic = _PT_MUTEX_MAGIC;
	ptm->ptm_waiters = NULL;
	ptm->ptm_recursed = 0;
	ptm->ptm_ceiling = (unsigned char)ceil;

	return 0;
}

int
pthread_mutex_destroy(pthread_mutex_t *ptm)
{

	if (__predict_false(__uselibcstub))
		return __libc_mutex_destroy_stub(ptm);

	pthread__error(EINVAL, "Invalid mutex",
	    ptm->ptm_magic == _PT_MUTEX_MAGIC);
	pthread__error(EBUSY, "Destroying locked mutex",
	    MUTEX_OWNER(ptm->ptm_owner) == 0);

	ptm->ptm_magic = _PT_MUTEX_DEAD;
	return 0;
}

int
pthread_mutex_lock(pthread_mutex_t *ptm)
{
	pthread_t self;
	void *val;

	if (__predict_false(__uselibcstub))
		return __libc_mutex_lock_stub(ptm);

	pthread__error(EINVAL, "Invalid mutex",
	    ptm->ptm_magic == _PT_MUTEX_MAGIC);

	self = pthread__self();
	val = atomic_cas_ptr(&ptm->ptm_owner, NULL, self);
	if (__predict_true(val == NULL)) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
		membar_enter();
#endif
		return 0;
	}
	return pthread__mutex_lock_slow(ptm, NULL);
}

int
pthread_mutex_timedlock(pthread_mutex_t *ptm, const struct timespec *ts)
{
	pthread_t self;
	void *val;

	pthread__error(EINVAL, "Invalid mutex",
	    ptm->ptm_magic == _PT_MUTEX_MAGIC);

	self = pthread__self();
	val = atomic_cas_ptr(&ptm->ptm_owner, NULL, self);
	if (__predict_true(val == NULL)) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
		membar_enter();
#endif
		return 0;
	}
	return pthread__mutex_lock_slow(ptm, ts);
}

/* We want function call overhead. */
NOINLINE static void
pthread__mutex_pause(void)
{

	pthread__smt_pause();
}

/*
 * Spin while the holder is running.  'lwpctl' gives us the true
 * status of the thread.
 */
NOINLINE static void *
pthread__mutex_spin(pthread_mutex_t *ptm, pthread_t owner)
{
	pthread_t thread;
	unsigned int count, i;

	for (count = 2;; owner = ptm->ptm_owner) {
		thread = (pthread_t)MUTEX_OWNER(owner);
		if (thread == NULL)
			break;
		if (thread->pt_lwpctl->lc_curcpu == LWPCTL_CPU_NONE)
			break;
		if (count < 128)
			count += count;
		for (i = count; i != 0; i--)
			pthread__mutex_pause();
	}

	return owner;
}

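/*
 * Slow path for acquisition: handle recursive and errorcheck mutexes,
 * apply priority protection, spin while the holder is running, and
 * finally sleep on the lockless waiters list until woken (or until a
 * timeout supplied via pthread_mutex_timedlock() expires).
 */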
NOINLINE static int
pthread__mutex_lock_slow(pthread_mutex_t *ptm, const struct timespec *ts)
{
	void *newval, *owner, *next;
	struct pthread__waiter waiter;
	pthread_t self;
	int serrno;
	int error;

	owner = ptm->ptm_owner;
	self = pthread__self();
	serrno = errno;

	pthread__assert(self->pt_lid != 0);

	/* Recursive or errorcheck? */
	if (MUTEX_OWNER(owner) == (uintptr_t)self) {
		if (MUTEX_RECURSIVE(owner)) {
			if (ptm->ptm_recursed == INT_MAX)
				return EAGAIN;
			ptm->ptm_recursed++;
			return 0;
		}
		if (__SIMPLELOCK_LOCKED_P(&ptm->ptm_errorcheck))
			return EDEADLK;
	}

	/* priority protect */
	if (MUTEX_PROTECT(owner) && _sched_protect(ptm->ptm_ceiling) == -1) {
		error = errno;
		errno = serrno;
		return error;
	}

	for (;;) {
		/* If it has become free, try to acquire it again. */
		if (MUTEX_OWNER(owner) == 0) {
			newval = (void *)((uintptr_t)self | (uintptr_t)owner);
			next = atomic_cas_ptr(&ptm->ptm_owner, owner, newval);
			if (__predict_false(next != owner)) {
				owner = next;
				continue;
			}
			errno = serrno;
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		} else if (MUTEX_OWNER(owner) != (uintptr_t)self) {
			/* Spin while the owner is running. */
			owner = pthread__mutex_spin(ptm, owner);
			if (MUTEX_OWNER(owner) == 0) {
				continue;
			}
		}

		/*
		 * Nope, still held.  Add thread to the list of waiters.
		 * Issue a memory barrier to ensure stores to 'waiter'
		 * are visible before we enter the list.
		 */
		waiter.next = ptm->ptm_waiters;
		waiter.lid = self->pt_lid;
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
		membar_producer();
#endif
		next = atomic_cas_ptr(&ptm->ptm_waiters, waiter.next, &waiter);
		if (next != waiter.next) {
			owner = ptm->ptm_owner;
			continue;
		}

		/*
		 * If the mutex has become free since entering self onto the
		 * waiters list, need to wake everybody up (including self)
		 * and retry.  It's possible to race with an unlocking
		 * thread, so self may have already been awoken.
		 */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
		membar_enter();
#endif
		if (MUTEX_OWNER(ptm->ptm_owner) == 0) {
			pthread__mutex_wakeup(self,
			    atomic_swap_ptr(&ptm->ptm_waiters, NULL));
		}

		/*
		 * We must not proceed until told that we are no longer
		 * waiting (via waiter.lid being set to zero).  Otherwise
		 * it's unsafe to re-enter "waiter" onto the waiters list.
		 */
		while (waiter.lid != 0) {
			error = _lwp_park(CLOCK_REALTIME, TIMER_ABSTIME,
			    __UNCONST(ts), 0, NULL, NULL);
			if (error < 0 && errno == ETIMEDOUT) {
				/* Remove self from waiters list */
				pthread__mutex_wakeup(self,
				    atomic_swap_ptr(&ptm->ptm_waiters, NULL));

				/*
				 * Might have raced with another thread to
				 * do the wakeup.  In any case there will be
				 * a wakeup for sure.  Eat it and wait for
				 * waiter.lid to clear.
				 */
				while (waiter.lid != 0) {
					(void)_lwp_park(CLOCK_MONOTONIC, 0,
					    NULL, 0, NULL, NULL);
				}

				/* Priority protect */
				if (MUTEX_PROTECT(owner))
					(void)_sched_protect(-1);
				errno = serrno;
				return ETIMEDOUT;
			}
		}
		owner = ptm->ptm_owner;
	}
}

int
pthread_mutex_trylock(pthread_mutex_t *ptm)
{
	pthread_t self;
	void *val, *new, *next;

	if (__predict_false(__uselibcstub))
		return __libc_mutex_trylock_stub(ptm);

	pthread__error(EINVAL, "Invalid mutex",
	    ptm->ptm_magic == _PT_MUTEX_MAGIC);

	self = pthread__self();
	val = atomic_cas_ptr(&ptm->ptm_owner, NULL, self);
	if (__predict_true(val == NULL)) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
		membar_enter();
#endif
		return 0;
	}

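	/*
	 * A recursive mutex keeps MUTEX_RECURSIVE_BIT set in the owner
	 * word even when unheld, so the CAS against NULL above cannot
	 * succeed for it.  Either claim the free mutex here or, if we
	 * already own it, bump the recursion count.
	 */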
	if (MUTEX_RECURSIVE(val)) {
		if (MUTEX_OWNER(val) == 0) {
			new = (void *)((uintptr_t)self | (uintptr_t)val);
			next = atomic_cas_ptr(&ptm->ptm_owner, val, new);
			if (__predict_true(next == val)) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}
		}
		if (MUTEX_OWNER(val) == (uintptr_t)self) {
			if (ptm->ptm_recursed == INT_MAX)
				return EAGAIN;
			ptm->ptm_recursed++;
			return 0;
		}
	}

	return EBUSY;
}

int
pthread_mutex_unlock(pthread_mutex_t *ptm)
{
	pthread_t self;
	void *val, *newval;
	int error;

	if (__predict_false(__uselibcstub))
		return __libc_mutex_unlock_stub(ptm);

	pthread__error(EINVAL, "Invalid mutex",
	    ptm->ptm_magic == _PT_MUTEX_MAGIC);

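	/*
	 * Stores made while the mutex was held must be ordered before the
	 * store that releases ptm_owner; where atomic operations do not
	 * already act as barriers, issue an explicit release barrier first.
	 */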
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
	membar_exit();
#endif
	error = 0;
	self = pthread__self();
	newval = NULL;

	val = atomic_cas_ptr(&ptm->ptm_owner, self, newval);
	if (__predict_false(val != self)) {
		bool weown = (MUTEX_OWNER(val) == (uintptr_t)self);
		if (__SIMPLELOCK_LOCKED_P(&ptm->ptm_errorcheck)) {
			if (!weown) {
				error = EPERM;
				newval = val;
			} else {
				newval = NULL;
			}
		} else if (MUTEX_RECURSIVE(val)) {
			if (!weown) {
				error = EPERM;
				newval = val;
			} else if (ptm->ptm_recursed) {
				ptm->ptm_recursed--;
				newval = val;
			} else {
				newval = (pthread_t)MUTEX_RECURSIVE_BIT;
			}
		} else {
			pthread__error(EPERM,
			    "Unlocking unlocked mutex", (val != NULL));
			pthread__error(EPERM,
			    "Unlocking mutex owned by another thread", weown);
			newval = NULL;
		}

		/*
		 * Release the mutex.  If there appear to be waiters, then
		 * wake them up.
		 */
		if (newval != val) {
			val = atomic_swap_ptr(&ptm->ptm_owner, newval);
			if (__predict_false(MUTEX_PROTECT(val))) {
				/* restore elevated priority */
				(void)_sched_protect(-1);
			}
		}
	}

	/*
	 * Finally, wake any waiters and return.
	 */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
	membar_enter();
#endif
	if (MUTEX_OWNER(newval) == 0 && ptm->ptm_waiters != NULL) {
		pthread__mutex_wakeup(self,
		    atomic_swap_ptr(&ptm->ptm_waiters, NULL));
	}
	return error;
}

/*
 * pthread__mutex_wakeup: unpark threads waiting for us
 */

static void
pthread__mutex_wakeup(pthread_t self, struct pthread__waiter *cur)
{
	lwpid_t lids[PTHREAD__UNPARK_MAX];
	const size_t mlid = pthread__unpark_max;
	struct pthread__waiter *next;
	size_t nlid;

	/*
	 * Pull waiters from the queue and add to our list.  Use a memory
	 * barrier to ensure that we safely read the value of waiter->next
	 * before the awoken thread sees waiter->lid being cleared.
	 */
	membar_datadep_consumer(); /* for alpha */
	for (nlid = 0; cur != NULL; cur = next) {
		if (nlid == mlid) {
			(void)_lwp_unpark_all(lids, nlid, NULL);
			nlid = 0;
		}
		next = cur->next;
		pthread__assert(cur->lid != 0);
		lids[nlid++] = cur->lid;
		membar_exit();
		cur->lid = 0;
		/* No longer safe to touch 'cur' */
	}
	if (nlid == 1) {
		(void)_lwp_unpark(lids[0], NULL);
	} else if (nlid > 1) {
		(void)_lwp_unpark_all(lids, nlid, NULL);
	}
}

int
pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
#if 0
	if (__predict_false(__uselibcstub))
		return __libc_mutexattr_init_stub(attr);
#endif

	attr->ptma_magic = _PT_MUTEXATTR_MAGIC;
	attr->ptma_private = (void *)PTHREAD_MUTEX_DEFAULT;
	return 0;
}

int
pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{
	if (__predict_false(__uselibcstub))
		return __libc_mutexattr_destroy_stub(attr);

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	attr->ptma_magic = _PT_MUTEXATTR_DEAD;

	return 0;
}

int
pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *typep)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	*typep = MUTEX_GET_TYPE(attr->ptma_private);
	return 0;
}

int
pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{

	if (__predict_false(__uselibcstub))
		return __libc_mutexattr_settype_stub(attr, type);

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	switch (type) {
	case PTHREAD_MUTEX_NORMAL:
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_RECURSIVE:
		MUTEX_SET_TYPE(attr->ptma_private, type);
		return 0;
	default:
		return EINVAL;
	}
}

int
pthread_mutexattr_getprotocol(const pthread_mutexattr_t *attr, int *proto)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	*proto = MUTEX_GET_PROTOCOL(attr->ptma_private);
	return 0;
}

int
pthread_mutexattr_setprotocol(pthread_mutexattr_t *attr, int proto)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	switch (proto) {
	case PTHREAD_PRIO_NONE:
	case PTHREAD_PRIO_PROTECT:
		MUTEX_SET_PROTOCOL(attr->ptma_private, proto);
		return 0;
	case PTHREAD_PRIO_INHERIT:
		return ENOTSUP;
	default:
		return EINVAL;
	}
}

int
pthread_mutexattr_getprioceiling(const pthread_mutexattr_t *attr, int *ceil)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	*ceil = MUTEX_GET_CEILING(attr->ptma_private);
	return 0;
}

int
pthread_mutexattr_setprioceiling(pthread_mutexattr_t *attr, int ceil)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	if (ceil & ~0xff)
		return EINVAL;

	MUTEX_SET_CEILING(attr->ptma_private, ceil);
	return 0;
}

#ifdef _PTHREAD_PSHARED
int
pthread_mutexattr_getpshared(const pthread_mutexattr_t * __restrict attr,
    int * __restrict pshared)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	*pshared = PTHREAD_PROCESS_PRIVATE;
	return 0;
}

int
pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	switch (pshared) {
	case PTHREAD_PROCESS_PRIVATE:
		return 0;
	case PTHREAD_PROCESS_SHARED:
		return ENOSYS;
	}
	return EINVAL;
}
#endif

/*
 * In order to avoid unnecessary contention on interlocking mutexes, we try
 * to defer waking up threads until we unlock the mutex.  The threads will
 * be woken up when the calling thread (self) releases the mutex.
 */
void
pthread__mutex_deferwake(pthread_t self, pthread_mutex_t *ptm,
    struct pthread__waiter *head)
{
	struct pthread__waiter *tail, *n, *o;

	pthread__assert(head != NULL);

	if (__predict_false(ptm == NULL ||
	    MUTEX_OWNER(ptm->ptm_owner) != (uintptr_t)self)) {
		pthread__mutex_wakeup(self, head);
		return;
	}

	/* This is easy if no existing waiters on mutex. */
	if (atomic_cas_ptr(&ptm->ptm_waiters, NULL, head) == NULL) {
		return;
	}

	/* Oops need to append.  Find the tail of the new queue. */
	for (tail = head; tail->next != NULL; tail = tail->next) {
		/* nothing */
	}

	/* Append atomically. */
	for (o = ptm->ptm_waiters;; o = n) {
		tail->next = o;
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
		membar_producer();
#endif
		n = atomic_cas_ptr(&ptm->ptm_waiters, o, head);
		if (__predict_true(n == o)) {
			break;
		}
	}
}

int
pthread_mutex_getprioceiling(const pthread_mutex_t *ptm, int *ceil)
{

	pthread__error(EINVAL, "Invalid mutex",
	    ptm->ptm_magic == _PT_MUTEX_MAGIC);

	*ceil = ptm->ptm_ceiling;
	return 0;
}

int
pthread_mutex_setprioceiling(pthread_mutex_t *ptm, int ceil, int *old_ceil)
{
	int error;

	pthread__error(EINVAL, "Invalid mutex",
	    ptm->ptm_magic == _PT_MUTEX_MAGIC);

	error = pthread_mutex_lock(ptm);
	if (error == 0) {
		*old_ceil = ptm->ptm_ceiling;
		/* check range */
		ptm->ptm_ceiling = ceil;
		pthread_mutex_unlock(ptm);
	}
	return error;
}

int
_pthread_mutex_held_np(pthread_mutex_t *ptm)
{

	return MUTEX_OWNER(ptm->ptm_owner) == (uintptr_t)pthread__self();
}

pthread_t
_pthread_mutex_owner_np(pthread_mutex_t *ptm)
{

	return (pthread_t)MUTEX_OWNER(ptm->ptm_owner);
}