/*	$NetBSD: pthread_mutex.c,v 1.43 2008/01/25 02:12:10 rafal Exp $	*/

/*-
 * Copyright (c) 2001, 2003, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, by Jason R. Thorpe, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_mutex.c,v 1.43 2008/01/25 02:12:10 rafal Exp $");

#include <sys/types.h>

#include <machine/lock.h>

#include <errno.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>

#include "pthread.h"
#include "pthread_int.h"

#ifndef PTHREAD__HAVE_ATOMIC

static int	pthread_mutex_lock_slow(pthread_t, pthread_mutex_t *);
static void	once_cleanup(void *);

int		_pthread_mutex_held_np(pthread_mutex_t *);
pthread_t	_pthread_mutex_owner_np(pthread_mutex_t *);

__weak_alias(pthread_mutex_held_np,_pthread_mutex_held_np)
__weak_alias(pthread_mutex_owner_np,_pthread_mutex_owner_np)

__strong_alias(__libc_mutex_init,pthread_mutex_init)
__strong_alias(__libc_mutex_lock,pthread_mutex_lock)
__strong_alias(__libc_mutex_trylock,pthread_mutex_trylock)
__strong_alias(__libc_mutex_unlock,pthread_mutex_unlock)
__strong_alias(__libc_mutex_destroy,pthread_mutex_destroy)

__strong_alias(__libc_mutexattr_init,pthread_mutexattr_init)
__strong_alias(__libc_mutexattr_destroy,pthread_mutexattr_destroy)
__strong_alias(__libc_mutexattr_settype,pthread_mutexattr_settype)

__strong_alias(__libc_thr_once,pthread_once)

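/*
 * Per-object private state: the mutex type and, for recursive mutexes,
 * the recursion depth.  A default-type mutex shares the read-only
 * mutex_private_default object below instead of allocating its own copy.
 */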
struct mutex_private {
	int	type;
	int	recursecount;
};

static const struct mutex_private mutex_private_default = {
	PTHREAD_MUTEX_DEFAULT,
	0,
};

struct mutexattr_private {
	int	type;
};

static const struct mutexattr_private mutexattr_private_default = {
	PTHREAD_MUTEX_DEFAULT,
};

int
pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
{
	struct mutexattr_private *map;
	struct mutex_private *mp;

	pthread__error(EINVAL, "Invalid mutex attribute",
	    (attr == NULL) || (attr->ptma_magic == _PT_MUTEXATTR_MAGIC));

	if (attr != NULL && (map = attr->ptma_private) != NULL &&
	    memcmp(map, &mutexattr_private_default, sizeof(*map)) != 0) {
		mp = malloc(sizeof(*mp));
		if (mp == NULL)
			return ENOMEM;

		mp->type = map->type;
		mp->recursecount = 0;
	} else {
		/* LINTED cast away const */
		mp = (struct mutex_private *) &mutex_private_default;
	}

	mutex->ptm_magic = _PT_MUTEX_MAGIC;
	mutex->ptm_owner = NULL;
	pthread_lockinit(&mutex->ptm_lock);
	pthread_lockinit(&mutex->ptm_interlock);
	PTQ_INIT(&mutex->ptm_blocked);
	mutex->ptm_private = mp;

	return 0;
}


int
pthread_mutex_destroy(pthread_mutex_t *mutex)
{

	pthread__error(EINVAL, "Invalid mutex",
	    mutex->ptm_magic == _PT_MUTEX_MAGIC);
	pthread__error(EBUSY, "Destroying locked mutex",
	    __SIMPLELOCK_UNLOCKED_P(&mutex->ptm_lock));

	mutex->ptm_magic = _PT_MUTEX_DEAD;
	if (mutex->ptm_private != NULL &&
	    mutex->ptm_private != (const void *)&mutex_private_default)
		free(mutex->ptm_private);

	return 0;
}


/*
 * Note regarding memory visibility: Pthreads has rules about memory
 * visibility and mutexes. Very roughly: Memory a thread can see when
 * it unlocks a mutex can be seen by another thread that locks the
 * same mutex.
 *
 * A memory barrier after a lock and before an unlock will provide
 * this behavior. This code relies on pthread__spintrylock() to issue
 * a barrier after obtaining a lock, and on pthread__spinunlock() to
 * issue a barrier before releasing a lock.
 */
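/*
 * Illustrative sketch only (shared_data, prepare() and use() are
 * placeholder names, not part of this file): if thread A executes
 *
 *	shared_data = prepare();
 *	pthread_mutex_unlock(&m);
 *
 * and thread B later executes
 *
 *	pthread_mutex_lock(&m);
 *	use(shared_data);
 *
 * then B's use() is guaranteed to observe A's store to shared_data,
 * because the unlock issues a barrier before releasing the lock and
 * the lock issues one after acquiring it.
 */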

int
pthread_mutex_lock(pthread_mutex_t *mutex)
{
	pthread_t self;
	int error;

	self = pthread__self();

	/*
	 * Note that if we get the lock, we don't have to deal with any
	 * non-default lock type handling.
	 */
	if (__predict_false(pthread__spintrylock(self, &mutex->ptm_lock) == 0)) {
		error = pthread_mutex_lock_slow(self, mutex);
		if (error)
			return error;
	}

	/*
	 * We have the lock!
	 */
	mutex->ptm_owner = self;

	return 0;
}


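/*
 * Slow path for pthread_mutex_lock(): spin on the lock word for a
 * while, then take the interlock, queue this thread on ptm_blocked
 * and park until the unlocking thread wakes it.  Relocking a mutex
 * we already own is detected here and handled according to the
 * mutex type (deadlock error or recursion count).
 */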
static int
pthread_mutex_lock_slow(pthread_t self, pthread_mutex_t *mutex)
{
	struct mutex_private *mp;
	int count;

	pthread__error(EINVAL, "Invalid mutex",
	    mutex->ptm_magic == _PT_MUTEX_MAGIC);

	for (;;) {
		/* Spin for a while. */
		count = pthread__nspins;
		while (__SIMPLELOCK_LOCKED_P(&mutex->ptm_lock) && --count > 0)
			pthread__smt_pause();
		if (count > 0) {
			if (pthread__spintrylock(self, &mutex->ptm_lock) != 0)
				break;
			continue;
		}

		/* Okay, didn't look free. Get the interlock... */
		pthread__spinlock(self, &mutex->ptm_interlock);

		/*
		 * The mutex_unlock routine will get the interlock
		 * before looking at the list of sleepers, so if the
		 * lock is held we can safely put ourselves on the
		 * sleep queue. If it's not held, we can try taking it
		 * again.
		 */
		PTQ_INSERT_HEAD(&mutex->ptm_blocked, self, pt_sleep);
		if (__SIMPLELOCK_UNLOCKED_P(&mutex->ptm_lock)) {
			PTQ_REMOVE(&mutex->ptm_blocked, self, pt_sleep);
			pthread__spinunlock(self, &mutex->ptm_interlock);
			continue;
		}

		mp = mutex->ptm_private;
		if (mutex->ptm_owner == self && mp != NULL) {
			switch (mp->type) {
			case PTHREAD_MUTEX_ERRORCHECK:
				PTQ_REMOVE(&mutex->ptm_blocked, self, pt_sleep);
				pthread__spinunlock(self, &mutex->ptm_interlock);
				return EDEADLK;

			case PTHREAD_MUTEX_RECURSIVE:
				/*
				 * It's safe to do this without
				 * holding the interlock, because
				 * we only modify it if we know we
				 * own the mutex.
				 */
				PTQ_REMOVE(&mutex->ptm_blocked, self, pt_sleep);
				pthread__spinunlock(self, &mutex->ptm_interlock);
				if (mp->recursecount == INT_MAX)
					return EAGAIN;
				mp->recursecount++;
				return 0;
			}
		}

		/*
		 * Locking a mutex is not a cancellation
		 * point, so we don't need to do the
		 * test-cancellation dance. We may get woken
		 * up spuriously by pthread_cancel or signals,
		 * but it's okay since we're just going to
		 * retry.
		 */
		self->pt_sleeponq = 1;
		self->pt_sleepobj = &mutex->ptm_blocked;
		pthread__spinunlock(self, &mutex->ptm_interlock);
		(void)pthread__park(self, &mutex->ptm_interlock,
		    &mutex->ptm_blocked, NULL, 0, &mutex->ptm_blocked);
	}

	return 0;
}


int
pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct mutex_private *mp;
	pthread_t self;

	pthread__error(EINVAL, "Invalid mutex",
	    mutex->ptm_magic == _PT_MUTEX_MAGIC);

	self = pthread__self();

	if (pthread__spintrylock(self, &mutex->ptm_lock) == 0) {
		/*
		 * These tests can be performed without holding the
		 * interlock because these fields are only modified
		 * if we know we own the mutex.
		 */
		mp = mutex->ptm_private;
		if (mp != NULL && mp->type == PTHREAD_MUTEX_RECURSIVE &&
		    mutex->ptm_owner == self) {
			if (mp->recursecount == INT_MAX)
				return EAGAIN;
			mp->recursecount++;
			return 0;
		}

		return EBUSY;
	}

	mutex->ptm_owner = self;

	return 0;
}


int
pthread_mutex_unlock(pthread_mutex_t *mutex)
{
	struct mutex_private *mp;
	pthread_t self;
	int weown;

	pthread__error(EINVAL, "Invalid mutex",
	    mutex->ptm_magic == _PT_MUTEX_MAGIC);

	/*
	 * These tests can be performed without holding the
	 * interlock because these fields are only modified
	 * if we know we own the mutex.
	 */
	self = pthread__self();
	weown = (mutex->ptm_owner == self);
	mp = mutex->ptm_private;

	if (mp == NULL) {
		if (__predict_false(!weown)) {
			pthread__error(EPERM, "Unlocking unlocked mutex",
			    (mutex->ptm_owner != 0));
			pthread__error(EPERM,
			    "Unlocking mutex owned by another thread", weown);
		}
	} else if (mp->type == PTHREAD_MUTEX_RECURSIVE) {
		if (!weown)
			return EPERM;
		if (mp->recursecount != 0) {
			mp->recursecount--;
			return 0;
		}
	} else if (mp->type == PTHREAD_MUTEX_ERRORCHECK) {
		if (!weown) {
			pthread__error(EPERM, "Unlocking unlocked mutex",
			    (mutex->ptm_owner != 0));
			pthread__error(EPERM,
			    "Unlocking mutex owned by another thread", weown);
			return EPERM;
		}
	}

	mutex->ptm_owner = NULL;
	pthread__spinunlock(self, &mutex->ptm_lock);

	/*
	 * Do a double-checked locking dance to see if there are any
	 * waiters. If we don't see any waiters, we can exit, because
	 * we've already released the lock. If we do see waiters, they
	 * were probably waiting on us... there's a slight chance that
	 * they are waiting on a different thread's ownership of the
	 * lock that happened between the unlock above and this
	 * examination of the queue; if so, no harm is done, as the
	 * waiter will loop and see that the mutex is still locked.
	 */
	pthread__spinlock(self, &mutex->ptm_interlock);
	pthread__unpark_all(self, &mutex->ptm_interlock, &mutex->ptm_blocked);
	return 0;
}

int
pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
	struct mutexattr_private *map;

	map = malloc(sizeof(*map));
	if (map == NULL)
		return ENOMEM;

	*map = mutexattr_private_default;

	attr->ptma_magic = _PT_MUTEXATTR_MAGIC;
	attr->ptma_private = map;

	return 0;
}


int
pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	attr->ptma_magic = _PT_MUTEXATTR_DEAD;
	if (attr->ptma_private != NULL)
		free(attr->ptma_private);

	return 0;
}


int
pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *typep)
{
	struct mutexattr_private *map;

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	map = attr->ptma_private;

	*typep = map->type;

	return 0;
}


int
pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{
	struct mutexattr_private *map;

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	map = attr->ptma_private;

	switch (type) {
	case PTHREAD_MUTEX_NORMAL:
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_RECURSIVE:
		map->type = type;
		break;

	default:
		return EINVAL;
	}

	return 0;
}


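/*
 * Cancellation cleanup handler for pthread_once(): drop pto_mutex if
 * the init routine is cancelled while we hold it.
 */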
static void
once_cleanup(void *closure)
{

	pthread_mutex_unlock((pthread_mutex_t *)closure);
}


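/*
 * Run routine() exactly once per once_control.  pto_done is checked
 * again under pto_mutex so that concurrent callers serialize, and a
 * cleanup handler ensures the mutex is released if routine() is
 * cancelled.
 */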
int
pthread_once(pthread_once_t *once_control, void (*routine)(void))
{

	if (once_control->pto_done == 0) {
		pthread_mutex_lock(&once_control->pto_mutex);
		pthread_cleanup_push(&once_cleanup, &once_control->pto_mutex);
		if (once_control->pto_done == 0) {
			routine();
			once_control->pto_done = 1;
		}
		pthread_cleanup_pop(1);
	}

	return 0;
}

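/*
 * Return nonzero if the mutex is currently owned by the given thread;
 * as the name suggests, callers use this to decide whether a wakeup
 * can be deferred until the mutex is released.
 */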
int
pthread__mutex_deferwake(pthread_t thread, pthread_mutex_t *mutex)
{

	return mutex->ptm_owner == thread;
}

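/*
 * Non-portable introspection helpers: report whether the calling
 * thread holds the mutex, and which thread currently owns it.
 */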
int
_pthread_mutex_held_np(pthread_mutex_t *mutex)
{

	return mutex->ptm_owner == pthread__self();
}

pthread_t
_pthread_mutex_owner_np(pthread_mutex_t *mutex)
{

	return (pthread_t)mutex->ptm_owner;
}

#endif	/* !PTHREAD__HAVE_ATOMIC */