/*	$NetBSD: pthread_mutex.c,v 1.31.2.2 2008/01/09 01:36:37 matt Exp $	*/

/*-
 * Copyright (c) 2001, 2003, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, by Jason R. Thorpe, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_mutex.c,v 1.31.2.2 2008/01/09 01:36:37 matt Exp $");

#include <sys/types.h>

#include <machine/lock.h>

#include <errno.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>

#include "pthread.h"
#include "pthread_int.h"

#ifndef PTHREAD__HAVE_ATOMIC

static int pthread_mutex_lock_slow(pthread_t, pthread_mutex_t *);

int	_pthread_mutex_held_np(pthread_mutex_t *);
pthread_t _pthread_mutex_owner_np(pthread_mutex_t *);

__weak_alias(pthread_mutex_held_np,_pthread_mutex_held_np)
__weak_alias(pthread_mutex_owner_np,_pthread_mutex_owner_np)

__strong_alias(__libc_mutex_init,pthread_mutex_init)
__strong_alias(__libc_mutex_lock,pthread_mutex_lock)
__strong_alias(__libc_mutex_trylock,pthread_mutex_trylock)
__strong_alias(__libc_mutex_unlock,pthread_mutex_unlock)
__strong_alias(__libc_mutex_destroy,pthread_mutex_destroy)

__strong_alias(__libc_mutexattr_init,pthread_mutexattr_init)
__strong_alias(__libc_mutexattr_destroy,pthread_mutexattr_destroy)
__strong_alias(__libc_mutexattr_settype,pthread_mutexattr_settype)

__strong_alias(__libc_thr_once,pthread_once)
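
/*
 * Per-mutex private data, hung off ptm_private: the mutex type and,
 * for recursive mutexes, how many times the owner has re-locked it.
 * Mutexes created with default attributes share the static default
 * instance below instead of allocating their own copy.
 */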
struct mutex_private {
	int	type;
	int	recursecount;
};

static const struct mutex_private mutex_private_default = {
	PTHREAD_MUTEX_DEFAULT,
	0,
};

struct mutexattr_private {
	int	type;
};

static const struct mutexattr_private mutexattr_private_default = {
	PTHREAD_MUTEX_DEFAULT,
};

int
pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
{
	struct mutexattr_private *map;
	struct mutex_private *mp;

	pthread__error(EINVAL, "Invalid mutex attribute",
	    (attr == NULL) || (attr->ptma_magic == _PT_MUTEXATTR_MAGIC));

	if (attr != NULL && (map = attr->ptma_private) != NULL &&
	    memcmp(map, &mutexattr_private_default, sizeof(*map)) != 0) {
		mp = malloc(sizeof(*mp));
		if (mp == NULL)
			return ENOMEM;

		mp->type = map->type;
		mp->recursecount = 0;
	} else {
		/* LINTED cast away const */
		mp = (struct mutex_private *) &mutex_private_default;
	}

	mutex->ptm_magic = _PT_MUTEX_MAGIC;
	mutex->ptm_owner = NULL;
	pthread_lockinit(&mutex->ptm_lock);
	pthread_lockinit(&mutex->ptm_interlock);
	PTQ_INIT(&mutex->ptm_blocked);
	mutex->ptm_private = mp;

	return 0;
}
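
/*
 * Example (caller's side, not part of this file; a minimal sketch): a
 * non-default mutex type is requested through the attribute path above,
 * e.g. to create a recursive mutex:
 *
 *	pthread_mutexattr_t ma;
 *	pthread_mutex_t m;
 *
 *	pthread_mutexattr_init(&ma);
 *	pthread_mutexattr_settype(&ma, PTHREAD_MUTEX_RECURSIVE);
 *	pthread_mutex_init(&m, &ma);
 *	pthread_mutexattr_destroy(&ma);
 *
 * With default attributes, ptm_private simply points at
 * mutex_private_default and nothing is allocated.
 */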

int
pthread_mutex_destroy(pthread_mutex_t *mutex)
{

	pthread__error(EINVAL, "Invalid mutex",
	    mutex->ptm_magic == _PT_MUTEX_MAGIC);
	pthread__error(EBUSY, "Destroying locked mutex",
	    __SIMPLELOCK_UNLOCKED_P(&mutex->ptm_lock));

	mutex->ptm_magic = _PT_MUTEX_DEAD;
	if (mutex->ptm_private != NULL &&
	    mutex->ptm_private != (const void *)&mutex_private_default)
		free(mutex->ptm_private);

	return 0;
}


/*
 * Note regarding memory visibility: Pthreads has rules about memory
 * visibility and mutexes. Very roughly: Memory a thread can see when
 * it unlocks a mutex can be seen by another thread that locks the
 * same mutex.
 *
 * A memory barrier after a lock and before an unlock will provide
 * this behavior. This code relies on pthread__spintrylock() to issue
 * a barrier after obtaining a lock, and on pthread__spinunlock() to
 * issue a barrier before releasing a lock.
 */
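
/*
 * Informal illustration of the rule above (not part of the original
 * sources): if thread A stores to shared data and then unlocks, and
 * thread B locks the same mutex before reading, B observes A's store:
 *
 *	A:	shared = 42;
 *		pthread_mutex_unlock(&m);	barrier before release
 *	B:	pthread_mutex_lock(&m);		barrier after acquire
 *		use(shared);			sees 42
 */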

int
pthread_mutex_lock(pthread_mutex_t *mutex)
{
	pthread_t self;
	int error;

	self = pthread__self();

	/*
	 * Note that if we get the lock, we don't have to deal with any
	 * non-default lock type handling.
	 */
	if (__predict_false(pthread__spintrylock(self, &mutex->ptm_lock) == 0)) {
		error = pthread_mutex_lock_slow(self, mutex);
		if (error)
			return error;
	}

	/*
	 * We have the lock!
	 */
	mutex->ptm_owner = self;

	return 0;
}

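/*
 * Slow path for pthread_mutex_lock(): spin briefly, then queue on the
 * mutex's sleep queue under the interlock, handling the error-check
 * and recursive cases where the caller already owns the mutex.  The
 * thread is parked until pthread_mutex_unlock() wakes the waiters, at
 * which point it loops and competes for the lock again.
 */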
static int
pthread_mutex_lock_slow(pthread_t self, pthread_mutex_t *mutex)
{
	extern int pthread__started;
	struct mutex_private *mp;
	sigset_t ss;
	int count;

	pthread__error(EINVAL, "Invalid mutex",
	    mutex->ptm_magic == _PT_MUTEX_MAGIC);

	for (;;) {
		/* Spin for a while. */
		count = pthread__nspins;
		while (__SIMPLELOCK_LOCKED_P(&mutex->ptm_lock) && --count > 0)
			pthread__smt_pause();
		if (count > 0) {
			if (pthread__spintrylock(self, &mutex->ptm_lock) != 0)
				break;
			continue;
		}

		/* Okay, didn't look free. Get the interlock... */
		pthread__spinlock(self, &mutex->ptm_interlock);

		/*
		 * The mutex_unlock routine will get the interlock
		 * before looking at the list of sleepers, so if the
		 * lock is held we can safely put ourselves on the
		 * sleep queue. If it's not held, we can try taking it
		 * again.
		 */
		PTQ_INSERT_HEAD(&mutex->ptm_blocked, self, pt_sleep);
		if (__SIMPLELOCK_UNLOCKED_P(&mutex->ptm_lock)) {
			PTQ_REMOVE(&mutex->ptm_blocked, self, pt_sleep);
			pthread__spinunlock(self, &mutex->ptm_interlock);
			continue;
		}

		mp = mutex->ptm_private;
		if (mutex->ptm_owner == self && mp != NULL) {
			switch (mp->type) {
			case PTHREAD_MUTEX_ERRORCHECK:
				PTQ_REMOVE(&mutex->ptm_blocked, self, pt_sleep);
				pthread__spinunlock(self, &mutex->ptm_interlock);
				return EDEADLK;

			case PTHREAD_MUTEX_RECURSIVE:
				/*
				 * It's safe to do this without
				 * holding the interlock, because
				 * we only modify it if we know we
				 * own the mutex.
				 */
				PTQ_REMOVE(&mutex->ptm_blocked, self, pt_sleep);
				pthread__spinunlock(self, &mutex->ptm_interlock);
				if (mp->recursecount == INT_MAX)
					return EAGAIN;
				mp->recursecount++;
				return 0;
			}
		}

		if (pthread__started == 0) {
			/* The spec says we must deadlock, so... */
			pthread__assert(mp->type == PTHREAD_MUTEX_NORMAL);
			(void) sigprocmask(SIG_SETMASK, NULL, &ss);
			for (;;) {
				sigsuspend(&ss);
			}
			/*NOTREACHED*/
		}

		/*
		 * Locking a mutex is not a cancellation
		 * point, so we don't need to do the
		 * test-cancellation dance. We may get woken
		 * up spuriously by pthread_cancel or signals,
		 * but it's okay since we're just going to
		 * retry.
		 */
		self->pt_sleeponq = 1;
		self->pt_sleepobj = &mutex->ptm_blocked;
		pthread__spinunlock(self, &mutex->ptm_interlock);
		(void)pthread__park(self, &mutex->ptm_interlock,
		    &mutex->ptm_blocked, NULL, 0, &mutex->ptm_blocked);
	}

	return 0;
}


int
pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct mutex_private *mp;
	pthread_t self;

	pthread__error(EINVAL, "Invalid mutex",
	    mutex->ptm_magic == _PT_MUTEX_MAGIC);

	self = pthread__self();

	if (pthread__spintrylock(self, &mutex->ptm_lock) == 0) {
		/*
		 * These tests can be performed without holding the
		 * interlock because these fields are only modified
		 * if we know we own the mutex.
		 */
		mp = mutex->ptm_private;
		if (mp != NULL && mp->type == PTHREAD_MUTEX_RECURSIVE &&
		    mutex->ptm_owner == self) {
			if (mp->recursecount == INT_MAX)
				return EAGAIN;
			mp->recursecount++;
			return 0;
		}

		return EBUSY;
	}

	mutex->ptm_owner = self;

	return 0;
}
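
/*
 * Example (caller's side, not part of this file; a minimal sketch):
 * pthread_mutex_trylock() never blocks, so callers typically fall back
 * to other work on EBUSY:
 *
 *	if (pthread_mutex_trylock(&m) == 0) {
 *		... update shared state ...
 *		pthread_mutex_unlock(&m);
 *	} else {
 *		... lock is busy; do something else and retry later ...
 *	}
 */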

int
pthread_mutex_unlock(pthread_mutex_t *mutex)
{
	struct mutex_private *mp;
	pthread_t self;
	int weown;

	pthread__error(EINVAL, "Invalid mutex",
	    mutex->ptm_magic == _PT_MUTEX_MAGIC);

	/*
	 * These tests can be performed without holding the
	 * interlock because these fields are only modified
	 * if we know we own the mutex.
	 */
	self = pthread__self();
	weown = (mutex->ptm_owner == self);
	mp = mutex->ptm_private;

	if (mp == NULL) {
		if (__predict_false(!weown)) {
			pthread__error(EPERM, "Unlocking unlocked mutex",
			    (mutex->ptm_owner != 0));
			pthread__error(EPERM,
			    "Unlocking mutex owned by another thread", weown);
		}
	} else if (mp->type == PTHREAD_MUTEX_RECURSIVE) {
		if (!weown)
			return EPERM;
		if (mp->recursecount != 0) {
			mp->recursecount--;
			return 0;
		}
	} else if (mp->type == PTHREAD_MUTEX_ERRORCHECK) {
		if (!weown)
			return EPERM;
		if (__predict_false(!weown)) {
			pthread__error(EPERM, "Unlocking unlocked mutex",
			    (mutex->ptm_owner != 0));
			pthread__error(EPERM,
			    "Unlocking mutex owned by another thread", weown);
		}
	}

	mutex->ptm_owner = NULL;
	pthread__spinunlock(self, &mutex->ptm_lock);

	/*
	 * Do a double-checked locking dance to see if there are any
	 * waiters. If we don't see any waiters, we can exit, because
	 * we've already released the lock. If we do see waiters, they
	 * were probably waiting on us... there's a slight chance that
	 * they are waiting on a different thread's ownership of the
	 * lock that happened between the unlock above and this
	 * examination of the queue; if so, no harm is done, as the
	 * waiter will loop and see that the mutex is still locked.
	 */
	pthread__spinlock(self, &mutex->ptm_interlock);
	pthread__unpark_all(self, &mutex->ptm_interlock, &mutex->ptm_blocked);
	return 0;
}

int
pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
	struct mutexattr_private *map;

	map = malloc(sizeof(*map));
	if (map == NULL)
		return ENOMEM;

	*map = mutexattr_private_default;

	attr->ptma_magic = _PT_MUTEXATTR_MAGIC;
	attr->ptma_private = map;

	return 0;
}


int
pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	attr->ptma_magic = _PT_MUTEXATTR_DEAD;
	if (attr->ptma_private != NULL)
		free(attr->ptma_private);

	return 0;
}


int
pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *typep)
{
	struct mutexattr_private *map;

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	map = attr->ptma_private;

	*typep = map->type;

	return 0;
}


int
pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{
	struct mutexattr_private *map;

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	map = attr->ptma_private;

	switch (type) {
	case PTHREAD_MUTEX_NORMAL:
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_RECURSIVE:
		map->type = type;
		break;

	default:
		return EINVAL;
	}

	return 0;
}


static void
once_cleanup(void *closure)
{

	pthread_mutex_unlock((pthread_mutex_t *)closure);
}


int
pthread_once(pthread_once_t *once_control, void (*routine)(void))
{

	if (once_control->pto_done == 0) {
		pthread_mutex_lock(&once_control->pto_mutex);
		pthread_cleanup_push(&once_cleanup, &once_control->pto_mutex);
		if (once_control->pto_done == 0) {
			routine();
			once_control->pto_done = 1;
		}
		pthread_cleanup_pop(1);
	}

	return 0;
}
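
/*
 * Example (caller's side, not part of this file; a minimal sketch):
 * pthread_once() runs an initialization routine exactly once, no
 * matter how many threads call it:
 *
 *	static pthread_once_t once = PTHREAD_ONCE_INIT;
 *	static void init_table(void) { ... }
 *	...
 *	pthread_once(&once, init_table);
 *
 * The once_cleanup() handler above keeps pto_mutex from being left
 * locked if the thread is cancelled while routine() runs.
 */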

/*
 * Return nonzero if 'thread' currently owns 'mutex'; the rest of the
 * library uses this to tell whether waking that thread can be deferred
 * until the mutex is released.
 */
int
pthread__mutex_deferwake(pthread_t thread, pthread_mutex_t *mutex)
{

	return mutex->ptm_owner == thread;
}

/*
 * Non-portable introspection helpers, exported through the weak
 * aliases pthread_mutex_held_np() and pthread_mutex_owner_np() above.
 */
int
_pthread_mutex_held_np(pthread_mutex_t *mutex)
{

	return mutex->ptm_owner == pthread__self();
}

pthread_t
_pthread_mutex_owner_np(pthread_mutex_t *mutex)
{

	return (pthread_t)mutex->ptm_owner;
}

#endif	/* !PTHREAD__HAVE_ATOMIC */