/*	$NetBSD: pthread_mutex.c,v 1.28.2.4 2007/09/10 10:54:06 skrll Exp $	*/

/*-
 * Copyright (c) 2001, 2003, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, by Jason R. Thorpe, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_mutex.c,v 1.28.2.4 2007/09/10 10:54:06 skrll Exp $");

#include <errno.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>

#include <sys/types.h>
#include <sys/lock.h>

#include "pthread.h"
#include "pthread_int.h"

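/*
 * The implementation below is compiled only on platforms that do not
 * provide the atomic-ops based mutex path (PTHREAD__HAVE_ATOMIC); it
 * uses simple spin locks plus a per-mutex sleep queue instead.
 */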
#ifndef	PTHREAD__HAVE_ATOMIC

static int pthread_mutex_lock_slow(pthread_t, pthread_mutex_t *);

__strong_alias(__libc_mutex_init,pthread_mutex_init)
__strong_alias(__libc_mutex_lock,pthread_mutex_lock)
__strong_alias(__libc_mutex_trylock,pthread_mutex_trylock)
__strong_alias(__libc_mutex_unlock,pthread_mutex_unlock)
__strong_alias(__libc_mutex_destroy,pthread_mutex_destroy)

__strong_alias(__libc_mutexattr_init,pthread_mutexattr_init)
__strong_alias(__libc_mutexattr_destroy,pthread_mutexattr_destroy)
__strong_alias(__libc_mutexattr_settype,pthread_mutexattr_settype)

__strong_alias(__libc_thr_once,pthread_once)

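/*
 * Optional per-mutex private data: the mutex type and, for recursive
 * mutexes, the current recursion depth.  Mutexes of the default type
 * share the read-only mutex_private_default below rather than
 * allocating a private copy; see pthread_mutex_init().
 */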
struct mutex_private {
	int	type;
	int	recursecount;
};

static const struct mutex_private mutex_private_default = {
	PTHREAD_MUTEX_DEFAULT,
	0,
};

struct mutexattr_private {
	int	type;
};

static const struct mutexattr_private mutexattr_private_default = {
	PTHREAD_MUTEX_DEFAULT,
};

int
pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
{
	struct mutexattr_private *map;
	struct mutex_private *mp;

	pthread__error(EINVAL, "Invalid mutex attribute",
	    (attr == NULL) || (attr->ptma_magic == _PT_MUTEXATTR_MAGIC));

	if (attr != NULL && (map = attr->ptma_private) != NULL &&
	    memcmp(map, &mutexattr_private_default, sizeof(*map)) != 0) {
		mp = malloc(sizeof(*mp));
		if (mp == NULL)
			return ENOMEM;

		mp->type = map->type;
		mp->recursecount = 0;
	} else {
		/* LINTED cast away const */
		mp = (struct mutex_private *) &mutex_private_default;
	}

	mutex->ptm_magic = _PT_MUTEX_MAGIC;
	mutex->ptm_owner = NULL;
	pthread_lockinit(&mutex->ptm_lock);
	pthread_lockinit(&mutex->ptm_interlock);
	PTQ_INIT(&mutex->ptm_blocked);
	mutex->ptm_private = mp;

	return 0;
}


int
pthread_mutex_destroy(pthread_mutex_t *mutex)
{

	pthread__error(EINVAL, "Invalid mutex",
	    mutex->ptm_magic == _PT_MUTEX_MAGIC);
	pthread__error(EBUSY, "Destroying locked mutex",
	    __SIMPLELOCK_UNLOCKED_P(&mutex->ptm_lock));

	mutex->ptm_magic = _PT_MUTEX_DEAD;
	if (mutex->ptm_private != NULL &&
	    mutex->ptm_private != (const void *)&mutex_private_default)
		free(mutex->ptm_private);

	return 0;
}


/*
 * Note regarding memory visibility: Pthreads has rules about memory
 * visibility and mutexes.  Very roughly: Memory a thread can see when
 * it unlocks a mutex can be seen by another thread that locks the
 * same mutex.
 *
 * A memory barrier after a lock and before an unlock will provide
 * this behavior.  This code relies on pthread__simple_lock_try() to issue
 * a barrier after obtaining a lock, and on pthread__simple_unlock() to
 * issue a barrier before releasing a lock.
 */
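/*
 * For example: if thread A stores to a shared variable and then calls
 * pthread_mutex_unlock(), a thread B that subsequently returns from
 * pthread_mutex_lock() on the same mutex is guaranteed to observe
 * that store.
 */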

int
pthread_mutex_lock(pthread_mutex_t *mutex)
{
	pthread_t self;
	int error;

	self = pthread__self();

	PTHREADD_ADD(PTHREADD_MUTEX_LOCK);

	/*
	 * Note that if we get the lock, we don't have to deal with any
	 * non-default lock type handling.
	 */
	if (__predict_false(pthread__simple_lock_try(&mutex->ptm_lock) == 0)) {
		error = pthread_mutex_lock_slow(self, mutex);
		if (error)
			return error;
	}

	/*
	 * We have the lock!
	 */
	mutex->ptm_owner = self;

	return 0;
}


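/*
 * Slow path for pthread_mutex_lock(): spin for a while, then queue on
 * the mutex's sleep queue under the interlock and park until woken by
 * pthread_mutex_unlock().  Relocking a mutex we already own is also
 * resolved here for the error-checking and recursive types.
 */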
static int
pthread_mutex_lock_slow(pthread_t self, pthread_mutex_t *mutex)
{
	extern int pthread__started;
	struct mutex_private *mp;
	sigset_t ss;
	int count;

	pthread__error(EINVAL, "Invalid mutex",
	    mutex->ptm_magic == _PT_MUTEX_MAGIC);

	PTHREADD_ADD(PTHREADD_MUTEX_LOCK_SLOW);

	for (;;) {
		/* Spin for a while. */
		count = pthread__nspins;
		while (__SIMPLELOCK_LOCKED_P(&mutex->ptm_lock) && --count > 0)
			pthread__smt_pause();
		if (count > 0) {
			if (pthread__simple_lock_try(&mutex->ptm_lock) != 0)
				break;
			continue;
		}

		/* Okay, didn't look free. Get the interlock... */
		pthread_spinlock(&mutex->ptm_interlock);

		/*
		 * The mutex_unlock routine will get the interlock
		 * before looking at the list of sleepers, so if the
		 * lock is held we can safely put ourselves on the
		 * sleep queue.  If it's not held, we can try taking it
		 * again.
		 */
		PTQ_INSERT_HEAD(&mutex->ptm_blocked, self, pt_sleep);
		if (__SIMPLELOCK_LOCKED_P(&mutex->ptm_lock)) {
			PTQ_REMOVE(&mutex->ptm_blocked, self, pt_sleep);
			pthread_spinunlock(&mutex->ptm_interlock);
			continue;
		}

		mp = mutex->ptm_private;
		if (mutex->ptm_owner == self && mp != NULL) {
			switch (mp->type) {
			case PTHREAD_MUTEX_ERRORCHECK:
				PTQ_REMOVE(&mutex->ptm_blocked, self, pt_sleep);
				pthread_spinunlock(&mutex->ptm_interlock);
				return EDEADLK;

			case PTHREAD_MUTEX_RECURSIVE:
				/*
				 * It's safe to do this without
				 * holding the interlock, because
				 * we only modify it if we know we
				 * own the mutex.
				 */
				PTQ_REMOVE(&mutex->ptm_blocked, self, pt_sleep);
				pthread_spinunlock(&mutex->ptm_interlock);
				if (mp->recursecount == INT_MAX)
					return EAGAIN;
				mp->recursecount++;
				return 0;
			}
		}

		if (pthread__started == 0) {
			/* The spec says we must deadlock, so... */
			pthread__assert(mp->type == PTHREAD_MUTEX_NORMAL);
			(void) sigprocmask(SIG_SETMASK, NULL, &ss);
			for (;;) {
				sigsuspend(&ss);
			}
			/*NOTREACHED*/
		}

		/*
		 * Locking a mutex is not a cancellation
		 * point, so we don't need to do the
		 * test-cancellation dance.  We may get woken
		 * up spuriously by pthread_cancel or signals,
		 * but it's okay since we're just going to
		 * retry.
		 */
		self->pt_sleeponq = 1;
		self->pt_sleepobj = &mutex->ptm_blocked;
		pthread_spinunlock(&mutex->ptm_interlock);
		(void)pthread__park(self, &mutex->ptm_interlock,
		    &mutex->ptm_blocked, NULL, 0, &mutex->ptm_blocked);
	}

	return 0;
}


int
pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct mutex_private *mp;
	pthread_t self;

	pthread__error(EINVAL, "Invalid mutex",
	    mutex->ptm_magic == _PT_MUTEX_MAGIC);

	self = pthread__self();

	PTHREADD_ADD(PTHREADD_MUTEX_TRYLOCK);
	if (pthread__simple_lock_try(&mutex->ptm_lock) == 0) {
		/*
		 * These tests can be performed without holding the
		 * interlock because these fields are only modified
		 * if we know we own the mutex.
		 */
		mp = mutex->ptm_private;
		if (mp != NULL && mp->type == PTHREAD_MUTEX_RECURSIVE &&
		    mutex->ptm_owner == self) {
			if (mp->recursecount == INT_MAX)
				return EAGAIN;
			mp->recursecount++;
			return 0;
		}

		return EBUSY;
	}

	mutex->ptm_owner = self;

	return 0;
}


int
pthread_mutex_unlock(pthread_mutex_t *mutex)
{
	struct mutex_private *mp;
	pthread_t self;
	int weown;

	pthread__error(EINVAL, "Invalid mutex",
	    mutex->ptm_magic == _PT_MUTEX_MAGIC);

	PTHREADD_ADD(PTHREADD_MUTEX_UNLOCK);

	/*
	 * These tests can be performed without holding the
	 * interlock because these fields are only modified
	 * if we know we own the mutex.
	 */
	self = pthread_self();
	weown = (mutex->ptm_owner == self);
	mp = mutex->ptm_private;

	if (mp == NULL) {
		if (__predict_false(!weown)) {
			pthread__error(EPERM, "Unlocking unlocked mutex",
			    (mutex->ptm_owner != 0));
			pthread__error(EPERM,
			    "Unlocking mutex owned by another thread", weown);
		}
	} else if (mp->type == PTHREAD_MUTEX_RECURSIVE) {
		if (!weown)
			return EPERM;
		if (mp->recursecount != 0) {
			mp->recursecount--;
			return 0;
		}
	} else if (mp->type == PTHREAD_MUTEX_ERRORCHECK) {
		if (!weown)
			return EPERM;
		if (__predict_false(!weown)) {
			pthread__error(EPERM, "Unlocking unlocked mutex",
			    (mutex->ptm_owner != 0));
			pthread__error(EPERM,
			    "Unlocking mutex owned by another thread", weown);
		}
	}

	mutex->ptm_owner = NULL;
	pthread__simple_unlock(&mutex->ptm_lock);

	/*
	 * Do a double-checked locking dance to see if there are any
	 * waiters.  If we don't see any waiters, we can exit, because
	 * we've already released the lock.  If we do see waiters, they
	 * were probably waiting on us... there's a slight chance that
	 * they are waiting on a different thread's ownership of the
	 * lock that happened between the unlock above and this
	 * examination of the queue; if so, no harm is done, as the
	 * waiter will loop and see that the mutex is still locked.
	 */
	pthread_spinlock(&mutex->ptm_interlock);
	pthread__unpark_all(self, &mutex->ptm_interlock, &mutex->ptm_blocked);
	return 0;
}

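/*
 * Mutex attribute objects keep their settings (currently just the
 * mutex type) in a malloc'd mutexattr_private structure hung off
 * ptma_private.
 */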
int
pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
	struct mutexattr_private *map;

	map = malloc(sizeof(*map));
	if (map == NULL)
		return ENOMEM;

	*map = mutexattr_private_default;

	attr->ptma_magic = _PT_MUTEXATTR_MAGIC;
	attr->ptma_private = map;

	return 0;
}


int
pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	attr->ptma_magic = _PT_MUTEXATTR_DEAD;
	if (attr->ptma_private != NULL)
		free(attr->ptma_private);

	return 0;
}


int
pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *typep)
{
	struct mutexattr_private *map;

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	map = attr->ptma_private;

	*typep = map->type;

	return 0;
}


int
pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{
	struct mutexattr_private *map;

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	map = attr->ptma_private;

	switch (type) {
	case PTHREAD_MUTEX_NORMAL:
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_RECURSIVE:
		map->type = type;
		break;

	default:
		return EINVAL;
	}

	return 0;
}

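/*
 * Cancellation cleanup handler for pthread_once(): drops the once
 * control's mutex if the init routine is cancelled.
 */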
static void
once_cleanup(void *closure)
{

	pthread_mutex_unlock((pthread_mutex_t *)closure);
}

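/*
 * pthread_once(): run the init routine exactly once, even when called
 * concurrently from several threads.  Typical usage (illustrative only):
 *
 *	static pthread_once_t once = PTHREAD_ONCE_INIT;
 *
 *	pthread_once(&once, init_routine);
 */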
int
pthread_once(pthread_once_t *once_control, void (*routine)(void))
{

	if (once_control->pto_done == 0) {
		pthread_mutex_lock(&once_control->pto_mutex);
		pthread_cleanup_push(&once_cleanup, &once_control->pto_mutex);
		if (once_control->pto_done == 0) {
			routine();
			once_control->pto_done = 1;
		}
		pthread_cleanup_pop(1);
	}

	return 0;
}

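/*
 * Report whether the given thread currently owns the mutex; for use
 * within the library only.
 */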
int
pthread__mutex_owned(pthread_t thread, pthread_mutex_t *mutex)
{

	return mutex->ptm_owner == thread;
}

#endif	/* !PTHREAD__HAVE_ATOMIC */