/*	$NetBSD: pthread_mutex.c,v 1.33 2007/09/08 22:49:50 ad Exp $	*/

/*-
 * Copyright (c) 2001, 2003, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, by Jason R. Thorpe, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_mutex.c,v 1.33 2007/09/08 22:49:50 ad Exp $");

#include <errno.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>

#include "pthread.h"
#include "pthread_int.h"

#ifndef PTHREAD__HAVE_ATOMIC

static int pthread_mutex_lock_slow(pthread_t, pthread_mutex_t *);

__strong_alias(__libc_mutex_init,pthread_mutex_init)
__strong_alias(__libc_mutex_lock,pthread_mutex_lock)
__strong_alias(__libc_mutex_trylock,pthread_mutex_trylock)
__strong_alias(__libc_mutex_unlock,pthread_mutex_unlock)
__strong_alias(__libc_mutex_destroy,pthread_mutex_destroy)

__strong_alias(__libc_mutexattr_init,pthread_mutexattr_init)
__strong_alias(__libc_mutexattr_destroy,pthread_mutexattr_destroy)
__strong_alias(__libc_mutexattr_settype,pthread_mutexattr_settype)

__strong_alias(__libc_thr_once,pthread_once)

struct mutex_private {
	int	type;
	int	recursecount;
};

static const struct mutex_private mutex_private_default = {
	PTHREAD_MUTEX_DEFAULT,
	0,
};

struct mutexattr_private {
	int	type;
};

static const struct mutexattr_private mutexattr_private_default = {
	PTHREAD_MUTEX_DEFAULT,
};

int
pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
{
	struct mutexattr_private *map;
	struct mutex_private *mp;

	pthread__error(EINVAL, "Invalid mutex attribute",
	    (attr == NULL) || (attr->ptma_magic == _PT_MUTEXATTR_MAGIC));

	if (attr != NULL && (map = attr->ptma_private) != NULL &&
	    memcmp(map, &mutexattr_private_default, sizeof(*map)) != 0) {
		mp = malloc(sizeof(*mp));
		if (mp == NULL)
			return ENOMEM;

		mp->type = map->type;
		mp->recursecount = 0;
	} else {
		/* LINTED cast away const */
		mp = (struct mutex_private *) &mutex_private_default;
	}

	mutex->ptm_magic = _PT_MUTEX_MAGIC;
	mutex->ptm_owner = NULL;
	pthread_lockinit(&mutex->ptm_lock);
	pthread_lockinit(&mutex->ptm_interlock);
	PTQ_INIT(&mutex->ptm_blocked);
	mutex->ptm_private = mp;

	return 0;
}


int
pthread_mutex_destroy(pthread_mutex_t *mutex)
{

	pthread__error(EINVAL, "Invalid mutex",
	    mutex->ptm_magic == _PT_MUTEX_MAGIC);
	pthread__error(EBUSY, "Destroying locked mutex",
	    mutex->ptm_lock == __SIMPLELOCK_UNLOCKED);

	mutex->ptm_magic = _PT_MUTEX_DEAD;
	if (mutex->ptm_private != NULL &&
	    mutex->ptm_private != (const void *)&mutex_private_default)
		free(mutex->ptm_private);

	return 0;
}
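
/*
 * Usage sketch (illustrative only, not part of this file's interface):
 * a mutex initialized with a NULL attribute pointer gets the default
 * (non-recursive) type and shares the static mutex_private_default, so
 * pthread_mutex_destroy() frees nothing in that case.  The variable
 * name is invented for the example.
 *
 *	pthread_mutex_t list_mutex;
 *
 *	pthread_mutex_init(&list_mutex, NULL);
 *	... use the mutex ...
 *	pthread_mutex_destroy(&list_mutex);
 */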


/*
 * Note regarding memory visibility: Pthreads has rules about memory
 * visibility and mutexes. Very roughly: Memory a thread can see when
 * it unlocks a mutex can be seen by another thread that locks the
 * same mutex.
 *
 * A memory barrier after a lock and before an unlock will provide
 * this behavior. This code relies on pthread__simple_lock_try() to issue
 * a barrier after obtaining a lock, and on pthread__simple_unlock() to
 * issue a barrier before releasing a lock.
 */
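
/*
 * Illustrative sketch (not part of the implementation): the rule above
 * is what makes a simple handoff through a mutex safe.  The names below
 * are invented for the example; only the pthread_mutex_* calls and
 * PTHREAD_MUTEX_INITIALIZER are real.
 *
 *	static pthread_mutex_t handoff_mutex = PTHREAD_MUTEX_INITIALIZER;
 *	static int handoff_data, handoff_ready;
 *
 *	Producer:
 *		pthread_mutex_lock(&handoff_mutex);
 *		handoff_data = 42;
 *		handoff_ready = 1;
 *		pthread_mutex_unlock(&handoff_mutex);
 *
 *	Consumer:
 *		pthread_mutex_lock(&handoff_mutex);
 *		if (handoff_ready)
 *			consume(handoff_data);
 *		pthread_mutex_unlock(&handoff_mutex);
 *
 * The consumer's lock acquisition orders after the producer's unlock,
 * so the store to handoff_data is visible wherever handoff_ready is
 * observed to be set.  consume() is a placeholder.
 */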

int
pthread_mutex_lock(pthread_mutex_t *mutex)
{
	pthread_t self;
	int error;

	self = pthread__self();

	PTHREADD_ADD(PTHREADD_MUTEX_LOCK);

	/*
	 * Note that if we get the lock, we don't have to deal with any
	 * non-default lock type handling.
	 */
	if (__predict_false(pthread__simple_lock_try(&mutex->ptm_lock) == 0)) {
		error = pthread_mutex_lock_slow(self, mutex);
		if (error)
			return error;
	}

	/*
	 * We have the lock!
	 */
	mutex->ptm_owner = self;

	return 0;
}


static int
pthread_mutex_lock_slow(pthread_t self, pthread_mutex_t *mutex)
{
	extern int pthread__started;
	struct mutex_private *mp;
	sigset_t ss;
	int count;

	pthread__error(EINVAL, "Invalid mutex",
	    mutex->ptm_magic == _PT_MUTEX_MAGIC);

	PTHREADD_ADD(PTHREADD_MUTEX_LOCK_SLOW);

	for (;;) {
		/* Spin for a while. */
		count = pthread__nspins;
		while (mutex->ptm_lock == __SIMPLELOCK_LOCKED && --count > 0)
			pthread__smt_pause();
		if (count > 0) {
			if (pthread__simple_lock_try(&mutex->ptm_lock) != 0)
				break;
			continue;
		}

		/* Okay, didn't look free. Get the interlock... */
		pthread_spinlock(&mutex->ptm_interlock);

		/*
		 * The mutex_unlock routine will get the interlock
		 * before looking at the list of sleepers, so if the
		 * lock is held we can safely put ourselves on the
		 * sleep queue. If it's not held, we can try taking it
		 * again.
		 */
		PTQ_INSERT_HEAD(&mutex->ptm_blocked, self, pt_sleep);
		if (mutex->ptm_lock != __SIMPLELOCK_LOCKED) {
			PTQ_REMOVE(&mutex->ptm_blocked, self, pt_sleep);
			pthread_spinunlock(&mutex->ptm_interlock);
			continue;
		}

		mp = mutex->ptm_private;
		if (mutex->ptm_owner == self && mp != NULL) {
			switch (mp->type) {
			case PTHREAD_MUTEX_ERRORCHECK:
				PTQ_REMOVE(&mutex->ptm_blocked, self, pt_sleep);
				pthread_spinunlock(&mutex->ptm_interlock);
				return EDEADLK;

			case PTHREAD_MUTEX_RECURSIVE:
				/*
				 * It's safe to do this without
				 * holding the interlock, because
				 * we only modify it if we know we
				 * own the mutex.
				 */
				PTQ_REMOVE(&mutex->ptm_blocked, self, pt_sleep);
				pthread_spinunlock(&mutex->ptm_interlock);
				if (mp->recursecount == INT_MAX)
					return EAGAIN;
				mp->recursecount++;
				return 0;
			}
		}

		if (pthread__started == 0) {
			/* The spec says we must deadlock, so... */
			pthread__assert(mp == NULL ||
			    mp->type == PTHREAD_MUTEX_NORMAL);
			(void) sigprocmask(SIG_SETMASK, NULL, &ss);
			for (;;) {
				sigsuspend(&ss);
			}
			/*NOTREACHED*/
		}

		/*
		 * Locking a mutex is not a cancellation
		 * point, so we don't need to do the
		 * test-cancellation dance. We may get woken
		 * up spuriously by pthread_cancel or signals,
		 * but it's okay since we're just going to
		 * retry.
		 */
		self->pt_sleeponq = 1;
		self->pt_sleepobj = &mutex->ptm_blocked;
		pthread_spinunlock(&mutex->ptm_interlock);
		(void)pthread__park(self, &mutex->ptm_interlock,
		    &mutex->ptm_blocked, NULL, 0, &mutex->ptm_blocked);
	}

	return 0;
}


int
pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct mutex_private *mp;
	pthread_t self;

	pthread__error(EINVAL, "Invalid mutex",
	    mutex->ptm_magic == _PT_MUTEX_MAGIC);

	self = pthread__self();

	PTHREADD_ADD(PTHREADD_MUTEX_TRYLOCK);
	if (pthread__simple_lock_try(&mutex->ptm_lock) == 0) {
		/*
		 * These tests can be performed without holding the
		 * interlock because these fields are only modified
		 * if we know we own the mutex.
		 */
		mp = mutex->ptm_private;
		if (mp != NULL && mp->type == PTHREAD_MUTEX_RECURSIVE &&
		    mutex->ptm_owner == self) {
			if (mp->recursecount == INT_MAX)
				return EAGAIN;
			mp->recursecount++;
			return 0;
		}

		return EBUSY;
	}

	mutex->ptm_owner = self;

	return 0;
}
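
/*
 * Usage sketch (illustrative only): pthread_mutex_trylock() returns 0
 * on success and EBUSY when the lock is already held, so callers
 * typically fall back to other work instead of blocking.  The names
 * work_mutex and do_unrelated_work() are invented for the example.
 *
 *	if (pthread_mutex_trylock(&work_mutex) == 0) {
 *		... touch the shared state ...
 *		pthread_mutex_unlock(&work_mutex);
 *	} else {
 *		do_unrelated_work();	(EBUSY: don't block)
 *	}
 */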


int
pthread_mutex_unlock(pthread_mutex_t *mutex)
{
	struct mutex_private *mp;
	pthread_t self;
	int weown;

	pthread__error(EINVAL, "Invalid mutex",
	    mutex->ptm_magic == _PT_MUTEX_MAGIC);

	PTHREADD_ADD(PTHREADD_MUTEX_UNLOCK);

	/*
	 * These tests can be performed without holding the
	 * interlock because these fields are only modified
	 * if we know we own the mutex.
	 */
	self = pthread_self();
	weown = (mutex->ptm_owner == self);
	mp = mutex->ptm_private;

	if (mp == NULL) {
		if (__predict_false(!weown)) {
			pthread__error(EPERM, "Unlocking unlocked mutex",
			    (mutex->ptm_owner != 0));
			pthread__error(EPERM,
			    "Unlocking mutex owned by another thread", weown);
		}
	} else if (mp->type == PTHREAD_MUTEX_RECURSIVE) {
		if (!weown)
			return EPERM;
		if (mp->recursecount != 0) {
			mp->recursecount--;
			return 0;
		}
	} else if (mp->type == PTHREAD_MUTEX_ERRORCHECK) {
		if (!weown)
			return EPERM;
	}

	mutex->ptm_owner = NULL;
	pthread__simple_unlock(&mutex->ptm_lock);

	/*
	 * Do a double-checked locking dance to see if there are any
	 * waiters. If we don't see any waiters, we can exit, because
	 * we've already released the lock. If we do see waiters, they
	 * were probably waiting on us... there's a slight chance that
	 * they are waiting on a different thread's ownership of the
	 * lock that happened between the unlock above and this
	 * examination of the queue; if so, no harm is done, as the
	 * waiter will loop and see that the mutex is still locked.
	 */
	pthread_spinlock(&mutex->ptm_interlock);
	pthread__unpark_all(self, &mutex->ptm_interlock, &mutex->ptm_blocked);
	return 0;
}

int
pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
	struct mutexattr_private *map;

	map = malloc(sizeof(*map));
	if (map == NULL)
		return ENOMEM;

	*map = mutexattr_private_default;

	attr->ptma_magic = _PT_MUTEXATTR_MAGIC;
	attr->ptma_private = map;

	return 0;
}


int
pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	attr->ptma_magic = _PT_MUTEXATTR_DEAD;
	if (attr->ptma_private != NULL)
		free(attr->ptma_private);

	return 0;
}


int
pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *typep)
{
	struct mutexattr_private *map;

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	map = attr->ptma_private;

	*typep = map->type;

	return 0;
}


int
pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{
	struct mutexattr_private *map;

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	map = attr->ptma_private;

	switch (type) {
	case PTHREAD_MUTEX_NORMAL:
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_RECURSIVE:
		map->type = type;
		break;

	default:
		return EINVAL;
	}

	return 0;
}
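
/*
 * Usage sketch (illustrative only): selecting a non-default mutex type
 * goes through a mutexattr object.  The variable names are invented for
 * the example; error handling is omitted for brevity.
 *
 *	pthread_mutexattr_t recursive_attr;
 *	pthread_mutex_t recursive_mutex;
 *
 *	pthread_mutexattr_init(&recursive_attr);
 *	pthread_mutexattr_settype(&recursive_attr, PTHREAD_MUTEX_RECURSIVE);
 *	pthread_mutex_init(&recursive_mutex, &recursive_attr);
 *	pthread_mutexattr_destroy(&recursive_attr);
 *
 * With PTHREAD_MUTEX_RECURSIVE the owning thread may relock the mutex
 * and must unlock it a matching number of times; with
 * PTHREAD_MUTEX_ERRORCHECK a relock attempt returns EDEADLK instead.
 */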


static void
once_cleanup(void *closure)
{

	pthread_mutex_unlock((pthread_mutex_t *)closure);
}


int
pthread_once(pthread_once_t *once_control, void (*routine)(void))
{

	if (once_control->pto_done == 0) {
		pthread_mutex_lock(&once_control->pto_mutex);
		pthread_cleanup_push(&once_cleanup, &once_control->pto_mutex);
		if (once_control->pto_done == 0) {
			routine();
			once_control->pto_done = 1;
		}
		pthread_cleanup_pop(1);
	}

	return 0;
}
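
/*
 * Usage sketch (illustrative only): pthread_once() runs the init
 * routine exactly once no matter how many threads race to call it.
 * The names init_control and init_subsystem() are invented for the
 * example.
 *
 *	static pthread_once_t init_control = PTHREAD_ONCE_INIT;
 *
 *	static void
 *	init_subsystem(void)
 *	{
 *		... one-time setup; runs in exactly one thread ...
 *	}
 *
 *	pthread_once(&init_control, init_subsystem);
 */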

int
pthread__mutex_owned(pthread_t thread, pthread_mutex_t *mutex)
{

	return mutex->ptm_owner == thread;
}

#endif	/* !PTHREAD__HAVE_ATOMIC */