/*	$NetBSD: pthread_mutex.c,v 1.39 2007/12/24 14:46:29 ad Exp $	*/

/*-
 * Copyright (c) 2001, 2003, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, by Jason R. Thorpe, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_mutex.c,v 1.39 2007/12/24 14:46:29 ad Exp $");

#include <errno.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>

#include <sys/types.h>
#include <sys/lock.h>

#include "pthread.h"
#include "pthread_int.h"

#ifndef	PTHREAD__HAVE_ATOMIC

static int pthread_mutex_lock_slow(pthread_t, pthread_mutex_t *);

int		_pthread_mutex_held_np(pthread_mutex_t *);
pthread_t	_pthread_mutex_owner_np(pthread_mutex_t *);

__weak_alias(pthread_mutex_held_np,_pthread_mutex_held_np)
__weak_alias(pthread_mutex_owner_np,_pthread_mutex_owner_np)

__strong_alias(__libc_mutex_init,pthread_mutex_init)
__strong_alias(__libc_mutex_lock,pthread_mutex_lock)
__strong_alias(__libc_mutex_trylock,pthread_mutex_trylock)
__strong_alias(__libc_mutex_unlock,pthread_mutex_unlock)
__strong_alias(__libc_mutex_destroy,pthread_mutex_destroy)

__strong_alias(__libc_mutexattr_init,pthread_mutexattr_init)
__strong_alias(__libc_mutexattr_destroy,pthread_mutexattr_destroy)
__strong_alias(__libc_mutexattr_settype,pthread_mutexattr_settype)

__strong_alias(__libc_thr_once,pthread_once)

struct mutex_private {
	int	type;
	int	recursecount;
};

static const struct mutex_private mutex_private_default = {
	PTHREAD_MUTEX_DEFAULT,
	0,
};

struct mutexattr_private {
	int	type;
};

static const struct mutexattr_private mutexattr_private_default = {
	PTHREAD_MUTEX_DEFAULT,
};

int
pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
{
	struct mutexattr_private *map;
	struct mutex_private *mp;

	pthread__error(EINVAL, "Invalid mutex attribute",
	    (attr == NULL) || (attr->ptma_magic == _PT_MUTEXATTR_MAGIC));

	if (attr != NULL && (map = attr->ptma_private) != NULL &&
	    memcmp(map, &mutexattr_private_default, sizeof(*map)) != 0) {
		mp = malloc(sizeof(*mp));
		if (mp == NULL)
			return ENOMEM;

		mp->type = map->type;
		mp->recursecount = 0;
	} else {
		/* LINTED cast away const */
		mp = (struct mutex_private *) &mutex_private_default;
	}

	mutex->ptm_magic = _PT_MUTEX_MAGIC;
	mutex->ptm_owner = NULL;
	pthread_lockinit(&mutex->ptm_lock);
	pthread_lockinit(&mutex->ptm_interlock);
	PTQ_INIT(&mutex->ptm_blocked);
	mutex->ptm_private = mp;

	return 0;
}
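
/*
 * Illustrative sketch only (not compiled into the library): typical use
 * of pthread_mutex_init() and pthread_mutex_destroy() with default
 * attributes.  The function name is hypothetical.
 */
#if 0
static int
example_mutex_lifecycle(void)
{
	pthread_mutex_t m;
	int error;

	/* NULL attributes select PTHREAD_MUTEX_DEFAULT. */
	error = pthread_mutex_init(&m, NULL);
	if (error != 0)
		return error;		/* e.g. ENOMEM */

	/* ... use the mutex ... */

	/* Destroying a locked mutex is an error; unlock it first. */
	return pthread_mutex_destroy(&m);
}
#endif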


int
pthread_mutex_destroy(pthread_mutex_t *mutex)
{

	pthread__error(EINVAL, "Invalid mutex",
	    mutex->ptm_magic == _PT_MUTEX_MAGIC);
	pthread__error(EBUSY, "Destroying locked mutex",
	    __SIMPLELOCK_UNLOCKED_P(&mutex->ptm_lock));

	mutex->ptm_magic = _PT_MUTEX_DEAD;
	if (mutex->ptm_private != NULL &&
	    mutex->ptm_private != (const void *)&mutex_private_default)
		free(mutex->ptm_private);

	return 0;
}


/*
 * Note regarding memory visibility: Pthreads has rules about memory
 * visibility and mutexes. Very roughly: Memory a thread can see when
 * it unlocks a mutex can be seen by another thread that locks the
 * same mutex.
 *
 * A memory barrier after a lock and before an unlock will provide
 * this behavior. This code relies on pthread__spintrylock() to issue
 * a barrier after obtaining a lock, and on pthread__spinunlock() to
 * issue a barrier before releasing a lock.
 */
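
/*
 * Illustrative sketch of the visibility rule described above (not part
 * of this file; names are hypothetical).  Whatever the writer stores
 * before unlocking is visible to a reader that subsequently locks the
 * same mutex.
 */
#if 0
static pthread_mutex_t example_lock = PTHREAD_MUTEX_INITIALIZER;
static int example_value;

static void
example_writer(void)
{

	pthread_mutex_lock(&example_lock);
	example_value = 42;		/* store before the unlock barrier */
	pthread_mutex_unlock(&example_lock);
}

static int
example_reader(void)
{
	int v;

	pthread_mutex_lock(&example_lock);	/* lock issues a barrier */
	v = example_value;			/* sees the writer's store */
	pthread_mutex_unlock(&example_lock);
	return v;
}
#endif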

int
pthread_mutex_lock(pthread_mutex_t *mutex)
{
	pthread_t self;
	int error;

	self = pthread__self();

	/*
	 * Note that if we get the lock, we don't have to deal with any
	 * non-default lock type handling.
	 */
	if (__predict_false(pthread__spintrylock(self, &mutex->ptm_lock) == 0)) {
		error = pthread_mutex_lock_slow(self, mutex);
		if (error)
			return error;
	}

	/*
	 * We have the lock!
	 */
	mutex->ptm_owner = self;

	return 0;
}


static int
pthread_mutex_lock_slow(pthread_t self, pthread_mutex_t *mutex)
{
	extern int pthread__started;
	struct mutex_private *mp;
	sigset_t ss;
	int count;

	pthread__error(EINVAL, "Invalid mutex",
	    mutex->ptm_magic == _PT_MUTEX_MAGIC);

	for (;;) {
		/* Spin for a while. */
		count = pthread__nspins;
		while (__SIMPLELOCK_LOCKED_P(&mutex->ptm_lock) && --count > 0)
			pthread__smt_pause();
		if (count > 0) {
			if (pthread__spintrylock(self, &mutex->ptm_lock) != 0)
				break;
			continue;
		}

		/* Okay, didn't look free. Get the interlock... */
		pthread__spinlock(self, &mutex->ptm_interlock);

		/*
		 * The mutex_unlock routine will get the interlock
		 * before looking at the list of sleepers, so if the
		 * lock is held we can safely put ourselves on the
		 * sleep queue. If it's not held, we can try taking it
		 * again.
		 */
		PTQ_INSERT_HEAD(&mutex->ptm_blocked, self, pt_sleep);
		if (__SIMPLELOCK_UNLOCKED_P(&mutex->ptm_lock)) {
			PTQ_REMOVE(&mutex->ptm_blocked, self, pt_sleep);
			pthread__spinunlock(self, &mutex->ptm_interlock);
			continue;
		}

		mp = mutex->ptm_private;
		if (mutex->ptm_owner == self && mp != NULL) {
			switch (mp->type) {
			case PTHREAD_MUTEX_ERRORCHECK:
				PTQ_REMOVE(&mutex->ptm_blocked, self, pt_sleep);
				pthread__spinunlock(self, &mutex->ptm_interlock);
				return EDEADLK;

			case PTHREAD_MUTEX_RECURSIVE:
				/*
				 * It's safe to do this without
				 * holding the interlock, because
				 * we only modify it if we know we
				 * own the mutex.
				 */
				PTQ_REMOVE(&mutex->ptm_blocked, self, pt_sleep);
				pthread__spinunlock(self, &mutex->ptm_interlock);
				if (mp->recursecount == INT_MAX)
					return EAGAIN;
				mp->recursecount++;
				return 0;
			}
		}

		if (pthread__started == 0) {
			/* The spec says we must deadlock, so... */
			pthread__assert(mp->type == PTHREAD_MUTEX_NORMAL);
			(void) sigprocmask(SIG_SETMASK, NULL, &ss);
			for (;;) {
				sigsuspend(&ss);
			}
			/*NOTREACHED*/
		}

		/*
		 * Locking a mutex is not a cancellation
		 * point, so we don't need to do the
		 * test-cancellation dance. We may get woken
		 * up spuriously by pthread_cancel or signals,
		 * but it's okay since we're just going to
		 * retry.
		 */
		self->pt_sleeponq = 1;
		self->pt_sleepobj = &mutex->ptm_blocked;
		pthread__spinunlock(self, &mutex->ptm_interlock);
		(void)pthread__park(self, &mutex->ptm_interlock,
		    &mutex->ptm_blocked, NULL, 0, &mutex->ptm_blocked);
	}

	return 0;
}
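
/*
 * Illustrative sketch (not part of this file): how the relock handling
 * above shows up through the public API.  With an error-checking mutex,
 * a second lock by the owning thread fails with EDEADLK instead of
 * deadlocking.  Names are hypothetical.
 */
#if 0
static int
example_errorcheck_relock(void)
{
	pthread_mutexattr_t attr;
	pthread_mutex_t m;
	int error;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
	pthread_mutex_init(&m, &attr);
	pthread_mutexattr_destroy(&attr);

	pthread_mutex_lock(&m);
	error = pthread_mutex_lock(&m);	/* EDEADLK; does not block */

	pthread_mutex_unlock(&m);
	pthread_mutex_destroy(&m);
	return error;
}
#endif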


int
pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct mutex_private *mp;
	pthread_t self;

	pthread__error(EINVAL, "Invalid mutex",
	    mutex->ptm_magic == _PT_MUTEX_MAGIC);

	self = pthread__self();

	if (pthread__spintrylock(self, &mutex->ptm_lock) == 0) {
		/*
		 * These tests can be performed without holding the
		 * interlock because these fields are only modified
		 * if we know we own the mutex.
		 */
		mp = mutex->ptm_private;
		if (mp != NULL && mp->type == PTHREAD_MUTEX_RECURSIVE &&
		    mutex->ptm_owner == self) {
			if (mp->recursecount == INT_MAX)
				return EAGAIN;
			mp->recursecount++;
			return 0;
		}

		return EBUSY;
	}

	mutex->ptm_owner = self;

	return 0;
}
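
/*
 * Illustrative sketch (not part of this file): pthread_mutex_trylock()
 * either takes the mutex immediately or fails with EBUSY; it never
 * sleeps.  Names are hypothetical.
 */
#if 0
static int
example_trylock(pthread_mutex_t *m)
{
	int error;

	error = pthread_mutex_trylock(m);
	if (error == EBUSY)
		return 0;	/* held elsewhere; go do something else */
	if (error != 0)
		return error;	/* e.g. EAGAIN on recursion overflow */

	/* ... critical section ... */
	return pthread_mutex_unlock(m);
}
#endif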


int
pthread_mutex_unlock(pthread_mutex_t *mutex)
{
	struct mutex_private *mp;
	pthread_t self;
	int weown;

	pthread__error(EINVAL, "Invalid mutex",
	    mutex->ptm_magic == _PT_MUTEX_MAGIC);

	/*
	 * These tests can be performed without holding the
	 * interlock because these fields are only modified
	 * if we know we own the mutex.
	 */
	self = pthread__self();
	weown = (mutex->ptm_owner == self);
	mp = mutex->ptm_private;

	if (mp == NULL) {
		if (__predict_false(!weown)) {
			pthread__error(EPERM, "Unlocking unlocked mutex",
			    (mutex->ptm_owner != 0));
			pthread__error(EPERM,
			    "Unlocking mutex owned by another thread", weown);
		}
	} else if (mp->type == PTHREAD_MUTEX_RECURSIVE) {
		if (!weown)
			return EPERM;
		if (mp->recursecount != 0) {
			mp->recursecount--;
			return 0;
		}
	} else if (mp->type == PTHREAD_MUTEX_ERRORCHECK) {
		if (!weown)
			return EPERM;
		if (__predict_false(!weown)) {
			pthread__error(EPERM, "Unlocking unlocked mutex",
			    (mutex->ptm_owner != 0));
			pthread__error(EPERM,
			    "Unlocking mutex owned by another thread", weown);
		}
	}

	mutex->ptm_owner = NULL;
	pthread__spinunlock(self, &mutex->ptm_lock);

	/*
	 * Do a double-checked locking dance to see if there are any
	 * waiters.  If we don't see any waiters, we can exit, because
	 * we've already released the lock. If we do see waiters, they
	 * were probably waiting on us... there's a slight chance that
	 * they are waiting on a different thread's ownership of the
	 * lock that happened between the unlock above and this
	 * examination of the queue; if so, no harm is done, as the
	 * waiter will loop and see that the mutex is still locked.
	 */
	pthread__spinlock(self, &mutex->ptm_interlock);
	pthread__unpark_all(self, &mutex->ptm_interlock, &mutex->ptm_blocked);
	return 0;
}
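
/*
 * Illustrative sketch (not part of this file): the ownership checks
 * above mean that only the owning thread may unlock an error-checking
 * or recursive mutex, and a recursive mutex must be unlocked as many
 * times as it was locked.  The function name is hypothetical.
 */
#if 0
static void
example_recursive_unlock(pthread_mutex_t *m)
{

	/* Assume *m was initialized as PTHREAD_MUTEX_RECURSIVE. */
	pthread_mutex_lock(m);
	pthread_mutex_lock(m);		/* recursecount becomes 1 */

	pthread_mutex_unlock(m);	/* recursecount back to 0; still held */
	pthread_mutex_unlock(m);	/* now actually released */
}
#endif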

int
pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
	struct mutexattr_private *map;

	map = malloc(sizeof(*map));
	if (map == NULL)
		return ENOMEM;

	*map = mutexattr_private_default;

	attr->ptma_magic = _PT_MUTEXATTR_MAGIC;
	attr->ptma_private = map;

	return 0;
}


int
pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	attr->ptma_magic = _PT_MUTEXATTR_DEAD;
	if (attr->ptma_private != NULL)
		free(attr->ptma_private);

	return 0;
}


int
pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *typep)
{
	struct mutexattr_private *map;

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	map = attr->ptma_private;

	*typep = map->type;

	return 0;
}


int
pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{
	struct mutexattr_private *map;

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	map = attr->ptma_private;

	switch (type) {
	case PTHREAD_MUTEX_NORMAL:
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_RECURSIVE:
		map->type = type;
		break;

	default:
		return EINVAL;
	}

	return 0;
}
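
/*
 * Illustrative sketch (not part of this file): one attribute object can
 * be configured once and reused to initialize several mutexes before
 * being destroyed; pthread_mutexattr_gettype() reads the type back.
 * Names are hypothetical.
 */
#if 0
static int
example_attr_reuse(pthread_mutex_t *a, pthread_mutex_t *b)
{
	pthread_mutexattr_t attr;
	int type, error;

	if ((error = pthread_mutexattr_init(&attr)) != 0)
		return error;
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
	pthread_mutexattr_gettype(&attr, &type);	/* type == PTHREAD_MUTEX_RECURSIVE */

	pthread_mutex_init(a, &attr);
	pthread_mutex_init(b, &attr);

	/* The attribute object is not needed once the mutexes exist. */
	return pthread_mutexattr_destroy(&attr);
}
#endif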


static void
once_cleanup(void *closure)
{

	pthread_mutex_unlock((pthread_mutex_t *)closure);
}


int
pthread_once(pthread_once_t *once_control, void (*routine)(void))
{

	if (once_control->pto_done == 0) {
		pthread_mutex_lock(&once_control->pto_mutex);
		pthread_cleanup_push(&once_cleanup, &once_control->pto_mutex);
		if (once_control->pto_done == 0) {
			routine();
			once_control->pto_done = 1;
		}
		pthread_cleanup_pop(1);
	}

	return 0;
}
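
/*
 * Illustrative sketch (not part of this file): pthread_once() runs the
 * init routine exactly once no matter how many threads race into it,
 * which is the usual way to lazily initialize shared state.  Names are
 * hypothetical.
 */
#if 0
static pthread_once_t example_once = PTHREAD_ONCE_INIT;
static pthread_mutex_t example_registry_lock;

static void
example_init_registry(void)
{

	/* Runs at most once, even with concurrent callers. */
	pthread_mutex_init(&example_registry_lock, NULL);
}

static void
example_use_registry(void)
{

	pthread_once(&example_once, example_init_registry);
	pthread_mutex_lock(&example_registry_lock);
	/* ... */
	pthread_mutex_unlock(&example_registry_lock);
}
#endif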

int
pthread__mutex_deferwake(pthread_t thread, pthread_mutex_t *mutex)
{

	return mutex->ptm_owner == thread;
}

int
_pthread_mutex_held_np(pthread_mutex_t *mutex)
{

	return mutex->ptm_owner == pthread__self();
}

pthread_t
_pthread_mutex_owner_np(pthread_mutex_t *mutex)
{

	return (pthread_t)mutex->ptm_owner;
}
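
/*
 * Illustrative sketch (not part of this file): the non-portable
 * pthread_mutex_held_np() interface is handy for asserting a locking
 * protocol in code that expects its caller to hold a given mutex.  The
 * assert() usage and the function name are hypothetical.
 */
#if 0
static void
example_requires_lock_held(pthread_mutex_t *m)
{

	assert(pthread_mutex_held_np(m));	/* needs <assert.h> */
	/* ... touch state protected by *m ... */
}
#endif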

#endif	/* !PTHREAD__HAVE_ATOMIC */