pthread_mutex.c revision 1.13
/*	$NetBSD: pthread_mutex.c,v 1.13 2003/04/18 21:36:38 nathanw Exp $	*/

/*-
 * Copyright (c) 2001, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, and by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_mutex.c,v 1.13 2003/04/18 21:36:38 nathanw Exp $");

#include <errno.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>

#include "pthread.h"
#include "pthread_int.h"

static int pthread_mutex_lock_slow(pthread_mutex_t *);

__strong_alias(__libc_mutex_init,pthread_mutex_init)
__strong_alias(__libc_mutex_lock,pthread_mutex_lock)
__strong_alias(__libc_mutex_trylock,pthread_mutex_trylock)
__strong_alias(__libc_mutex_unlock,pthread_mutex_unlock)
__strong_alias(__libc_mutex_destroy,pthread_mutex_destroy)

__strong_alias(__libc_mutexattr_init,pthread_mutexattr_init)
__strong_alias(__libc_mutexattr_destroy,pthread_mutexattr_destroy)
__strong_alias(__libc_mutexattr_settype,pthread_mutexattr_settype)

__strong_alias(__libc_thr_once,pthread_once)

struct mutex_private {
	int	type;
	int	recursecount;
};

static const struct mutex_private mutex_private_default = {
	PTHREAD_MUTEX_DEFAULT,
	0,
};

struct mutexattr_private {
	int	type;
};

static const struct mutexattr_private mutexattr_private_default = {
	PTHREAD_MUTEX_DEFAULT,
};

/*
 * If the mutex does not already have private data (i.e. was statically
 * initialized), then give it the default.
 */
#define	GET_MUTEX_PRIVATE(mutex, mp)					\
do {									\
	if (__predict_false((mp = (mutex)->ptm_private) == NULL)) {	\
		/* LINTED cast away const */				\
		mp = ((mutex)->ptm_private =				\
		    (void *)&mutex_private_default);			\
	}								\
} while (/*CONSTCOND*/0)
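
/*
 * Illustrative sketch (not part of this file): a mutex created with the
 * static initializer has no private data yet, so the first operation
 * that needs type information runs this macro and points ptm_private at
 * the shared read-only default (assuming PTHREAD_MUTEX_INITIALIZER
 * leaves ptm_private NULL):
 *
 *	static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
 *	struct mutex_private *mp;
 *
 *	GET_MUTEX_PRIVATE(&m, mp);	// mp->type is PTHREAD_MUTEX_DEFAULT
 */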

int
pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
{
	struct mutexattr_private *map;
	struct mutex_private *mp;

	pthread__assert((attr == NULL) ||
	    (attr->ptma_magic == _PT_MUTEXATTR_MAGIC));

	if (attr != NULL && (map = attr->ptma_private) != NULL &&
	    memcmp(map, &mutexattr_private_default, sizeof(*map)) != 0) {
		mp = malloc(sizeof(*mp));
		if (mp == NULL)
			return ENOMEM;

		mp->type = map->type;
		mp->recursecount = 0;
	} else {
		/* LINTED cast away const */
		mp = (struct mutex_private *) &mutex_private_default;
	}

	mutex->ptm_magic = _PT_MUTEX_MAGIC;
	mutex->ptm_owner = NULL;
	pthread_lockinit(&mutex->ptm_lock);
	pthread_lockinit(&mutex->ptm_interlock);
	PTQ_INIT(&mutex->ptm_blocked);
	mutex->ptm_private = mp;

	return 0;
}


int
pthread_mutex_destroy(pthread_mutex_t *mutex)
{

	pthread__assert(mutex->ptm_magic == _PT_MUTEX_MAGIC);
	pthread__assert(mutex->ptm_lock == __SIMPLELOCK_UNLOCKED);

	mutex->ptm_magic = _PT_MUTEX_DEAD;
	if (mutex->ptm_private != NULL &&
	    mutex->ptm_private != (const void *)&mutex_private_default)
		free(mutex->ptm_private);

	return 0;
}


/*
 * Note regarding memory visibility: Pthreads has rules about memory
 * visibility and mutexes. Very roughly: Memory a thread can see when
 * it unlocks a mutex can be seen by another thread that locks the
 * same mutex.
 *
 * A memory barrier after a lock and before an unlock will provide
 * this behavior. This code relies on pthread__simple_lock_try() to issue
 * a barrier after obtaining a lock, and on pthread__simple_unlock() to
 * issue a barrier before releasing a lock.
 */
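
/*
 * Illustrative example (not part of this file): the rule above is what
 * lets application code rely on a mutex alone for visibility.  If
 * thread A does
 *
 *	pthread_mutex_lock(&m);
 *	shared_counter++;
 *	pthread_mutex_unlock(&m);
 *
 * then a thread B that subsequently locks the same mutex m is
 * guaranteed to see A's update to shared_counter, with no additional
 * barriers in the application.
 */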

int
pthread_mutex_lock(pthread_mutex_t *mutex)
{
	int error;

	PTHREADD_ADD(PTHREADD_MUTEX_LOCK);
	/*
	 * Note that if we get the lock, we don't have to deal with any
	 * non-default lock type handling.
	 */
	if (__predict_false(pthread__simple_lock_try(&mutex->ptm_lock) == 0)) {
		error = pthread_mutex_lock_slow(mutex);
		if (error)
			return error;
	}

	/* We have the lock! */
	/*
	 * Identifying ourselves may be slow, and this assignment is
	 * only needed for (a) debugging identity of the owning thread
	 * and (b) handling errorcheck and recursive mutexes. It's
	 * better to just stash our stack pointer here and let those
	 * slow exception cases compute the stack->thread mapping.
	 */
	mutex->ptm_owner = (pthread_t)pthread__sp();

	return 0;
}


static int
pthread_mutex_lock_slow(pthread_mutex_t *mutex)
{
	pthread_t self;

	pthread__assert(mutex->ptm_magic == _PT_MUTEX_MAGIC);

	self = pthread__self();

	PTHREADD_ADD(PTHREADD_MUTEX_LOCK_SLOW);
	while (/*CONSTCOND*/1) {
		if (pthread__simple_lock_try(&mutex->ptm_lock))
			break; /* got it! */

		/* Okay, didn't look free. Get the interlock... */
		pthread_spinlock(self, &mutex->ptm_interlock);
		/*
		 * The mutex_unlock routine will get the interlock
		 * before looking at the list of sleepers, so if the
		 * lock is held we can safely put ourselves on the
		 * sleep queue. If it's not held, we can try taking it
		 * again.
		 */
		if (mutex->ptm_lock == __SIMPLELOCK_LOCKED) {
			struct mutex_private *mp;

			GET_MUTEX_PRIVATE(mutex, mp);

			if (pthread__id(mutex->ptm_owner) == self) {
				switch (mp->type) {
				case PTHREAD_MUTEX_ERRORCHECK:
					pthread_spinunlock(self,
					    &mutex->ptm_interlock);
					return EDEADLK;

				case PTHREAD_MUTEX_RECURSIVE:
					/*
					 * It's safe to do this without
					 * holding the interlock, because
					 * we only modify it if we know we
					 * own the mutex.
					 */
					pthread_spinunlock(self,
					    &mutex->ptm_interlock);
					if (mp->recursecount == INT_MAX)
						return EAGAIN;
					mp->recursecount++;
					return 0;
				}
			}

			PTQ_INSERT_HEAD(&mutex->ptm_blocked, self, pt_sleep);
			/*
			 * Locking a mutex is not a cancellation
			 * point, so we don't need to do the
			 * test-cancellation dance. We may get woken
			 * up spuriously by pthread_cancel or signals,
			 * but it's okay since we're just going to
			 * retry.
			 */
			pthread_spinlock(self, &self->pt_statelock);
			self->pt_state = PT_STATE_BLOCKED_QUEUE;
			self->pt_sleepobj = mutex;
			self->pt_sleepq = &mutex->ptm_blocked;
			self->pt_sleeplock = &mutex->ptm_interlock;
			pthread_spinunlock(self, &self->pt_statelock);

			pthread__block(self, &mutex->ptm_interlock);
			/* interlock is not held when we return */
		} else {
			pthread_spinunlock(self, &mutex->ptm_interlock);
		}
		/* Go around for another try. */
	}

	return 0;
}
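
/*
 * Illustrative example (not part of this file): the EDEADLK path above
 * is what an application sees when it relocks an errorcheck mutex it
 * already owns, instead of deadlocking silently:
 *
 *	pthread_mutex_t m;	// initialized with type PTHREAD_MUTEX_ERRORCHECK
 *
 *	pthread_mutex_lock(&m);		// returns 0
 *	pthread_mutex_lock(&m);		// returns EDEADLK
 */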


int
pthread_mutex_trylock(pthread_mutex_t *mutex)
{

	pthread__assert(mutex->ptm_magic == _PT_MUTEX_MAGIC);

	PTHREADD_ADD(PTHREADD_MUTEX_TRYLOCK);
	if (pthread__simple_lock_try(&mutex->ptm_lock) == 0) {
		struct mutex_private *mp;

		GET_MUTEX_PRIVATE(mutex, mp);

		/*
		 * These tests can be performed without holding the
		 * interlock because these fields are only modified
		 * if we know we own the mutex.
		 */
		if ((mp->type == PTHREAD_MUTEX_RECURSIVE) &&
		    (pthread__id(mutex->ptm_owner) == pthread__self())) {
			if (mp->recursecount == INT_MAX)
				return EAGAIN;
			mp->recursecount++;
			return 0;
		}

		return EBUSY;
	}

	/* see comment at the end of pthread_mutex_lock() */
	mutex->ptm_owner = (pthread_t)pthread__sp();

	return 0;
}
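
/*
 * Illustrative example (not part of this file): pthread_mutex_trylock()
 * never blocks, so callers typically use it to skip optional work when
 * the lock is contended; the names below are hypothetical:
 *
 *	if (pthread_mutex_trylock(&cache_lock) == 0) {
 *		prune_cache();		// assumed helper
 *		pthread_mutex_unlock(&cache_lock);
 *	}
 *	// On EBUSY someone else holds cache_lock; try again later.
 */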


int
pthread_mutex_unlock(pthread_mutex_t *mutex)
{
	struct mutex_private *mp;
	pthread_t self, blocked;
	int weown;

	pthread__assert(mutex->ptm_magic == _PT_MUTEX_MAGIC);

	PTHREADD_ADD(PTHREADD_MUTEX_UNLOCK);

	GET_MUTEX_PRIVATE(mutex, mp);

	/*
	 * These tests can be performed without holding the
	 * interlock because these fields are only modified
	 * if we know we own the mutex.
	 */
	weown = (pthread__id(mutex->ptm_owner) == pthread__self());
	switch (mp->type) {
	case PTHREAD_MUTEX_RECURSIVE:
		if (!weown)
			return EPERM;
		if (mp->recursecount != 0) {
			mp->recursecount--;
			return 0;
		}
		break;
	case PTHREAD_MUTEX_ERRORCHECK:
		if (!weown)
			return EPERM;
	default:
		pthread__assert(weown);
		break;
	}

	mutex->ptm_owner = NULL;
	pthread__simple_unlock(&mutex->ptm_lock);
	/*
	 * Do a double-checked locking dance to see if there are any
	 * waiters.  If we don't see any waiters, we can exit, because
	 * we've already released the lock. If we do see waiters, they
	 * were probably waiting on us... there's a slight chance that
	 * they are waiting on a different thread's ownership of the
	 * lock that happened between the unlock above and this
	 * examination of the queue; if so, no harm is done, as the
	 * waiter will loop and see that the mutex is still locked.
	 */
	if (!PTQ_EMPTY(&mutex->ptm_blocked)) {
		self = pthread__self();
		pthread_spinlock(self, &mutex->ptm_interlock);
		blocked = PTQ_FIRST(&mutex->ptm_blocked);
		if (blocked)
			PTQ_REMOVE(&mutex->ptm_blocked, blocked, pt_sleep);
		pthread_spinunlock(self, &mutex->ptm_interlock);

		/* Give the head of the blocked queue another try. */
		if (blocked) {
			PTHREADD_ADD(PTHREADD_MUTEX_UNLOCK_UNBLOCK);
			pthread__sched(self, blocked);
		}
	}
	return 0;
}

int
pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
	struct mutexattr_private *map;

	map = malloc(sizeof(*map));
	if (map == NULL)
		return ENOMEM;

	*map = mutexattr_private_default;

	attr->ptma_magic = _PT_MUTEXATTR_MAGIC;
	attr->ptma_private = map;

	return 0;
}


int
pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{

	pthread__assert(attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	attr->ptma_magic = _PT_MUTEXATTR_DEAD;
	if (attr->ptma_private != NULL)
		free(attr->ptma_private);

	return 0;
}


int
pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *typep)
{
	struct mutexattr_private *map;

	pthread__assert(attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	map = attr->ptma_private;

	*typep = map->type;

	return 0;
}


int
pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{
	struct mutexattr_private *map;

	pthread__assert(attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	map = attr->ptma_private;

	switch (type) {
	case PTHREAD_MUTEX_NORMAL:
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_RECURSIVE:
		map->type = type;
		break;

	default:
		return EINVAL;
	}

	return 0;
}
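
/*
 * Illustrative example (not part of this file): building a recursive
 * mutex through the attribute interface above:
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t m;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
 *	pthread_mutex_init(&m, &attr);
 *	pthread_mutexattr_destroy(&attr);
 */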


int
pthread_once(pthread_once_t *once_control, void (*routine)(void))
{

	if (once_control->pto_done == 0) {
		pthread_mutex_lock(&once_control->pto_mutex);
		if (once_control->pto_done == 0) {
			routine();
			once_control->pto_done = 1;
		}
		pthread_mutex_unlock(&once_control->pto_mutex);
	}

	return 0;
}
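
/*
 * Illustrative example (not part of this file): pthread_once() is the
 * usual way to run one-time initialization exactly once, however many
 * threads race to call it; the names below are hypothetical:
 *
 *	static pthread_once_t init_once = PTHREAD_ONCE_INIT;
 *
 *	static void
 *	init_tables(void)
 *	{
 *		// build lookup tables, open descriptors, etc.
 *	}
 *
 *	// in any thread, possibly concurrently:
 *	pthread_once(&init_once, init_tables);
 */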