/*
 * pthread_lock.c, revision 1.19 (libpthread)
 */
      1 /*	$NetBSD: pthread_lock.c,v 1.19 2007/03/05 23:30:17 ad Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2001, 2006, 2007 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Nathan J. Williams and Andrew Doran.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  * 3. All advertising materials mentioning features or use of this software
     19  *    must display the following acknowledgement:
     20  *        This product includes software developed by the NetBSD
     21  *        Foundation, Inc. and its contributors.
     22  * 4. Neither the name of The NetBSD Foundation nor the names of its
     23  *    contributors may be used to endorse or promote products derived
     24  *    from this software without specific prior written permission.
     25  *
     26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     36  * POSSIBILITY OF SUCH DAMAGE.
     37  */
     38 
     39 #include <sys/cdefs.h>
     40 __RCSID("$NetBSD: pthread_lock.c,v 1.19 2007/03/05 23:30:17 ad Exp $");
     41 
     42 #include <sys/types.h>
     43 #include <sys/lock.h>
     44 #include <sys/ras.h>
     45 
     46 #include <errno.h>
     47 #include <unistd.h>
     48 #include <stdio.h>
     49 
     50 #include "pthread.h"
     51 #include "pthread_int.h"
     52 
/* Spinlock debug output; compiled in only when explicitly requested. */
#ifdef PTHREAD_SPIN_DEBUG_PRINT
#define SDPRINTF(x) DPRINTF(x)
#else
#define SDPRINTF(x)
#endif

/* This does not belong here. */
#if defined(i386) || defined(__x86_64__)
/*
 * "rep; nop" encodes the x86 PAUSE hint: tells the CPU we are in a
 * spin-wait loop so it can relax (helps SMT siblings and saves power).
 * The "memory" clobber keeps the compiler from caching the lock word
 * across iterations.
 */
#define	smt_pause()	__asm __volatile("rep; nop" ::: "memory")
#else
#define	smt_pause()	/* nothing */
#endif
     65 
     66 extern int pthread__nspins;
     67 
     68 RAS_DECL(pthread__lock);
     69 
/*
 * Initialize a simple lock to the unlocked state (RAS variant).
 */
static void
pthread__ras_simple_lock_init(__cpu_simple_lock_t *alp)
{

	*alp = __SIMPLELOCK_UNLOCKED;
}
     76 
/*
 * Try to acquire a simple lock using a Restartable Atomic Sequence.
 *
 * The load/store pair between RAS_START and RAS_END is registered with
 * the kernel via rasctl(); if the thread is preempted inside the
 * sequence it is restarted at RAS_START, making the test-and-set
 * atomic on a uniprocessor without hardware atomics.  The statement
 * order here is therefore critical -- do not reorder or restructure.
 *
 * Returns nonzero if the lock was acquired, 0 if it was already held.
 */
static int
pthread__ras_simple_lock_try(__cpu_simple_lock_t *alp)
{
	__cpu_simple_lock_t old;

	RAS_START(pthread__lock);
	old = *alp;
	*alp = __SIMPLELOCK_LOCKED;
	RAS_END(pthread__lock);

	return (old == __SIMPLELOCK_UNLOCKED);
}
     89 
/*
 * Release a simple lock (RAS variant): a plain store suffices, since
 * RAS locks are only selected on uniprocessors.
 */
static void
pthread__ras_simple_unlock(__cpu_simple_lock_t *alp)
{

	*alp = __SIMPLELOCK_UNLOCKED;
}
     96 
/* Lock operations vector for the RAS-based implementation. */
static const struct pthread_lock_ops pthread__lock_ops_ras = {
	pthread__ras_simple_lock_init,
	pthread__ras_simple_lock_try,
	pthread__ras_simple_unlock,
};
    102 
/*
 * Initialize a simple lock using the machine-dependent atomic
 * primitive.
 */
static void
pthread__atomic_simple_lock_init(__cpu_simple_lock_t *alp)
{

	__cpu_simple_lock_init(alp);
}
    109 
/*
 * Try to acquire a simple lock using the machine-dependent atomic
 * primitive.  Returns nonzero on success, 0 if the lock is held.
 */
static int
pthread__atomic_simple_lock_try(__cpu_simple_lock_t *alp)
{

	return (__cpu_simple_lock_try(alp));
}
    116 
/*
 * Release a simple lock using the machine-dependent atomic primitive.
 */
static void
pthread__atomic_simple_unlock(__cpu_simple_lock_t *alp)
{

	__cpu_simple_unlock(alp);
}
    123 
/* Lock operations vector for the machine-dependent atomic implementation. */
static const struct pthread_lock_ops pthread__lock_ops_atomic = {
	pthread__atomic_simple_lock_init,
	pthread__atomic_simple_lock_try,
	pthread__atomic_simple_unlock,
};
    129 
    130 /*
    131  * We default to pointing to the RAS primitives; we might need to use
    132  * locks early, but before main() starts.  This is safe, since no other
    133  * threads will be active for the process, so atomicity will not be
    134  * required.
    135  */
    136 const struct pthread_lock_ops *pthread__lock_ops = &pthread__lock_ops_ras;
    137 
    138 /*
    139  * Initialize the locking primitives.  On uniprocessors, we always
    140  * use Restartable Atomic Sequences if they are available.  Otherwise,
    141  * we fall back onto machine-dependent atomic lock primitives.
    142  */
    143 void
    144 pthread__lockprim_init(int ncpu)
    145 {
    146 
    147 	if (ncpu == 1 && rasctl(RAS_ADDR(pthread__lock),
    148 	    RAS_SIZE(pthread__lock), RAS_INSTALL) == 0) {
    149 		pthread__lock_ops = &pthread__lock_ops_ras;
    150 		return;
    151 	}
    152 
    153 	pthread__lock_ops = &pthread__lock_ops_atomic;
    154 }
    155 
/*
 * Initialize a pthread spinlock to the unlocked state, using whichever
 * primitive set pthread__lock_ops currently selects.
 */
void
pthread_lockinit(pthread_spin_t *lock)
{

	pthread__simple_lock_init(lock);
}
    162 
    163 void
    164 pthread_spinlock(pthread_t thread, pthread_spin_t *lock)
    165 {
    166 	int count, ret;
    167 
    168 	count = pthread__nspins;
    169 	SDPRINTF(("(pthread_spinlock %p) spinlock %p (count %d)\n",
    170 	    thread, lock, thread->pt_spinlocks));
    171 #ifdef PTHREAD_SPIN_DEBUG
    172 	pthread__assert(thread->pt_spinlocks >= 0);
    173 #endif
    174 
    175 	thread->pt_spinlocks++;
    176 	if (__predict_true(pthread__simple_lock_try(lock))) {
    177 		PTHREADD_ADD(PTHREADD_SPINLOCKS);
    178 		return;
    179 	}
    180 
    181 	do {
    182 		while ((ret = pthread__simple_lock_try(lock)) == 0 &&
    183 		    --count) {
    184 			smt_pause();
    185 		}
    186 
    187 		if (ret == 1)
    188 			break;
    189 
    190 		SDPRINTF(("(pthread_spinlock %p) retrying spinlock %p "
    191 		    "(count %d)\n", thread, lock,
    192 		    thread->pt_spinlocks));
    193 		thread->pt_spinlocks--;
    194 
    195 		/* XXXLWP far from ideal */
    196 		sched_yield();
    197 		count = pthread__nspins;
    198 		thread->pt_spinlocks++;
    199 	} while (/*CONSTCOND*/ 1);
    200 
    201 	PTHREADD_ADD(PTHREADD_SPINLOCKS);
    202 }
    203 
    204 int
    205 pthread_spintrylock(pthread_t thread, pthread_spin_t *lock)
    206 {
    207 	int ret;
    208 
    209 	SDPRINTF(("(pthread_spintrylock %p) spinlock %p (count %d)\n",
    210 	    thread, lock, thread->pt_spinlocks));
    211 
    212 	thread->pt_spinlocks++;
    213 	ret = pthread__simple_lock_try(lock);
    214 	if (!ret)
    215 		thread->pt_spinlocks--;
    216 
    217 	return ret;
    218 }
    219 
/*
 * Release a spinlock held by "thread".  The lock is released before
 * pt_spinlocks is decremented, so the counter never understates the
 * number of locks actually held.
 */
void
pthread_spinunlock(pthread_t thread, pthread_spin_t *lock)
{

	SDPRINTF(("(pthread_spinunlock %p) spinlock %p (count %d)\n",
	    thread, lock, thread->pt_spinlocks));

	pthread__simple_unlock(lock);
	thread->pt_spinlocks--;
#ifdef PTHREAD_SPIN_DEBUG
	pthread__assert(thread->pt_spinlocks >= 0);
#endif
	PTHREADD_ADD(PTHREADD_SPINUNLOCKS);
}
    234 
    235 
    236 /*
    237  * Public (POSIX-specified) spinlocks.
    238  */
    239 int
    240 pthread_spin_init(pthread_spinlock_t *lock, int pshared)
    241 {
    242 
    243 #ifdef ERRORCHECK
    244 	if (lock == NULL || (pshared != PTHREAD_PROCESS_PRIVATE &&
    245 	    pshared != PTHREAD_PROCESS_SHARED))
    246 		return EINVAL;
    247 #endif
    248 	lock->pts_magic = _PT_SPINLOCK_MAGIC;
    249 
    250 	/*
    251 	 * We don't actually use the pshared flag for anything;
    252 	 * CPU simple locks have all the process-shared properties
    253 	 * that we want anyway.
    254 	 */
    255 	lock->pts_flags = pshared;
    256 	pthread_lockinit(&lock->pts_spin);
    257 
    258 	return 0;
    259 }
    260 
    261 int
    262 pthread_spin_destroy(pthread_spinlock_t *lock)
    263 {
    264 
    265 #ifdef ERRORCHECK
    266 	if (lock == NULL || lock->pts_magic != _PT_SPINLOCK_MAGIC)
    267 		return EINVAL;
    268 	if (lock->pts_spin != __SIMPLELOCK_UNLOCKED)
    269 		return EBUSY;
    270 #endif
    271 
    272 	lock->pts_magic = _PT_SPINLOCK_DEAD;
    273 
    274 	return 0;
    275 }
    276 
    277 int
    278 pthread_spin_lock(pthread_spinlock_t *lock)
    279 {
    280 
    281 #ifdef ERRORCHECK
    282 	if (lock == NULL || lock->pts_magic != _PT_SPINLOCK_MAGIC)
    283 		return EINVAL;
    284 #endif
    285 
    286 	while (pthread__simple_lock_try(&lock->pts_spin) == 0) {
    287 		smt_pause();
    288 	}
    289 
    290 	return 0;
    291 }
    292 
    293 int
    294 pthread_spin_trylock(pthread_spinlock_t *lock)
    295 {
    296 
    297 #ifdef ERRORCHECK
    298 	if (lock == NULL || lock->pts_magic != _PT_SPINLOCK_MAGIC)
    299 		return EINVAL;
    300 #endif
    301 
    302 	if (pthread__simple_lock_try(&lock->pts_spin) == 0)
    303 		return EBUSY;
    304 
    305 	return 0;
    306 }
    307 
    308 int
    309 pthread_spin_unlock(pthread_spinlock_t *lock)
    310 {
    311 
    312 #ifdef ERRORCHECK
    313 	if (lock == NULL || lock->pts_magic != _PT_SPINLOCK_MAGIC)
    314 		return EINVAL;
    315 #endif
    316 
    317 	pthread__simple_unlock(&lock->pts_spin);
    318 
    319 	return 0;
    320 }
    321