pthread_lock.c revision 1.29
/*	$NetBSD: pthread_lock.c,v 1.29 2007/09/24 12:19:39 skrll Exp $	*/

/*-
 * Copyright (c) 2001, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * libpthread internal spinlock routines.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_lock.c,v 1.29 2007/09/24 12:19:39 skrll Exp $");

#include <sys/types.h>
#include <sys/lock.h>
#include <sys/ras.h>

#include <errno.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>

#include "pthread.h"
#include "pthread_int.h"

/* How many times to try acquiring spin locks on MP systems. */
#define	PTHREAD__NSPINS		1024

#ifdef PTHREAD_SPIN_DEBUG_PRINT
#define SDPRINTF(x) DPRINTF(x)
#else
#define SDPRINTF(x)
#endif

static void pthread_spinlock_slow(pthread_spin_t *);

RAS_DECL(pthread__lock);

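/* Return non-zero if the given spinlock word is currently held. */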
int
pthread__simple_locked_p(__cpu_simple_lock_t *alp)
{
	return __SIMPLELOCK_LOCKED_P(alp);
}

#ifdef PTHREAD__ASM_RASOPS

void pthread__ras_simple_lock_init(__cpu_simple_lock_t *);
int pthread__ras_simple_lock_try(__cpu_simple_lock_t *);
void pthread__ras_simple_unlock(__cpu_simple_lock_t *);

#else

static void
pthread__ras_simple_lock_init(__cpu_simple_lock_t *alp)
{

	__cpu_simple_lock_clear(alp);
}

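/*
 * Try-lock built from a restartable atomic sequence (RAS).  If the
 * thread is preempted anywhere between RAS_START and RAS_END, the
 * kernel restarts it at RAS_START, so the load/store pair below
 * behaves atomically on a uniprocessor without hardware atomic
 * instructions.
 */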
static int
pthread__ras_simple_lock_try(__cpu_simple_lock_t *alp)
{
	int locked;

	RAS_START(pthread__lock);
	locked = __SIMPLELOCK_LOCKED_P(alp);
	__cpu_simple_lock_set(alp);
	RAS_END(pthread__lock);

	return !locked;
}

static void
pthread__ras_simple_unlock(__cpu_simple_lock_t *alp)
{

	__cpu_simple_lock_clear(alp);
}

#endif /* PTHREAD__ASM_RASOPS */

static const struct pthread_lock_ops pthread__lock_ops_ras = {
	pthread__ras_simple_lock_init,
	pthread__ras_simple_lock_try,
	pthread__ras_simple_unlock,
};

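/*
 * The atomic variants are thin wrappers around the machine-dependent
 * __cpu_simple_lock primitives.  They are used on multiprocessors,
 * where a RAS cannot provide atomicity across CPUs.
 */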
static void
pthread__atomic_simple_lock_init(__cpu_simple_lock_t *alp)
{

	__cpu_simple_lock_init(alp);
}

static int
pthread__atomic_simple_lock_try(__cpu_simple_lock_t *alp)
{

	return (__cpu_simple_lock_try(alp));
}

static void
pthread__atomic_simple_unlock(__cpu_simple_lock_t *alp)
{

	__cpu_simple_unlock(alp);
}

static const struct pthread_lock_ops pthread__lock_ops_atomic = {
	pthread__atomic_simple_lock_init,
	pthread__atomic_simple_lock_try,
	pthread__atomic_simple_unlock,
};

/*
 * We default to the RAS primitives, since locks may be needed early,
 * before main() starts.  This is safe even before the RAS has been
 * registered with the kernel: no other threads are active in the
 * process yet, so atomicity is not actually required.
 */
const struct pthread_lock_ops *pthread__lock_ops = &pthread__lock_ops_ras;

void
pthread_spinlock(pthread_spin_t *lock)
{
#ifdef PTHREAD_SPIN_DEBUG
	pthread_t thread = pthread__self();

	SDPRINTF(("(pthread_spinlock %p) spinlock %p (count %d)\n",
	    thread, lock, thread->pt_spinlocks));
	pthread__assert(thread->pt_spinlocks >= 0);
	thread->pt_spinlocks++;
	PTHREADD_ADD(PTHREADD_SPINLOCKS);
#endif

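	/* Fast path: take the lock in one try; otherwise spin. */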
	if (__predict_true(pthread__simple_lock_try(lock)))
		return;

	pthread_spinlock_slow(lock);
}

/*
 * Prevent this routine from being inlined.  The common case is no
 * contention, and it is better not to burden the instruction decoder
 * with the slow path.
 */
#if __GNUC_PREREQ__(3, 0)
__attribute ((noinline))
#endif
static void
pthread_spinlock_slow(pthread_spin_t *lock)
{
	int count;
#ifdef PTHREAD_SPIN_DEBUG
	pthread_t thread = pthread__self();
#endif

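	/*
	 * Spin while the lock appears held, pausing the pipeline on SMT
	 * processors.  If it is still held after pthread__nspins probes,
	 * yield the CPU and start over.
	 */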
	do {
		count = pthread__nspins;
		while (pthread__simple_locked_p(lock) && --count > 0)
			pthread__smt_pause();
		if (count > 0) {
			if (pthread__simple_lock_try(lock))
				break;
			continue;
		}

#ifdef PTHREAD_SPIN_DEBUG
		SDPRINTF(("(pthread_spinlock %p) retrying spinlock %p "
		    "(count %d)\n", thread, lock,
		    thread->pt_spinlocks));
		thread->pt_spinlocks--;
		/* XXXLWP far from ideal */
		sched_yield();
		thread->pt_spinlocks++;
#else
		/* XXXLWP far from ideal */
		sched_yield();
#endif
	} while (/*CONSTCOND*/ 1);
}

int
pthread_spintrylock(pthread_spin_t *lock)
{
#ifdef PTHREAD_SPIN_DEBUG
	pthread_t thread = pthread__self();
	int ret;

	SDPRINTF(("(pthread_spintrylock %p) spinlock %p (count %d)\n",
	    thread, lock, thread->pt_spinlocks));
	thread->pt_spinlocks++;
	ret = pthread__simple_lock_try(lock);
	if (!ret)
		thread->pt_spinlocks--;
	return ret;
#else
	return pthread__simple_lock_try(lock);
#endif
}

void
pthread_spinunlock(pthread_spin_t *lock)
{
#ifdef PTHREAD_SPIN_DEBUG
	pthread_t thread = pthread__self();

	SDPRINTF(("(pthread_spinunlock %p) spinlock %p (count %d)\n",
	    thread, lock, thread->pt_spinlocks));

	pthread__simple_unlock(lock);
	thread->pt_spinlocks--;
	pthread__assert(thread->pt_spinlocks >= 0);
	PTHREADD_ADD(PTHREADD_SPINUNLOCKS);
#else
	pthread__simple_unlock(lock);
#endif
}

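/*
 * Usage sketch (illustrative only): callers bracket a short critical
 * section with the pair defined above, e.g.
 *
 *	static pthread_spin_t interlock;
 *
 *	pthread_lockinit(&interlock);
 *	pthread_spinlock(&interlock);
 *	... touch shared state ...
 *	pthread_spinunlock(&interlock);
 */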
/*
 * Initialize the locking primitives.  On uniprocessors, we always
 * use Restartable Atomic Sequences if they are available.  Otherwise,
 * we fall back on machine-dependent atomic lock primitives.
 */
void
pthread__lockprim_init(void)
{
	char *p;

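	/* Allow the spin count to be tuned from the environment. */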
	if ((p = getenv("PTHREAD_NSPINS")) != NULL)
		pthread__nspins = atoi(p);
	else if (pthread__concurrency != 1)
		pthread__nspins = PTHREAD__NSPINS;
	else
		pthread__nspins = 1;

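	/* Multiprocessor: the atomic primitives are required. */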
	if (pthread__concurrency != 1) {
		pthread__lock_ops = &pthread__lock_ops_atomic;
		return;
	}

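	/*
	 * Uniprocessor: try to register the RAS with the kernel and use
	 * the RAS primitives; fall back on the atomic primitives if the
	 * registration fails.
	 */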
	if (rasctl(RAS_ADDR(pthread__lock), RAS_SIZE(pthread__lock),
	    RAS_INSTALL) != 0) {
		pthread__lock_ops = &pthread__lock_ops_atomic;
		return;
	}
}

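/* Initialize an internal spinlock using the selected primitives. */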
void
pthread_lockinit(pthread_spin_t *lock)
{

	pthread__simple_lock_init(lock);
}