pthread_lock.c revision 1.31
/*	$NetBSD: pthread_lock.c,v 1.31 2007/10/04 21:04:32 ad Exp $	*/

/*-
 * Copyright (c) 2001, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * libpthread internal spinlock routines.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_lock.c,v 1.31 2007/10/04 21:04:32 ad Exp $");

#include <sys/types.h>
#include <sys/lock.h>
#include <sys/ras.h>

#include <errno.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>

#include "pthread.h"
#include "pthread_int.h"

/* How many times to try acquiring spin locks on MP systems. */
#define	PTHREAD__NSPINS		64
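/*
 * The spin count may also be set at run time with the PTHREAD_NSPINS
 * environment variable; see pthread__lockprim_init() below.
 */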

static void pthread_spinlock_slow(pthread_spin_t *);

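/*
 * RAS_DECL() declares the start/end labels of the restartable atomic
 * sequence named "pthread__lock", used by RAS_START()/RAS_END() below.
 * The region is registered with the kernel via rasctl(2) in
 * pthread__lockprim_init().
 */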
RAS_DECL(pthread__lock);

int
pthread__simple_locked_p(__cpu_simple_lock_t *alp)
{
	return __SIMPLELOCK_LOCKED_P(alp);
}

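/*
 * Uniprocessor lock primitives, implemented as a restartable atomic
 * sequence (RAS).  When PTHREAD__ASM_RASOPS is defined, the
 * machine-dependent code supplies assembly versions of these routines;
 * otherwise the C versions below are used.
 */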
#ifdef PTHREAD__ASM_RASOPS

void pthread__ras_simple_lock_init(__cpu_simple_lock_t *);
int pthread__ras_simple_lock_try(__cpu_simple_lock_t *);
void pthread__ras_simple_unlock(__cpu_simple_lock_t *);

#else

static void
pthread__ras_simple_lock_init(__cpu_simple_lock_t *alp)
{

	__cpu_simple_lock_clear(alp);
}

static int
pthread__ras_simple_lock_try(__cpu_simple_lock_t *alp)
{
	int locked;

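	/*
	 * If the thread is preempted anywhere between RAS_START() and
	 * RAS_END(), the kernel restarts it at RAS_START(), so the load
	 * and store below behave as a single atomic test-and-set with
	 * respect to other threads on a uniprocessor.
	 */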
	RAS_START(pthread__lock);
	locked = __SIMPLELOCK_LOCKED_P(alp);
	__cpu_simple_lock_set(alp);
	RAS_END(pthread__lock);

	return !locked;
}

static void
pthread__ras_simple_unlock(__cpu_simple_lock_t *alp)
{

	__cpu_simple_lock_clear(alp);
}

#endif /* PTHREAD__ASM_RASOPS */

static const struct pthread_lock_ops pthread__lock_ops_ras = {
	pthread__ras_simple_lock_init,
	pthread__ras_simple_lock_try,
	pthread__ras_simple_unlock,
};

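/*
 * Multiprocessor lock primitives: thin wrappers around the
 * machine-dependent __cpu_simple_lock operations (atomic test-and-set),
 * which are safe across CPUs.
 */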
static void
pthread__atomic_simple_lock_init(__cpu_simple_lock_t *alp)
{

	__cpu_simple_lock_init(alp);
}

static int
pthread__atomic_simple_lock_try(__cpu_simple_lock_t *alp)
{

	return (__cpu_simple_lock_try(alp));
}

static void
pthread__atomic_simple_unlock(__cpu_simple_lock_t *alp)
{

	__cpu_simple_unlock(alp);
}

static const struct pthread_lock_ops pthread__lock_ops_atomic = {
	pthread__atomic_simple_lock_init,
	pthread__atomic_simple_lock_try,
	pthread__atomic_simple_unlock,
};

/*
 * We default to pointing to the RAS primitives; we may need to use
 * locks early, before main() starts.  This is safe, since no other
 * threads will be active for the process, so atomicity will not be
 * required.
 */
const struct pthread_lock_ops *pthread__lock_ops = &pthread__lock_ops_ras;
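/*
 * pthread__lockprim_init() below switches to the atomic ops once the
 * level of concurrency is known, or if the RAS region cannot be
 * installed.
 */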

void
pthread_spinlock(pthread_spin_t *lock)
{
	if (__predict_true(pthread__simple_lock_try(lock)))
		return;

	pthread_spinlock_slow(lock);
}

/*
 * Prevent this routine from being inlined.  The common case is no
 * contention and it's better to not burden the instruction decoder.
 */
#if __GNUC_PREREQ__(3, 0)
__attribute ((noinline))
#endif
static void
pthread_spinlock_slow(pthread_spin_t *lock)
{
	int count;

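	/*
	 * Contended path: spin while the lock appears held, issuing an
	 * SMT pause between probes.  After pthread__nspins unsuccessful
	 * probes, yield the CPU with sched_yield() and start over.
	 */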
	do {
		count = pthread__nspins;
		while (pthread__simple_locked_p(lock) && --count > 0)
			pthread__smt_pause();
		if (count > 0) {
			if (pthread__simple_lock_try(lock))
				break;
			continue;
		}
		sched_yield();
	} while (/*CONSTCOND*/ 1);
}

int
pthread_spintrylock(pthread_spin_t *lock)
{
	return pthread__simple_lock_try(lock);
}

void
pthread_spinunlock(pthread_spin_t *lock)
{
	pthread__simple_unlock(lock);
}
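
/*
 * Illustrative usage (not from the original source): within libpthread
 * these primitives protect short internal critical sections, e.g.:
 *
 *	pthread_spin_t lock;
 *
 *	pthread_lockinit(&lock);
 *	pthread_spinlock(&lock);
 *	... short critical section, no blocking calls ...
 *	pthread_spinunlock(&lock);
 */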

/*
 * Initialize the locking primitives.  On uniprocessors, we always
 * use Restartable Atomic Sequences if they are available.  Otherwise,
 * we fall back onto machine-dependent atomic lock primitives.
 */
void
pthread__lockprim_init(void)
{
	char *p;

	if ((p = getenv("PTHREAD_NSPINS")) != NULL)
		pthread__nspins = atoi(p);
	else if (pthread__concurrency != 1)
		pthread__nspins = PTHREAD__NSPINS;
	else
		pthread__nspins = 1;

	if (pthread__concurrency != 1) {
		pthread__lock_ops = &pthread__lock_ops_atomic;
		return;
	}

	if (rasctl(RAS_ADDR(pthread__lock), RAS_SIZE(pthread__lock),
	    RAS_INSTALL) != 0) {
		pthread__lock_ops = &pthread__lock_ops_atomic;
		return;
	}
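
	/* RAS region installed; keep the default RAS-based lock ops. */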
}

void
pthread_lockinit(pthread_spin_t *lock)
{

	pthread__simple_lock_init(lock);
}