/*	$NetBSD: pthread_lock.c,v 1.24.2.2 2008/01/09 01:36:36 matt Exp $	*/

/*-
 * Copyright (c) 2001, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * libpthread internal spinlock routines.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_lock.c,v 1.24.2.2 2008/01/09 01:36:36 matt Exp $");

#include <sys/types.h>
#include <sys/ras.h>

#include <machine/lock.h>

#include <errno.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>

#include "pthread.h"
#include "pthread_int.h"

/* How many times to try acquiring spin locks on MP systems. */
#define	PTHREAD__NSPINS         64

RAS_DECL(pthread__lock);

static void	pthread__spinlock_slow(pthread_spin_t *);

#ifdef PTHREAD__ASM_RASOPS

void pthread__ras_simple_lock_init(__cpu_simple_lock_t *);
int pthread__ras_simple_lock_try(__cpu_simple_lock_t *);
void pthread__ras_simple_unlock(__cpu_simple_lock_t *);

#else

static void
pthread__ras_simple_lock_init(__cpu_simple_lock_t *alp)
{

	__cpu_simple_lock_clear(alp);
}

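/*
 * The unlocked test of the lock word and the store that sets it below
 * form a restartable atomic sequence: if the thread is preempted while
 * its program counter lies between RAS_START and RAS_END, the kernel
 * rolls it back to RAS_START, so the test-and-set can never be observed
 * half done.  This only works with a single CPU, which is why this ops
 * table is kept only when pthread__concurrency == 1 and rasctl()
 * succeeds in pthread__lockprim_init() below.
 */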
static int
pthread__ras_simple_lock_try(__cpu_simple_lock_t *alp)
{
	int locked;

	RAS_START(pthread__lock);
	locked = __SIMPLELOCK_LOCKED_P(alp);
	__cpu_simple_lock_set(alp);
	RAS_END(pthread__lock);

	return !locked;
}

static void
pthread__ras_simple_unlock(__cpu_simple_lock_t *alp)
{

	__cpu_simple_lock_clear(alp);
}

#endif /* PTHREAD__ASM_RASOPS */

static const struct pthread_lock_ops pthread__lock_ops_ras = {
	pthread__ras_simple_lock_init,
	pthread__ras_simple_lock_try,
	pthread__ras_simple_unlock,
	pthread__spinlock_slow,
};

static void
pthread__atomic_simple_lock_init(__cpu_simple_lock_t *alp)
{

	__cpu_simple_lock_init(alp);
}

static int
pthread__atomic_simple_lock_try(__cpu_simple_lock_t *alp)
{

	return (__cpu_simple_lock_try(alp));
}

static void
pthread__atomic_simple_unlock(__cpu_simple_lock_t *alp)
{

	__cpu_simple_unlock(alp);
}

static const struct pthread_lock_ops pthread__lock_ops_atomic = {
	pthread__atomic_simple_lock_init,
	pthread__atomic_simple_lock_try,
	pthread__atomic_simple_unlock,
	pthread__spinlock_slow,
};

/*
 * We default to pointing to the RAS primitives; locks may be needed
 * early, before main() starts.  That is safe, because no other threads
 * will yet be active in the process, so atomicity is not required.
 */
const struct pthread_lock_ops *pthread__lock_ops = &pthread__lock_ops_ras;
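
/*
 * Illustrative sketch, not part of the original file: the rest of
 * libpthread reaches these primitives through the pthread__lock_ops
 * pointer (the plo_init and plo_unlock member names, and any wrapper
 * macros in pthread_int.h, are assumptions here; only plo_try is
 * referenced by name in this file).  A lock/unlock cycle through
 * whichever table is installed would look roughly like this:
 */
#if 0
static void
example_lock_cycle(__cpu_simple_lock_t *l)
{

	(*pthread__lock_ops->plo_init)(l);	/* initialise to unlocked */
	while (!(*pthread__lock_ops->plo_try)(l))
		;				/* spin until the try succeeds */
	(*pthread__lock_ops->plo_unlock)(l);	/* release */
}
#endif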

/*
 * Prevent this routine from being inlined.  The common case is no
 * contention and it's better to not burden the instruction decoder.
 */
static void
pthread__spinlock_slow(pthread_spin_t *lock)
{
	pthread_t self;
	int count;

	self = pthread__self();

	do {
		count = pthread__nspins;
		while (__SIMPLELOCK_LOCKED_P(lock) && --count > 0)
			pthread__smt_pause();
		if (count > 0) {
			if ((*self->pt_lockops.plo_try)(lock))
				break;
			continue;
		}
		sched_yield();
	} while (/*CONSTCOND*/ 1);
}
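
/*
 * Hedged sketch, not from this file: the fast path is expected to live
 * in pthread_int.h rather than here.  A caller makes one cheap inline
 * plo_try attempt and falls into this out-of-line slow path, via the
 * last slot of the ops table, only on contention (the plo_lock member
 * name is an assumption; this file only fills that slot by position).
 */
#if 0
static inline void
example_spinlock(pthread_t self, pthread_spin_t *lock)
{

	if (__predict_false((*self->pt_lockops.plo_try)(lock) == 0))
		(*self->pt_lockops.plo_lock)(lock);	/* pthread__spinlock_slow */
}
#endif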

/*
 * Initialize the locking primitives.  On uniprocessors, we always
 * use Restartable Atomic Sequences if they are available.  Otherwise,
 * we fall back onto machine-dependent atomic lock primitives.
 */
void
pthread__lockprim_init(void)
{
	char *p;

	if ((p = pthread__getenv("PTHREAD_NSPINS")) != NULL)
		pthread__nspins = atoi(p);
	else if (pthread__concurrency != 1)
		pthread__nspins = PTHREAD__NSPINS;
	else
		pthread__nspins = 1;

	if (pthread__concurrency != 1) {
		pthread__lock_ops = &pthread__lock_ops_atomic;
		return;
	}

	if (rasctl(RAS_ADDR(pthread__lock), RAS_SIZE(pthread__lock),
	    RAS_INSTALL) != 0) {
		pthread__lock_ops = &pthread__lock_ops_atomic;
		return;
	}
}
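
/*
 * Tuning note: pthread__lockprim_init() is assumed to run once during
 * library start-up, so PTHREAD_NSPINS only takes effect if it is already
 * in the environment when the process starts, e.g. (hypothetical program
 * name):
 *
 *	$ env PTHREAD_NSPINS=1000 ./myprog
 *
 * Without the override, the spin count defaults to PTHREAD__NSPINS (64)
 * on multiprocessors and to 1 on uniprocessors, where spinning cannot
 * help because the lock holder is not running while we spin.
 */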

void
pthread_lockinit(pthread_spin_t *lock)
{

	pthread__simple_lock_init(lock);
}