/*	$NetBSD: pthread_lock.c,v 1.6 2003/03/08 08:03:35 lukem Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_lock.c,v 1.6 2003/03/08 08:03:35 lukem Exp $");

#include <sys/param.h>
#include <sys/ras.h>
#include <sys/sysctl.h>

#include <errno.h>
#include <unistd.h>

#include "pthread.h"
#include "pthread_int.h"

#ifdef PTHREAD_SPIN_DEBUG_PRINT
#define SDPRINTF(x) DPRINTF(x)
#else
#define SDPRINTF(x)
#endif

/* How many times to try before checking whether we've been continued. */
#define NSPINS 1	/* no point in actually spinning until MP works */

static int nspins = NSPINS;

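/*
 * These labels bracket the restartable atomic sequence in
 * pthread__ras_simple_lock_try() below; pthread__lockprim_init()
 * registers that address range with the kernel via rasctl(2).
 */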
extern void pthread__lock_ras_start(void), pthread__lock_ras_end(void);

static void
pthread__ras_simple_lock_init(__cpu_simple_lock_t *alp)
{

	*alp = __SIMPLELOCK_UNLOCKED;
}

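/*
 * Lock-try via a restartable atomic sequence: on a uniprocessor, a
 * thread preempted anywhere between the two labels is resumed at
 * pthread__lock_ras_start, so the load and store below behave
 * atomically without needing hardware atomic instructions.
 */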
static int
pthread__ras_simple_lock_try(__cpu_simple_lock_t *alp)
{
	__cpu_simple_lock_t old;

	/* This is the atomic sequence. */
	__asm __volatile("pthread__lock_ras_start:");
	old = *alp;
	*alp = __SIMPLELOCK_LOCKED;
	__asm __volatile("pthread__lock_ras_end:");

	return (old == __SIMPLELOCK_UNLOCKED);
}

static void
pthread__ras_simple_unlock(__cpu_simple_lock_t *alp)
{

	*alp = __SIMPLELOCK_UNLOCKED;
}

static const struct pthread_lock_ops pthread__lock_ops_ras = {
	pthread__ras_simple_lock_init,
	pthread__ras_simple_lock_try,
	pthread__ras_simple_unlock,
};

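/*
 * Fallback primitives wrapping the machine-dependent __cpu_simple_lock
 * operations; these are used when a RAS cannot be installed, e.g. on a
 * multiprocessor.
 */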
static void
pthread__atomic_simple_lock_init(__cpu_simple_lock_t *alp)
{

	__cpu_simple_lock_init(alp);
}

static int
pthread__atomic_simple_lock_try(__cpu_simple_lock_t *alp)
{

	return (__cpu_simple_lock_try(alp));
}

static void
pthread__atomic_simple_unlock(__cpu_simple_lock_t *alp)
{

	__cpu_simple_unlock(alp);
}

static const struct pthread_lock_ops pthread__lock_ops_atomic = {
	pthread__atomic_simple_lock_init,
	pthread__atomic_simple_lock_try,
	pthread__atomic_simple_unlock,
};

/*
 * We default to pointing to the RAS primitives; locks may be needed
 * early, before main() starts, but that is safe, since no other
 * threads will yet be active in the process, so atomicity is not
 * required.
 */
const struct pthread_lock_ops *pthread__lock_ops = &pthread__lock_ops_ras;

/*
 * Initialize the locking primitives.  On uniprocessors, we always
 * use Restartable Atomic Sequences if they are available.  Otherwise,
 * we fall back onto machine-dependent atomic lock primitives.
 */
void
pthread__lockprim_init(void)
{
	int mib[2];
	size_t len;
	int ncpu;

	mib[0] = CTL_HW;
	mib[1] = HW_NCPU;

	len = sizeof(ncpu);
	sysctl(mib, 2, &ncpu, &len, NULL, 0);

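	/*
	 * Only a uniprocessor can use the RAS primitives; try to
	 * register the sequence with the kernel via rasctl(2), and
	 * fall back to the atomic primitives if that fails.
	 */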
	if (ncpu == 1 && rasctl((void *)pthread__lock_ras_start,
	    (size_t)((uintptr_t)pthread__lock_ras_end -
	             (uintptr_t)pthread__lock_ras_start),
	    RAS_INSTALL) == 0) {
		pthread__lock_ops = &pthread__lock_ops_ras;
		return;
	}

	pthread__lock_ops = &pthread__lock_ops_atomic;
}

void
pthread_lockinit(pthread_spin_t *lock)
{

	pthread__simple_lock_init(lock);
}

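/*
 * Acquire an internal spinlock. The thread's pt_spinlocks count is
 * kept up to date so the spin-preemption machinery knows a lock is
 * held; if we find we have been continued after a preemption
 * (pt_next != NULL) while still waiting, drop the count and switch to
 * pt_next before retrying.
 */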
void
pthread_spinlock(pthread_t thread, pthread_spin_t *lock)
{
	int count, ret;

	count = nspins;
	SDPRINTF(("(pthread_spinlock %p) incrementing spinlock %p (count %d)\n",
		thread, lock, thread->pt_spinlocks));
#ifdef PTHREAD_SPIN_DEBUG
	pthread__assert(thread->pt_spinlocks >= 0);
#endif
	++thread->pt_spinlocks;

	do {
		while (((ret = pthread__simple_lock_try(lock)) == 0) && --count)
			;

		if (ret == 1)
			break;

	SDPRINTF(("(pthread_spinlock %p) decrementing spinlock %p (count %d)\n",
		thread, lock, thread->pt_spinlocks));
		--thread->pt_spinlocks;

		/*
		 * We may be preempted while spinning. If so, we will
		 * be restarted here if thread->pt_spinlocks is
		 * nonzero, which can happen if either:
		 * (a) we just got the lock, or
		 * (b) we haven't yet decremented the lock count.
		 * If we're at this point, (b) applies. Therefore,
		 * check if we're being continued, and if so, bail.
		 * (In case (a), we should let the code finish and
		 * we will bail out in pthread_spinunlock().)
		 */
		if (thread->pt_next != NULL) {
			PTHREADD_ADD(PTHREADD_SPINPREEMPT);
			pthread__switch(thread, thread->pt_next);
		}
		/* try again */
		count = nspins;
	SDPRINTF(("(pthread_spinlock %p) incrementing spinlock from %d\n",
		thread, thread->pt_spinlocks));
		++thread->pt_spinlocks;
	} while (/*CONSTCOND*/1);

	PTHREADD_ADD(PTHREADD_SPINLOCKS);
	/* Got it! We're out of here. */
}


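/*
 * One-shot variant of pthread_spinlock(): make a single attempt on the
 * lock, and on failure undo the pt_spinlocks bump and, if we were
 * preempted, switch to pt_next as above.
 */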
int
pthread_spintrylock(pthread_t thread, pthread_spin_t *lock)
{
	int ret;

	SDPRINTF(("(pthread_spintrylock %p) incrementing spinlock from %d\n",
		thread, thread->pt_spinlocks));
	++thread->pt_spinlocks;

	ret = pthread__simple_lock_try(lock);

	if (ret == 0) {
	SDPRINTF(("(pthread_spintrylock %p) decrementing spinlock from %d\n",
		thread, thread->pt_spinlocks));
		--thread->pt_spinlocks;
		/* See above. */
		if (thread->pt_next != NULL) {
			PTHREADD_ADD(PTHREADD_SPINPREEMPT);
			pthread__switch(thread, thread->pt_next);
		}
	}

	return ret;
}


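/*
 * Release an internal spinlock and drop the pt_spinlocks count,
 * switching to pt_next if we were preempted while this (last) lock
 * was held.
 */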
void
pthread_spinunlock(pthread_t thread, pthread_spin_t *lock)
{

	pthread__simple_unlock(lock);
	SDPRINTF(("(pthread_spinunlock %p) decrementing spinlock %p (count %d)\n",
		thread, lock, thread->pt_spinlocks));
	--thread->pt_spinlocks;
#ifdef PTHREAD_SPIN_DEBUG
	pthread__assert(thread->pt_spinlocks >= 0);
#endif
	PTHREADD_ADD(PTHREADD_SPINUNLOCKS);

	/*
	 * If we were preempted while holding a spinlock, the
	 * scheduler will notice this and continue us. To be good
	 * citizens, we must now get out of here if that was our
	 * last spinlock.
	 * XXX when will we ever have more than one?
	 */

	if ((thread->pt_spinlocks == 0) && (thread->pt_next != NULL)) {
		PTHREADD_ADD(PTHREADD_SPINPREEMPT);
		pthread__switch(thread, thread->pt_next);
	}
}


/*
 * Public (POSIX-specified) spinlocks.
 * These don't interact with the spin-preemption code, nor do they
 * perform any adaptive sleeping.
 */

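/*
 * Minimal usage sketch for this interface (illustrative only, not part
 * of the library):
 *
 *	pthread_spinlock_t lock;
 *
 *	pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
 *	pthread_spin_lock(&lock);
 *	... critical section ...
 *	pthread_spin_unlock(&lock);
 *	pthread_spin_destroy(&lock);
 */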
int
pthread_spin_init(pthread_spinlock_t *lock, int pshared)
{

#ifdef ERRORCHECK
	if ((lock == NULL) ||
	    ((pshared != PTHREAD_PROCESS_PRIVATE) &&
		(pshared != PTHREAD_PROCESS_SHARED)))
		return EINVAL;
#endif
	lock->pts_magic = _PT_SPINLOCK_MAGIC;
	/*
	 * We don't actually use the pshared flag for anything;
	 * cpu simple locks have all the process-shared properties
	 * that we want anyway.
	 */
	lock->pts_flags = pshared;
	pthread_lockinit(&lock->pts_spin);

	return 0;
}

int
pthread_spin_destroy(pthread_spinlock_t *lock)
{

#ifdef ERRORCHECK
	if ((lock == NULL) || (lock->pts_magic != _PT_SPINLOCK_MAGIC))
		return EINVAL;

	if (lock->pts_spin != __SIMPLELOCK_UNLOCKED)
		return EBUSY;
#endif

	lock->pts_magic = _PT_SPINLOCK_DEAD;

	return 0;
}

int
pthread_spin_lock(pthread_spinlock_t *lock)
{

#ifdef ERRORCHECK
	if ((lock == NULL) || (lock->pts_magic != _PT_SPINLOCK_MAGIC))
		return EINVAL;
#endif

	while (pthread__simple_lock_try(&lock->pts_spin) == 0)
		/* spin */ ;

	return 0;
}

int
pthread_spin_trylock(pthread_spinlock_t *lock)
{

#ifdef ERRORCHECK
	if ((lock == NULL) || (lock->pts_magic != _PT_SPINLOCK_MAGIC))
		return EINVAL;
#endif

	if (pthread__simple_lock_try(&lock->pts_spin) == 0)
		return EBUSY;

	return 0;
}

int
pthread_spin_unlock(pthread_spinlock_t *lock)
{

#ifdef ERRORCHECK
	if ((lock == NULL) || (lock->pts_magic != _PT_SPINLOCK_MAGIC))
		return EINVAL;
#endif

	pthread__simple_unlock(&lock->pts_spin);

	return 0;
}