pthread_lock.c revision 1.2
/*	$NetBSD: pthread_lock.c,v 1.2 2003/01/18 10:34:15 thorpej Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/ras.h>
#include <sys/sysctl.h>

#include <assert.h>
#include <errno.h>
#include <signal.h>	/* for kill() and SIGABRT in the spin-debug checks */
#include <unistd.h>

#include "pthread.h"
#include "pthread_int.h"

#undef PTHREAD_SPIN_DEBUG

#ifdef PTHREAD_SPIN_DEBUG
#define SDPRINTF(x) DPRINTF(x)
#else
#define SDPRINTF(x)
#endif

/* How many times to try before checking whether we've been continued. */
#define NSPINS 1	/* no point in actually spinning until MP works */

static int nspins = NSPINS;

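/*
 * These symbols have no C definition: they are the asm labels emitted
 * inside pthread__ras_simple_lock_try() below, and they delimit the
 * instruction range that pthread__lockprim_init() registers with
 * rasctl(2).
 */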
extern char pthread__lock_ras_start[], pthread__lock_ras_end[];

static void
pthread__ras_simple_lock_init(__cpu_simple_lock_t *alp)
{

	*alp = __SIMPLELOCK_UNLOCKED;
}

static int
pthread__ras_simple_lock_try(__cpu_simple_lock_t *alp)
{
	__cpu_simple_lock_t old;

	/* This is the atomic sequence. */
	__asm __volatile("pthread__lock_ras_start:");
	old = *alp;
	*alp = __SIMPLELOCK_LOCKED;
	__asm __volatile("pthread__lock_ras_end:");

	return (old == __SIMPLELOCK_UNLOCKED);
}
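
/*
 * Note that the load/store pair above is not atomic by itself: it
 * relies on the kernel's restartable-atomic-sequence (RAS) support.
 * If the thread is preempted anywhere between pthread__lock_ras_start
 * and pthread__lock_ras_end, the kernel resets the program counter to
 * the start of the sequence, so the whole read-modify-write runs over
 * again.  This is only sufficient on uniprocessors, where preemption
 * is the sole source of interleaving.
 */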

static void
pthread__ras_simple_unlock(__cpu_simple_lock_t *alp)
{

	*alp = __SIMPLELOCK_UNLOCKED;
}

static const struct pthread_lock_ops pthread__lock_ops_ras = {
	pthread__ras_simple_lock_init,
	pthread__ras_simple_lock_try,
	pthread__ras_simple_unlock,
};

static void
pthread__atomic_simple_lock_init(__cpu_simple_lock_t *alp)
{

	__cpu_simple_lock_init(alp);
}

static int
pthread__atomic_simple_lock_try(__cpu_simple_lock_t *alp)
{

	return (__cpu_simple_lock_try(alp));
}

static void
pthread__atomic_simple_unlock(__cpu_simple_lock_t *alp)
{

	__cpu_simple_unlock(alp);
}

static const struct pthread_lock_ops pthread__lock_ops_atomic = {
	pthread__atomic_simple_lock_init,
	pthread__atomic_simple_lock_try,
	pthread__atomic_simple_unlock,
};
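
/*
 * The __cpu_simple_lock_*() calls above are the machine-dependent
 * atomic locking primitives (typically a test-and-set or equivalent),
 * so unlike the RAS flavor they remain correct on multiprocessors.
 */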

/*
 * We default to pointing at the RAS primitives; we may need to use
 * locks before pthread__lockprim_init() has run (i.e., before main()
 * starts).  This is safe: no other threads will be active in the
 * process yet, so atomicity is not required.
 */
const struct pthread_lock_ops *pthread__lock_ops = &pthread__lock_ops_ras;

/*
 * Initialize the locking primitives.  On uniprocessors, we always
 * use Restartable Atomic Sequences if they are available.  Otherwise,
 * we fall back onto machine-dependent atomic lock primitives.
 */
void
pthread__lockprim_init(void)
{
	int mib[2];
	size_t len;
	int ncpu;

	mib[0] = CTL_HW;
	mib[1] = HW_NCPU;

	len = sizeof(ncpu);
	if (sysctl(mib, 2, &ncpu, &len, NULL, 0) == -1)
		ncpu = 0;	/* unknown; err on the side of atomic ops */

	if (ncpu == 1 &&
	    rasctl(pthread__lock_ras_start,
	    	   (caddr_t)pthread__lock_ras_end -
	    	   (caddr_t)pthread__lock_ras_start, RAS_INSTALL) == 0) {
		pthread__lock_ops = &pthread__lock_ops_ras;
		return;
	}

	pthread__lock_ops = &pthread__lock_ops_atomic;
}
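
/*
 * The pthread__simple_lock_init/_try/_unlock() calls used throughout
 * this file dispatch through the pthread__lock_ops vector selected
 * above, so every spinlock user automatically picks up whichever
 * flavor of primitive was installed.
 */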

void
pthread_lockinit(pthread_spin_t *lock)
{

	pthread__simple_lock_init(lock);
}

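/*
 * pt_spinlocks counts the spinlocks this thread holds or is in the
 * middle of taking.  It is incremented *before* the lock is tried so
 * that the scheduler/upcall code can tell that the thread was
 * preempted inside a lock operation and continue it, rather than
 * leave the lock held by a descheduled thread.
 */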
void
pthread_spinlock(pthread_t thread, pthread_spin_t *lock)
{
	int count, ret;

	count = nspins;
	SDPRINTF(("(pthread_spinlock %p) incrementing spinlock %p (count %d)\n",
		thread, lock, thread->pt_spinlocks));
#ifdef PTHREAD_SPIN_DEBUG
	if (thread->pt_spinlocks < 0) {
		(void)kill(getpid(), SIGABRT);
		_exit(1);
	}
#endif
	++thread->pt_spinlocks;

	do {
		while (((ret = pthread__simple_lock_try(lock)) == 0) && --count)
			;

		if (ret == 1)
			break;

		SDPRINTF(("(pthread_spinlock %p) decrementing spinlock %p (count %d)\n",
			thread, lock, thread->pt_spinlocks));
		--thread->pt_spinlocks;

		/*
		 * We may be preempted while spinning.  If so, we will
		 * be restarted here if thread->pt_spinlocks is
		 * nonzero, which can happen if:
		 * a) we just got the lock, or
		 * b) we haven't yet decremented the lock count.
		 * If we're at this point, (b) applies.  Therefore,
		 * check if we're being continued, and if so, bail.
		 * (In case (a), we should let the code finish; we
		 * will bail out in pthread_spinunlock().)
		 */
		if (thread->pt_next != NULL) {
			PTHREADD_ADD(PTHREADD_SPINPREEMPT);
			pthread__switch(thread, thread->pt_next);
		}
		/* try again */
		count = nspins;
		SDPRINTF(("(pthread_spinlock %p) incrementing spinlock from %d\n",
			thread, thread->pt_spinlocks));
		++thread->pt_spinlocks;
	} while (/*CONSTCOND*/1);

	PTHREADD_ADD(PTHREADD_SPINLOCKS);
	/* Got it!  We're out of here. */
}


int
pthread_spintrylock(pthread_t thread, pthread_spin_t *lock)
{
	int ret;

	SDPRINTF(("(pthread_spintrylock %p) incrementing spinlock from %d\n",
		thread, thread->pt_spinlocks));
	++thread->pt_spinlocks;

	ret = pthread__simple_lock_try(lock);

	if (ret == 0) {
		SDPRINTF(("(pthread_spintrylock %p) decrementing spinlock from %d\n",
			thread, thread->pt_spinlocks));
		--thread->pt_spinlocks;
		/* See the comment in pthread_spinlock(). */
		if (thread->pt_next != NULL) {
			PTHREADD_ADD(PTHREADD_SPINPREEMPT);
			pthread__switch(thread, thread->pt_next);
		}
	}

	return ret;
}


void
pthread_spinunlock(pthread_t thread, pthread_spin_t *lock)
{

	pthread__simple_unlock(lock);
	SDPRINTF(("(pthread_spinunlock %p) decrementing spinlock %p (count %d)\n",
		thread, lock, thread->pt_spinlocks));
	--thread->pt_spinlocks;
#ifdef PTHREAD_SPIN_DEBUG
	if (thread->pt_spinlocks < 0) {
		(void)kill(getpid(), SIGABRT);
		_exit(1);
	}
#endif
	PTHREADD_ADD(PTHREADD_SPINUNLOCKS);

	/*
	 * If we were preempted while holding a spinlock, the
	 * scheduler will notice this and continue us.  To be good
	 * citizens, we must now get out of here if that was our
	 * last spinlock.
	 * XXX when will we ever have more than one?
	 */

	if ((thread->pt_spinlocks == 0) && (thread->pt_next != NULL)) {
		PTHREADD_ADD(PTHREADD_SPINPREEMPT);
		pthread__switch(thread, thread->pt_next);
	}
}


/*
 * Public (POSIX-specified) spinlocks.
 * These don't interact with the spin-preemption code, nor do they
 * perform any adaptive sleeping.
 */

int
pthread_spin_init(pthread_spinlock_t *lock, int pshared)
{

#ifdef ERRORCHECK
	if ((lock == NULL) ||
	    ((pshared != PTHREAD_PROCESS_PRIVATE) &&
	    (pshared != PTHREAD_PROCESS_SHARED)))
		return EINVAL;
#endif
	lock->pts_magic = _PT_SPINLOCK_MAGIC;
	/*
	 * We don't actually use the pshared flag for anything;
	 * CPU simple locks have all the process-shared properties
	 * that we want anyway.
	 */
	lock->pts_flags = pshared;
	pthread_lockinit(&lock->pts_spin);

	return 0;
}

int
pthread_spin_destroy(pthread_spinlock_t *lock)
{

#ifdef ERRORCHECK
	if ((lock == NULL) || (lock->pts_magic != _PT_SPINLOCK_MAGIC))
		return EINVAL;

	if (lock->pts_spin != __SIMPLELOCK_UNLOCKED)
		return EBUSY;
#endif

	lock->pts_magic = _PT_SPINLOCK_DEAD;

	return 0;
}

int
pthread_spin_lock(pthread_spinlock_t *lock)
{

#ifdef ERRORCHECK
	if ((lock == NULL) || (lock->pts_magic != _PT_SPINLOCK_MAGIC))
		return EINVAL;
#endif

	while (pthread__simple_lock_try(&lock->pts_spin) == 0)
		/* spin */ ;

	return 0;
}

int
pthread_spin_trylock(pthread_spinlock_t *lock)
{

#ifdef ERRORCHECK
	if ((lock == NULL) || (lock->pts_magic != _PT_SPINLOCK_MAGIC))
		return EINVAL;
#endif

	if (pthread__simple_lock_try(&lock->pts_spin) == 0)
		return EBUSY;

	return 0;
}

int
pthread_spin_unlock(pthread_spinlock_t *lock)
{

#ifdef ERRORCHECK
	if ((lock == NULL) || (lock->pts_magic != _PT_SPINLOCK_MAGIC))
		return EINVAL;
#endif

	pthread__simple_unlock(&lock->pts_spin);

	return 0;
}
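
/*
 * A minimal usage sketch for the POSIX spinlock interface above.  It
 * is compiled only when PTHREAD_SPIN_EXAMPLE is defined; the names
 * example_lock, example_counter and example_worker are illustrative
 * and not part of this file's interface.
 */
#ifdef PTHREAD_SPIN_EXAMPLE
static pthread_spinlock_t example_lock;
static int example_counter;

/* ARGSUSED */
static void *
example_worker(void *arg)
{

	/*
	 * Assumes pthread_spin_init(&example_lock,
	 * PTHREAD_PROCESS_PRIVATE) was called once beforehand.
	 */
	pthread_spin_lock(&example_lock);
	example_counter++;	/* keep the critical section short */
	pthread_spin_unlock(&example_lock);

	return NULL;
}
#endif /* PTHREAD_SPIN_EXAMPLE */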