pthread_lock.c revision 1.1.2.2
/* Copyright */

#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdlib.h>
#include <ucontext.h>
#include <sys/queue.h>

#include "pthread.h"
#include "pthread_int.h"

/* How many times to try before checking whether we've been continued. */
#define NSPINS 20	/* XXX arbitrary */

static int nspins = NSPINS;
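
/*
 * Note (not part of the original file): the spin count lives in a
 * variable rather than being used as a bare constant, presumably so
 * that it can be tuned; nothing in this file modifies it at run time.
 */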

void
pthread_lockinit(pt_spin_t *lock)
{

	__cpu_simple_lock_init(lock);
}
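
/*
 * Illustrative sketch, not part of the original file: a pt_spin_t is
 * expected to go through pthread_lockinit() once before any of the
 * lock/trylock/unlock routines below are applied to it, e.g.:
 *
 *	static pt_spin_t example_lock;
 *
 *	pthread_lockinit(&example_lock);
 */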

void
pthread_spinlock(pthread_t thread, pt_spin_t *lock)
{
	int count, ret;

	count = nspins;
	++thread->pt_spinlocks;

	do {
		while (((ret = __cpu_simple_lock_try(lock)) == 0) && --count)
			;

		if (ret == 1)
			break;

		--thread->pt_spinlocks;

		/* We may be preempted while spinning.  If so, we will
		 * be restarted here as long as thread->pt_spinlocks is
		 * nonzero, which can happen because either:
		 * a) we just got the lock, or
		 * b) we had not yet decremented the lock count.
		 * If we reach this point, (b) applies, so check
		 * whether we are being continued and, if so, bail.
		 * (In case (a), we let the code finish and bail out
		 * in pthread_spinunlock().)
		 */
		if (thread->pt_next != NULL) {
			PTHREADD_ADD(PTHREADD_SPINPREEMPT);
			pthread__switch(thread, thread->pt_next, 0);
		}
		/* try again */
		count = nspins;
		++thread->pt_spinlocks;
	} while (/*CONSTCOND*/1);

	PTHREADD_ADD(PTHREADD_SPINLOCKS);
	/* Got it! We're out of here. */
}
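
/*
 * Illustrative sketch, not part of the original file: a caller brackets
 * a short critical section with pthread_spinlock()/pthread_spinunlock(),
 * passing its own thread handle.  Assuming the pthread__self() accessor
 * from pthread_int.h and the example_lock above:
 *
 *	pthread_t self = pthread__self();
 *
 *	pthread_spinlock(self, &example_lock);
 *	... touch shared libpthread state ...
 *	pthread_spinunlock(self, &example_lock);
 */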


int
pthread_spintrylock(pthread_t thread, pt_spin_t *lock)
{
	int ret;

	++thread->pt_spinlocks;

	ret = __cpu_simple_lock_try(lock);

	if (ret == 0) {
		--thread->pt_spinlocks;
		/* See the preemption comment in pthread_spinlock() above. */
		if (thread->pt_next != NULL) {
			PTHREADD_ADD(PTHREADD_SPINPREEMPT);
			pthread__switch(thread, thread->pt_next, 0);
		}
	}

	return ret;
}
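
/*
 * Illustrative sketch, not part of the original file: the trylock
 * variant returns nonzero on success and 0 if the lock was busy, so a
 * caller that must not spin could do:
 *
 *	if (pthread_spintrylock(self, &example_lock)) {
 *		... critical section ...
 *		pthread_spinunlock(self, &example_lock);
 *	} else {
 *		... lock was busy; back off and retry later ...
 *	}
 */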


void
pthread_spinunlock(pthread_t thread, pt_spin_t *lock)
{
	__cpu_simple_unlock(lock);
	--thread->pt_spinlocks;

	PTHREADD_ADD(PTHREADD_SPINUNLOCKS);

	/* If we were preempted while holding a spinlock, the
	 * scheduler will notice this and continue us. To be good
	 * citizens, we must now get out of here if that was our
	 * last spinlock.
	 * XXX when will we ever have more than one?
	 */

	if ((thread->pt_spinlocks == 0) && (thread->pt_next != NULL)) {
		PTHREADD_ADD(PTHREADD_SPINPREEMPT);
		pthread__switch(thread, thread->pt_next, 0);
	}
}