pthread_lock.c revision 1.1.2.2
/* Copyright */

#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdlib.h>
#include <ucontext.h>
#include <sys/queue.h>

#include "pthread.h"
#include "pthread_int.h"

/* How many times to try before checking whether we've been continued. */
#define NSPINS 20	/* XXX arbitrary */

static int nspins = NSPINS;

void
pthread_lockinit(pt_spin_t *lock)
{

	__cpu_simple_lock_init(lock);
}
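
/*
 * Illustrative usage (a sketch, not part of the original source;
 * pthread__self() from pthread_int.h is assumed here to name the
 * calling thread):
 *
 *	static pt_spin_t lock;
 *
 *	pthread_lockinit(&lock);
 *	...
 *	pthread_spinlock(pthread__self(), &lock);
 *	... short critical section; must not block ...
 *	pthread_spinunlock(pthread__self(), &lock);
 */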

void
pthread_spinlock(pthread_t thread, pt_spin_t *lock)
{
	int count, ret;

	count = nspins;
	++thread->pt_spinlocks;

	do {
		while (((ret = __cpu_simple_lock_try(lock)) == 0) && --count)
			;

		if (ret == 1)
			break;

		--thread->pt_spinlocks;

		/* We may be preempted while spinning. If so, we will
		 * be restarted here if thread->pt_spinlocks is
		 * nonzero, which can happen if:
		 * (a) we just got the lock, or
		 * (b) we haven't yet decremented the lock count.
		 * If we reach this point, (b) applies. Therefore,
		 * check whether we are being continued, and if so,
		 * bail out.
		 * (In case (a), we let the code finish and bail out
		 * in pthread_spinunlock() instead.)
		 */
		if (thread->pt_next != NULL) {
			PTHREADD_ADD(PTHREADD_SPINPREEMPT);
			pthread__switch(thread, thread->pt_next, 0);
		}
		/* Try again. */
		count = nspins;
		++thread->pt_spinlocks;
	} while (/*CONSTCOND*/1);

	PTHREADD_ADD(PTHREADD_SPINLOCKS);
	/* Got it! We're out of here. */
}
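
/*
 * Illustrative timeline of the preemption window handled above (a
 * sketch restating the comment in pthread_spinlock(), not part of the
 * original source):
 *
 *	++thread->pt_spinlocks;		<- preempted while spinning:
 *					   continued without the lock,
 *					   case (b); bail out above
 *	__cpu_simple_lock_try(lock);	<- succeeds
 *					<- preempted here: continued
 *					   holding the lock, case (a);
 *					   the switch is deferred to
 *					   pthread_spinunlock()
 */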


int
pthread_spintrylock(pthread_t thread, pt_spin_t *lock)
{
	int ret;

	++thread->pt_spinlocks;

	ret = __cpu_simple_lock_try(lock);

	if (ret == 0) {
		--thread->pt_spinlocks;
		/* See above. */
		if (thread->pt_next != NULL) {
			PTHREADD_ADD(PTHREADD_SPINPREEMPT);
			pthread__switch(thread, thread->pt_next, 0);
		}
	}

	return ret;
}
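
/*
 * Illustrative use of the trylock path (a sketch, not part of the
 * original source; pthread__self() from pthread_int.h is assumed here
 * to name the calling thread). __cpu_simple_lock_try() returns 1 on
 * success and 0 on failure, and that value is passed through:
 *
 *	pthread_t self = pthread__self();
 *
 *	if (pthread_spintrylock(self, &lock)) {
 *		... critical section ...
 *		pthread_spinunlock(self, &lock);
 *	} else {
 *		... lock was busy; fall back or retry ...
 *	}
 */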


void
pthread_spinunlock(pthread_t thread, pt_spin_t *lock)
{
	__cpu_simple_unlock(lock);
	--thread->pt_spinlocks;

	PTHREADD_ADD(PTHREADD_SPINUNLOCKS);

	/* If we were preempted while holding a spinlock, the
	 * scheduler will notice this and continue us. To be good
	 * citizens, we must now get out of here if that was our
	 * last spinlock.
	 * XXX when will we ever have more than one?
	 */

	if ((thread->pt_spinlocks == 0) && (thread->pt_next != NULL)) {
		PTHREADD_ADD(PTHREADD_SPINPREEMPT);
		pthread__switch(thread, thread->pt_next, 0);
	}
}