/* Copyright */

#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdlib.h>
#include <ucontext.h>
#include <sys/queue.h>

#include "pthread.h"
#include "pthread_int.h"

/* How many times to try before checking whether we've been continued. */
#define NSPINS 20	/* XXX arbitrary */

static int nspins = NSPINS;

void
pthread_spinlock(pthread_t thread, pt_spin_t *lock)
{
	int count, ret;

	count = nspins;
	++thread->pt_spinlocks;

	do {
		while (((ret = __cpu_simple_lock_try(lock)) == 0) && --count)
			;

		if (ret == 1)
			break;

		--thread->pt_spinlocks;
		/*
		 * We may be preempted while spinning. If so, we will
		 * be restarted here if thread->pt_spinlocks is
		 * nonzero, which can happen if:
		 * a) we just got the lock, or
		 * b) we haven't yet decremented the lock count.
		 * If we're at this point, (b) applies. Therefore,
		 * check whether we're being continued, and if so, bail.
		 * (In case (a), we should let the code finish, and
		 * we will bail out in pthread_spinunlock().)
		 */
		if (thread->pt_next != NULL) {
			PTHREADD_ADD(PTHREADD_SPINPREEMPT);
			pthread__switch(thread, thread->pt_next, 0);
		}
		/* try again */
		count = nspins;
		++thread->pt_spinlocks;
	} while (/*CONSTCOND*/1);

	PTHREADD_ADD(PTHREADD_SPINLOCKS);
	/* Got it! We're out of here. */
}
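
/*
 * Example (a minimal sketch, not part of the original file): the
 * caller-side pattern these primitives expect.  It assumes that
 * pthread__self() returns the calling thread, as elsewhere in
 * libpthread, and that pt_spin_t locks start out as
 * __SIMPLELOCK_UNLOCKED; treat both names as assumptions here.
 */
#if 0	/* illustration only; not compiled */
static pt_spin_t example_lock = __SIMPLELOCK_UNLOCKED;
static int example_counter;

static void
example_increment(void)
{
	pthread_t self = pthread__self();

	/* Spin (possibly yielding to a continuation) until acquired. */
	pthread_spinlock(self, &example_lock);
	example_counter++;	/* keep the critical section short */
	/* Release; may switch away if we were preempted while holding it. */
	pthread_spinunlock(self, &example_lock);
}
#endif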


int
pthread_spintrylock(pthread_t thread, pt_spin_t *lock)
{
	int ret;

	++thread->pt_spinlocks;

	ret = __cpu_simple_lock_try(lock);

	if (ret == 0) {
		--thread->pt_spinlocks;
		/* See above. */
		if (thread->pt_next != NULL) {
			PTHREADD_ADD(PTHREADD_SPINPREEMPT);
			pthread__switch(thread, thread->pt_next, 0);
		}
	}

	return ret;
}
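
/*
 * Example (sketch): a try-then-back-off pattern built on
 * pthread_spintrylock().  A zero return means the lock was not
 * acquired.  The helper name and pthread__self() are assumptions
 * for illustration, not part of this file.
 */
#if 0	/* illustration only; not compiled */
static int
example_try_lock(pt_spin_t *lock)
{
	pthread_t self = pthread__self();

	if (pthread_spintrylock(self, lock) == 0)
		return EBUSY;	/* caller may retry or do other work */
	/* ... critical section ... */
	pthread_spinunlock(self, lock);
	return 0;
}
#endif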


void
pthread_spinunlock(pthread_t thread, pt_spin_t *lock)
{
	__cpu_simple_unlock(lock);
	--thread->pt_spinlocks;

	PTHREADD_ADD(PTHREADD_SPINUNLOCKS);

	/*
	 * If we were preempted while holding a spinlock, the
	 * scheduler will notice this and continue us.  To be good
	 * citizens, we must now get out of here if that was our
	 * last spinlock.
	 * XXX when will we ever have more than one?
	 */

	if ((thread->pt_spinlocks == 0) && (thread->pt_next != NULL)) {
		PTHREADD_ADD(PTHREADD_SPINPREEMPT);
		pthread__switch(thread, thread->pt_next, 0);
	}
}