/* $NetBSD: pthread_lock.c,v 1.1.2.9 2002/10/16 18:34:40 nathanw Exp $ */

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <assert.h>
#include <errno.h>

#include "pthread.h"
#include "pthread_int.h"

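/*
 * SDPRINTF() is debug tracing for the scheduler-activations code.
 * It is off by default; note the unconditional #undef below, which
 * must be removed to enable it.
 */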
#undef PTHREAD_SA_DEBUG

#ifdef PTHREAD_SA_DEBUG
#define SDPRINTF(x) DPRINTF(x)
#else
#define SDPRINTF(x)
#endif

/* How many times to try before checking whether we've been continued. */
#define NSPINS 20	/* XXX arbitrary */

static int nspins = NSPINS;

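/* Initialize a libpthread-internal spinlock. */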
void
pthread_lockinit(pthread_spin_t *lock)
{

	__cpu_simple_lock_init(lock);
}

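/*
 * Acquire an internal spinlock on behalf of `thread'.  The count of
 * held spinlocks (pt_spinlocks) is incremented *before* the
 * acquisition attempt, so that if the scheduler preempts us anywhere
 * in here it can see that we were busy with a spinlock and must be
 * continued rather than rescheduled.
 */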
void
pthread_spinlock(pthread_t thread, pthread_spin_t *lock)
{
	int count, ret;

	count = nspins;
	SDPRINTF(("(pthread_spinlock %p) incrementing spinlock from %d\n",
	    thread, thread->pt_spinlocks));
	++thread->pt_spinlocks;

	do {
		while (((ret = __cpu_simple_lock_try(lock)) == 0) && --count)
			;

		if (ret == 1)
			break;

		/*
		 * As long as this is uniprocessor, encountering a
		 * locked spinlock is a bug.
		 */
		assert(ret == 1);

		SDPRINTF(("(pthread_spinlock %p) decrementing spinlock from %d\n",
		    thread, thread->pt_spinlocks));
		--thread->pt_spinlocks;

		/*
		 * We may be preempted while spinning. If so, we will
		 * be restarted here if thread->pt_spinlocks is
		 * nonzero, which can happen if:
		 * a) we just got the lock
		 * b) we haven't yet decremented the lock count.
		 * If we're at this point, (b) applies. Therefore,
		 * check if we're being continued, and if so, bail.
		 * (in case (a), we should let the code finish and
		 * we will bail out in pthread_spinunlock().)
		 */
		if ((thread->pt_next != NULL) &&
		    (thread->pt_type != PT_THREAD_UPCALL)) {
			PTHREADD_ADD(PTHREADD_SPINPREEMPT);
			pthread__switch(thread, thread->pt_next);
		}
		/* try again */
		count = nspins;
		SDPRINTF(("(pthread_spinlock %p) incrementing spinlock from %d\n",
		    thread, thread->pt_spinlocks));
		++thread->pt_spinlocks;
	} while (/*CONSTCOND*/1);

	PTHREADD_ADD(PTHREADD_SPINLOCKS);
	/* Got it! We're out of here. */
}


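/*
 * Try once to acquire an internal spinlock.  Returns nonzero on
 * success and zero on failure, matching __cpu_simple_lock_try().
 * The spinlock count is maintained the same way as in
 * pthread_spinlock() above.
 */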
int
pthread_spintrylock(pthread_t thread, pthread_spin_t *lock)
{
	int ret;

	SDPRINTF(("(pthread_spintrylock %p) incrementing spinlock from %d\n",
	    thread, thread->pt_spinlocks));
	++thread->pt_spinlocks;

	ret = __cpu_simple_lock_try(lock);

	if (ret == 0) {
		SDPRINTF(("(pthread_spintrylock %p) decrementing spinlock from %d\n",
		    thread, thread->pt_spinlocks));
		--thread->pt_spinlocks;
		/* See the comment in pthread_spinlock() above. */
		if ((thread->pt_next != NULL) &&
		    (thread->pt_type != PT_THREAD_UPCALL)) {
			PTHREADD_ADD(PTHREADD_SPINPREEMPT);
			pthread__switch(thread, thread->pt_next);
		}
	}

	return ret;
}


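/*
 * Release an internal spinlock and decrement the holder's spinlock
 * count; if the scheduler was continuing us only because we held a
 * lock, yield to the next thread (see the comment below).
 */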
void
pthread_spinunlock(pthread_t thread, pthread_spin_t *lock)
{

	__cpu_simple_unlock(lock);
	SDPRINTF(("(pthread_spinunlock %p) decrementing spinlock from %d\n",
	    thread, thread->pt_spinlocks));
	--thread->pt_spinlocks;

	PTHREADD_ADD(PTHREADD_SPINUNLOCKS);

	/*
	 * If we were preempted while holding a spinlock, the
	 * scheduler will notice this and continue us. To be good
	 * citizens, we must now get out of here if that was our
	 * last spinlock.
	 * XXX when will we ever have more than one?
	 */

	if ((thread->pt_spinlocks == 0) && (thread->pt_next != NULL) &&
	    (thread->pt_type != PT_THREAD_UPCALL)) {
		PTHREADD_ADD(PTHREADD_SPINPREEMPT);
		pthread__switch(thread, thread->pt_next);
	}
}


/*
 * Public (POSIX-specified) spinlocks.
 * These don't interact with the spin-preemption code, nor do they
 * perform any adaptive sleeping.
 */

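/*
 * Illustrative usage sketch (not part of this file; error checking
 * elided):
 *
 *	pthread_spinlock_t l;
 *
 *	pthread_spin_init(&l, PTHREAD_PROCESS_PRIVATE);
 *	pthread_spin_lock(&l);
 *	... short critical section ...
 *	pthread_spin_unlock(&l);
 *	pthread_spin_destroy(&l);
 */
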
int
pthread_spin_init(pthread_spinlock_t *lock, int pshared)
{

#ifdef ERRORCHECK
	if ((lock == NULL) ||
	    ((pshared != PTHREAD_PROCESS_PRIVATE) &&
	     (pshared != PTHREAD_PROCESS_SHARED)))
		return EINVAL;
#endif
	lock->pts_magic = _PT_SPINLOCK_MAGIC;
	/*
	 * We don't actually use the pshared flag for anything;
	 * cpu simple locks have all the process-shared properties
	 * that we want anyway.
	 */
	lock->pts_flags = pshared;
	pthread_lockinit(&lock->pts_spin);

	return 0;
}

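/*
 * Destroying a lock just invalidates its magic number; the lock word
 * itself is left alone.  With ERRORCHECK, destroying a locked or
 * uninitialized lock is reported.
 */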
int
pthread_spin_destroy(pthread_spinlock_t *lock)
{

#ifdef ERRORCHECK
	if ((lock == NULL) || (lock->pts_magic != _PT_SPINLOCK_MAGIC))
		return EINVAL;

	if (lock->pts_spin != __SIMPLELOCK_UNLOCKED)
		return EBUSY;
#endif

	lock->pts_magic = _PT_SPINLOCK_DEAD;

	return 0;
}

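/*
 * Acquisition spins uninterruptibly in __cpu_simple_lock(); as noted
 * above, there is no spin-preemption handling or adaptive sleeping
 * here.
 */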
int
pthread_spin_lock(pthread_spinlock_t *lock)
{

#ifdef ERRORCHECK
	if ((lock == NULL) || (lock->pts_magic != _PT_SPINLOCK_MAGIC))
		return EINVAL;
#endif

	__cpu_simple_lock(&lock->pts_spin);

	return 0;
}

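/* A single acquisition attempt; returns EBUSY if the lock is held. */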
int
pthread_spin_trylock(pthread_spinlock_t *lock)
{

#ifdef ERRORCHECK
	if ((lock == NULL) || (lock->pts_magic != _PT_SPINLOCK_MAGIC))
		return EINVAL;
#endif

	if (__cpu_simple_lock_try(&lock->pts_spin) == 0)
		return EBUSY;

	return 0;
}

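/*
 * Unconditional release; there is no check that the caller actually
 * holds the lock.
 */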
int
pthread_spin_unlock(pthread_spinlock_t *lock)
{

#ifdef ERRORCHECK
	if ((lock == NULL) || (lock->pts_magic != _PT_SPINLOCK_MAGIC))
		return EINVAL;
#endif

	__cpu_simple_unlock(&lock->pts_spin);

	return 0;
}