/*	$NetBSD: pthread_lock.c,v 1.20.2.2 2007/08/15 08:25:10 skrll Exp $	*/
2
3 /*-
4 * Copyright (c) 2001, 2006, 2007 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Nathan J. Williams and Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 #include <sys/cdefs.h>
40 __RCSID("$NetBSD: pthread_lock.c,v 1.20.2.2 2007/08/15 08:25:10 skrll Exp $");
41
42 #include <sys/types.h>
43 #include <sys/lock.h>
44 #include <sys/ras.h>
45
46 #include <errno.h>
47 #include <unistd.h>
48 #include <stdio.h>
49
50 #include "pthread.h"
51 #include "pthread_int.h"
52
/* Spinlock debug tracing; compiles away unless PTHREAD_SPIN_DEBUG_PRINT. */
#ifdef PTHREAD_SPIN_DEBUG_PRINT
#define	SDPRINTF(x)	DPRINTF(x)
#else
#define	SDPRINTF(x)
#endif

/* This does not belong here. */
/*
 * smt_pause(): spin-wait hint.  On x86, "rep; nop" encodes the PAUSE
 * instruction, which relaxes the pipeline and reduces power/bus traffic
 * inside busy-wait loops; the "memory" clobber keeps the compiler from
 * caching the lock word across iterations.  Elsewhere it is a no-op.
 */
#if defined(i386) || defined(__x86_64__)
#define	smt_pause()	__asm __volatile("rep; nop" ::: "memory")
#else
#define	smt_pause()	/* nothing */
#endif

/* Spin count before yielding; defined elsewhere in libpthread. */
extern int pthread__nspins;

/*
 * Nonzero when machine-dependent atomic primitives must be used
 * (multiprocessor, or RAS unavailable); zero selects the RAS path.
 * Set once in pthread__lockprim_init(), read-only afterwards.
 */
static int pthread__atomic;

/* Labels delimiting the restartable atomic sequence used below. */
RAS_DECL(pthread__lock);
70
71 void
72 pthread__simple_lock_init(__cpu_simple_lock_t *alp)
73 {
74
75 if (pthread__atomic) {
76 __cpu_simple_lock_init(alp);
77 return;
78 }
79
80 __cpu_simple_lock_clear(alp);
81 }
82
/*
 * pthread__simple_lock_try: one nonblocking attempt to take *alp.
 * Returns nonzero iff the lock was free and is now held.
 *
 * On the RAS path, the load/test/store between RAS_START and RAS_END
 * forms a restartable atomic sequence: if the thread is preempted
 * inside it, the kernel restarts execution at RAS_START, making the
 * test-and-set atomic on a uniprocessor.  The statement order here is
 * load-bearing; do not reorder or restructure.
 */
int
pthread__simple_lock_try(__cpu_simple_lock_t *alp)
{
	int unlocked;

	if (pthread__atomic)
		return __cpu_simple_lock_try(alp);

	RAS_START(pthread__lock);
	unlocked = __SIMPLELOCK_UNLOCKED_P(alp);
	__cpu_simple_lock_set(alp);
	RAS_END(pthread__lock);

	return unlocked;
}
98
99 inline void
100 pthread__simple_unlock(__cpu_simple_lock_t *alp)
101 {
102
103 if (pthread__atomic) {
104 __cpu_simple_unlock(alp);
105 return;
106 }
107
108 __cpu_simple_lock_clear(alp);
109 }
110
111 /*
112 * Initialize the locking primitives. On uniprocessors, we always
113 * use Restartable Atomic Sequences if they are available. Otherwise,
114 * we fall back onto machine-dependent atomic lock primitives.
115 */
116 void
117 pthread__lockprim_init(int ncpu)
118 {
119
120 if (ncpu != 1) {
121 pthread__atomic = 1;
122 return;
123 }
124
125 if (rasctl(RAS_ADDR(pthread__lock), RAS_SIZE(pthread__lock),
126 RAS_INSTALL) != 0) {
127 pthread__atomic = 1;
128 return;
129 }
130 }
131
132 void
133 pthread_lockinit(pthread_spin_t *lock)
134 {
135
136 pthread__simple_lock_init(lock);
137 }
138
/*
 * pthread_spinlock: acquire an internal spinlock for `thread'.
 * Spins up to pthread__nspins attempts, then yields the CPU and
 * retries.  pt_spinlocks is raised before each attempt and dropped
 * around the yield, so the thread never appears to hold a spinlock
 * while voluntarily off the CPU.
 *
 * NOTE(review): assumes pthread__nspins >= 1; with 0 the inner
 * `--count' would wrap negative and spin far longer than intended —
 * confirm against where pthread__nspins is set.
 */
void
pthread_spinlock(pthread_t thread, pthread_spin_t *lock)
{
	int count, ret;

	count = pthread__nspins;
	SDPRINTF(("(pthread_spinlock %p) spinlock %p (count %d)\n",
	    thread, lock, thread->pt_spinlocks));
#ifdef PTHREAD_SPIN_DEBUG
	pthread__assert(thread->pt_spinlocks >= 0);
#endif

	/* Raise the hold count before the fast-path attempt. */
	thread->pt_spinlocks++;
	if (__predict_true(pthread__simple_lock_try(lock))) {
		PTHREADD_ADD(PTHREADD_SPINLOCKS);
		return;
	}

	do {
		/* Busy-wait, relaxing the pipeline between attempts. */
		while ((ret = pthread__simple_lock_try(lock)) == 0 &&
		    --count) {
			smt_pause();
		}

		if (ret == 1)
			break;

		SDPRINTF(("(pthread_spinlock %p) retrying spinlock %p "
		    "(count %d)\n", thread, lock,
		    thread->pt_spinlocks));
		/* Drop the hold count while we are not attempting. */
		thread->pt_spinlocks--;

		/* XXXLWP far from ideal */
		sched_yield();
		count = pthread__nspins;
		thread->pt_spinlocks++;
	} while (/*CONSTCOND*/ 1);

	PTHREADD_ADD(PTHREADD_SPINLOCKS);
}
179
180 int
181 pthread_spintrylock(pthread_t thread, pthread_spin_t *lock)
182 {
183 int ret;
184
185 SDPRINTF(("(pthread_spintrylock %p) spinlock %p (count %d)\n",
186 thread, lock, thread->pt_spinlocks));
187
188 thread->pt_spinlocks++;
189 ret = pthread__simple_lock_try(lock);
190 if (!ret)
191 thread->pt_spinlocks--;
192
193 return ret;
194 }
195
/*
 * pthread_spinunlock: release an internal spinlock and lower the
 * caller's pt_spinlocks hold count.  The lock word is cleared before
 * the count is decremented; keep that order.
 */
void
pthread_spinunlock(pthread_t thread, pthread_spin_t *lock)
{

	SDPRINTF(("(pthread_spinunlock %p) spinlock %p (count %d)\n",
	    thread, lock, thread->pt_spinlocks));

	pthread__simple_unlock(lock);
	thread->pt_spinlocks--;
#ifdef PTHREAD_SPIN_DEBUG
	pthread__assert(thread->pt_spinlocks >= 0);
#endif
	PTHREADD_ADD(PTHREADD_SPINUNLOCKS);
}
210
211
212 /*
213 * Public (POSIX-specified) spinlocks.
214 */
215 int
216 pthread_spin_init(pthread_spinlock_t *lock, int pshared)
217 {
218
219 #ifdef ERRORCHECK
220 if (lock == NULL || (pshared != PTHREAD_PROCESS_PRIVATE &&
221 pshared != PTHREAD_PROCESS_SHARED))
222 return EINVAL;
223 #endif
224 lock->pts_magic = _PT_SPINLOCK_MAGIC;
225
226 /*
227 * We don't actually use the pshared flag for anything;
228 * CPU simple locks have all the process-shared properties
229 * that we want anyway.
230 */
231 lock->pts_flags = pshared;
232 pthread_lockinit(&lock->pts_spin);
233
234 return 0;
235 }
236
237 int
238 pthread_spin_destroy(pthread_spinlock_t *lock)
239 {
240
241 #ifdef ERRORCHECK
242 if (lock == NULL || lock->pts_magic != _PT_SPINLOCK_MAGIC)
243 return EINVAL;
244 if (!__SIMPLELOCK_UNLOCKED_P(&lock->pts_spin))
245 return EBUSY;
246 #endif
247
248 lock->pts_magic = _PT_SPINLOCK_DEAD;
249
250 return 0;
251 }
252
253 int
254 pthread_spin_lock(pthread_spinlock_t *lock)
255 {
256
257 #ifdef ERRORCHECK
258 if (lock == NULL || lock->pts_magic != _PT_SPINLOCK_MAGIC)
259 return EINVAL;
260 #endif
261
262 while (pthread__simple_lock_try(&lock->pts_spin) == 0) {
263 smt_pause();
264 }
265
266 return 0;
267 }
268
269 int
270 pthread_spin_trylock(pthread_spinlock_t *lock)
271 {
272
273 #ifdef ERRORCHECK
274 if (lock == NULL || lock->pts_magic != _PT_SPINLOCK_MAGIC)
275 return EINVAL;
276 #endif
277
278 if (pthread__simple_lock_try(&lock->pts_spin) == 0)
279 return EBUSY;
280
281 return 0;
282 }
283
284 int
285 pthread_spin_unlock(pthread_spinlock_t *lock)
286 {
287
288 #ifdef ERRORCHECK
289 if (lock == NULL || lock->pts_magic != _PT_SPINLOCK_MAGIC)
290 return EINVAL;
291 #endif
292
293 pthread__simple_unlock(&lock->pts_spin);
294
295 return 0;
296 }
297