/*	$NetBSD: pthread_lock.c,v 1.21 2007/08/04 13:37:49 ad Exp $	*/

/*-
 * Copyright (c) 2001, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
38
39 #include <sys/cdefs.h>
40 __RCSID("$NetBSD: pthread_lock.c,v 1.21 2007/08/04 13:37:49 ad Exp $");
41
42 #include <sys/types.h>
43 #include <sys/lock.h>
44 #include <sys/ras.h>
45
46 #include <errno.h>
47 #include <unistd.h>
48 #include <stdio.h>
49
50 #include "pthread.h"
51 #include "pthread_int.h"
52
53 #ifdef PTHREAD_SPIN_DEBUG_PRINT
54 #define SDPRINTF(x) DPRINTF(x)
55 #else
56 #define SDPRINTF(x)
57 #endif
58
59 static int pthread__atomic;
60
61 RAS_DECL(pthread__lock);
62
63 void
64 pthread__simple_lock_init(__cpu_simple_lock_t *alp)
65 {
66
67 if (pthread__atomic) {
68 __cpu_simple_lock_init(alp);
69 return;
70 }
71
72 *alp = __SIMPLELOCK_UNLOCKED;
73 }
74
/*
 * Attempt to acquire a simple lock without blocking; returns non-zero
 * on success (the lock was previously unlocked).
 *
 * In RAS mode (pthread__atomic == 0) the load/store pair below is made
 * effectively atomic on a uniprocessor by a Restartable Atomic
 * Sequence: pthread__lockprim_init() registered the region between
 * RAS_START and RAS_END with the kernel, which restarts any thread
 * preempted inside it back at RAS_START.  Do not reorder or insert
 * code inside the RAS region.
 */
int
pthread__simple_lock_try(__cpu_simple_lock_t *alp)
{
	__cpu_simple_lock_t old;

	/* Multiprocessor: use the machine-dependent atomic op. */
	if (pthread__atomic)
		return __cpu_simple_lock_try(alp);

	RAS_START(pthread__lock);
	old = *alp;
	*alp = __SIMPLELOCK_LOCKED;
	RAS_END(pthread__lock);

	return old == __SIMPLELOCK_UNLOCKED;
}
90
91 inline void
92 pthread__simple_unlock(__cpu_simple_lock_t *alp)
93 {
94
95 if (pthread__atomic) {
96 __cpu_simple_unlock(alp);
97 return;
98 }
99
100 *alp = __SIMPLELOCK_UNLOCKED;
101 }
102
103 /*
104 * Initialize the locking primitives. On uniprocessors, we always
105 * use Restartable Atomic Sequences if they are available. Otherwise,
106 * we fall back onto machine-dependent atomic lock primitives.
107 */
108 void
109 pthread__lockprim_init(int ncpu)
110 {
111
112 if (ncpu != 1) {
113 pthread__atomic = 1;
114 return;
115 }
116
117 if (rasctl(RAS_ADDR(pthread__lock), RAS_SIZE(pthread__lock),
118 RAS_INSTALL) != 0) {
119 pthread__atomic = 1;
120 return;
121 }
122 }
123
/*
 * Initialize a libpthread-internal spinlock to the unlocked state.
 */
void
pthread_lockinit(pthread_spin_t *lock)
{

	pthread__simple_lock_init(lock);
}
130
/*
 * Acquire a libpthread-internal spinlock for the given thread,
 * spinning -- and eventually yielding the CPU -- until it is held.
 * The thread's pt_spinlocks counter tracks how many spinlocks it
 * currently holds (or is in the middle of acquiring).
 */
void
pthread_spinlock(pthread_t thread, pthread_spin_t *lock)
{
	int count;

	SDPRINTF(("(pthread_spinlock %p) spinlock %p (count %d)\n",
	    thread, lock, thread->pt_spinlocks));
#ifdef PTHREAD_SPIN_DEBUG
	pthread__assert(thread->pt_spinlocks >= 0);
#endif

	/*
	 * NOTE(review): the counter is raised before the acquisition
	 * attempt and lowered only across the yield below -- presumably
	 * so the rest of the library never sees this thread holding a
	 * lock it has not accounted for; confirm against callers.
	 */
	thread->pt_spinlocks++;
	if (__predict_true(pthread__simple_lock_try(lock))) {
		/* Uncontended fast path. */
		PTHREADD_ADD(PTHREADD_SPINLOCKS);
		return;
	}

	do {
		/*
		 * Contended: spin on a plain read (cheaper than
		 * repeated lock_try operations) for up to
		 * pthread__nspins iterations.
		 */
		count = pthread__nspins;
		while (*lock == __SIMPLELOCK_LOCKED && --count > 0)
			pthread__smt_pause();
		if (count > 0) {
			/* Lock looked free; race to take it. */
			if (pthread__simple_lock_try(lock))
				break;
			continue;
		}

		SDPRINTF(("(pthread_spinlock %p) retrying spinlock %p "
		    "(count %d)\n", thread, lock,
		    thread->pt_spinlocks));
		/* Spun out: drop the count while we give up the CPU. */
		thread->pt_spinlocks--;

		/* XXXLWP far from ideal */
		sched_yield();
		thread->pt_spinlocks++;
	} while (/*CONSTCOND*/ 1);

	PTHREADD_ADD(PTHREADD_SPINLOCKS);
}
170
171 int
172 pthread_spintrylock(pthread_t thread, pthread_spin_t *lock)
173 {
174 int ret;
175
176 SDPRINTF(("(pthread_spintrylock %p) spinlock %p (count %d)\n",
177 thread, lock, thread->pt_spinlocks));
178
179 thread->pt_spinlocks++;
180 ret = pthread__simple_lock_try(lock);
181 if (!ret)
182 thread->pt_spinlocks--;
183
184 return ret;
185 }
186
/*
 * Release a libpthread-internal spinlock and lower the thread's
 * held-spinlock count.  NOTE(review): the lock is released before the
 * count is decremented -- the ordering looks deliberate, so keep it.
 */
void
pthread_spinunlock(pthread_t thread, pthread_spin_t *lock)
{

	SDPRINTF(("(pthread_spinunlock %p) spinlock %p (count %d)\n",
	    thread, lock, thread->pt_spinlocks));

	pthread__simple_unlock(lock);
	thread->pt_spinlocks--;
#ifdef PTHREAD_SPIN_DEBUG
	/* A negative count means an unlock without a matching lock. */
	pthread__assert(thread->pt_spinlocks >= 0);
#endif
	PTHREADD_ADD(PTHREADD_SPINUNLOCKS);
}
201
202
203 /*
204 * Public (POSIX-specified) spinlocks.
205 */
206 int
207 pthread_spin_init(pthread_spinlock_t *lock, int pshared)
208 {
209
210 #ifdef ERRORCHECK
211 if (lock == NULL || (pshared != PTHREAD_PROCESS_PRIVATE &&
212 pshared != PTHREAD_PROCESS_SHARED))
213 return EINVAL;
214 #endif
215 lock->pts_magic = _PT_SPINLOCK_MAGIC;
216
217 /*
218 * We don't actually use the pshared flag for anything;
219 * CPU simple locks have all the process-shared properties
220 * that we want anyway.
221 */
222 lock->pts_flags = pshared;
223 pthread_lockinit(&lock->pts_spin);
224
225 return 0;
226 }
227
228 int
229 pthread_spin_destroy(pthread_spinlock_t *lock)
230 {
231
232 #ifdef ERRORCHECK
233 if (lock == NULL || lock->pts_magic != _PT_SPINLOCK_MAGIC)
234 return EINVAL;
235 if (lock->pts_spin != __SIMPLELOCK_UNLOCKED)
236 return EBUSY;
237 #endif
238
239 lock->pts_magic = _PT_SPINLOCK_DEAD;
240
241 return 0;
242 }
243
244 int
245 pthread_spin_lock(pthread_spinlock_t *lock)
246 {
247
248 #ifdef ERRORCHECK
249 if (lock == NULL || lock->pts_magic != _PT_SPINLOCK_MAGIC)
250 return EINVAL;
251 #endif
252
253 while (pthread__simple_lock_try(&lock->pts_spin) == 0) {
254 pthread__smt_pause();
255 }
256
257 return 0;
258 }
259
260 int
261 pthread_spin_trylock(pthread_spinlock_t *lock)
262 {
263
264 #ifdef ERRORCHECK
265 if (lock == NULL || lock->pts_magic != _PT_SPINLOCK_MAGIC)
266 return EINVAL;
267 #endif
268
269 if (pthread__simple_lock_try(&lock->pts_spin) == 0)
270 return EBUSY;
271
272 return 0;
273 }
274
/*
 * Release a POSIX spinlock.  Returns 0 on success, or EINVAL (with
 * ERRORCHECK) for a NULL or uninitialized lock.  No ownership check
 * is performed.
 */
int
pthread_spin_unlock(pthread_spinlock_t *lock)
{

#ifdef ERRORCHECK
	if (lock == NULL || lock->pts_magic != _PT_SPINLOCK_MAGIC)
		return EINVAL;
#endif

	pthread__simple_unlock(&lock->pts_spin);

	return 0;
}
288