/*	$NetBSD: pthread_rwlock.c,v 1.1.2.4 2003/01/09 19:27:52 thorpej Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <assert.h>
#include <errno.h>
#include <sys/cdefs.h>

#include "pthread.h"
#include "pthread_int.h"

static void pthread_rwlock__callback(void *);

__strong_alias(__libc_rwlock_init,pthread_rwlock_init)
__strong_alias(__libc_rwlock_rdlock,pthread_rwlock_rdlock)
__strong_alias(__libc_rwlock_wrlock,pthread_rwlock_wrlock)
__strong_alias(__libc_rwlock_tryrdlock,pthread_rwlock_tryrdlock)
__strong_alias(__libc_rwlock_trywrlock,pthread_rwlock_trywrlock)
__strong_alias(__libc_rwlock_unlock,pthread_rwlock_unlock)
__strong_alias(__libc_rwlock_destroy,pthread_rwlock_destroy)

int
pthread_rwlock_init(pthread_rwlock_t *rwlock,
	    const pthread_rwlockattr_t *attr)
{
#ifdef ERRORCHECK
	if ((rwlock == NULL) ||
	    (attr && (attr->ptra_magic != _PT_RWLOCKATTR_MAGIC)))
		return EINVAL;
#endif
	rwlock->ptr_magic = _PT_RWLOCK_MAGIC;
	pthread_lockinit(&rwlock->ptr_interlock);
	PTQ_INIT(&rwlock->ptr_rblocked);
	PTQ_INIT(&rwlock->ptr_wblocked);
	rwlock->ptr_nreaders = 0;
	rwlock->ptr_writer = NULL;

	return 0;
}


int
pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
{
#ifdef ERRORCHECK
	if ((rwlock == NULL) ||
	    (rwlock->ptr_magic != _PT_RWLOCK_MAGIC) ||
	    (!PTQ_EMPTY(&rwlock->ptr_rblocked)) ||
	    (!PTQ_EMPTY(&rwlock->ptr_wblocked)) ||
	    (rwlock->ptr_nreaders != 0) ||
	    (rwlock->ptr_writer != NULL))
		return EINVAL;
#endif
	rwlock->ptr_magic = _PT_RWLOCK_DEAD;

	return 0;
}
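
/*
 * Illustrative sketch, not part of the library: how application code
 * would typically create and tear down a rwlock with the routines above.
 * The PTHREAD_RWLOCK_EXAMPLE guard is hypothetical and exists only so
 * these examples are never compiled into libpthread.
 */
#ifdef PTHREAD_RWLOCK_EXAMPLE
static int
example_rwlock_lifecycle(void)
{
	pthread_rwlock_t lock;
	int error;

	/* Dynamic initialization with default attributes. */
	error = pthread_rwlock_init(&lock, NULL);
	if (error != 0)
		return error;

	/* ... use the lock ... */

	/* Destroy only when no thread holds or waits on the lock. */
	return pthread_rwlock_destroy(&lock);
}
#endif /* PTHREAD_RWLOCK_EXAMPLE */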


int
pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
{
	pthread_t self;
#ifdef ERRORCHECK
	if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif
	self = pthread__self();

	pthread_spinlock(self, &rwlock->ptr_interlock);
#ifdef ERRORCHECK
	if (rwlock->ptr_writer == self) {
		pthread_spinunlock(self, &rwlock->ptr_interlock);
		return EDEADLK;
	}
#endif
	/*
	 * Don't get a readlock if there is a writer or if there are waiting
	 * writers; i.e. prefer writers to readers. This strategy is dictated
	 * by SUSv3.
	 */
	while ((rwlock->ptr_writer != NULL) ||
	    (!PTQ_EMPTY(&rwlock->ptr_wblocked))) {
		PTQ_INSERT_TAIL(&rwlock->ptr_rblocked, self, pt_sleep);
		/* Locking a rwlock is not a cancellation point; don't check */
		pthread_spinlock(self, &self->pt_statelock);
		self->pt_state = PT_STATE_BLOCKED_QUEUE;
		self->pt_sleepobj = rwlock;
		self->pt_sleepq = &rwlock->ptr_rblocked;
		self->pt_sleeplock = &rwlock->ptr_interlock;
		pthread_spinunlock(self, &self->pt_statelock);
		pthread__block(self, &rwlock->ptr_interlock);
		/* interlock is not held when we return */
		pthread_spinlock(self, &rwlock->ptr_interlock);
	}

	rwlock->ptr_nreaders++;
	pthread_spinunlock(self, &rwlock->ptr_interlock);

	return 0;
}
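
/*
 * Illustrative sketch (hypothetical guard, never built): a reader-side
 * critical section as an application would write it against this API.
 * Because of the writer-preference policy above, the rdlock call may
 * sleep behind queued writers.
 */
#ifdef PTHREAD_RWLOCK_EXAMPLE
static int
example_read_shared(pthread_rwlock_t *lock, const int *datum)
{
	int v;

	pthread_rwlock_rdlock(lock);	/* may sleep behind writers */
	v = *datum;			/* read-only access to shared data */
	pthread_rwlock_unlock(lock);

	return v;
}
#endif /* PTHREAD_RWLOCK_EXAMPLE */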


int
pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
{
	pthread_t self;
#ifdef ERRORCHECK
	if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif
	self = pthread__self();

	pthread_spinlock(self, &rwlock->ptr_interlock);
#ifdef ERRORCHECK
	if (rwlock->ptr_writer == self) {
		pthread_spinunlock(self, &rwlock->ptr_interlock);
		return EDEADLK;
	}
#endif
	/*
	 * Don't get a readlock if there is a writer or if there are waiting
	 * writers; i.e. prefer writers to readers. This strategy is dictated
	 * by SUSv3.
	 */
	if ((rwlock->ptr_writer != NULL) ||
	    (!PTQ_EMPTY(&rwlock->ptr_wblocked))) {
		pthread_spinunlock(self, &rwlock->ptr_interlock);
		return EBUSY;
	}

	rwlock->ptr_nreaders++;
	pthread_spinunlock(self, &rwlock->ptr_interlock);

	return 0;
}


int
pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
{
	pthread_t self;
#ifdef ERRORCHECK
	if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif
	self = pthread__self();

	pthread_spinlock(self, &rwlock->ptr_interlock);
	/*
	 * Prefer writers to readers here; permit writers even if there are
	 * waiting readers.
	 */
	while ((rwlock->ptr_nreaders > 0) || (rwlock->ptr_writer != NULL)) {
		PTQ_INSERT_TAIL(&rwlock->ptr_wblocked, self, pt_sleep);
		/* Locking a rwlock is not a cancellation point; don't check */
		pthread_spinlock(self, &self->pt_statelock);
		self->pt_state = PT_STATE_BLOCKED_QUEUE;
		self->pt_sleepobj = rwlock;
		self->pt_sleepq = &rwlock->ptr_wblocked;
		self->pt_sleeplock = &rwlock->ptr_interlock;
		pthread_spinunlock(self, &self->pt_statelock);
		pthread__block(self, &rwlock->ptr_interlock);
		/* interlock is not held when we return */
		pthread_spinlock(self, &rwlock->ptr_interlock);
	}

	rwlock->ptr_writer = self;
	pthread_spinunlock(self, &rwlock->ptr_interlock);

	return 0;
}
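
/*
 * Illustrative sketch (hypothetical guard, never built): the matching
 * writer-side critical section for the reader example above.
 */
#ifdef PTHREAD_RWLOCK_EXAMPLE
static void
example_write_shared(pthread_rwlock_t *lock, int *datum, int v)
{
	pthread_rwlock_wrlock(lock);	/* waits for readers and writers to drain */
	*datum = v;			/* exclusive access to shared data */
	pthread_rwlock_unlock(lock);
}
#endif /* PTHREAD_RWLOCK_EXAMPLE */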


int
pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
{
	pthread_t self;
#ifdef ERRORCHECK
	if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif
	self = pthread__self();

	pthread_spinlock(self, &rwlock->ptr_interlock);
	/*
	 * Prefer writers to readers here; permit writers even if there are
	 * waiting readers.
	 */
	if ((rwlock->ptr_nreaders > 0) || (rwlock->ptr_writer != NULL)) {
		pthread_spinunlock(self, &rwlock->ptr_interlock);
		return EBUSY;
	}

	rwlock->ptr_writer = self;
	pthread_spinunlock(self, &rwlock->ptr_interlock);

	return 0;
}
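
/*
 * Illustrative sketch (hypothetical guard, never built): non-blocking
 * acquisition with a fallback when the lock is contended, which is the
 * usual way the try-variants are consumed.
 */
#ifdef PTHREAD_RWLOCK_EXAMPLE
static int example_skipped;	/* counts updates skipped under contention */

static void
example_try_update(pthread_rwlock_t *lock, int *datum, int v)
{
	if (pthread_rwlock_trywrlock(lock) == 0) {
		*datum = v;
		pthread_rwlock_unlock(lock);
	} else {
		/* EBUSY: the lock is held or readers are active; skip. */
		example_skipped++;
	}
}
#endif /* PTHREAD_RWLOCK_EXAMPLE */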


struct pthread_rwlock__waitarg {
	pthread_t ptw_thread;
	pthread_rwlock_t *ptw_rwlock;
	struct pthread_queue_t *ptw_queue;
};

int
pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlock,
	    const struct timespec *abs_timeout)
{
	pthread_t self;
	struct pthread_rwlock__waitarg wait;
	struct pt_alarm_t alarm;
	int retval;
#ifdef ERRORCHECK
	if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif
	self = pthread__self();

	pthread_spinlock(self, &rwlock->ptr_interlock);
#ifdef ERRORCHECK
	if (rwlock->ptr_writer == self) {
		pthread_spinunlock(self, &rwlock->ptr_interlock);
		return EDEADLK;
	}
#endif
	/*
	 * Don't get a readlock if there is a writer or if there are waiting
	 * writers; i.e. prefer writers to readers. This strategy is dictated
	 * by SUSv3.
	 */
	retval = 0;
	while ((retval == 0) && ((rwlock->ptr_writer != NULL) ||
	    (!PTQ_EMPTY(&rwlock->ptr_wblocked)))) {
		wait.ptw_thread = self;
		wait.ptw_rwlock = rwlock;
		wait.ptw_queue = &rwlock->ptr_rblocked;
		pthread__alarm_add(self, &alarm, abs_timeout,
		    pthread_rwlock__callback, &wait);
		PTQ_INSERT_TAIL(&rwlock->ptr_rblocked, self, pt_sleep);
		/* Locking a rwlock is not a cancellation point; don't check */
		pthread_spinlock(self, &self->pt_statelock);
		self->pt_state = PT_STATE_BLOCKED_QUEUE;
		self->pt_sleepobj = rwlock;
		self->pt_sleepq = &rwlock->ptr_rblocked;
		self->pt_sleeplock = &rwlock->ptr_interlock;
		pthread_spinunlock(self, &self->pt_statelock);
		pthread__block(self, &rwlock->ptr_interlock);
		/* interlock is not held when we return */
		pthread__alarm_del(self, &alarm);
		if (pthread__alarm_fired(&alarm))
			retval = ETIMEDOUT;
		pthread_spinlock(self, &rwlock->ptr_interlock);
	}

	if (retval == 0)
		rwlock->ptr_nreaders++;
	pthread_spinunlock(self, &rwlock->ptr_interlock);

	return retval;
}
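
/*
 * Illustrative sketch (hypothetical guard, never built): building an
 * absolute deadline for the timed variants.  This assumes the timeout is
 * measured against CLOCK_REALTIME, as POSIX specifies for
 * pthread_rwlock_timedrdlock().
 */
#ifdef PTHREAD_RWLOCK_EXAMPLE
#include <time.h>

static int
example_timed_read(pthread_rwlock_t *lock, const int *datum, int *out)
{
	struct timespec deadline;
	int error;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 1;		/* give up after roughly one second */

	error = pthread_rwlock_timedrdlock(lock, &deadline);
	if (error != 0)
		return error;		/* e.g. ETIMEDOUT */

	*out = *datum;
	pthread_rwlock_unlock(lock);

	return 0;
}
#endif /* PTHREAD_RWLOCK_EXAMPLE */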


int
pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlock,
	    const struct timespec *abs_timeout)
{
	struct pthread_rwlock__waitarg wait;
	struct pt_alarm_t alarm;
	int retval;
	pthread_t self;
#ifdef ERRORCHECK
	if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif
	self = pthread__self();

	pthread_spinlock(self, &rwlock->ptr_interlock);
	/*
	 * Prefer writers to readers here; permit writers even if there are
	 * waiting readers.
	 */
	retval = 0;
	while (retval == 0 &&
	    ((rwlock->ptr_nreaders > 0) || (rwlock->ptr_writer != NULL))) {
		wait.ptw_thread = self;
		wait.ptw_rwlock = rwlock;
		wait.ptw_queue = &rwlock->ptr_wblocked;
		pthread__alarm_add(self, &alarm, abs_timeout,
		    pthread_rwlock__callback, &wait);
		PTQ_INSERT_TAIL(&rwlock->ptr_wblocked, self, pt_sleep);
		/* Locking a rwlock is not a cancellation point; don't check */
		pthread_spinlock(self, &self->pt_statelock);
		self->pt_state = PT_STATE_BLOCKED_QUEUE;
		self->pt_sleepobj = rwlock;
		self->pt_sleepq = &rwlock->ptr_wblocked;
		self->pt_sleeplock = &rwlock->ptr_interlock;
		pthread_spinunlock(self, &self->pt_statelock);
		pthread__block(self, &rwlock->ptr_interlock);
		/* interlock is not held when we return */
		pthread__alarm_del(self, &alarm);
		if (pthread__alarm_fired(&alarm))
			retval = ETIMEDOUT;
		pthread_spinlock(self, &rwlock->ptr_interlock);
	}

	if (retval == 0)
		rwlock->ptr_writer = self;
	pthread_spinunlock(self, &rwlock->ptr_interlock);

	return retval;
}


static void
pthread_rwlock__callback(void *arg)
{
	struct pthread_rwlock__waitarg *a;
	pthread_t self;

	a = arg;
	self = pthread__self();

	pthread_spinlock(self, &a->ptw_rwlock->ptr_interlock);
	/*
	 * Don't dequeue and schedule the thread if it's already been
	 * queued up by a signal or broadcast (but hasn't yet run as far
	 * as pthread__alarm_del(), or we wouldn't be here, and hence can't
	 * have become blocked on some *other* queue).
	 */
	if (a->ptw_thread->pt_state == PT_STATE_BLOCKED_QUEUE) {
		PTQ_REMOVE(a->ptw_queue, a->ptw_thread, pt_sleep);
		pthread__sched(self, a->ptw_thread);
	}
	pthread_spinunlock(self, &a->ptw_rwlock->ptr_interlock);
}


int
pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
{
	pthread_t self, reader, writer;
	struct pthread_queue_t blockedq;
#ifdef ERRORCHECK
	if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif
	writer = NULL;
	PTQ_INIT(&blockedq);
	self = pthread__self();

	pthread_spinlock(self, &rwlock->ptr_interlock);
	if (rwlock->ptr_writer != NULL) {
		/* Releasing a write lock. */
#ifdef ERRORCHECK
		if (rwlock->ptr_writer != self) {
			pthread_spinunlock(self, &rwlock->ptr_interlock);
			return EPERM;
		}
#endif
		rwlock->ptr_writer = NULL;
		writer = PTQ_FIRST(&rwlock->ptr_wblocked);
		if (writer != NULL) {
			PTQ_REMOVE(&rwlock->ptr_wblocked, writer, pt_sleep);
		} else {
			blockedq = rwlock->ptr_rblocked;
			PTQ_INIT(&rwlock->ptr_rblocked);
		}
	} else {
		/* Releasing a read lock. */
		rwlock->ptr_nreaders--;
		if (rwlock->ptr_nreaders == 0) {
			writer = PTQ_FIRST(&rwlock->ptr_wblocked);
			if (writer != NULL)
				PTQ_REMOVE(&rwlock->ptr_wblocked, writer,
				    pt_sleep);
		}
	}

	pthread_spinunlock(self, &rwlock->ptr_interlock);

	if (writer != NULL)
		pthread__sched(self, writer);
	else
		PTQ_FOREACH(reader, &blockedq, pt_sleep)
			pthread__sched(self, reader);

	return 0;
}


int
pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
{
#ifdef ERRORCHECK
	if (attr == NULL)
		return EINVAL;
#endif
	attr->ptra_magic = _PT_RWLOCKATTR_MAGIC;

	return 0;
}


int
pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
{
#ifdef ERRORCHECK
	if ((attr == NULL) ||
	    (attr->ptra_magic != _PT_RWLOCKATTR_MAGIC))
		return EINVAL;
#endif
	attr->ptra_magic = _PT_RWLOCKATTR_DEAD;

	return 0;
}