/*	$NetBSD: pthread_rwlock.c,v 1.13 2005/10/19 02:15:03 chs Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_rwlock.c,v 1.13 2005/10/19 02:15:03 chs Exp $");

#include <errno.h>

#include "pthread.h"
#include "pthread_int.h"

static void pthread_rwlock__callback(void *);

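/*
 * The __libc_rwlock_* names (used internally by libc) are provided as
 * strong aliases for the corresponding pthread_rwlock_* functions.
 */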
__strong_alias(__libc_rwlock_init,pthread_rwlock_init)
__strong_alias(__libc_rwlock_rdlock,pthread_rwlock_rdlock)
__strong_alias(__libc_rwlock_wrlock,pthread_rwlock_wrlock)
__strong_alias(__libc_rwlock_tryrdlock,pthread_rwlock_tryrdlock)
__strong_alias(__libc_rwlock_trywrlock,pthread_rwlock_trywrlock)
__strong_alias(__libc_rwlock_unlock,pthread_rwlock_unlock)
__strong_alias(__libc_rwlock_destroy,pthread_rwlock_destroy)

int
pthread_rwlock_init(pthread_rwlock_t *rwlock,
    const pthread_rwlockattr_t *attr)
{
#ifdef ERRORCHECK
	if ((rwlock == NULL) ||
	    (attr && (attr->ptra_magic != _PT_RWLOCKATTR_MAGIC)))
		return EINVAL;
#endif
	rwlock->ptr_magic = _PT_RWLOCK_MAGIC;
	pthread_lockinit(&rwlock->ptr_interlock);
	PTQ_INIT(&rwlock->ptr_rblocked);
	PTQ_INIT(&rwlock->ptr_wblocked);
	rwlock->ptr_nreaders = 0;
	rwlock->ptr_writer = NULL;

	return 0;
}


int
pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
{
#ifdef ERRORCHECK
	if ((rwlock == NULL) ||
	    (rwlock->ptr_magic != _PT_RWLOCK_MAGIC) ||
	    (!PTQ_EMPTY(&rwlock->ptr_rblocked)) ||
	    (!PTQ_EMPTY(&rwlock->ptr_wblocked)) ||
	    (rwlock->ptr_nreaders != 0) ||
	    (rwlock->ptr_writer != NULL))
		return EINVAL;
#endif
	rwlock->ptr_magic = _PT_RWLOCK_DEAD;

	return 0;
}


int
pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
{
	pthread_t self;
#ifdef ERRORCHECK
	if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif
	self = pthread__self();

	pthread_spinlock(self, &rwlock->ptr_interlock);
#ifdef ERRORCHECK
	if (rwlock->ptr_writer == self) {
		pthread_spinunlock(self, &rwlock->ptr_interlock);
		return EDEADLK;
	}
#endif
	/*
	 * Don't get a readlock if there is a writer or if there are waiting
	 * writers; i.e. prefer writers to readers. This strategy is dictated
	 * by SUSv3.
	 */
	while ((rwlock->ptr_writer != NULL) ||
	    (!PTQ_EMPTY(&rwlock->ptr_wblocked))) {
		PTQ_INSERT_TAIL(&rwlock->ptr_rblocked, self, pt_sleep);
		/* Locking a rwlock is not a cancellation point; don't check */
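		/*
		 * Record, under pt_statelock, where we are about to sleep
		 * (object, queue, and interlock), then block; the interlock
		 * is not held when pthread__block() returns.
		 */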
		pthread_spinlock(self, &self->pt_statelock);
		self->pt_state = PT_STATE_BLOCKED_QUEUE;
		self->pt_sleepobj = rwlock;
		self->pt_sleepq = &rwlock->ptr_rblocked;
		self->pt_sleeplock = &rwlock->ptr_interlock;
		pthread_spinunlock(self, &self->pt_statelock);
		pthread__block(self, &rwlock->ptr_interlock);
		/* interlock is not held when we return */
		pthread_spinlock(self, &rwlock->ptr_interlock);
	}

	rwlock->ptr_nreaders++;
	pthread_spinunlock(self, &rwlock->ptr_interlock);

	return 0;
}


int
pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
{
	pthread_t self;
#ifdef ERRORCHECK
	if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif
	self = pthread__self();

	pthread_spinlock(self, &rwlock->ptr_interlock);
	/*
	 * Don't get a readlock if there is a writer or if there are waiting
	 * writers; i.e. prefer writers to readers. This strategy is dictated
	 * by SUSv3.
	 */
	if ((rwlock->ptr_writer != NULL) ||
	    (!PTQ_EMPTY(&rwlock->ptr_wblocked))) {
		pthread_spinunlock(self, &rwlock->ptr_interlock);
		return EBUSY;
	}

	rwlock->ptr_nreaders++;
	pthread_spinunlock(self, &rwlock->ptr_interlock);

	return 0;
}


int
pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
{
	pthread_t self;
	extern int pthread__started;

#ifdef ERRORCHECK
	if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif
	self = pthread__self();

	pthread_spinlock(self, &rwlock->ptr_interlock);
#ifdef ERRORCHECK
	if (rwlock->ptr_writer == self) {
		pthread_spinunlock(self, &rwlock->ptr_interlock);
		return EDEADLK;
	}
#endif
	/*
	 * Prefer writers to readers here; permit writers even if there are
	 * waiting readers.
	 */
	while ((rwlock->ptr_nreaders > 0) || (rwlock->ptr_writer != NULL)) {
#ifdef ERRORCHECK
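		/*
		 * If no other threads have been started yet, nothing can
		 * ever release this lock, so blocking would deadlock.
		 */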
		if (pthread__started == 0) {
			pthread_spinunlock(self, &rwlock->ptr_interlock);
			return EDEADLK;
		}
#endif
		PTQ_INSERT_TAIL(&rwlock->ptr_wblocked, self, pt_sleep);
		/* Locking a rwlock is not a cancellation point; don't check */
		pthread_spinlock(self, &self->pt_statelock);
		self->pt_state = PT_STATE_BLOCKED_QUEUE;
		self->pt_sleepobj = rwlock;
		self->pt_sleepq = &rwlock->ptr_wblocked;
		self->pt_sleeplock = &rwlock->ptr_interlock;
		pthread_spinunlock(self, &self->pt_statelock);
		pthread__block(self, &rwlock->ptr_interlock);
		/* interlock is not held when we return */
		pthread_spinlock(self, &rwlock->ptr_interlock);
	}

	rwlock->ptr_writer = self;
	pthread_spinunlock(self, &rwlock->ptr_interlock);

	return 0;
}


int
pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
{
	pthread_t self;
#ifdef ERRORCHECK
	if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif
	self = pthread__self();

	pthread_spinlock(self, &rwlock->ptr_interlock);
	/*
	 * Prefer writers to readers here; permit writers even if there are
	 * waiting readers.
	 */
	if ((rwlock->ptr_nreaders > 0) || (rwlock->ptr_writer != NULL)) {
		pthread_spinunlock(self, &rwlock->ptr_interlock);
		return EBUSY;
	}

	rwlock->ptr_writer = self;
	pthread_spinunlock(self, &rwlock->ptr_interlock);

	return 0;
}


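/*
 * Bookkeeping handed to the timeout callback by the timed lock
 * operations: which thread is waiting, and on which rwlock and queue.
 */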
struct pthread_rwlock__waitarg {
	pthread_t ptw_thread;
	pthread_rwlock_t *ptw_rwlock;
	struct pthread_queue_t *ptw_queue;
};

int
pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlock,
    const struct timespec *abs_timeout)
{
	pthread_t self;
	struct pthread_rwlock__waitarg wait;
	struct pt_alarm_t alarm;
	int retval;

#ifdef ERRORCHECK
	if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
	if (abs_timeout == NULL)
		return EINVAL;
#endif
	if ((abs_timeout->tv_nsec >= 1000000000) ||
	    (abs_timeout->tv_nsec < 0) ||
	    (abs_timeout->tv_sec < 0))
		return EINVAL;

	self = pthread__self();
	pthread_spinlock(self, &rwlock->ptr_interlock);
#ifdef ERRORCHECK
	if (rwlock->ptr_writer == self) {
		pthread_spinunlock(self, &rwlock->ptr_interlock);
		return EDEADLK;
	}
#endif
	/*
	 * Don't get a readlock if there is a writer or if there are waiting
	 * writers; i.e. prefer writers to readers. This strategy is dictated
	 * by SUSv3.
	 */
	retval = 0;
	while ((retval == 0) && ((rwlock->ptr_writer != NULL) ||
	    (!PTQ_EMPTY(&rwlock->ptr_wblocked)))) {
		wait.ptw_thread = self;
		wait.ptw_rwlock = rwlock;
		wait.ptw_queue = &rwlock->ptr_rblocked;
		pthread__alarm_add(self, &alarm, abs_timeout,
		    pthread_rwlock__callback, &wait);
		PTQ_INSERT_TAIL(&rwlock->ptr_rblocked, self, pt_sleep);
		/* Locking a rwlock is not a cancellation point; don't check */
		pthread_spinlock(self, &self->pt_statelock);
		self->pt_state = PT_STATE_BLOCKED_QUEUE;
		self->pt_sleepobj = rwlock;
		self->pt_sleepq = &rwlock->ptr_rblocked;
		self->pt_sleeplock = &rwlock->ptr_interlock;
		pthread_spinunlock(self, &self->pt_statelock);
		pthread__block(self, &rwlock->ptr_interlock);
		/* interlock is not held when we return */
		pthread__alarm_del(self, &alarm);
		if (pthread__alarm_fired(&alarm))
			retval = ETIMEDOUT;
		pthread_spinlock(self, &rwlock->ptr_interlock);
	}

	/*
	 * One last chance to get the lock, in case it was released between
	 * the alarm firing and when this thread got rescheduled, or in case
	 * a signal handler kept it busy.
	 */
	if ((rwlock->ptr_writer == NULL) &&
	    (PTQ_EMPTY(&rwlock->ptr_wblocked))) {
		rwlock->ptr_nreaders++;
		retval = 0;
	}
	pthread_spinunlock(self, &rwlock->ptr_interlock);

	return retval;
}


int
pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlock,
    const struct timespec *abs_timeout)
{
	struct pthread_rwlock__waitarg wait;
	struct pt_alarm_t alarm;
	pthread_t self;
	int retval;
	extern int pthread__started;

#ifdef ERRORCHECK
	if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
	if (abs_timeout == NULL)
		return EINVAL;
#endif
	if ((abs_timeout->tv_nsec >= 1000000000) ||
	    (abs_timeout->tv_nsec < 0) ||
	    (abs_timeout->tv_sec < 0))
		return EINVAL;

	self = pthread__self();
	pthread_spinlock(self, &rwlock->ptr_interlock);
#ifdef ERRORCHECK
	if (rwlock->ptr_writer == self) {
		pthread_spinunlock(self, &rwlock->ptr_interlock);
		return EDEADLK;
	}
#endif
	/*
	 * Prefer writers to readers here; permit writers even if there are
	 * waiting readers.
	 */
	retval = 0;
	while (retval == 0 &&
	    ((rwlock->ptr_nreaders > 0) || (rwlock->ptr_writer != NULL))) {
#ifdef ERRORCHECK
		if (pthread__started == 0) {
			pthread_spinunlock(self, &rwlock->ptr_interlock);
			return EDEADLK;
		}
#endif
		wait.ptw_thread = self;
		wait.ptw_rwlock = rwlock;
		wait.ptw_queue = &rwlock->ptr_wblocked;
		pthread__alarm_add(self, &alarm, abs_timeout,
		    pthread_rwlock__callback, &wait);
		PTQ_INSERT_TAIL(&rwlock->ptr_wblocked, self, pt_sleep);
		/* Locking a rwlock is not a cancellation point; don't check */
		pthread_spinlock(self, &self->pt_statelock);
		self->pt_state = PT_STATE_BLOCKED_QUEUE;
		self->pt_sleepobj = rwlock;
		self->pt_sleepq = &rwlock->ptr_wblocked;
		self->pt_sleeplock = &rwlock->ptr_interlock;
		pthread_spinunlock(self, &self->pt_statelock);
		pthread__block(self, &rwlock->ptr_interlock);
		/* interlock is not held when we return */
		pthread__alarm_del(self, &alarm);
		if (pthread__alarm_fired(&alarm))
			retval = ETIMEDOUT;
		pthread_spinlock(self, &rwlock->ptr_interlock);
	}

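	/*
	 * One last chance to get the lock, in case it was released between
	 * the alarm firing and when this thread got rescheduled.
	 */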
	if ((rwlock->ptr_nreaders == 0) && (rwlock->ptr_writer == NULL)) {
		rwlock->ptr_writer = self;
		retval = 0;
	}
	pthread_spinunlock(self, &rwlock->ptr_interlock);

	return retval;
}


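/*
 * Timeout callback for the timed lock operations, invoked when the
 * alarm registered with pthread__alarm_add() fires.
 */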
static void
pthread_rwlock__callback(void *arg)
{
	struct pthread_rwlock__waitarg *a;
	pthread_t self;

	a = arg;
	self = pthread__self();

	pthread_spinlock(self, &a->ptw_rwlock->ptr_interlock);
	/*
	 * Don't dequeue and schedule the thread if it has already been
	 * woken up, e.g. by pthread_rwlock_unlock() (but hasn't yet run
	 * as far as pthread__alarm_del(), or we wouldn't be here, and
	 * hence can't have become blocked on some *other* queue).
	 */
	if (a->ptw_thread->pt_state == PT_STATE_BLOCKED_QUEUE) {
		PTQ_REMOVE(a->ptw_queue, a->ptw_thread, pt_sleep);
		pthread__sched(self, a->ptw_thread);
	}
	pthread_spinunlock(self, &a->ptw_rwlock->ptr_interlock);

}


int
pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
{
	pthread_t self, writer;
	struct pthread_queue_t blockedq;
#ifdef ERRORCHECK
	if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif
	writer = NULL;
	PTQ_INIT(&blockedq);
	self = pthread__self();

	pthread_spinlock(self, &rwlock->ptr_interlock);
	if (rwlock->ptr_writer != NULL) {
		/* Releasing a write lock. */
#ifdef ERRORCHECK
		if (rwlock->ptr_writer != self) {
			pthread_spinunlock(self, &rwlock->ptr_interlock);
			return EPERM;
		}
#endif
		rwlock->ptr_writer = NULL;
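		/*
		 * Wake the first waiting writer if there is one; otherwise
		 * wake all of the waiting readers.
		 */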
		writer = PTQ_FIRST(&rwlock->ptr_wblocked);
		if (writer != NULL) {
			PTQ_REMOVE(&rwlock->ptr_wblocked, writer, pt_sleep);
		} else {
			blockedq = rwlock->ptr_rblocked;
			PTQ_INIT(&rwlock->ptr_rblocked);
		}
	} else
#ifdef ERRORCHECK
	if (rwlock->ptr_nreaders > 0)
#endif
	{
		/* Releasing a read lock. */
		rwlock->ptr_nreaders--;
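		/* The last reader out wakes the first waiting writer, if any. */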
		if (rwlock->ptr_nreaders == 0) {
			writer = PTQ_FIRST(&rwlock->ptr_wblocked);
			if (writer != NULL)
				PTQ_REMOVE(&rwlock->ptr_wblocked, writer,
				    pt_sleep);
		}
#ifdef ERRORCHECK
	} else {
		pthread_spinunlock(self, &rwlock->ptr_interlock);
		return EPERM;
#endif
	}

	if (writer != NULL)
		pthread__sched(self, writer);
	else
		pthread__sched_sleepers(self, &blockedq);

	pthread_spinunlock(self, &rwlock->ptr_interlock);

	return 0;
}


int
pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
{
#ifdef ERRORCHECK
	if (attr == NULL)
		return EINVAL;
#endif
	attr->ptra_magic = _PT_RWLOCKATTR_MAGIC;

	return 0;
}


int
pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
{
#ifdef ERRORCHECK
	if ((attr == NULL) ||
	    (attr->ptra_magic != _PT_RWLOCKATTR_MAGIC))
		return EINVAL;
#endif
	attr->ptra_magic = _PT_RWLOCKATTR_DEAD;

	return 0;
}