/*	$NetBSD: pthread_rwlock.c,v 1.13.6.1 2007/09/10 05:24:53 wrstuden Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_rwlock.c,v 1.13.6.1 2007/09/10 05:24:53 wrstuden Exp $");

#include <errno.h>

#include "pthread.h"
#include "pthread_int.h"

static void pthread_rwlock__callback(void *);

__strong_alias(__libc_rwlock_init,pthread_rwlock_init)
__strong_alias(__libc_rwlock_rdlock,pthread_rwlock_rdlock)
__strong_alias(__libc_rwlock_wrlock,pthread_rwlock_wrlock)
__strong_alias(__libc_rwlock_tryrdlock,pthread_rwlock_tryrdlock)
__strong_alias(__libc_rwlock_trywrlock,pthread_rwlock_trywrlock)
__strong_alias(__libc_rwlock_unlock,pthread_rwlock_unlock)
__strong_alias(__libc_rwlock_destroy,pthread_rwlock_destroy)

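/*
 * Initialize a read/write lock: check the (optional) attribute object,
 * then set up the magic number, the interlock, the reader and writer
 * wait queues, and the reader count / writer owner fields.
 */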
int
pthread_rwlock_init(pthread_rwlock_t *rwlock,
	    const pthread_rwlockattr_t *attr)
{
#ifdef ERRORCHECK
	if ((rwlock == NULL) ||
	    (attr && (attr->ptra_magic != _PT_RWLOCKATTR_MAGIC)))
		return EINVAL;
#endif
	rwlock->ptr_magic = _PT_RWLOCK_MAGIC;
	pthread_lockinit(&rwlock->ptr_interlock);
	PTQ_INIT(&rwlock->ptr_rblocked);
	PTQ_INIT(&rwlock->ptr_wblocked);
	rwlock->ptr_nreaders = 0;
	rwlock->ptr_writer = NULL;

	return 0;
}


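/*
 * Destroy a read/write lock.  With error checking enabled this refuses
 * to destroy a lock that is still held or still has waiters; otherwise
 * it simply marks the lock dead.
 */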
int
pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
{
#ifdef ERRORCHECK
	if ((rwlock == NULL) ||
	    (rwlock->ptr_magic != _PT_RWLOCK_MAGIC) ||
	    (!PTQ_EMPTY(&rwlock->ptr_rblocked)) ||
	    (!PTQ_EMPTY(&rwlock->ptr_wblocked)) ||
	    (rwlock->ptr_nreaders != 0) ||
	    (rwlock->ptr_writer != NULL))
		return EINVAL;
#endif
	rwlock->ptr_magic = _PT_RWLOCK_DEAD;

	return 0;
}


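/*
 * Acquire a read lock, blocking on the reader queue for as long as a
 * writer holds the lock or writers are waiting for it.
 */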
int
pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
{
	pthread_t self;
#ifdef ERRORCHECK
	if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif
	self = pthread__self();

	pthread_spinlock(self, &rwlock->ptr_interlock);
#ifdef ERRORCHECK
	if (rwlock->ptr_writer == self) {
		pthread_spinunlock(self, &rwlock->ptr_interlock);
		return EDEADLK;
	}
#endif
	/*
	 * Don't get a readlock if there is a writer or if there are waiting
	 * writers; i.e. prefer writers to readers. This strategy is dictated
	 * by SUSv3.
	 */
	while ((rwlock->ptr_writer != NULL) ||
	    (!PTQ_EMPTY(&rwlock->ptr_wblocked))) {
		PTQ_INSERT_TAIL(&rwlock->ptr_rblocked, self, pt_sleep);
		/* Locking a rwlock is not a cancellation point; don't check */
		pthread_spinlock(self, &self->pt_statelock);
		if (pthread_check_defsig(self)) {
			pthread_spinunlock(self, &self->pt_statelock);
			PTQ_REMOVE(&rwlock->ptr_rblocked, self, pt_sleep);
			pthread_spinunlock(self, &rwlock->ptr_interlock);
			pthread__signal_deferred(self, self);
			pthread_spinlock(self, &rwlock->ptr_interlock);
			continue;
		}
		self->pt_state = PT_STATE_BLOCKED_QUEUE;
		self->pt_sleepobj = rwlock;
		self->pt_sleepq = &rwlock->ptr_rblocked;
		self->pt_sleeplock = &rwlock->ptr_interlock;
		pthread_spinunlock(self, &self->pt_statelock);
		pthread__block(self, &rwlock->ptr_interlock);
		/* interlock is not held when we return */
		pthread_spinlock(self, &rwlock->ptr_interlock);
	}

	rwlock->ptr_nreaders++;
	pthread_spinunlock(self, &rwlock->ptr_interlock);

	return 0;
}


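/*
 * Try to acquire a read lock without blocking; fail with EBUSY if a
 * writer holds the lock or writers are waiting for it.
 */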
int
pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
{
	pthread_t self;
#ifdef ERRORCHECK
	if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif
	self = pthread__self();

	pthread_spinlock(self, &rwlock->ptr_interlock);
	/*
	 * Don't get a readlock if there is a writer or if there are waiting
	 * writers; i.e. prefer writers to readers. This strategy is dictated
	 * by SUSv3.
	 */
	if ((rwlock->ptr_writer != NULL) ||
	    (!PTQ_EMPTY(&rwlock->ptr_wblocked))) {
		pthread_spinunlock(self, &rwlock->ptr_interlock);
		return EBUSY;
	}

	rwlock->ptr_nreaders++;
	pthread_spinunlock(self, &rwlock->ptr_interlock);

	return 0;
}


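/*
 * Acquire a write lock, blocking on the writer queue until there are
 * no readers and no other writer.
 */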
int
pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
{
	pthread_t self;
	extern int pthread__started;

#ifdef ERRORCHECK
	if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif
	self = pthread__self();

	pthread_spinlock(self, &rwlock->ptr_interlock);
#ifdef ERRORCHECK
	if (rwlock->ptr_writer == self) {
		pthread_spinunlock(self, &rwlock->ptr_interlock);
		return EDEADLK;
	}
#endif
	/*
	 * Prefer writers to readers here; permit writers even if there are
	 * waiting readers.
	 */
	while ((rwlock->ptr_nreaders > 0) || (rwlock->ptr_writer != NULL)) {
#ifdef ERRORCHECK
		if (pthread__started == 0) {
			pthread_spinunlock(self, &rwlock->ptr_interlock);
			return EDEADLK;
		}
#endif
		PTQ_INSERT_TAIL(&rwlock->ptr_wblocked, self, pt_sleep);
		/* Locking a rwlock is not a cancellation point; don't check */
		pthread_spinlock(self, &self->pt_statelock);
		if (pthread_check_defsig(self)) {
			pthread_spinunlock(self, &self->pt_statelock);
			PTQ_REMOVE(&rwlock->ptr_wblocked, self, pt_sleep);
			pthread_spinunlock(self, &rwlock->ptr_interlock);
			pthread__signal_deferred(self, self);
			pthread_spinlock(self, &rwlock->ptr_interlock);
			continue;
		}
		self->pt_state = PT_STATE_BLOCKED_QUEUE;
		self->pt_sleepobj = rwlock;
		self->pt_sleepq = &rwlock->ptr_wblocked;
		self->pt_sleeplock = &rwlock->ptr_interlock;
		pthread_spinunlock(self, &self->pt_statelock);
		pthread__block(self, &rwlock->ptr_interlock);
		/* interlock is not held when we return */
		pthread_spinlock(self, &rwlock->ptr_interlock);
	}

	rwlock->ptr_writer = self;
	pthread_spinunlock(self, &rwlock->ptr_interlock);

	return 0;
}


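/*
 * Try to acquire a write lock without blocking; fail with EBUSY if the
 * lock is held by any reader or writer.
 */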
int
pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
{
	pthread_t self;
#ifdef ERRORCHECK
	if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif
	self = pthread__self();

	pthread_spinlock(self, &rwlock->ptr_interlock);
	/*
	 * Prefer writers to readers here; permit writers even if there are
	 * waiting readers.
	 */
	if ((rwlock->ptr_nreaders > 0) || (rwlock->ptr_writer != NULL)) {
		pthread_spinunlock(self, &rwlock->ptr_interlock);
		return EBUSY;
	}

	rwlock->ptr_writer = self;
	pthread_spinunlock(self, &rwlock->ptr_interlock);

	return 0;
}


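/*
 * Argument package handed to pthread_rwlock__callback() by the timed
 * lock operations: the waiting thread, the lock, and the queue the
 * thread is sleeping on.
 */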
struct pthread_rwlock__waitarg {
	pthread_t ptw_thread;
	pthread_rwlock_t *ptw_rwlock;
	struct pthread_queue_t *ptw_queue;
};

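/*
 * As pthread_rwlock_rdlock(), but give up and return ETIMEDOUT once the
 * absolute timeout expires.
 */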
int
pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlock,
	    const struct timespec *abs_timeout)
{
	pthread_t self;
	struct pthread_rwlock__waitarg wait;
	struct pt_alarm_t alarm;
	int retval;

#ifdef ERRORCHECK
	if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
	if (abs_timeout == NULL)
		return EINVAL;
#endif
	if ((abs_timeout->tv_nsec >= 1000000000) ||
	    (abs_timeout->tv_nsec < 0) ||
	    (abs_timeout->tv_sec < 0))
		return EINVAL;

	self = pthread__self();
	pthread_spinlock(self, &rwlock->ptr_interlock);
#ifdef ERRORCHECK
	if (rwlock->ptr_writer == self) {
		pthread_spinunlock(self, &rwlock->ptr_interlock);
		return EDEADLK;
	}
#endif
	/*
	 * Don't get a readlock if there is a writer or if there are waiting
	 * writers; i.e. prefer writers to readers. This strategy is dictated
	 * by SUSv3.
	 */
	retval = 0;
	while ((retval == 0) && ((rwlock->ptr_writer != NULL) ||
	    (!PTQ_EMPTY(&rwlock->ptr_wblocked)))) {
		wait.ptw_thread = self;
		wait.ptw_rwlock = rwlock;
		wait.ptw_queue = &rwlock->ptr_rblocked;
		/* Locking a rwlock is not a cancellation point; don't check */
		pthread_spinlock(self, &self->pt_statelock);
		if (pthread_check_defsig(self)) {
			pthread_spinunlock(self, &self->pt_statelock);
			pthread_spinunlock(self, &rwlock->ptr_interlock);
			pthread__signal_deferred(self, self);
			pthread_spinlock(self, &rwlock->ptr_interlock);
			continue;
		}
		pthread__alarm_add(self, &alarm, abs_timeout,
		    pthread_rwlock__callback, &wait);
		PTQ_INSERT_TAIL(&rwlock->ptr_rblocked, self, pt_sleep);
		self->pt_state = PT_STATE_BLOCKED_QUEUE;
		self->pt_sleepobj = rwlock;
		self->pt_sleepq = &rwlock->ptr_rblocked;
		self->pt_sleeplock = &rwlock->ptr_interlock;
		pthread_spinunlock(self, &self->pt_statelock);
		pthread__block(self, &rwlock->ptr_interlock);
		/* interlock is not held when we return */
		pthread__alarm_del(self, &alarm);
		if (pthread__alarm_fired(&alarm))
			retval = ETIMEDOUT;
		pthread_spinlock(self, &rwlock->ptr_interlock);
	}

	/*
	 * One last chance to get the lock, in case it was released between
	 * the alarm firing and when this thread got rescheduled, or in case
	 * a signal handler kept it busy.
	 */
	if ((rwlock->ptr_writer == NULL) &&
	    (PTQ_EMPTY(&rwlock->ptr_wblocked))) {
		rwlock->ptr_nreaders++;
		retval = 0;
	}
	pthread_spinunlock(self, &rwlock->ptr_interlock);

	return retval;
}


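/*
 * As pthread_rwlock_wrlock(), but give up and return ETIMEDOUT once the
 * absolute timeout expires.
 */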
int
pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlock,
	    const struct timespec *abs_timeout)
{
	struct pthread_rwlock__waitarg wait;
	struct pt_alarm_t alarm;
	pthread_t self;
	int retval;
	extern int pthread__started;

#ifdef ERRORCHECK
	if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
	if (abs_timeout == NULL)
		return EINVAL;
#endif
	if ((abs_timeout->tv_nsec >= 1000000000) ||
	    (abs_timeout->tv_nsec < 0) ||
	    (abs_timeout->tv_sec < 0))
		return EINVAL;

	self = pthread__self();
	pthread_spinlock(self, &rwlock->ptr_interlock);
#ifdef ERRORCHECK
	if (rwlock->ptr_writer == self) {
		pthread_spinunlock(self, &rwlock->ptr_interlock);
		return EDEADLK;
	}
#endif
	/*
	 * Prefer writers to readers here; permit writers even if there are
	 * waiting readers.
	 */
	retval = 0;
	while (retval == 0 &&
	    ((rwlock->ptr_nreaders > 0) || (rwlock->ptr_writer != NULL))) {
#ifdef ERRORCHECK
		if (pthread__started == 0) {
			pthread_spinunlock(self, &rwlock->ptr_interlock);
			return EDEADLK;
		}
#endif
		wait.ptw_thread = self;
		wait.ptw_rwlock = rwlock;
		wait.ptw_queue = &rwlock->ptr_wblocked;
		/* Locking a rwlock is not a cancellation point; don't check */
		pthread_spinlock(self, &self->pt_statelock);
		if (pthread_check_defsig(self)) {
			pthread_spinunlock(self, &self->pt_statelock);
			pthread_spinunlock(self, &rwlock->ptr_interlock);
			pthread__signal_deferred(self, self);
			pthread_spinlock(self, &rwlock->ptr_interlock);
			continue;
		}
		pthread__alarm_add(self, &alarm, abs_timeout,
		    pthread_rwlock__callback, &wait);
		PTQ_INSERT_TAIL(&rwlock->ptr_wblocked, self, pt_sleep);
		self->pt_state = PT_STATE_BLOCKED_QUEUE;
		self->pt_sleepobj = rwlock;
		self->pt_sleepq = &rwlock->ptr_wblocked;
		self->pt_sleeplock = &rwlock->ptr_interlock;
		pthread_spinunlock(self, &self->pt_statelock);
		pthread__block(self, &rwlock->ptr_interlock);
		/* interlock is not held when we return */
		pthread__alarm_del(self, &alarm);
		if (pthread__alarm_fired(&alarm))
			retval = ETIMEDOUT;
		pthread_spinlock(self, &rwlock->ptr_interlock);
	}

	if ((rwlock->ptr_nreaders == 0) && (rwlock->ptr_writer == NULL)) {
		rwlock->ptr_writer = self;
		retval = 0;
	}
	pthread_spinunlock(self, &rwlock->ptr_interlock);

	return retval;
}


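/*
 * Alarm callback for the timed lock operations: wake the timed-out
 * thread if it is still blocked on the rwlock's sleep queue.
 */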
static void
pthread_rwlock__callback(void *arg)
{
	struct pthread_rwlock__waitarg *a;
	pthread_t self;

	a = arg;
	self = pthread__self();

	pthread_spinlock(self, &a->ptw_rwlock->ptr_interlock);
	/*
	 * Don't dequeue and schedule the thread if it's already been
	 * queued up by a signal or broadcast (but hasn't yet run as far
	 * as pthread__alarm_del(), or we wouldn't be here, and hence can't
	 * have become blocked on some *other* queue).
	 */
	if (a->ptw_thread->pt_state == PT_STATE_BLOCKED_QUEUE) {
		PTQ_REMOVE(a->ptw_queue, a->ptw_thread, pt_sleep);
		pthread__sched(self, a->ptw_thread);
	}
	pthread_spinunlock(self, &a->ptw_rwlock->ptr_interlock);
}


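/*
 * Release a read or write lock.  Dropping a write lock wakes either the
 * first waiting writer or, if there is none, all waiting readers;
 * dropping the last read lock wakes the first waiting writer, if any.
 */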
int
pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
{
	pthread_t self, writer;
	struct pthread_queue_t blockedq;
#ifdef ERRORCHECK
	if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif
	writer = NULL;
	PTQ_INIT(&blockedq);
	self = pthread__self();

	pthread_spinlock(self, &rwlock->ptr_interlock);
	if (rwlock->ptr_writer != NULL) {
		/* Releasing a write lock. */
#ifdef ERRORCHECK
		if (rwlock->ptr_writer != self) {
			pthread_spinunlock(self, &rwlock->ptr_interlock);
			return EPERM;
		}
#endif
		rwlock->ptr_writer = NULL;
		writer = PTQ_FIRST(&rwlock->ptr_wblocked);
		if (writer != NULL) {
			PTQ_REMOVE(&rwlock->ptr_wblocked, writer, pt_sleep);
		} else {
			blockedq = rwlock->ptr_rblocked;
			PTQ_INIT(&rwlock->ptr_rblocked);
		}
	} else
#ifdef ERRORCHECK
	if (rwlock->ptr_nreaders > 0)
#endif
	{
		/* Releasing a read lock. */
		rwlock->ptr_nreaders--;
		if (rwlock->ptr_nreaders == 0) {
			writer = PTQ_FIRST(&rwlock->ptr_wblocked);
			if (writer != NULL)
				PTQ_REMOVE(&rwlock->ptr_wblocked, writer,
				    pt_sleep);
		}
#ifdef ERRORCHECK
	} else {
		pthread_spinunlock(self, &rwlock->ptr_interlock);
		return EPERM;
#endif
	}

	if (writer != NULL)
		pthread__sched(self, writer);
	else
		pthread__sched_sleepers(self, &blockedq);

	pthread_spinunlock(self, &rwlock->ptr_interlock);

	return 0;
}


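/*
 * Initialize a read/write lock attribute object (which currently
 * carries no attributes beyond its magic number).
 */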
int
pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
{
#ifdef ERRORCHECK
	if (attr == NULL)
		return EINVAL;
#endif
	attr->ptra_magic = _PT_RWLOCKATTR_MAGIC;

	return 0;
}


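/*
 * Destroy a read/write lock attribute object by marking it dead.
 */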
int
pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
{
#ifdef ERRORCHECK
	if ((attr == NULL) ||
	    (attr->ptra_magic != _PT_RWLOCKATTR_MAGIC))
		return EINVAL;
#endif
	attr->ptra_magic = _PT_RWLOCKATTR_DEAD;

	return 0;
}