/*	$NetBSD: pthread_rwlock.c,v 1.1.2.5 2003/01/11 09:14:35 skrll Exp $ */

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <assert.h>
#include <errno.h>
#include <sys/cdefs.h>

#include "pthread.h"
#include "pthread_int.h"

static void pthread_rwlock__callback(void *);

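/*
 * libc refers to its internal locks through the __libc_rwlock_*()
 * names; __strong_alias() from <sys/cdefs.h> binds those names to the
 * pthread implementations below so both resolve to the same code.
 */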
__strong_alias(__libc_rwlock_init,pthread_rwlock_init)
__strong_alias(__libc_rwlock_rdlock,pthread_rwlock_rdlock)
__strong_alias(__libc_rwlock_wrlock,pthread_rwlock_wrlock)
__strong_alias(__libc_rwlock_tryrdlock,pthread_rwlock_tryrdlock)
__strong_alias(__libc_rwlock_trywrlock,pthread_rwlock_trywrlock)
__strong_alias(__libc_rwlock_unlock,pthread_rwlock_unlock)
__strong_alias(__libc_rwlock_destroy,pthread_rwlock_destroy)

int
pthread_rwlock_init(pthread_rwlock_t *rwlock,
	    const pthread_rwlockattr_t *attr)
{
#ifdef ERRORCHECK
	if ((rwlock == NULL) ||
	    (attr && (attr->ptra_magic != _PT_RWLOCKATTR_MAGIC)))
		return EINVAL;
#endif
	rwlock->ptr_magic = _PT_RWLOCK_MAGIC;
	pthread_lockinit(&rwlock->ptr_interlock);
	PTQ_INIT(&rwlock->ptr_rblocked);
	PTQ_INIT(&rwlock->ptr_wblocked);
	rwlock->ptr_nreaders = 0;
	rwlock->ptr_writer = NULL;

	return 0;
}


int
pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
{
#ifdef ERRORCHECK
	if ((rwlock == NULL) ||
	    (rwlock->ptr_magic != _PT_RWLOCK_MAGIC) ||
	    (!PTQ_EMPTY(&rwlock->ptr_rblocked)) ||
	    (!PTQ_EMPTY(&rwlock->ptr_wblocked)) ||
	    (rwlock->ptr_nreaders != 0) ||
	    (rwlock->ptr_writer != NULL))
		return EINVAL;
#endif
	rwlock->ptr_magic = _PT_RWLOCK_DEAD;

	return 0;
}


int
pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
{
	pthread_t self;
#ifdef ERRORCHECK
	if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif
	self = pthread__self();

	pthread_spinlock(self, &rwlock->ptr_interlock);
#ifdef ERRORCHECK
	if (rwlock->ptr_writer == self) {
		pthread_spinunlock(self, &rwlock->ptr_interlock);
		return EDEADLK;
	}
#endif
	/*
	 * Don't get a readlock if there is a writer or if there are waiting
	 * writers; i.e. prefer writers to readers. This strategy is dictated
	 * by SUSv3.
	 */
	while ((rwlock->ptr_writer != NULL) ||
	    (!PTQ_EMPTY(&rwlock->ptr_wblocked))) {
		PTQ_INSERT_TAIL(&rwlock->ptr_rblocked, self, pt_sleep);
		/* Locking a rwlock is not a cancellation point; don't check */
		pthread_spinlock(self, &self->pt_statelock);
		self->pt_state = PT_STATE_BLOCKED_QUEUE;
		self->pt_sleepobj = rwlock;
		self->pt_sleepq = &rwlock->ptr_rblocked;
		self->pt_sleeplock = &rwlock->ptr_interlock;
		pthread_spinunlock(self, &self->pt_statelock);
		pthread__block(self, &rwlock->ptr_interlock);
		/* interlock is not held when we return */
		pthread_spinlock(self, &rwlock->ptr_interlock);
	}

	rwlock->ptr_nreaders++;
	pthread_spinunlock(self, &rwlock->ptr_interlock);

	return 0;
}


int
pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
{
	pthread_t self;
#ifdef ERRORCHECK
	if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif
	self = pthread__self();

	pthread_spinlock(self, &rwlock->ptr_interlock);
#ifdef ERRORCHECK
	if (rwlock->ptr_writer == self) {
		pthread_spinunlock(self, &rwlock->ptr_interlock);
		return EDEADLK;
	}
#endif
	/*
	 * Don't get a readlock if there is a writer or if there are waiting
	 * writers; i.e. prefer writers to readers. This strategy is dictated
	 * by SUSv3.
	 */
	if ((rwlock->ptr_writer != NULL) ||
	    (!PTQ_EMPTY(&rwlock->ptr_wblocked))) {
		pthread_spinunlock(self, &rwlock->ptr_interlock);
		return EBUSY;
	}

	rwlock->ptr_nreaders++;
	pthread_spinunlock(self, &rwlock->ptr_interlock);

	return 0;
}


int
pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
{
	pthread_t self;
#ifdef ERRORCHECK
	if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif
	self = pthread__self();

	pthread_spinlock(self, &rwlock->ptr_interlock);
	/*
	 * Prefer writers to readers here; permit writers even if there are
	 * waiting readers.
	 */
	while ((rwlock->ptr_nreaders > 0) || (rwlock->ptr_writer != NULL)) {
		PTQ_INSERT_TAIL(&rwlock->ptr_wblocked, self, pt_sleep);
		/* Locking a rwlock is not a cancellation point; don't check */
		pthread_spinlock(self, &self->pt_statelock);
		self->pt_state = PT_STATE_BLOCKED_QUEUE;
		self->pt_sleepobj = rwlock;
		self->pt_sleepq = &rwlock->ptr_wblocked;
		self->pt_sleeplock = &rwlock->ptr_interlock;
		pthread_spinunlock(self, &self->pt_statelock);
		pthread__block(self, &rwlock->ptr_interlock);
		/* interlock is not held when we return */
		pthread_spinlock(self, &rwlock->ptr_interlock);
	}

	rwlock->ptr_writer = self;
	pthread_spinunlock(self, &rwlock->ptr_interlock);

	return 0;
}


int
pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
{
	pthread_t self;
#ifdef ERRORCHECK
	if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif
	self = pthread__self();

	pthread_spinlock(self, &rwlock->ptr_interlock);
	/*
	 * Prefer writers to readers here; permit writers even if there are
	 * waiting readers.
	 */
	if ((rwlock->ptr_nreaders > 0) || (rwlock->ptr_writer != NULL)) {
		pthread_spinunlock(self, &rwlock->ptr_interlock);
		return EBUSY;
	}

	rwlock->ptr_writer = self;
	pthread_spinunlock(self, &rwlock->ptr_interlock);

	return 0;
}

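/*
 * Argument block passed to pthread_rwlock__callback() when a timed
 * lock attempt arms an alarm: it identifies the waiting thread, the
 * rwlock, and the sleep queue the thread is parked on.
 */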
struct pthread_rwlock__waitarg {
	pthread_t ptw_thread;
	pthread_rwlock_t *ptw_rwlock;
	struct pthread_queue_t *ptw_queue;
};

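/*
 * The timed variants work like the plain lock operations, except that
 * an alarm is armed (pthread__alarm_add) for the absolute timeout
 * before blocking.  If the alarm fires, pthread_rwlock__callback()
 * takes the thread off the sleep queue and reschedules it, and the
 * lock attempt fails with ETIMEDOUT.
 */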
int
pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlock,
	    const struct timespec *abs_timeout)
{
	pthread_t self;
	struct pthread_rwlock__waitarg wait;
	struct pt_alarm_t alarm;
	int retval;
#ifdef ERRORCHECK
	if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
	if ((abs_timeout == NULL) || (abs_timeout->tv_nsec >= 1000000000))
		return EINVAL;
#endif
	self = pthread__self();

	pthread_spinlock(self, &rwlock->ptr_interlock);
#ifdef ERRORCHECK
	if (rwlock->ptr_writer == self) {
		pthread_spinunlock(self, &rwlock->ptr_interlock);
		return EDEADLK;
	}
#endif
	/*
	 * Don't get a readlock if there is a writer or if there are waiting
	 * writers; i.e. prefer writers to readers. This strategy is dictated
	 * by SUSv3.
	 */
	retval = 0;
	while ((retval == 0) && ((rwlock->ptr_writer != NULL) ||
	    (!PTQ_EMPTY(&rwlock->ptr_wblocked)))) {
		wait.ptw_thread = self;
		wait.ptw_rwlock = rwlock;
		wait.ptw_queue = &rwlock->ptr_rblocked;
		pthread__alarm_add(self, &alarm, abs_timeout,
		    pthread_rwlock__callback, &wait);
		PTQ_INSERT_TAIL(&rwlock->ptr_rblocked, self, pt_sleep);
		/* Locking a rwlock is not a cancellation point; don't check */
		pthread_spinlock(self, &self->pt_statelock);
		self->pt_state = PT_STATE_BLOCKED_QUEUE;
		self->pt_sleepobj = rwlock;
		self->pt_sleepq = &rwlock->ptr_rblocked;
		self->pt_sleeplock = &rwlock->ptr_interlock;
		pthread_spinunlock(self, &self->pt_statelock);
		pthread__block(self, &rwlock->ptr_interlock);
		/* interlock is not held when we return */
		pthread__alarm_del(self, &alarm);
		if (pthread__alarm_fired(&alarm))
			retval = ETIMEDOUT;
		pthread_spinlock(self, &rwlock->ptr_interlock);
	}

	if (retval == 0)
		rwlock->ptr_nreaders++;
	pthread_spinunlock(self, &rwlock->ptr_interlock);

	return retval;
}


int
pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlock,
	    const struct timespec *abs_timeout)
{
	struct pthread_rwlock__waitarg wait;
	struct pt_alarm_t alarm;
	int retval;
	pthread_t self;
#ifdef ERRORCHECK
	if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
	if ((abs_timeout == NULL) || (abs_timeout->tv_nsec >= 1000000000))
		return EINVAL;
#endif
	self = pthread__self();

	pthread_spinlock(self, &rwlock->ptr_interlock);
	/*
	 * Prefer writers to readers here; permit writers even if there are
	 * waiting readers.
	 */
	retval = 0;
	while (retval == 0 &&
	    ((rwlock->ptr_nreaders > 0) || (rwlock->ptr_writer != NULL))) {
		wait.ptw_thread = self;
		wait.ptw_rwlock = rwlock;
		wait.ptw_queue = &rwlock->ptr_wblocked;
		pthread__alarm_add(self, &alarm, abs_timeout,
		    pthread_rwlock__callback, &wait);
		PTQ_INSERT_TAIL(&rwlock->ptr_wblocked, self, pt_sleep);
		/* Locking a rwlock is not a cancellation point; don't check */
		pthread_spinlock(self, &self->pt_statelock);
		self->pt_state = PT_STATE_BLOCKED_QUEUE;
		self->pt_sleepobj = rwlock;
		self->pt_sleepq = &rwlock->ptr_wblocked;
		self->pt_sleeplock = &rwlock->ptr_interlock;
		pthread_spinunlock(self, &self->pt_statelock);
		pthread__block(self, &rwlock->ptr_interlock);
		/* interlock is not held when we return */
		pthread__alarm_del(self, &alarm);
		if (pthread__alarm_fired(&alarm))
			retval = ETIMEDOUT;
		pthread_spinlock(self, &rwlock->ptr_interlock);
	}

	if (retval == 0)
		rwlock->ptr_writer = self;
	pthread_spinunlock(self, &rwlock->ptr_interlock);

	return retval;
}

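/*
 * Timeout hook run by the alarm code on behalf of the thread named in
 * the wait argument block: if that thread is still blocked on the
 * rwlock's sleep queue, remove it and make it runnable again so the
 * timed lock routine can report ETIMEDOUT.
 */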
static void
pthread_rwlock__callback(void *arg)
{
	struct pthread_rwlock__waitarg *a;
	pthread_t self;

	a = arg;
	self = pthread__self();

	pthread_spinlock(self, &a->ptw_rwlock->ptr_interlock);
	/*
	 * Don't dequeue and schedule the thread if it's already been
	 * queued up by a signal or broadcast (but hasn't yet run as far
	 * as pthread__alarm_del(), or we wouldn't be here, and hence can't
	 * have become blocked on some *other* queue).
	 */
	if (a->ptw_thread->pt_state == PT_STATE_BLOCKED_QUEUE) {
		PTQ_REMOVE(a->ptw_queue, a->ptw_thread, pt_sleep);
		pthread__sched(self, a->ptw_thread);
	}
	pthread_spinunlock(self, &a->ptw_rwlock->ptr_interlock);
}

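/*
 * Wakeup policy on unlock: releasing a write lock wakes the first
 * queued writer if there is one, otherwise all queued readers;
 * releasing a read lock wakes the first queued writer only when the
 * last reader leaves.
 */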
int
pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
{
	pthread_t self, reader, writer;
	struct pthread_queue_t blockedq;
#ifdef ERRORCHECK
	if ((rwlock == NULL) || (rwlock->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif
	writer = NULL;
	PTQ_INIT(&blockedq);
	self = pthread__self();

	pthread_spinlock(self, &rwlock->ptr_interlock);
	if (rwlock->ptr_writer != NULL) {
		/* Releasing a write lock. */
#ifdef ERRORCHECK
		if (rwlock->ptr_writer != self) {
			pthread_spinunlock(self, &rwlock->ptr_interlock);
			return EPERM;
		}
#endif
		rwlock->ptr_writer = NULL;
		writer = PTQ_FIRST(&rwlock->ptr_wblocked);
		if (writer != NULL) {
			PTQ_REMOVE(&rwlock->ptr_wblocked, writer, pt_sleep);
		} else {
			blockedq = rwlock->ptr_rblocked;
			PTQ_INIT(&rwlock->ptr_rblocked);
		}
	} else {
		/* Releasing a read lock. */
		rwlock->ptr_nreaders--;
		if (rwlock->ptr_nreaders == 0) {
			writer = PTQ_FIRST(&rwlock->ptr_wblocked);
			if (writer != NULL)
				PTQ_REMOVE(&rwlock->ptr_wblocked, writer,
				    pt_sleep);
		}
	}

	pthread_spinunlock(self, &rwlock->ptr_interlock);

	if (writer != NULL)
		pthread__sched(self, writer);
	else
		PTQ_FOREACH(reader, &blockedq, pt_sleep)
			pthread__sched(self, reader);

	return 0;
}


int
pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
{
#ifdef ERRORCHECK
	if (attr == NULL)
		return EINVAL;
#endif
	attr->ptra_magic = _PT_RWLOCKATTR_MAGIC;

	return 0;
}


int
pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
{
#ifdef ERRORCHECK
	if ((attr == NULL) ||
	    (attr->ptra_magic != _PT_RWLOCKATTR_MAGIC))
		return EINVAL;
#endif
	attr->ptra_magic = _PT_RWLOCKATTR_DEAD;

	return 0;
}