pthread_rwlock.c revision 1.28.2.1 1 1.28.2.1 yamt /* $NetBSD: pthread_rwlock.c,v 1.28.2.1 2008/05/18 12:30:40 yamt Exp $ */
2 1.2 thorpej
3 1.2 thorpej /*-
4 1.27 ad * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc.
5 1.2 thorpej * All rights reserved.
6 1.2 thorpej *
7 1.2 thorpej * This code is derived from software contributed to The NetBSD Foundation
8 1.27 ad * by Nathan J. Williams, by Jason R. Thorpe, and by Andrew Doran.
9 1.2 thorpej *
10 1.2 thorpej * Redistribution and use in source and binary forms, with or without
11 1.2 thorpej * modification, are permitted provided that the following conditions
12 1.2 thorpej * are met:
13 1.2 thorpej * 1. Redistributions of source code must retain the above copyright
14 1.2 thorpej * notice, this list of conditions and the following disclaimer.
15 1.2 thorpej * 2. Redistributions in binary form must reproduce the above copyright
16 1.2 thorpej * notice, this list of conditions and the following disclaimer in the
17 1.2 thorpej * documentation and/or other materials provided with the distribution.
18 1.2 thorpej *
19 1.2 thorpej * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 1.2 thorpej * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 1.2 thorpej * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 1.2 thorpej * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 1.2 thorpej * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 1.2 thorpej * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 1.2 thorpej * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 1.2 thorpej * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 1.2 thorpej * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 1.2 thorpej * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 1.2 thorpej * POSSIBILITY OF SUCH DAMAGE.
30 1.2 thorpej */
31 1.2 thorpej
32 1.5 lukem #include <sys/cdefs.h>
33 1.28.2.1 yamt __RCSID("$NetBSD: pthread_rwlock.c,v 1.28.2.1 2008/05/18 12:30:40 yamt Exp $");
34 1.5 lukem
35 1.2 thorpej #include <errno.h>
36 1.27 ad #include <stddef.h>
37 1.2 thorpej
38 1.2 thorpej #include "pthread.h"
39 1.2 thorpej #include "pthread_int.h"
40 1.2 thorpej
41 1.27 ad #define _RW_LOCKED 0
42 1.27 ad #define _RW_WANT_WRITE 1
43 1.27 ad #define _RW_WANT_READ 2
44 1.27 ad
45 1.27 ad static int pthread__rwlock_wrlock(pthread_rwlock_t *, const struct timespec *);
46 1.27 ad static int pthread__rwlock_rdlock(pthread_rwlock_t *, const struct timespec *);
47 1.27 ad static void pthread__rwlock_early(void *);
48 1.24 christos
49 1.23 ad int _pthread_rwlock_held_np(pthread_rwlock_t *);
50 1.23 ad int _pthread_rwlock_rdheld_np(pthread_rwlock_t *);
51 1.23 ad int _pthread_rwlock_wrheld_np(pthread_rwlock_t *);
52 1.23 ad
53 1.27 ad #ifndef lint
54 1.27 ad __weak_alias(pthread_rwlock_held_np,_pthread_rwlock_held_np);
55 1.27 ad __weak_alias(pthread_rwlock_rdheld_np,_pthread_rwlock_rdheld_np);
56 1.27 ad __weak_alias(pthread_rwlock_wrheld_np,_pthread_rwlock_wrheld_np);
57 1.27 ad #endif
58 1.27 ad
59 1.2 thorpej __strong_alias(__libc_rwlock_init,pthread_rwlock_init)
60 1.2 thorpej __strong_alias(__libc_rwlock_rdlock,pthread_rwlock_rdlock)
61 1.2 thorpej __strong_alias(__libc_rwlock_wrlock,pthread_rwlock_wrlock)
62 1.2 thorpej __strong_alias(__libc_rwlock_tryrdlock,pthread_rwlock_tryrdlock)
63 1.2 thorpej __strong_alias(__libc_rwlock_trywrlock,pthread_rwlock_trywrlock)
64 1.2 thorpej __strong_alias(__libc_rwlock_unlock,pthread_rwlock_unlock)
65 1.2 thorpej __strong_alias(__libc_rwlock_destroy,pthread_rwlock_destroy)
66 1.2 thorpej
67 1.27 ad static inline uintptr_t
68 1.27 ad rw_cas(pthread_rwlock_t *ptr, uintptr_t o, uintptr_t n)
69 1.27 ad {
70 1.27 ad
71 1.27 ad return (uintptr_t)atomic_cas_ptr(&ptr->ptr_owner, (void *)o,
72 1.27 ad (void *)n);
73 1.27 ad }
74 1.27 ad
75 1.2 thorpej int
76 1.27 ad pthread_rwlock_init(pthread_rwlock_t *ptr,
77 1.2 thorpej const pthread_rwlockattr_t *attr)
78 1.2 thorpej {
79 1.27 ad
80 1.27 ad if (attr && (attr->ptra_magic != _PT_RWLOCKATTR_MAGIC))
81 1.2 thorpej return EINVAL;
82 1.27 ad ptr->ptr_magic = _PT_RWLOCK_MAGIC;
83 1.27 ad pthread_lockinit(&ptr->ptr_interlock);
84 1.27 ad PTQ_INIT(&ptr->ptr_rblocked);
85 1.27 ad PTQ_INIT(&ptr->ptr_wblocked);
86 1.27 ad ptr->ptr_nreaders = 0;
87 1.27 ad ptr->ptr_owner = NULL;
88 1.2 thorpej
89 1.2 thorpej return 0;
90 1.2 thorpej }
91 1.2 thorpej
92 1.2 thorpej
93 1.2 thorpej int
94 1.27 ad pthread_rwlock_destroy(pthread_rwlock_t *ptr)
95 1.2 thorpej {
96 1.27 ad
97 1.27 ad if ((ptr->ptr_magic != _PT_RWLOCK_MAGIC) ||
98 1.27 ad (!PTQ_EMPTY(&ptr->ptr_rblocked)) ||
99 1.27 ad (!PTQ_EMPTY(&ptr->ptr_wblocked)) ||
100 1.27 ad (ptr->ptr_nreaders != 0) ||
101 1.27 ad (ptr->ptr_owner != NULL))
102 1.2 thorpej return EINVAL;
103 1.27 ad ptr->ptr_magic = _PT_RWLOCK_DEAD;
104 1.2 thorpej
105 1.2 thorpej return 0;
106 1.2 thorpej }
107 1.2 thorpej
/*
 * Common read-lock path for pthread_rwlock_rdlock() and
 * pthread_rwlock_timedrdlock().  'ts' is an absolute timeout, or NULL
 * to wait forever.  Returns 0 with the read hold acquired, EDEADLK if
 * the calling thread already owns the lock for writing, or the error
 * from pthread__park() (e.g. ETIMEDOUT).
 */
static int
pthread__rwlock_rdlock(pthread_rwlock_t *ptr, const struct timespec *ts)
{
	uintptr_t owner, next;
	pthread_t self;
	int error;

	self = pthread__self();

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear, then try to acquire the lock.
		 */
		if ((owner & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) == 0) {
			next = rw_cas(ptr, owner, owner + RW_READ_INCR);
			if (owner == next) {
				/* Got it! */
#ifndef	PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			continue;
		}

		/* A writer holds the lock: recursion would deadlock. */
		if ((owner & RW_THREAD) == (uintptr_t)self)
			return EDEADLK;

		/*
		 * Grab the interlock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		pthread__spinlock(self, &ptr->ptr_interlock);

		/*
		 * Mark the rwlock as having waiters.  If the set fails,
		 * then we may not need to sleep and should spin again.
		 */
		next = rw_cas(ptr, owner, owner | RW_HAS_WAITERS);
		if (owner != next) {
			pthread__spinunlock(self, &ptr->ptr_interlock);
			continue;
		}

		/* The waiters bit is set - it's safe to sleep. */
		PTQ_INSERT_HEAD(&ptr->ptr_rblocked, self, pt_sleep);
		ptr->ptr_nreaders++;
		self->pt_rwlocked = _RW_WANT_READ;
		self->pt_sleeponq = 1;
		self->pt_sleepobj = &ptr->ptr_rblocked;
		/* pt_early repairs the waiter bits on a timed-out wakeup. */
		self->pt_early = pthread__rwlock_early;
		pthread__spinunlock(self, &ptr->ptr_interlock);

		error = pthread__park(self, &ptr->ptr_interlock,
		    &ptr->ptr_rblocked, ts, 0, &ptr->ptr_rblocked);

		/* Did we get the lock (direct handoff by the releaser)? */
		if (self->pt_rwlocked == _RW_LOCKED) {
#ifndef	PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
		if (error != 0)
			return error;

		/* Woken without error but not handed the lock: a bug. */
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "direct handoff failure");
	}
}
189 1.2 thorpej
190 1.2 thorpej
191 1.2 thorpej int
192 1.27 ad pthread_rwlock_tryrdlock(pthread_rwlock_t *ptr)
193 1.2 thorpej {
194 1.27 ad uintptr_t owner, next;
195 1.20 ad
196 1.2 thorpej #ifdef ERRORCHECK
197 1.27 ad if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
198 1.2 thorpej return EINVAL;
199 1.2 thorpej #endif
200 1.27 ad
201 1.2 thorpej /*
202 1.2 thorpej * Don't get a readlock if there is a writer or if there are waiting
203 1.2 thorpej * writers; i.e. prefer writers to readers. This strategy is dictated
204 1.2 thorpej * by SUSv3.
205 1.2 thorpej */
206 1.27 ad for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
207 1.27 ad if ((owner & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) != 0)
208 1.27 ad return EBUSY;
209 1.27 ad next = rw_cas(ptr, owner, owner + RW_READ_INCR);
210 1.27 ad if (owner == next) {
211 1.27 ad /* Got it! */
212 1.27 ad #ifndef PTHREAD__ATOMIC_IS_MEMBAR
213 1.27 ad membar_enter();
214 1.27 ad #endif
215 1.27 ad return 0;
216 1.27 ad }
217 1.2 thorpej }
218 1.2 thorpej }
219 1.2 thorpej
/*
 * Common write-lock path for pthread_rwlock_wrlock() and
 * pthread_rwlock_timedwrlock().  'ts' is an absolute timeout, or NULL
 * to wait forever.  Returns 0 with the write lock held, EDEADLK on
 * recursive acquisition by the owner, or the error from
 * pthread__park() (e.g. ETIMEDOUT).
 */
static int
pthread__rwlock_wrlock(pthread_rwlock_t *ptr, const struct timespec *ts)
{
	uintptr_t owner, next;
	pthread_t self;
	int error;

	self = pthread__self();

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear, then try to acquire the lock.
		 */
		if ((owner & RW_THREAD) == 0) {
			next = rw_cas(ptr, owner,
			    (uintptr_t)self | RW_WRITE_LOCKED);
			if (owner == next) {
				/* Got it! */
#ifndef	PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			continue;
		}

		/* We already own it: recursion would deadlock. */
		if ((owner & RW_THREAD) == (uintptr_t)self)
			return EDEADLK;

		/*
		 * Grab the interlock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		pthread__spinlock(self, &ptr->ptr_interlock);

		/*
		 * Mark the rwlock as having waiters.  If the set fails,
		 * then we may not need to sleep and should spin again.
		 */
		next = rw_cas(ptr, owner,
		    owner | RW_HAS_WAITERS | RW_WRITE_WANTED);
		if (owner != next) {
			pthread__spinunlock(self, &ptr->ptr_interlock);
			continue;
		}

		/* The waiters bit is set - it's safe to sleep. */
		PTQ_INSERT_TAIL(&ptr->ptr_wblocked, self, pt_sleep);
		self->pt_rwlocked = _RW_WANT_WRITE;
		self->pt_sleeponq = 1;
		self->pt_sleepobj = &ptr->ptr_wblocked;
		/* pt_early repairs the waiter bits on a timed-out wakeup. */
		self->pt_early = pthread__rwlock_early;
		pthread__spinunlock(self, &ptr->ptr_interlock);

		error = pthread__park(self, &ptr->ptr_interlock,
		    &ptr->ptr_wblocked, ts, 0, &ptr->ptr_wblocked);

		/* Did we get the lock (direct handoff by the releaser)? */
		if (self->pt_rwlocked == _RW_LOCKED) {
#ifndef	PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
		if (error != 0)
			return error;

		/* Woken without error but not handed the lock: a bug. */
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "direct handoff failure");
	}
}
302 1.2 thorpej
303 1.2 thorpej
304 1.2 thorpej int
305 1.27 ad pthread_rwlock_trywrlock(pthread_rwlock_t *ptr)
306 1.2 thorpej {
307 1.27 ad uintptr_t owner, next;
308 1.2 thorpej pthread_t self;
309 1.27 ad
310 1.2 thorpej #ifdef ERRORCHECK
311 1.27 ad if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
312 1.2 thorpej return EINVAL;
313 1.2 thorpej #endif
314 1.27 ad
315 1.2 thorpej self = pthread__self();
316 1.27 ad
317 1.27 ad for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
318 1.27 ad if (owner != 0)
319 1.27 ad return EBUSY;
320 1.27 ad next = rw_cas(ptr, owner, (uintptr_t)self | RW_WRITE_LOCKED);
321 1.27 ad if (owner == next) {
322 1.27 ad /* Got it! */
323 1.27 ad #ifndef PTHREAD__ATOMIC_IS_MEMBAR
324 1.27 ad membar_enter();
325 1.27 ad #endif
326 1.27 ad return 0;
327 1.27 ad }
328 1.2 thorpej }
329 1.27 ad }
330 1.2 thorpej
331 1.27 ad int
332 1.27 ad pthread_rwlock_rdlock(pthread_rwlock_t *ptr)
333 1.27 ad {
334 1.2 thorpej
335 1.27 ad return pthread__rwlock_rdlock(ptr, NULL);
336 1.2 thorpej }
337 1.2 thorpej
338 1.2 thorpej int
339 1.27 ad pthread_rwlock_timedrdlock(pthread_rwlock_t *ptr,
340 1.27 ad const struct timespec *abs_timeout)
341 1.2 thorpej {
342 1.12 chs
343 1.10 nathanw if (abs_timeout == NULL)
344 1.2 thorpej return EINVAL;
345 1.10 nathanw if ((abs_timeout->tv_nsec >= 1000000000) ||
346 1.10 nathanw (abs_timeout->tv_nsec < 0) ||
347 1.10 nathanw (abs_timeout->tv_sec < 0))
348 1.10 nathanw return EINVAL;
349 1.12 chs
350 1.27 ad return pthread__rwlock_rdlock(ptr, abs_timeout);
351 1.27 ad }
352 1.2 thorpej
353 1.27 ad int
354 1.27 ad pthread_rwlock_wrlock(pthread_rwlock_t *ptr)
355 1.27 ad {
356 1.2 thorpej
357 1.27 ad return pthread__rwlock_wrlock(ptr, NULL);
358 1.2 thorpej }
359 1.2 thorpej
360 1.2 thorpej int
361 1.27 ad pthread_rwlock_timedwrlock(pthread_rwlock_t *ptr,
362 1.27 ad const struct timespec *abs_timeout)
363 1.2 thorpej {
364 1.12 chs
365 1.10 nathanw if (abs_timeout == NULL)
366 1.10 nathanw return EINVAL;
367 1.10 nathanw if ((abs_timeout->tv_nsec >= 1000000000) ||
368 1.10 nathanw (abs_timeout->tv_nsec < 0) ||
369 1.10 nathanw (abs_timeout->tv_sec < 0))
370 1.10 nathanw return EINVAL;
371 1.12 chs
372 1.27 ad return pthread__rwlock_wrlock(ptr, abs_timeout);
373 1.2 thorpej }
374 1.2 thorpej
375 1.2 thorpej
/*
 * Release the rwlock.  Works for both read and write holds: because
 * acquisition used an add, release is a subtract, making the two paths
 * symmetric.  When the lock would become unowned but has waiters, the
 * lock is handed off directly -- to the first waiting writer if any
 * (SUSv3 writer preference), otherwise to all waiting readers.
 * Returns 0 on success, EPERM if the caller does not hold the lock.
 */
int
pthread_rwlock_unlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, decr, new, next;
	pthread_t self, thread;

#ifdef ERRORCHECK
	if ((ptr == NULL) || (ptr->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif

#ifndef	PTHREAD__ATOMIC_IS_MEMBAR
	membar_exit();
#endif

	/*
	 * Since we used an add operation to set the required lock
	 * bits, we can use a subtract to clear them, which makes
	 * the read-release and write-release path similar.
	 */
	self = pthread__self();
	owner = (uintptr_t)ptr->ptr_owner;
	if ((owner & RW_WRITE_LOCKED) != 0) {
		/* Write hold: only the owning thread may release. */
		decr = (uintptr_t)self | RW_WRITE_LOCKED;
		if ((owner & RW_THREAD) != (uintptr_t)self) {
			return EPERM;
		}
	} else {
		/* Read hold: an unowned lock cannot be released. */
		decr = RW_READ_INCR;
		if (owner == 0) {
			return EPERM;
		}
	}

	for (;; owner = next) {
		/*
		 * Compute what we expect the new value of the lock to be.
		 * Only proceed to do direct handoff if there are waiters,
		 * and if the lock would become unowned.
		 */
		new = (owner - decr);
		if ((new & (RW_THREAD | RW_HAS_WAITERS)) != RW_HAS_WAITERS) {
			next = rw_cas(ptr, owner, new);
			if (owner == next) {
				/* Released! */
				return 0;
			}
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we can adjust
		 * the waiter bits.  We must check to see if there are
		 * still waiters before proceeding.
		 */
		pthread__spinlock(self, &ptr->ptr_interlock);
		owner = (uintptr_t)ptr->ptr_owner;
		if ((owner & RW_HAS_WAITERS) == 0) {
			/* Waiters vanished; retry the fast release path. */
			pthread__spinunlock(self, &ptr->ptr_interlock);
			next = owner;
			continue;
		}

		/*
		 * Give the lock away.  SUSv3 dictates that we must give
		 * preference to writers.
		 */
		if ((thread = PTQ_FIRST(&ptr->ptr_wblocked)) != NULL) {
			new = (uintptr_t)thread | RW_WRITE_LOCKED;

			/* Preserve waiter bits for whoever is left queued. */
			if (PTQ_NEXT(thread, pt_sleep) != NULL)
				new |= RW_HAS_WAITERS | RW_WRITE_WANTED;
			else if (ptr->ptr_nreaders != 0)
				new |= RW_HAS_WAITERS;

			/*
			 * Set in the new value.  The lock becomes owned
			 * by the writer that we are about to wake.
			 */
			(void)atomic_swap_ptr(&ptr->ptr_owner, (void *)new);

			/* Wake the writer. */
			PTQ_REMOVE(&ptr->ptr_wblocked, thread, pt_sleep);
			thread->pt_rwlocked = _RW_LOCKED;
			pthread__unpark(self, &ptr->ptr_interlock,
			    &ptr->ptr_wblocked, thread);
		} else {
			new = 0;
			PTQ_FOREACH(thread, &ptr->ptr_rblocked, pt_sleep) {
				/*
				 * May have already been handed the lock,
				 * since pthread__unpark_all() can release
				 * our interlock before awakening all
				 * threads.
				 */
				if (thread->pt_sleepobj == NULL)
					continue;
				new += RW_READ_INCR;
				thread->pt_rwlocked = _RW_LOCKED;
			}

			/*
			 * Set in the new value.  The lock becomes owned
			 * by the readers that we are about to wake.
			 */
			(void)atomic_swap_ptr(&ptr->ptr_owner, (void *)new);

			/* Wake up all sleeping readers. */
			ptr->ptr_nreaders = 0;
			pthread__unpark_all(self, &ptr->ptr_interlock,
			    &ptr->ptr_rblocked);
		}

		return 0;
	}
}
492 1.27 ad
/*
 * Called when a timedlock awakens early to adjust the waiter bits.
 * The rwlock's interlock is held on entry, and the caller has been
 * removed from the waiters lists.  'obj' is the sleep queue the
 * caller was parked on; the enclosing rwlock is recovered from it
 * via offsetof().
 */
static void
pthread__rwlock_early(void *obj)
{
	uintptr_t owner, set, new, next;
	pthread_rwlock_t *ptr;
	pthread_t self;
	u_int off;

	self = pthread__self();

	/* Which queue were we on?  That fixes the offset back to the lock. */
	switch (self->pt_rwlocked) {
	case _RW_WANT_READ:
		off = offsetof(pthread_rwlock_t, ptr_rblocked);
		break;
	case _RW_WANT_WRITE:
		off = offsetof(pthread_rwlock_t, ptr_wblocked);
		break;
	default:
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "bad value of pt_rwlocked");
		off = 0;
		/* NOTREACHED */
		break;
	}

	/* LINTED mind your own business */
	ptr = (pthread_rwlock_t *)((uint8_t *)obj - off);
	owner = (uintptr_t)ptr->ptr_owner;

	if ((owner & RW_THREAD) == 0) {
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "lock not held");
	}

	/* Recompute the waiter bits from the queues' current state. */
	if (!PTQ_EMPTY(&ptr->ptr_wblocked))
		set = RW_HAS_WAITERS | RW_WRITE_WANTED;
	else if (ptr->ptr_nreaders != 0)
		set = RW_HAS_WAITERS;
	else
		set = 0;

	/* Install the corrected bits, preserving the owner/count field. */
	for (;; owner = next) {
		new = (owner & ~(RW_HAS_WAITERS | RW_WRITE_WANTED)) | set;
		next = rw_cas(ptr, owner, new);
		if (owner == next)
			break;
	}
}
546 1.2 thorpej
547 1.2 thorpej int
548 1.27 ad _pthread_rwlock_held_np(pthread_rwlock_t *ptr)
549 1.2 thorpej {
550 1.27 ad uintptr_t owner = (uintptr_t)ptr->ptr_owner;
551 1.2 thorpej
552 1.28 ad if ((owner & RW_WRITE_LOCKED) != 0)
553 1.28 ad return (owner & RW_THREAD) == (uintptr_t)pthread__self();
554 1.27 ad return (owner & RW_THREAD) != 0;
555 1.2 thorpej }
556 1.2 thorpej
557 1.2 thorpej int
558 1.27 ad _pthread_rwlock_rdheld_np(pthread_rwlock_t *ptr)
559 1.2 thorpej {
560 1.27 ad uintptr_t owner = (uintptr_t)ptr->ptr_owner;
561 1.2 thorpej
562 1.27 ad return (owner & RW_THREAD) != 0 && (owner & RW_WRITE_LOCKED) == 0;
563 1.2 thorpej }
564 1.21 ad
565 1.23 ad int
566 1.27 ad _pthread_rwlock_wrheld_np(pthread_rwlock_t *ptr)
567 1.23 ad {
568 1.27 ad uintptr_t owner = (uintptr_t)ptr->ptr_owner;
569 1.23 ad
570 1.27 ad return (owner & (RW_THREAD | RW_WRITE_LOCKED)) ==
571 1.27 ad ((uintptr_t)pthread__self() | RW_WRITE_LOCKED);
572 1.23 ad }
573 1.23 ad
574 1.23 ad int
575 1.27 ad pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
576 1.23 ad {
577 1.23 ad
578 1.27 ad if (attr == NULL)
579 1.27 ad return EINVAL;
580 1.27 ad attr->ptra_magic = _PT_RWLOCKATTR_MAGIC;
581 1.27 ad
582 1.27 ad return 0;
583 1.23 ad }
584 1.23 ad
585 1.27 ad
586 1.23 ad int
587 1.27 ad pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
588 1.23 ad {
589 1.23 ad
590 1.27 ad if ((attr == NULL) ||
591 1.27 ad (attr->ptra_magic != _PT_RWLOCKATTR_MAGIC))
592 1.27 ad return EINVAL;
593 1.27 ad attr->ptra_magic = _PT_RWLOCKATTR_DEAD;
594 1.27 ad
595 1.27 ad return 0;
596 1.23 ad }
597