/*	$NetBSD: pthread_rwlock.c,v 1.28 2008/03/08 13:46:59 ad Exp $	*/

/*-
 * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, by Jason R. Thorpe, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_rwlock.c,v 1.28 2008/03/08 13:46:59 ad Exp $");

#include <errno.h>
#include <stddef.h>

#include "pthread.h"
#include "pthread_int.h"

#define	_RW_LOCKED		0
#define	_RW_WANT_WRITE		1
#define	_RW_WANT_READ		2
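
/*
 * Values for a thread's pt_rwlocked field: a waiter parks with it set to
 * _RW_WANT_READ or _RW_WANT_WRITE, and the thread releasing the lock
 * changes it to _RW_LOCKED before waking the waiter, meaning the lock was
 * handed off directly and no further acquire attempt is needed.
 */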

static int pthread__rwlock_wrlock(pthread_rwlock_t *, const struct timespec *);
static int pthread__rwlock_rdlock(pthread_rwlock_t *, const struct timespec *);
static void pthread__rwlock_early(void *);

int	_pthread_rwlock_held_np(pthread_rwlock_t *);
int	_pthread_rwlock_rdheld_np(pthread_rwlock_t *);
int	_pthread_rwlock_wrheld_np(pthread_rwlock_t *);

#ifndef lint
__weak_alias(pthread_rwlock_held_np,_pthread_rwlock_held_np);
__weak_alias(pthread_rwlock_rdheld_np,_pthread_rwlock_rdheld_np);
__weak_alias(pthread_rwlock_wrheld_np,_pthread_rwlock_wrheld_np);
#endif

__strong_alias(__libc_rwlock_init,pthread_rwlock_init)
__strong_alias(__libc_rwlock_rdlock,pthread_rwlock_rdlock)
__strong_alias(__libc_rwlock_wrlock,pthread_rwlock_wrlock)
__strong_alias(__libc_rwlock_tryrdlock,pthread_rwlock_tryrdlock)
__strong_alias(__libc_rwlock_trywrlock,pthread_rwlock_trywrlock)
__strong_alias(__libc_rwlock_unlock,pthread_rwlock_unlock)
__strong_alias(__libc_rwlock_destroy,pthread_rwlock_destroy)

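/*
 * The lock state is packed into the single word ptr_owner; the RW_*
 * constants used below come from a shared header, not this file.  As the
 * code here uses them: RW_WRITE_LOCKED marks a write-held lock whose owner
 * thread sits in the RW_THREAD bits, RW_READ_INCR is the per-reader
 * increment when read-held, and RW_HAS_WAITERS/RW_WRITE_WANTED record that
 * threads are queued.  rw_cas() is a thin wrapper around atomic_cas_ptr():
 * it tries to change ptr_owner from 'o' to 'n' and returns the value
 * actually found, so callers loop until the return matches what they
 * expected to see.
 */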
static inline uintptr_t
rw_cas(pthread_rwlock_t *ptr, uintptr_t o, uintptr_t n)
{

	return (uintptr_t)atomic_cas_ptr(&ptr->ptr_owner, (void *)o,
	    (void *)n);
}

int
pthread_rwlock_init(pthread_rwlock_t *ptr,
	    const pthread_rwlockattr_t *attr)
{

	if (attr && (attr->ptra_magic != _PT_RWLOCKATTR_MAGIC))
		return EINVAL;
	ptr->ptr_magic = _PT_RWLOCK_MAGIC;
	pthread_lockinit(&ptr->ptr_interlock);
	PTQ_INIT(&ptr->ptr_rblocked);
	PTQ_INIT(&ptr->ptr_wblocked);
	ptr->ptr_nreaders = 0;
	ptr->ptr_owner = NULL;

	return 0;
}


int
pthread_rwlock_destroy(pthread_rwlock_t *ptr)
{

	if ((ptr->ptr_magic != _PT_RWLOCK_MAGIC) ||
	    (!PTQ_EMPTY(&ptr->ptr_rblocked)) ||
	    (!PTQ_EMPTY(&ptr->ptr_wblocked)) ||
	    (ptr->ptr_nreaders != 0) ||
	    (ptr->ptr_owner != NULL))
		return EINVAL;
	ptr->ptr_magic = _PT_RWLOCK_DEAD;

	return 0;
}

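/*
 * Read-side acquire path, shared by the plain and timed variants.  The
 * fast path is a single CAS on ptr_owner.  On contention the thread takes
 * the interlock, sets RW_HAS_WAITERS (retrying from the top if the lock
 * word changed underneath it), queues itself on ptr_rblocked and parks.
 * It relies on direct handoff: pthread_rwlock_unlock() assigns the lock to
 * the waiter and sets pt_rwlocked to _RW_LOCKED before waking it, so on a
 * normal wakeup there is nothing left to do here.
 */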
static int
pthread__rwlock_rdlock(pthread_rwlock_t *ptr, const struct timespec *ts)
{
	uintptr_t owner, next;
	pthread_t self;
	int error;

	self = pthread__self();

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear, then try to acquire the lock.
		 */
		if ((owner & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) == 0) {
			next = rw_cas(ptr, owner, owner + RW_READ_INCR);
			if (owner == next) {
				/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			continue;
		}

		if ((owner & RW_THREAD) == (uintptr_t)self)
			return EDEADLK;

		/*
		 * Grab the interlock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		pthread__spinlock(self, &ptr->ptr_interlock);

		/*
		 * Mark the rwlock as having waiters.  If the set fails,
		 * then we may not need to sleep and should spin again.
		 */
		next = rw_cas(ptr, owner, owner | RW_HAS_WAITERS);
		if (owner != next) {
			pthread__spinunlock(self, &ptr->ptr_interlock);
			continue;
		}

		/* The waiters bit is set - it's safe to sleep. */
		PTQ_INSERT_HEAD(&ptr->ptr_rblocked, self, pt_sleep);
		ptr->ptr_nreaders++;
		self->pt_rwlocked = _RW_WANT_READ;
		self->pt_sleeponq = 1;
		self->pt_sleepobj = &ptr->ptr_rblocked;
		self->pt_early = pthread__rwlock_early;
		pthread__spinunlock(self, &ptr->ptr_interlock);

		error = pthread__park(self, &ptr->ptr_interlock,
		    &ptr->ptr_rblocked, ts, 0, &ptr->ptr_rblocked);

		/* Did we get the lock? */
		if (self->pt_rwlocked == _RW_LOCKED) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
		if (error != 0)
			return error;

		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "direct handoff failure");
	}
}


int
pthread_rwlock_tryrdlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, next;

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	/*
	 * Don't get a readlock if there is a writer or if there are waiting
	 * writers; i.e. prefer writers to readers.  This strategy is
	 * dictated by SUSv3.
	 */
	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		if ((owner & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) != 0)
			return EBUSY;
		next = rw_cas(ptr, owner, owner + RW_READ_INCR);
		if (owner == next) {
			/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
	}
}

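/*
 * Write-side slow path.  This mirrors pthread__rwlock_rdlock() above, but
 * waiters queue at the tail of ptr_wblocked and also set RW_WRITE_WANTED,
 * so that new readers back off until the writer has had its turn.
 */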
static int
pthread__rwlock_wrlock(pthread_rwlock_t *ptr, const struct timespec *ts)
{
	uintptr_t owner, next;
	pthread_t self;
	int error;

	self = pthread__self();

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear, then try to acquire the lock.
		 */
		if ((owner & RW_THREAD) == 0) {
			next = rw_cas(ptr, owner,
			    (uintptr_t)self | RW_WRITE_LOCKED);
			if (owner == next) {
				/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			continue;
		}

		if ((owner & RW_THREAD) == (uintptr_t)self)
			return EDEADLK;

		/*
		 * Grab the interlock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		pthread__spinlock(self, &ptr->ptr_interlock);

		/*
		 * Mark the rwlock as having waiters.  If the set fails,
		 * then we may not need to sleep and should spin again.
		 */
		next = rw_cas(ptr, owner,
		    owner | RW_HAS_WAITERS | RW_WRITE_WANTED);
		if (owner != next) {
			pthread__spinunlock(self, &ptr->ptr_interlock);
			continue;
		}

		/* The waiters bit is set - it's safe to sleep. */
		PTQ_INSERT_TAIL(&ptr->ptr_wblocked, self, pt_sleep);
		self->pt_rwlocked = _RW_WANT_WRITE;
		self->pt_sleeponq = 1;
		self->pt_sleepobj = &ptr->ptr_wblocked;
		self->pt_early = pthread__rwlock_early;
		pthread__spinunlock(self, &ptr->ptr_interlock);

		error = pthread__park(self, &ptr->ptr_interlock,
		    &ptr->ptr_wblocked, ts, 0, &ptr->ptr_wblocked);

		/* Did we get the lock? */
		if (self->pt_rwlocked == _RW_LOCKED) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
		if (error != 0)
			return error;

		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "direct handoff failure");
	}
}


int
pthread_rwlock_trywrlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, next;
	pthread_t self;

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	self = pthread__self();

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		if (owner != 0)
			return EBUSY;
		next = rw_cas(ptr, owner, (uintptr_t)self | RW_WRITE_LOCKED);
		if (owner == next) {
			/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
	}
}

int
pthread_rwlock_rdlock(pthread_rwlock_t *ptr)
{

	return pthread__rwlock_rdlock(ptr, NULL);
}

int
pthread_rwlock_timedrdlock(pthread_rwlock_t *ptr,
	    const struct timespec *abs_timeout)
{

	if (abs_timeout == NULL)
		return EINVAL;
	if ((abs_timeout->tv_nsec >= 1000000000) ||
	    (abs_timeout->tv_nsec < 0) ||
	    (abs_timeout->tv_sec < 0))
		return EINVAL;

	return pthread__rwlock_rdlock(ptr, abs_timeout);
}

int
pthread_rwlock_wrlock(pthread_rwlock_t *ptr)
{

	return pthread__rwlock_wrlock(ptr, NULL);
}

int
pthread_rwlock_timedwrlock(pthread_rwlock_t *ptr,
	    const struct timespec *abs_timeout)
{

	if (abs_timeout == NULL)
		return EINVAL;
	if ((abs_timeout->tv_nsec >= 1000000000) ||
	    (abs_timeout->tv_nsec < 0) ||
	    (abs_timeout->tv_sec < 0))
		return EINVAL;

	return pthread__rwlock_wrlock(ptr, abs_timeout);
}


int
pthread_rwlock_unlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, decr, new, next;
	pthread_t self, thread;

#ifdef ERRORCHECK
	if ((ptr == NULL) || (ptr->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif

#ifndef PTHREAD__ATOMIC_IS_MEMBAR
	membar_exit();
#endif

	/*
	 * Since we used an add operation to set the required lock
	 * bits, we can use a subtract to clear them, which makes
	 * the read-release and write-release path similar.
	 */
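	/*
	 * For example: a read acquire did owner += RW_READ_INCR, so a read
	 * release subtracts RW_READ_INCR; a write acquire installed
	 * (self | RW_WRITE_LOCKED), so a write release subtracts that same
	 * value, leaving only the waiter bits (if any) behind.
	 */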
	self = pthread__self();
	owner = (uintptr_t)ptr->ptr_owner;
	if ((owner & RW_WRITE_LOCKED) != 0) {
		decr = (uintptr_t)self | RW_WRITE_LOCKED;
		if ((owner & RW_THREAD) != (uintptr_t)self) {
			return EPERM;
		}
	} else {
		decr = RW_READ_INCR;
		if (owner == 0) {
			return EPERM;
		}
	}

	for (;; owner = next) {
		/*
		 * Compute what we expect the new value of the lock to be.
		 * Only proceed to do direct handoff if there are waiters,
		 * and if the lock would become unowned.
		 */
		new = (owner - decr);
		if ((new & (RW_THREAD | RW_HAS_WAITERS)) != RW_HAS_WAITERS) {
			next = rw_cas(ptr, owner, new);
			if (owner == next) {
				/* Released! */
				return 0;
			}
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we can adjust
		 * the waiter bits.  We must check to see if there are
		 * still waiters before proceeding.
		 */
		pthread__spinlock(self, &ptr->ptr_interlock);
		owner = (uintptr_t)ptr->ptr_owner;
		if ((owner & RW_HAS_WAITERS) == 0) {
			pthread__spinunlock(self, &ptr->ptr_interlock);
			next = owner;
			continue;
		}

		/*
		 * Give the lock away.  SUSv3 dictates that we must give
		 * preference to writers.
		 */
		if ((thread = PTQ_FIRST(&ptr->ptr_wblocked)) != NULL) {
			new = (uintptr_t)thread | RW_WRITE_LOCKED;

			if (PTQ_NEXT(thread, pt_sleep) != NULL)
				new |= RW_HAS_WAITERS | RW_WRITE_WANTED;
			else if (ptr->ptr_nreaders != 0)
				new |= RW_HAS_WAITERS;

			/*
			 * Set in the new value.  The lock becomes owned
			 * by the writer that we are about to wake.
			 */
			(void)atomic_swap_ptr(&ptr->ptr_owner, (void *)new);

			/* Wake the writer. */
			PTQ_REMOVE(&ptr->ptr_wblocked, thread, pt_sleep);
			thread->pt_rwlocked = _RW_LOCKED;
			pthread__unpark(self, &ptr->ptr_interlock,
			    &ptr->ptr_wblocked, thread);
		} else {
			new = 0;
			PTQ_FOREACH(thread, &ptr->ptr_rblocked, pt_sleep) {
				/*
				 * May have already been handed the lock,
				 * since pthread__unpark_all() can release
				 * our interlock before awakening all
				 * threads.
				 */
				if (thread->pt_sleepobj == NULL)
					continue;
				new += RW_READ_INCR;
				thread->pt_rwlocked = _RW_LOCKED;
			}

			/*
			 * Set in the new value.  The lock becomes owned
			 * by the readers that we are about to wake.
			 */
			(void)atomic_swap_ptr(&ptr->ptr_owner, (void *)new);

			/* Wake up all sleeping readers. */
			ptr->ptr_nreaders = 0;
			pthread__unpark_all(self, &ptr->ptr_interlock,
			    &ptr->ptr_rblocked);
		}

		return 0;
	}
}

/*
 * Called when a timedlock awakens early to adjust the waiter bits.
 * The rwlock's interlock is held on entry, and the caller has been
 * removed from the waiters lists.
 */
static void
pthread__rwlock_early(void *obj)
{
	uintptr_t owner, set, new, next;
	pthread_rwlock_t *ptr;
	pthread_t self;
	u_int off;

	self = pthread__self();

	switch (self->pt_rwlocked) {
	case _RW_WANT_READ:
		off = offsetof(pthread_rwlock_t, ptr_rblocked);
		break;
	case _RW_WANT_WRITE:
		off = offsetof(pthread_rwlock_t, ptr_wblocked);
		break;
	default:
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "bad value of pt_rwlocked");
		off = 0;
		/* NOTREACHED */
		break;
	}

	/* LINTED mind your own business */
	ptr = (pthread_rwlock_t *)((uint8_t *)obj - off);
	owner = (uintptr_t)ptr->ptr_owner;

	if ((owner & RW_THREAD) == 0) {
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "lock not held");
	}

	if (!PTQ_EMPTY(&ptr->ptr_wblocked))
		set = RW_HAS_WAITERS | RW_WRITE_WANTED;
	else if (ptr->ptr_nreaders != 0)
		set = RW_HAS_WAITERS;
	else
		set = 0;

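	/*
	 * Replace the waiter bits with the freshly computed set; the rest
	 * of the lock word (owner / reader count) is preserved.
	 */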
	for (;; owner = next) {
		new = (owner & ~(RW_HAS_WAITERS | RW_WRITE_WANTED)) | set;
		next = rw_cas(ptr, owner, new);
		if (owner == next)
			break;
	}
}

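/*
 * Non-portable diagnostic predicates, exported via the weak aliases near
 * the top of the file: held reports whether the lock is held at all (for
 * a write-held lock, only if held by the calling thread); rdheld and
 * wrheld check the specific mode, with wrheld also requiring the caller
 * to be the owner.
 */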
int
_pthread_rwlock_held_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	if ((owner & RW_WRITE_LOCKED) != 0)
		return (owner & RW_THREAD) == (uintptr_t)pthread__self();
	return (owner & RW_THREAD) != 0;
}

int
_pthread_rwlock_rdheld_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	return (owner & RW_THREAD) != 0 && (owner & RW_WRITE_LOCKED) == 0;
}

int
_pthread_rwlock_wrheld_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	return (owner & (RW_THREAD | RW_WRITE_LOCKED)) ==
	    ((uintptr_t)pthread__self() | RW_WRITE_LOCKED);
}

int
pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
{

	if (attr == NULL)
		return EINVAL;
	attr->ptra_magic = _PT_RWLOCKATTR_MAGIC;

	return 0;
}


int
pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
{

	if ((attr == NULL) ||
	    (attr->ptra_magic != _PT_RWLOCKATTR_MAGIC))
		return EINVAL;
	attr->ptra_magic = _PT_RWLOCKATTR_DEAD;

	return 0;
}
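
/*
 * Illustrative only, not compiled: a minimal sketch of how an application
 * might use this interface, assuming a hypothetical shared counter
 * protected by a rwlock.  Readers take the lock shared; the writer takes
 * it exclusive, or gives up at an absolute deadline.
 */
#if 0
#include <pthread.h>
#include <time.h>
#include <errno.h>

static pthread_rwlock_t counter_lock = PTHREAD_RWLOCK_INITIALIZER;
static int counter;

static int
read_counter(void)
{
	int v;

	pthread_rwlock_rdlock(&counter_lock);	/* many readers may hold this */
	v = counter;
	pthread_rwlock_unlock(&counter_lock);
	return v;
}

static int
bump_counter(const struct timespec *deadline)
{
	int error;

	/* Exclusive access; fail if the absolute deadline passes first. */
	error = pthread_rwlock_timedwrlock(&counter_lock, deadline);
	if (error != 0)
		return error;		/* e.g. ETIMEDOUT */
	counter++;
	pthread_rwlock_unlock(&counter_lock);
	return 0;
}
#endif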