pthread_rwlock.c revision 1.44 1 1.44 riastrad /* $NetBSD: pthread_rwlock.c,v 1.44 2022/02/12 14:59:32 riastradh Exp $ */
2 1.2 thorpej
3 1.2 thorpej /*-
4 1.41 ad * Copyright (c) 2002, 2006, 2007, 2008, 2020 The NetBSD Foundation, Inc.
5 1.2 thorpej * All rights reserved.
6 1.2 thorpej *
7 1.2 thorpej * This code is derived from software contributed to The NetBSD Foundation
8 1.27 ad * by Nathan J. Williams, by Jason R. Thorpe, and by Andrew Doran.
9 1.2 thorpej *
10 1.2 thorpej * Redistribution and use in source and binary forms, with or without
11 1.2 thorpej * modification, are permitted provided that the following conditions
12 1.2 thorpej * are met:
13 1.2 thorpej * 1. Redistributions of source code must retain the above copyright
14 1.2 thorpej * notice, this list of conditions and the following disclaimer.
15 1.2 thorpej * 2. Redistributions in binary form must reproduce the above copyright
16 1.2 thorpej * notice, this list of conditions and the following disclaimer in the
17 1.2 thorpej * documentation and/or other materials provided with the distribution.
18 1.2 thorpej *
19 1.2 thorpej * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 1.2 thorpej * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 1.2 thorpej * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 1.2 thorpej * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 1.2 thorpej * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 1.2 thorpej * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 1.2 thorpej * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 1.2 thorpej * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 1.2 thorpej * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 1.2 thorpej * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 1.2 thorpej * POSSIBILITY OF SUCH DAMAGE.
30 1.2 thorpej */
31 1.2 thorpej
32 1.5 lukem #include <sys/cdefs.h>
33 1.44 riastrad __RCSID("$NetBSD: pthread_rwlock.c,v 1.44 2022/02/12 14:59:32 riastradh Exp $");
34 1.44 riastrad
35 1.44 riastrad /* Need to use libc-private names for atomic operations. */
36 1.44 riastrad #include "../../common/lib/libc/atomic/atomic_op_namespace.h"
37 1.30 ad
38 1.30 ad #include <sys/types.h>
39 1.30 ad #include <sys/lwpctl.h>
40 1.5 lukem
41 1.35 uwe #include <assert.h>
42 1.33 christos #include <time.h>
43 1.2 thorpej #include <errno.h>
44 1.27 ad #include <stddef.h>
45 1.2 thorpej
46 1.2 thorpej #include "pthread.h"
47 1.2 thorpej #include "pthread_int.h"
48 1.33 christos #include "reentrant.h"
49 1.2 thorpej
50 1.27 ad #define _RW_LOCKED 0
51 1.27 ad #define _RW_WANT_WRITE 1
52 1.27 ad #define _RW_WANT_READ 2
53 1.27 ad
54 1.30 ad #if __GNUC_PREREQ__(3, 0)
55 1.30 ad #define NOINLINE __attribute ((noinline))
56 1.30 ad #else
57 1.30 ad #define NOINLINE /* nothing */
58 1.30 ad #endif
59 1.30 ad
60 1.27 ad static int pthread__rwlock_wrlock(pthread_rwlock_t *, const struct timespec *);
61 1.27 ad static int pthread__rwlock_rdlock(pthread_rwlock_t *, const struct timespec *);
62 1.41 ad static void pthread__rwlock_early(pthread_t, pthread_rwlock_t *,
63 1.41 ad pthread_mutex_t *);
64 1.24 christos
65 1.23 ad int _pthread_rwlock_held_np(pthread_rwlock_t *);
66 1.23 ad int _pthread_rwlock_rdheld_np(pthread_rwlock_t *);
67 1.23 ad int _pthread_rwlock_wrheld_np(pthread_rwlock_t *);
68 1.23 ad
69 1.27 ad #ifndef lint
70 1.32 yamt __weak_alias(pthread_rwlock_held_np,_pthread_rwlock_held_np)
71 1.32 yamt __weak_alias(pthread_rwlock_rdheld_np,_pthread_rwlock_rdheld_np)
72 1.32 yamt __weak_alias(pthread_rwlock_wrheld_np,_pthread_rwlock_wrheld_np)
73 1.27 ad #endif
74 1.27 ad
75 1.2 thorpej __strong_alias(__libc_rwlock_init,pthread_rwlock_init)
76 1.2 thorpej __strong_alias(__libc_rwlock_rdlock,pthread_rwlock_rdlock)
77 1.2 thorpej __strong_alias(__libc_rwlock_wrlock,pthread_rwlock_wrlock)
78 1.2 thorpej __strong_alias(__libc_rwlock_tryrdlock,pthread_rwlock_tryrdlock)
79 1.2 thorpej __strong_alias(__libc_rwlock_trywrlock,pthread_rwlock_trywrlock)
80 1.2 thorpej __strong_alias(__libc_rwlock_unlock,pthread_rwlock_unlock)
81 1.2 thorpej __strong_alias(__libc_rwlock_destroy,pthread_rwlock_destroy)
82 1.2 thorpej
83 1.27 ad static inline uintptr_t
84 1.27 ad rw_cas(pthread_rwlock_t *ptr, uintptr_t o, uintptr_t n)
85 1.27 ad {
86 1.27 ad
87 1.27 ad return (uintptr_t)atomic_cas_ptr(&ptr->ptr_owner, (void *)o,
88 1.27 ad (void *)n);
89 1.27 ad }
90 1.27 ad
/*
 * pthread_rwlock_init: initialize a rwlock to the unlocked state.
 *
 * Validates the (optional) attribute object, then initializes the
 * magic number, the reader/writer sleep queues, the blocked-reader
 * count and the owner word.  Returns 0 on this path.
 */
int
pthread_rwlock_init(pthread_rwlock_t *ptr,
	    const pthread_rwlockattr_t *attr)
{
	/*
	 * NOTE(review): presumably __uselibcstub is set while the
	 * process is still single-threaded, diverting to a cheap libc
	 * stub — confirm against libc's rwlock stubs.
	 */
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_init_stub(ptr, attr);

	/* A NULL attr is permitted and means "default attributes". */
	pthread__error(EINVAL, "Invalid rwlock attribute",
	    attr == NULL || attr->ptra_magic == _PT_RWLOCKATTR_MAGIC);

	ptr->ptr_magic = _PT_RWLOCK_MAGIC;
	PTQ_INIT(&ptr->ptr_rblocked);
	PTQ_INIT(&ptr->ptr_wblocked);
	ptr->ptr_nreaders = 0;
	ptr->ptr_owner = NULL;

	return 0;
}
109 1.2 thorpej
110 1.2 thorpej
/*
 * pthread_rwlock_destroy: tear down a rwlock.
 *
 * Fails with EINVAL if the magic number is wrong, if any thread is
 * still queued on the lock, or if the lock is still owned.  On
 * success the magic number is set to a "dead" value so that later
 * use of the destroyed lock can be detected.
 */
int
pthread_rwlock_destroy(pthread_rwlock_t *ptr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_destroy_stub(ptr);

	pthread__error(EINVAL, "Invalid rwlock",
	    ptr->ptr_magic == _PT_RWLOCK_MAGIC);

	/* Refuse to destroy a lock that is busy in any way. */
	if ((!PTQ_EMPTY(&ptr->ptr_rblocked)) ||
	    (!PTQ_EMPTY(&ptr->ptr_wblocked)) ||
	    (ptr->ptr_nreaders != 0) ||
	    (ptr->ptr_owner != NULL))
		return EINVAL;
	ptr->ptr_magic = _PT_RWLOCK_DEAD;

	return 0;
}
129 1.2 thorpej
/*
 * We want function call overhead: keeping this out of line (NOINLINE)
 * makes each spin iteration cost a real call in addition to the SMT
 * pause instruction.
 */
NOINLINE static void
pthread__rwlock_pause(void)
{

	pthread__smt_pause();
}
137 1.30 ad
/*
 * Decide whether briefly busy-waiting is worthwhile, and if so, spin.
 * Returns non-zero after spinning when the lock is write-held with no
 * waiter/flag bits set and the owning thread is currently running on
 * a CPU; returns 0 when the caller should sleep instead.
 */
NOINLINE static int
pthread__rwlock_spin(uintptr_t owner)
{
	pthread_t thread;
	unsigned int i;

	/* Only spin when write-locked with no other flag bits set. */
	if ((owner & ~RW_THREAD) != RW_WRITE_LOCKED)
		return 0;

	/* Don't spin if the owner is not running on a CPU right now. */
	thread = (pthread_t)(owner & RW_THREAD);
	if (__predict_false(thread == NULL) ||
	    thread->pt_lwpctl->lc_curcpu == LWPCTL_CPU_NONE)
		return 0;

	/* Brief fixed-length backoff before re-sampling the owner. */
	for (i = 128; i != 0; i--)
		pthread__rwlock_pause();
	return 1;
}
156 1.30 ad
/*
 * pthread__rwlock_rdlock: acquire a read hold, sleeping until the
 * absolute timeout "ts" (NULL means wait forever) when the lock is
 * not immediately available.  Returns 0 on success, EDEADLK if the
 * caller already write-holds the lock, or the error returned by
 * pthread__park() (e.g. a timeout).
 */
static int
pthread__rwlock_rdlock(pthread_rwlock_t *ptr, const struct timespec *ts)
{
	uintptr_t owner, next;
	pthread_mutex_t *interlock;
	pthread_t self;
	int error;

	pthread__error(EINVAL, "Invalid rwlock",
	    ptr->ptr_magic == _PT_RWLOCK_MAGIC);

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear, then try to acquire the lock.
		 */
		if ((owner & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) == 0) {
			next = rw_cas(ptr, owner, owner + RW_READ_INCR);
			if (owner == next) {
				/* Got it! */
#ifndef	PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			continue;
		}

		self = pthread__self();
		if ((owner & RW_THREAD) == (uintptr_t)self)
			return EDEADLK;

		/* If held write locked and no waiters, spin. */
		if (pthread__rwlock_spin(owner)) {
			while (pthread__rwlock_spin(owner)) {
				owner = (uintptr_t)ptr->ptr_owner;
			}
			next = owner;
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);

		/*
		 * Mark the rwlock as having waiters.  If the set fails,
		 * then we may not need to sleep and should spin again.
		 */
		next = rw_cas(ptr, owner, owner | RW_HAS_WAITERS);
		if (owner != next) {
			pthread_mutex_unlock(interlock);
			continue;
		}

		/* The waiters bit is set - it's safe to sleep. */
		PTQ_INSERT_HEAD(&ptr->ptr_rblocked, self, pt_sleep);
		ptr->ptr_nreaders++;
		self->pt_rwlocked = _RW_WANT_READ;
		self->pt_sleepobj = &ptr->ptr_rblocked;
		error = pthread__park(self, interlock, &ptr->ptr_rblocked,
		    ts, 0);

		/*
		 * Still on a sleep queue after waking (timeout or
		 * spurious wakeup): dequeue ourselves and repair the
		 * waiter bits.
		 */
		if (self->pt_sleepobj != NULL) {
			pthread__rwlock_early(self, ptr, interlock);
		}

		/* Did we get the lock (direct handoff from unlock)? */
		if (self->pt_rwlocked == _RW_LOCKED) {
			membar_enter();
			return 0;
		}
		if (error != 0)
			return error;

		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "direct handoff failure");
	}
}
244 1.2 thorpej
245 1.2 thorpej
/*
 * pthread_rwlock_tryrdlock: take a read hold without blocking.
 * Returns 0 on success, EBUSY if a writer holds or wants the lock.
 */
int
pthread_rwlock_tryrdlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, next;

	if (__predict_false(__uselibcstub))
		return __libc_rwlock_tryrdlock_stub(ptr);

	pthread__error(EINVAL, "Invalid rwlock",
	    ptr->ptr_magic == _PT_RWLOCK_MAGIC);

	/*
	 * Don't get a readlock if there is a writer or if there are waiting
	 * writers; i.e. prefer writers to readers.  This strategy is dictated
	 * by SUSv3.
	 */
	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		if ((owner & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) != 0)
			return EBUSY;
		/* Add one reader reference; retry on CAS failure. */
		next = rw_cas(ptr, owner, owner + RW_READ_INCR);
		if (owner == next) {
			/* Got it! */
#ifndef	PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
	}
}
275 1.2 thorpej
/*
 * pthread__rwlock_wrlock: acquire the write lock, sleeping until the
 * absolute timeout "ts" (NULL means wait forever) when the lock is
 * not immediately available.  Returns 0 on success, EDEADLK if the
 * caller already holds the lock, or the error returned by
 * pthread__park() (e.g. a timeout).
 */
static int
pthread__rwlock_wrlock(pthread_rwlock_t *ptr, const struct timespec *ts)
{
	uintptr_t owner, next;
	pthread_mutex_t *interlock;
	pthread_t self;
	int error;

	self = pthread__self();
	/* Thread pointers must leave the flag bits clear. */
	_DIAGASSERT(((uintptr_t)self & RW_FLAGMASK) == 0);

	pthread__error(EINVAL, "Invalid rwlock",
	    ptr->ptr_magic == _PT_RWLOCK_MAGIC);

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear, then try to acquire the lock.
		 */
		if ((owner & RW_THREAD) == 0) {
			next = rw_cas(ptr, owner,
			    (uintptr_t)self | RW_WRITE_LOCKED);
			if (owner == next) {
				/* Got it! */
#ifndef	PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			continue;
		}

		if ((owner & RW_THREAD) == (uintptr_t)self)
			return EDEADLK;

		/* If held write locked and no waiters, spin. */
		if (pthread__rwlock_spin(owner)) {
			while (pthread__rwlock_spin(owner)) {
				owner = (uintptr_t)ptr->ptr_owner;
			}
			next = owner;
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);

		/*
		 * Mark the rwlock as having waiters.  If the set fails,
		 * then we may not need to sleep and should spin again.
		 */
		next = rw_cas(ptr, owner,
		    owner | RW_HAS_WAITERS | RW_WRITE_WANTED);
		if (owner != next) {
			pthread_mutex_unlock(interlock);
			continue;
		}

		/* The waiters bit is set - it's safe to sleep. */
		PTQ_INSERT_TAIL(&ptr->ptr_wblocked, self, pt_sleep);
		self->pt_rwlocked = _RW_WANT_WRITE;
		self->pt_sleepobj = &ptr->ptr_wblocked;
		error = pthread__park(self, interlock, &ptr->ptr_wblocked,
		    ts, 0);

		/*
		 * Still on a sleep queue after waking (timeout or
		 * spurious wakeup): dequeue ourselves and repair the
		 * waiter bits.
		 */
		if (self->pt_sleepobj != NULL) {
			pthread__rwlock_early(self, ptr, interlock);
		}

		/* Did we get the lock (direct handoff from unlock)? */
		if (self->pt_rwlocked == _RW_LOCKED) {
			membar_enter();
			return 0;
		}
		if (error != 0)
			return error;

		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "direct handoff failure: %d", errno);
	}
}
366 1.2 thorpej
/*
 * pthread_rwlock_trywrlock: take the write lock without blocking.
 * Returns 0 on success, EBUSY if the lock is held or wanted by anyone.
 */
int
pthread_rwlock_trywrlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, next;
	pthread_t self;

	if (__predict_false(__uselibcstub))
		return __libc_rwlock_trywrlock_stub(ptr);

	pthread__error(EINVAL, "Invalid rwlock",
	    ptr->ptr_magic == _PT_RWLOCK_MAGIC);

	self = pthread__self();
	/* Thread pointers must leave the flag bits clear. */
	_DIAGASSERT(((uintptr_t)self & RW_FLAGMASK) == 0);

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		/* Any owner, reader count, or flag at all means busy. */
		if (owner != 0)
			return EBUSY;
		next = rw_cas(ptr, owner, (uintptr_t)self | RW_WRITE_LOCKED);
		if (owner == next) {
			/* Got it! */
#ifndef	PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
	}
}
395 1.2 thorpej
/*
 * pthread_rwlock_rdlock: blocking read lock with no timeout.
 */
int
pthread_rwlock_rdlock(pthread_rwlock_t *ptr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_rdlock_stub(ptr);

	return pthread__rwlock_rdlock(ptr, NULL);
}
404 1.2 thorpej
405 1.2 thorpej int
406 1.27 ad pthread_rwlock_timedrdlock(pthread_rwlock_t *ptr,
407 1.27 ad const struct timespec *abs_timeout)
408 1.2 thorpej {
409 1.10 nathanw if (abs_timeout == NULL)
410 1.2 thorpej return EINVAL;
411 1.10 nathanw if ((abs_timeout->tv_nsec >= 1000000000) ||
412 1.10 nathanw (abs_timeout->tv_nsec < 0) ||
413 1.10 nathanw (abs_timeout->tv_sec < 0))
414 1.10 nathanw return EINVAL;
415 1.12 chs
416 1.27 ad return pthread__rwlock_rdlock(ptr, abs_timeout);
417 1.27 ad }
418 1.2 thorpej
/*
 * pthread_rwlock_wrlock: blocking write lock with no timeout.
 */
int
pthread_rwlock_wrlock(pthread_rwlock_t *ptr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_wrlock_stub(ptr);

	return pthread__rwlock_wrlock(ptr, NULL);
}
427 1.2 thorpej
428 1.2 thorpej int
429 1.27 ad pthread_rwlock_timedwrlock(pthread_rwlock_t *ptr,
430 1.27 ad const struct timespec *abs_timeout)
431 1.2 thorpej {
432 1.10 nathanw if (abs_timeout == NULL)
433 1.10 nathanw return EINVAL;
434 1.10 nathanw if ((abs_timeout->tv_nsec >= 1000000000) ||
435 1.10 nathanw (abs_timeout->tv_nsec < 0) ||
436 1.10 nathanw (abs_timeout->tv_sec < 0))
437 1.10 nathanw return EINVAL;
438 1.12 chs
439 1.27 ad return pthread__rwlock_wrlock(ptr, abs_timeout);
440 1.2 thorpej }
441 1.2 thorpej
442 1.2 thorpej
/*
 * pthread_rwlock_unlock: release one hold on the lock.
 *
 * In the common case a single CAS subtracting this caller's
 * contribution (one reader reference, or the writer's thread pointer
 * plus the write bit) releases the lock.  If the lock would become
 * unowned while waiters are present, ownership is handed off
 * directly under the interlock: to the first blocked writer if any
 * (writers are preferred, per SUSv3), otherwise to all blocked
 * readers at once.  Returns 0, or EPERM if the caller does not hold
 * the lock.
 */
int
pthread_rwlock_unlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, decr, new, next;
	pthread_mutex_t *interlock;
	pthread_t self, thread;

	if (__predict_false(__uselibcstub))
		return __libc_rwlock_unlock_stub(ptr);

	pthread__error(EINVAL, "Invalid rwlock",
	    ptr->ptr_magic == _PT_RWLOCK_MAGIC);

#ifndef	PTHREAD__ATOMIC_IS_MEMBAR
	/* Release barrier: order critical section before the release. */
	membar_exit();
#endif

	/*
	 * Since we used an add operation to set the required lock
	 * bits, we can use a subtract to clear them, which makes
	 * the read-release and write-release path similar.
	 */
	owner = (uintptr_t)ptr->ptr_owner;
	if ((owner & RW_WRITE_LOCKED) != 0) {
		self = pthread__self();
		decr = (uintptr_t)self | RW_WRITE_LOCKED;
		if ((owner & RW_THREAD) != (uintptr_t)self) {
			return EPERM;
		}
	} else {
		decr = RW_READ_INCR;
		if (owner == 0) {
			return EPERM;
		}
	}

	for (;; owner = next) {
		/*
		 * Compute what we expect the new value of the lock to be.
		 * Only proceed to do direct handoff if there are waiters,
		 * and if the lock would become unowned.
		 */
		new = (owner - decr);
		if ((new & (RW_THREAD | RW_HAS_WAITERS)) != RW_HAS_WAITERS) {
			next = rw_cas(ptr, owner, new);
			if (owner == next) {
				/* Released! */
				return 0;
			}
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we can adjust
		 * the waiter bits.  We must check to see if there are
		 * still waiters before proceeding.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);
		owner = (uintptr_t)ptr->ptr_owner;
		if ((owner & RW_HAS_WAITERS) == 0) {
			pthread_mutex_unlock(interlock);
			next = owner;
			continue;
		}

		/*
		 * Give the lock away.  SUSv3 dictates that we must give
		 * preference to writers.
		 */
		self = pthread__self();
		if ((thread = PTQ_FIRST(&ptr->ptr_wblocked)) != NULL) {
			_DIAGASSERT(((uintptr_t)thread & RW_FLAGMASK) == 0);
			new = (uintptr_t)thread | RW_WRITE_LOCKED;

			/* Preserve waiter bits for those left behind. */
			if (PTQ_NEXT(thread, pt_sleep) != NULL)
				new |= RW_HAS_WAITERS | RW_WRITE_WANTED;
			else if (ptr->ptr_nreaders != 0)
				new |= RW_HAS_WAITERS;

			/*
			 * Set in the new value.  The lock becomes owned
			 * by the writer that we are about to wake.
			 */
			(void)atomic_swap_ptr(&ptr->ptr_owner, (void *)new);
#ifndef	PTHREAD__ATOMIC_IS_MEMBAR
			/* Publish ownership before waking the writer. */
			membar_exit();
#endif

			/* Wake the writer. */
			thread->pt_rwlocked = _RW_LOCKED;
			pthread__unpark(&ptr->ptr_wblocked, self,
			    interlock);
		} else {
			new = 0;
			PTQ_FOREACH(thread, &ptr->ptr_rblocked, pt_sleep) {
				/*
				 * May have already been handed the lock,
				 * since pthread__unpark_all() can release
				 * our interlock before awakening all
				 * threads.
				 */
				if (thread->pt_sleepobj == NULL)
					continue;
				new += RW_READ_INCR;
				/* Order stores before the handoff flag. */
				membar_exit();
				thread->pt_rwlocked = _RW_LOCKED;
			}

			/*
			 * Set in the new value.  The lock becomes owned
			 * by the readers that we are about to wake.
			 */
			(void)atomic_swap_ptr(&ptr->ptr_owner, (void *)new);

			/* Wake up all sleeping readers. */
			ptr->ptr_nreaders = 0;
			pthread__unpark_all(&ptr->ptr_rblocked, self,
			    interlock);
		}
		pthread_mutex_unlock(interlock);

		return 0;
	}
}
568 1.27 ad
/*
 * Called when a timed lock request wakes without having been granted
 * the lock (pt_sleepobj is still set).  Takes the rwlock's interlock
 * itself, removes the caller from the sleep queue it is still on, and
 * recomputes the waiter bits from whatever waiters remain.
 *
 * (The previous comment here claimed the interlock was held on entry
 * and the caller already dequeued; the code below does both itself.)
 *
 * NOTE(review): ptr_nreaders is not decremented when the caller
 * leaves the reader queue — verify this against the accounting in
 * pthread_rwlock_unlock(), which zeroes it when waking readers.
 */
static void
pthread__rwlock_early(pthread_t self, pthread_rwlock_t *ptr,
    pthread_mutex_t *interlock)
{
	uintptr_t owner, set, newval, next;
	pthread_queue_t *queue;

	pthread_mutex_lock(interlock);
	/* Lost a race with a handoff: nothing to clean up. */
	if ((queue = self->pt_sleepobj) == NULL) {
		pthread_mutex_unlock(interlock);
		return;
	}
	PTQ_REMOVE(queue, self, pt_sleep);
	self->pt_sleepobj = NULL;
	owner = (uintptr_t)ptr->ptr_owner;

	if ((owner & RW_THREAD) == 0) {
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "lock not held");
	}

	/* Recompute the waiter bits from the remaining queues. */
	if (!PTQ_EMPTY(&ptr->ptr_wblocked))
		set = RW_HAS_WAITERS | RW_WRITE_WANTED;
	else if (ptr->ptr_nreaders != 0)
		set = RW_HAS_WAITERS;
	else
		set = 0;

	for (;; owner = next) {
		newval = (owner & ~(RW_HAS_WAITERS | RW_WRITE_WANTED)) | set;
		next = rw_cas(ptr, owner, newval);
		if (owner == next)
			break;
	}
	pthread_mutex_unlock(interlock);
}
610 1.2 thorpej
611 1.2 thorpej int
612 1.27 ad _pthread_rwlock_held_np(pthread_rwlock_t *ptr)
613 1.2 thorpej {
614 1.27 ad uintptr_t owner = (uintptr_t)ptr->ptr_owner;
615 1.2 thorpej
616 1.28 ad if ((owner & RW_WRITE_LOCKED) != 0)
617 1.28 ad return (owner & RW_THREAD) == (uintptr_t)pthread__self();
618 1.27 ad return (owner & RW_THREAD) != 0;
619 1.2 thorpej }
620 1.2 thorpej
621 1.2 thorpej int
622 1.27 ad _pthread_rwlock_rdheld_np(pthread_rwlock_t *ptr)
623 1.2 thorpej {
624 1.27 ad uintptr_t owner = (uintptr_t)ptr->ptr_owner;
625 1.2 thorpej
626 1.27 ad return (owner & RW_THREAD) != 0 && (owner & RW_WRITE_LOCKED) == 0;
627 1.2 thorpej }
628 1.21 ad
629 1.23 ad int
630 1.27 ad _pthread_rwlock_wrheld_np(pthread_rwlock_t *ptr)
631 1.23 ad {
632 1.27 ad uintptr_t owner = (uintptr_t)ptr->ptr_owner;
633 1.23 ad
634 1.27 ad return (owner & (RW_THREAD | RW_WRITE_LOCKED)) ==
635 1.27 ad ((uintptr_t)pthread__self() | RW_WRITE_LOCKED);
636 1.23 ad }
637 1.23 ad
638 1.34 christos #ifdef _PTHREAD_PSHARED
639 1.34 christos int
640 1.34 christos pthread_rwlockattr_getpshared(const pthread_rwlockattr_t * __restrict attr,
641 1.34 christos int * __restrict pshared)
642 1.34 christos {
643 1.38 kamil
644 1.38 kamil pthread__error(EINVAL, "Invalid rwlock attribute",
645 1.38 kamil ptr->ptra_magic == _PT_RWLOCKATTR_MAGIC);
646 1.38 kamil
647 1.34 christos *pshared = PTHREAD_PROCESS_PRIVATE;
648 1.34 christos return 0;
649 1.34 christos }
650 1.34 christos
651 1.34 christos int
652 1.34 christos pthread_rwlockattr_setpshared(pthread_rwlockattr_t *attr, int pshared)
653 1.34 christos {
654 1.34 christos
655 1.38 kamil pthread__error(EINVAL, "Invalid rwlock attribute",
656 1.38 kamil ptr->ptra_magic == _PT_RWLOCKATTR_MAGIC);
657 1.38 kamil
658 1.34 christos switch(pshared) {
659 1.34 christos case PTHREAD_PROCESS_PRIVATE:
660 1.34 christos return 0;
661 1.34 christos case PTHREAD_PROCESS_SHARED:
662 1.34 christos return ENOSYS;
663 1.34 christos }
664 1.34 christos return EINVAL;
665 1.34 christos }
666 1.34 christos #endif
667 1.34 christos
668 1.23 ad int
669 1.27 ad pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
670 1.23 ad {
671 1.23 ad
672 1.27 ad if (attr == NULL)
673 1.27 ad return EINVAL;
674 1.27 ad attr->ptra_magic = _PT_RWLOCKATTR_MAGIC;
675 1.27 ad
676 1.27 ad return 0;
677 1.23 ad }
678 1.23 ad
679 1.27 ad
680 1.23 ad int
681 1.27 ad pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
682 1.23 ad {
683 1.23 ad
684 1.38 kamil pthread__error(EINVAL, "Invalid rwlock attribute",
685 1.38 kamil attr->ptra_magic == _PT_RWLOCKATTR_MAGIC);
686 1.38 kamil
687 1.27 ad attr->ptra_magic = _PT_RWLOCKATTR_DEAD;
688 1.27 ad
689 1.27 ad return 0;
690 1.23 ad }
691