/*	$NetBSD: pthread_rwlock.c,v 1.34.18.1 2020/01/26 10:55:16 martin Exp $	*/

/*-
 * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, by Jason R. Thorpe, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_rwlock.c,v 1.34.18.1 2020/01/26 10:55:16 martin Exp $");

#include <sys/types.h>
#include <sys/lwpctl.h>

#include <time.h>
#include <errno.h>
#include <stddef.h>

#include "pthread.h"
#include "pthread_int.h"
#include "reentrant.h"

#define	_RW_LOCKED		0
#define	_RW_WANT_WRITE		1
#define	_RW_WANT_READ		2

#if __GNUC_PREREQ__(3, 0)
#define	NOINLINE		__attribute ((noinline))
#else
#define	NOINLINE		/* nothing */
#endif

static int pthread__rwlock_wrlock(pthread_rwlock_t *, const struct timespec *);
static int pthread__rwlock_rdlock(pthread_rwlock_t *, const struct timespec *);
static void pthread__rwlock_early(void *);

int	_pthread_rwlock_held_np(pthread_rwlock_t *);
int	_pthread_rwlock_rdheld_np(pthread_rwlock_t *);
int	_pthread_rwlock_wrheld_np(pthread_rwlock_t *);

#ifndef lint
__weak_alias(pthread_rwlock_held_np,_pthread_rwlock_held_np)
__weak_alias(pthread_rwlock_rdheld_np,_pthread_rwlock_rdheld_np)
__weak_alias(pthread_rwlock_wrheld_np,_pthread_rwlock_wrheld_np)
#endif

__strong_alias(__libc_rwlock_init,pthread_rwlock_init)
__strong_alias(__libc_rwlock_rdlock,pthread_rwlock_rdlock)
__strong_alias(__libc_rwlock_wrlock,pthread_rwlock_wrlock)
__strong_alias(__libc_rwlock_tryrdlock,pthread_rwlock_tryrdlock)
__strong_alias(__libc_rwlock_trywrlock,pthread_rwlock_trywrlock)
__strong_alias(__libc_rwlock_unlock,pthread_rwlock_unlock)
__strong_alias(__libc_rwlock_destroy,pthread_rwlock_destroy)

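/*
 * Atomically compare-and-swap the lock's owner word.  Returns the owner
 * value actually found; the swap succeeded iff that equals 'o'.  Note
 * that this provides no memory barrier of its own: unless the platform
 * defines PTHREAD__ATOMIC_IS_MEMBAR, callers issue membar_enter() on
 * acquire and membar_exit() on release themselves.
 */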
static inline uintptr_t
rw_cas(pthread_rwlock_t *ptr, uintptr_t o, uintptr_t n)
{

	return (uintptr_t)atomic_cas_ptr(&ptr->ptr_owner, (void *)o,
	    (void *)n);
}

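/*
 * Initialize a rwlock: validate the attribute object if one is given,
 * then set up the magic number, the empty reader/writer sleep queues,
 * and an unowned lock word.
 */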
int
pthread_rwlock_init(pthread_rwlock_t *ptr,
    const pthread_rwlockattr_t *attr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_init_stub(ptr, attr);

	if (attr && (attr->ptra_magic != _PT_RWLOCKATTR_MAGIC))
		return EINVAL;
	ptr->ptr_magic = _PT_RWLOCK_MAGIC;
	PTQ_INIT(&ptr->ptr_rblocked);
	PTQ_INIT(&ptr->ptr_wblocked);
	ptr->ptr_nreaders = 0;
	ptr->ptr_owner = NULL;

	return 0;
}


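/*
 * Destroy a rwlock.  Fails with EINVAL if the lock was never
 * initialized, is still held, or has sleeping waiters.
 */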
int
pthread_rwlock_destroy(pthread_rwlock_t *ptr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_destroy_stub(ptr);

	if ((ptr->ptr_magic != _PT_RWLOCK_MAGIC) ||
	    (!PTQ_EMPTY(&ptr->ptr_rblocked)) ||
	    (!PTQ_EMPTY(&ptr->ptr_wblocked)) ||
	    (ptr->ptr_nreaders != 0) ||
	    (ptr->ptr_owner != NULL))
		return EINVAL;
	ptr->ptr_magic = _PT_RWLOCK_DEAD;

	return 0;
}

/* We want function call overhead. */
NOINLINE static void
pthread__rwlock_pause(void)
{

	pthread__smt_pause();
}

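/*
 * Decide whether it is worth spinning while waiting for the lock.
 * Spin only if the lock is write held with no waiter bits set and the
 * owning LWP is currently running on a CPU; otherwise sleeping is the
 * better strategy.  When spinning pays off, burn a fixed number of
 * pause instructions before the caller re-examines the owner word.
 */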
NOINLINE static int
pthread__rwlock_spin(uintptr_t owner)
{
	pthread_t thread;
	unsigned int i;

	thread = (pthread_t)(owner & RW_THREAD);
	if (thread == NULL || (owner & ~RW_THREAD) != RW_WRITE_LOCKED)
		return 0;
	if (thread->pt_lwpctl->lc_curcpu == LWPCTL_CPU_NONE)
		return 0;
	for (i = 128; i != 0; i--)
		pthread__rwlock_pause();
	return 1;
}

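/*
 * Acquire a read hold, sleeping until 'ts' (an absolute timeout, or
 * NULL for no timeout) if a writer holds or wants the lock.  Read
 * holds are counted by adding RW_READ_INCR to the owner word; on
 * wakeup the lock has been handed off to us directly, and pt_rwlocked
 * records whether that handoff happened.
 */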
static int
pthread__rwlock_rdlock(pthread_rwlock_t *ptr, const struct timespec *ts)
{
	uintptr_t owner, next;
	pthread_mutex_t *interlock;
	pthread_t self;
	int error;

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear, then try to acquire the lock.
		 */
		if ((owner & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) == 0) {
			next = rw_cas(ptr, owner, owner + RW_READ_INCR);
			if (owner == next) {
				/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			continue;
		}

		self = pthread__self();
		if ((owner & RW_THREAD) == (uintptr_t)self)
			return EDEADLK;

		/* If held write locked and no waiters, spin. */
		if (pthread__rwlock_spin(owner)) {
			while (pthread__rwlock_spin(owner)) {
				owner = (uintptr_t)ptr->ptr_owner;
			}
			next = owner;
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);

		/*
		 * Mark the rwlock as having waiters.  If the set fails,
		 * then we may not need to sleep and should spin again.
		 */
		next = rw_cas(ptr, owner, owner | RW_HAS_WAITERS);
		if (owner != next) {
			pthread_mutex_unlock(interlock);
			continue;
		}

		/* The waiters bit is set - it's safe to sleep. */
		PTQ_INSERT_HEAD(&ptr->ptr_rblocked, self, pt_sleep);
		ptr->ptr_nreaders++;
		self->pt_rwlocked = _RW_WANT_READ;
		self->pt_sleepobj = &ptr->ptr_rblocked;
		self->pt_early = pthread__rwlock_early;
		error = pthread__park(self, interlock, &ptr->ptr_rblocked,
		    ts, 0, &ptr->ptr_rblocked);

		/* Did we get the lock? */
		if (self->pt_rwlocked == _RW_LOCKED) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
		if (error != 0)
			return error;

		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "direct handoff failure");
	}
}


int
pthread_rwlock_tryrdlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, next;

	if (__predict_false(__uselibcstub))
		return __libc_rwlock_tryrdlock_stub(ptr);

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	/*
	 * Don't get a readlock if there is a writer or if there are waiting
	 * writers; i.e. prefer writers to readers.  This strategy is
	 * dictated by SUSv3.
	 */
	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		if ((owner & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) != 0)
			return EBUSY;
		next = rw_cas(ptr, owner, owner + RW_READ_INCR);
		if (owner == next) {
			/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
	}
}

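/*
 * Acquire the write lock, sleeping until 'ts' (an absolute timeout, or
 * NULL for none) while the lock is owned.  A writer stores its own
 * thread pointer in the owner word together with RW_WRITE_LOCKED, so
 * ownership can be verified on unlock.
 */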
static int
pthread__rwlock_wrlock(pthread_rwlock_t *ptr, const struct timespec *ts)
{
	uintptr_t owner, next;
	pthread_mutex_t *interlock;
	pthread_t self;
	int error;

	self = pthread__self();

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear, then try to acquire the lock.
		 */
		if ((owner & RW_THREAD) == 0) {
			next = rw_cas(ptr, owner,
			    (uintptr_t)self | RW_WRITE_LOCKED);
			if (owner == next) {
				/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			continue;
		}

		if ((owner & RW_THREAD) == (uintptr_t)self)
			return EDEADLK;

		/* If held write locked and no waiters, spin. */
		if (pthread__rwlock_spin(owner)) {
			while (pthread__rwlock_spin(owner)) {
				owner = (uintptr_t)ptr->ptr_owner;
			}
			next = owner;
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);

		/*
		 * Mark the rwlock as having waiters.  If the set fails,
		 * then we may not need to sleep and should spin again.
		 */
		next = rw_cas(ptr, owner,
		    owner | RW_HAS_WAITERS | RW_WRITE_WANTED);
		if (owner != next) {
			pthread_mutex_unlock(interlock);
			continue;
		}

		/* The waiters bit is set - it's safe to sleep. */
		PTQ_INSERT_TAIL(&ptr->ptr_wblocked, self, pt_sleep);
		self->pt_rwlocked = _RW_WANT_WRITE;
		self->pt_sleepobj = &ptr->ptr_wblocked;
		self->pt_early = pthread__rwlock_early;
		error = pthread__park(self, interlock, &ptr->ptr_wblocked,
		    ts, 0, &ptr->ptr_wblocked);

		/* Did we get the lock? */
		if (self->pt_rwlocked == _RW_LOCKED) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
		if (error != 0)
			return error;

		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "direct handoff failure");
	}
}


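/*
 * Try to take the write lock without blocking; this succeeds only if
 * the lock is completely unowned (no readers, no writer, no waiters).
 */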
int
pthread_rwlock_trywrlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, next;
	pthread_t self;

	if (__predict_false(__uselibcstub))
		return __libc_rwlock_trywrlock_stub(ptr);

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	self = pthread__self();

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		if (owner != 0)
			return EBUSY;
		next = rw_cas(ptr, owner, (uintptr_t)self | RW_WRITE_LOCKED);
		if (owner == next) {
			/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
	}
}

int
pthread_rwlock_rdlock(pthread_rwlock_t *ptr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_rdlock_stub(ptr);

	return pthread__rwlock_rdlock(ptr, NULL);
}

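/*
 * The timed variants take an absolute CLOCK_REALTIME timeout, per
 * POSIX, and reject NULL or out-of-range timespec values up front.
 * A caller sketch (illustrative only):
 *
 *	struct timespec ts;
 *	clock_gettime(CLOCK_REALTIME, &ts);
 *	ts.tv_sec += 5;		(give up after roughly five seconds)
 *	error = pthread_rwlock_timedrdlock(&lock, &ts);
 *	if (error == ETIMEDOUT)
 *		(the lock could not be acquired in time)
 */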
int
pthread_rwlock_timedrdlock(pthread_rwlock_t *ptr,
    const struct timespec *abs_timeout)
{
	if (abs_timeout == NULL)
		return EINVAL;
	if ((abs_timeout->tv_nsec >= 1000000000) ||
	    (abs_timeout->tv_nsec < 0) ||
	    (abs_timeout->tv_sec < 0))
		return EINVAL;

	return pthread__rwlock_rdlock(ptr, abs_timeout);
}

int
pthread_rwlock_wrlock(pthread_rwlock_t *ptr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_wrlock_stub(ptr);

	return pthread__rwlock_wrlock(ptr, NULL);
}

int
pthread_rwlock_timedwrlock(pthread_rwlock_t *ptr,
    const struct timespec *abs_timeout)
{
	if (abs_timeout == NULL)
		return EINVAL;
	if ((abs_timeout->tv_nsec >= 1000000000) ||
	    (abs_timeout->tv_nsec < 0) ||
	    (abs_timeout->tv_sec < 0))
		return EINVAL;

	return pthread__rwlock_wrlock(ptr, abs_timeout);
}


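/*
 * Release one hold on the lock.  If the releasing hold is the last and
 * there are sleepers, ownership is handed off directly under the
 * interlock: to the first queued writer if any (writers take priority
 * per SUSv3), otherwise to every sleeping reader at once.
 */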
436
437 int
438 pthread_rwlock_unlock(pthread_rwlock_t *ptr)
439 {
440 uintptr_t owner, decr, new, next;
441 pthread_mutex_t *interlock;
442 pthread_t self, thread;
443
444 if (__predict_false(__uselibcstub))
445 return __libc_rwlock_unlock_stub(ptr);
446
447 #ifdef ERRORCHECK
448 if ((ptr == NULL) || (ptr->ptr_magic != _PT_RWLOCK_MAGIC))
449 return EINVAL;
450 #endif
451
452 #ifndef PTHREAD__ATOMIC_IS_MEMBAR
453 membar_exit();
454 #endif
455
456 /*
457 * Since we used an add operation to set the required lock
458 * bits, we can use a subtract to clear them, which makes
459 * the read-release and write-release path similar.
460 */
461 owner = (uintptr_t)ptr->ptr_owner;
462 if ((owner & RW_WRITE_LOCKED) != 0) {
463 self = pthread__self();
464 decr = (uintptr_t)self | RW_WRITE_LOCKED;
465 if ((owner & RW_THREAD) != (uintptr_t)self) {
466 return EPERM;
467 }
468 } else {
469 decr = RW_READ_INCR;
470 if (owner == 0) {
471 return EPERM;
472 }
473 }
474
475 for (;; owner = next) {
476 /*
477 * Compute what we expect the new value of the lock to be.
478 * Only proceed to do direct handoff if there are waiters,
479 * and if the lock would become unowned.
480 */
481 new = (owner - decr);
482 if ((new & (RW_THREAD | RW_HAS_WAITERS)) != RW_HAS_WAITERS) {
483 next = rw_cas(ptr, owner, new);
484 if (owner == next) {
485 /* Released! */
486 return 0;
487 }
488 continue;
489 }
490
491 /*
492 * Grab the interlock. Once we have that, we can adjust
493 * the waiter bits. We must check to see if there are
494 * still waiters before proceeding.
495 */
496 interlock = pthread__hashlock(ptr);
497 pthread_mutex_lock(interlock);
498 owner = (uintptr_t)ptr->ptr_owner;
499 if ((owner & RW_HAS_WAITERS) == 0) {
500 pthread_mutex_unlock(interlock);
501 next = owner;
502 continue;
503 }
504
505 /*
506 * Give the lock away. SUSv3 dictates that we must give
507 * preference to writers.
508 */
509 self = pthread__self();
510 if ((thread = PTQ_FIRST(&ptr->ptr_wblocked)) != NULL) {
511 new = (uintptr_t)thread | RW_WRITE_LOCKED;
512
513 if (PTQ_NEXT(thread, pt_sleep) != NULL)
514 new |= RW_HAS_WAITERS | RW_WRITE_WANTED;
515 else if (ptr->ptr_nreaders != 0)
516 new |= RW_HAS_WAITERS;
517
518 /*
519 * Set in the new value. The lock becomes owned
520 * by the writer that we are about to wake.
521 */
522 (void)atomic_swap_ptr(&ptr->ptr_owner, (void *)new);
523
524 /* Wake the writer. */
525 thread->pt_rwlocked = _RW_LOCKED;
526 pthread__unpark(&ptr->ptr_wblocked, self,
527 interlock);
528 } else {
529 new = 0;
530 PTQ_FOREACH(thread, &ptr->ptr_rblocked, pt_sleep) {
531 /*
532 * May have already been handed the lock,
533 * since pthread__unpark_all() can release
534 * our interlock before awakening all
535 * threads.
536 */
537 if (thread->pt_sleepobj == NULL)
538 continue;
539 new += RW_READ_INCR;
540 thread->pt_rwlocked = _RW_LOCKED;
541 }
542
543 /*
544 * Set in the new value. The lock becomes owned
545 * by the readers that we are about to wake.
546 */
547 (void)atomic_swap_ptr(&ptr->ptr_owner, (void *)new);
548
549 /* Wake up all sleeping readers. */
550 ptr->ptr_nreaders = 0;
551 pthread__unpark_all(&ptr->ptr_rblocked, self,
552 interlock);
553 }
554 pthread_mutex_unlock(interlock);
555
556 return 0;
557 }
558 }
559
/*
 * Called when a timedlock awakens early to adjust the waiter bits.
 * The rwlock's interlock is held on entry, and the caller has been
 * removed from the waiters lists.
 */
static void
pthread__rwlock_early(void *obj)
{
	uintptr_t owner, set, new, next;
	pthread_rwlock_t *ptr;
	pthread_t self;
	u_int off;

	self = pthread__self();

	switch (self->pt_rwlocked) {
	case _RW_WANT_READ:
		off = offsetof(pthread_rwlock_t, ptr_rblocked);
		break;
	case _RW_WANT_WRITE:
		off = offsetof(pthread_rwlock_t, ptr_wblocked);
		break;
	default:
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "bad value of pt_rwlocked");
		off = 0;
		/* NOTREACHED */
		break;
	}

	/* LINTED mind your own business */
	ptr = (pthread_rwlock_t *)((uint8_t *)obj - off);
	owner = (uintptr_t)ptr->ptr_owner;

	if ((owner & RW_THREAD) == 0) {
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "lock not held");
	}

	if (!PTQ_EMPTY(&ptr->ptr_wblocked))
		set = RW_HAS_WAITERS | RW_WRITE_WANTED;
	else if (ptr->ptr_nreaders != 0)
		set = RW_HAS_WAITERS;
	else
		set = 0;

	for (;; owner = next) {
		new = (owner & ~(RW_HAS_WAITERS | RW_WRITE_WANTED)) | set;
		next = rw_cas(ptr, owner, new);
		if (owner == next)
			break;
	}
}

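/*
 * Non-portable introspection hooks: report whether the lock is held at
 * all, held for reading by anyone, or held for writing by the calling
 * thread.  These are unlocked reads of the owner word, so the answers
 * are only advisory; they are intended for diagnostic assertions.
 */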
int
_pthread_rwlock_held_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	if ((owner & RW_WRITE_LOCKED) != 0)
		return (owner & RW_THREAD) == (uintptr_t)pthread__self();
	return (owner & RW_THREAD) != 0;
}

int
_pthread_rwlock_rdheld_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	return (owner & RW_THREAD) != 0 && (owner & RW_WRITE_LOCKED) == 0;
}

int
_pthread_rwlock_wrheld_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	return (owner & (RW_THREAD | RW_WRITE_LOCKED)) ==
	    ((uintptr_t)pthread__self() | RW_WRITE_LOCKED);
}

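/*
 * Process-shared rwlocks are not supported: the attribute can be read
 * back, but only PTHREAD_PROCESS_PRIVATE may be set.
 */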
#ifdef _PTHREAD_PSHARED
int
pthread_rwlockattr_getpshared(const pthread_rwlockattr_t * __restrict attr,
    int * __restrict pshared)
{
	*pshared = PTHREAD_PROCESS_PRIVATE;
	return 0;
}

int
pthread_rwlockattr_setpshared(pthread_rwlockattr_t *attr, int pshared)
{

	switch(pshared) {
	case PTHREAD_PROCESS_PRIVATE:
		return 0;
	case PTHREAD_PROCESS_SHARED:
		return ENOSYS;
	}
	return EINVAL;
}
#endif

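/*
 * Only the magic number is managed here; it lets pthread_rwlock_init()
 * and pthread_rwlockattr_destroy() detect an uninitialized or stale
 * attribute object.
 */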
int
pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
{

	if (attr == NULL)
		return EINVAL;
	attr->ptra_magic = _PT_RWLOCKATTR_MAGIC;

	return 0;
}


int
pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
{

	if ((attr == NULL) ||
	    (attr->ptra_magic != _PT_RWLOCKATTR_MAGIC))
		return EINVAL;
	attr->ptra_magic = _PT_RWLOCKATTR_DEAD;

	return 0;
}