/*	$NetBSD: pthread_rwlock.c,v 1.29.2.1 2008/06/23 04:29:54 wrstuden Exp $	*/

/*-
 * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, by Jason R. Thorpe, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_rwlock.c,v 1.29.2.1 2008/06/23 04:29:54 wrstuden Exp $");

#include <sys/types.h>
#include <sys/lwpctl.h>

#include <errno.h>
#include <stddef.h>

#include "pthread.h"
#include "pthread_int.h"

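/*
 * Values of pt_rwlocked, used for direct handoff: a waiter records what
 * it wants (_RW_WANT_READ or _RW_WANT_WRITE) before parking, and the
 * releasing thread sets _RW_LOCKED when it hands the lock over.
 */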
#define	_RW_LOCKED		0
#define	_RW_WANT_WRITE		1
#define	_RW_WANT_READ		2

#if __GNUC_PREREQ__(3, 0)
#define	NOINLINE		__attribute ((noinline))
#else
#define	NOINLINE		/* nothing */
#endif

static int pthread__rwlock_wrlock(pthread_rwlock_t *, const struct timespec *);
static int pthread__rwlock_rdlock(pthread_rwlock_t *, const struct timespec *);
static void pthread__rwlock_early(void *);

int	_pthread_rwlock_held_np(pthread_rwlock_t *);
int	_pthread_rwlock_rdheld_np(pthread_rwlock_t *);
int	_pthread_rwlock_wrheld_np(pthread_rwlock_t *);

#ifndef lint
__weak_alias(pthread_rwlock_held_np,_pthread_rwlock_held_np);
__weak_alias(pthread_rwlock_rdheld_np,_pthread_rwlock_rdheld_np);
__weak_alias(pthread_rwlock_wrheld_np,_pthread_rwlock_wrheld_np);
#endif

__strong_alias(__libc_rwlock_init,pthread_rwlock_init)
__strong_alias(__libc_rwlock_rdlock,pthread_rwlock_rdlock)
__strong_alias(__libc_rwlock_wrlock,pthread_rwlock_wrlock)
__strong_alias(__libc_rwlock_tryrdlock,pthread_rwlock_tryrdlock)
__strong_alias(__libc_rwlock_trywrlock,pthread_rwlock_trywrlock)
__strong_alias(__libc_rwlock_unlock,pthread_rwlock_unlock)
__strong_alias(__libc_rwlock_destroy,pthread_rwlock_destroy)

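/*
 * Compare-and-swap the lock's owner word: if it still holds the
 * expected value 'o', replace it with 'n'.  Returns the value actually
 * observed, so the caller can tell whether the swap took effect.
 */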
static inline uintptr_t
rw_cas(pthread_rwlock_t *ptr, uintptr_t o, uintptr_t n)
{

	return (uintptr_t)atomic_cas_ptr(&ptr->ptr_owner, (void *)o,
	    (void *)n);
}

int
pthread_rwlock_init(pthread_rwlock_t *ptr,
	    const pthread_rwlockattr_t *attr)
{

	if (attr && (attr->ptra_magic != _PT_RWLOCKATTR_MAGIC))
		return EINVAL;
	ptr->ptr_magic = _PT_RWLOCK_MAGIC;
	PTQ_INIT(&ptr->ptr_rblocked);
	PTQ_INIT(&ptr->ptr_wblocked);
	ptr->ptr_nreaders = 0;
	ptr->ptr_owner = NULL;

	return 0;
}


int
pthread_rwlock_destroy(pthread_rwlock_t *ptr)
{

	if ((ptr->ptr_magic != _PT_RWLOCK_MAGIC) ||
	    (!PTQ_EMPTY(&ptr->ptr_rblocked)) ||
	    (!PTQ_EMPTY(&ptr->ptr_wblocked)) ||
	    (ptr->ptr_nreaders != 0) ||
	    (ptr->ptr_owner != NULL))
		return EINVAL;
	ptr->ptr_magic = _PT_RWLOCK_DEAD;

	return 0;
}

/* We want function call overhead. */
NOINLINE static void
pthread__rwlock_pause(void)
{

	pthread__smt_pause();
}

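/*
 * Decide whether it is worth spinning rather than sleeping: spin only
 * while the lock is write held with no waiters and the owning thread
 * is currently running on a CPU and not itself blocked.
 */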
NOINLINE static int
pthread__rwlock_spin(uintptr_t owner)
{
	pthread_t thread;
	unsigned int i;

	thread = (pthread_t)(owner & RW_THREAD);
	if (thread == NULL || (owner & ~RW_THREAD) != RW_WRITE_LOCKED)
		return 0;
	if (thread->pt_lwpctl->lc_curcpu == LWPCTL_CPU_NONE ||
	    thread->pt_blocking)
		return 0;
	for (i = 128; i != 0; i--)
		pthread__rwlock_pause();
	return 1;
}

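/*
 * Acquire a read hold on the lock, sleeping if necessary.  'ts' is an
 * absolute timeout, or NULL to wait indefinitely; this is the common
 * body behind pthread_rwlock_rdlock() and pthread_rwlock_timedrdlock().
 */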
static int
pthread__rwlock_rdlock(pthread_rwlock_t *ptr, const struct timespec *ts)
{
	uintptr_t owner, next;
	pthread_mutex_t *interlock;
	pthread_t self;
	int error;

	self = pthread__self();

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear, then try to acquire the lock.
		 */
		if ((owner & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) == 0) {
			next = rw_cas(ptr, owner, owner + RW_READ_INCR);
			if (owner == next) {
				/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			continue;
		}

		if ((owner & RW_THREAD) == (uintptr_t)self)
			return EDEADLK;

		/* If held write locked and no waiters, spin. */
		if (pthread__rwlock_spin(owner)) {
			while (pthread__rwlock_spin(owner)) {
				owner = (uintptr_t)ptr->ptr_owner;
			}
			next = owner;
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);

		/*
		 * Mark the rwlock as having waiters.  If the set fails,
		 * then we may not need to sleep and should spin again.
		 */
		next = rw_cas(ptr, owner, owner | RW_HAS_WAITERS);
		if (owner != next) {
			pthread_mutex_unlock(interlock);
			continue;
		}

		/* The waiters bit is set - it's safe to sleep. */
		PTQ_INSERT_HEAD(&ptr->ptr_rblocked, self, pt_sleep);
		ptr->ptr_nreaders++;
		self->pt_rwlocked = _RW_WANT_READ;
		self->pt_sleepobj = &ptr->ptr_rblocked;
		self->pt_early = pthread__rwlock_early;
		error = pthread__park(self, interlock, &ptr->ptr_rblocked,
		    ts, 0, &ptr->ptr_rblocked);

		/* Did we get the lock? */
		if (self->pt_rwlocked == _RW_LOCKED) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
		if (error != 0)
			return error;

		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "direct handoff failure");
	}
}


int
pthread_rwlock_tryrdlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, next;

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	/*
	 * Don't get a readlock if there is a writer or if there are waiting
	 * writers; i.e. prefer writers to readers.  This strategy is dictated
	 * by SUSv3.
	 */
	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		if ((owner & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) != 0)
			return EBUSY;
		next = rw_cas(ptr, owner, owner + RW_READ_INCR);
		if (owner == next) {
			/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
	}
}

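/*
 * Acquire the lock for writing, sleeping if necessary.  'ts' is an
 * absolute timeout, or NULL to wait indefinitely; this is the common
 * body behind pthread_rwlock_wrlock() and pthread_rwlock_timedwrlock().
 */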
static int
pthread__rwlock_wrlock(pthread_rwlock_t *ptr, const struct timespec *ts)
{
	uintptr_t owner, next;
	pthread_mutex_t *interlock;
	pthread_t self;
	int error;

	self = pthread__self();

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear, then try to acquire the lock.
		 */
		if ((owner & RW_THREAD) == 0) {
			next = rw_cas(ptr, owner,
			    (uintptr_t)self | RW_WRITE_LOCKED);
			if (owner == next) {
				/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			continue;
		}

		if ((owner & RW_THREAD) == (uintptr_t)self)
			return EDEADLK;

		/* If held write locked and no waiters, spin. */
		if (pthread__rwlock_spin(owner)) {
			while (pthread__rwlock_spin(owner)) {
				owner = (uintptr_t)ptr->ptr_owner;
			}
			next = owner;
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);

		/*
		 * Mark the rwlock as having waiters.  If the set fails,
		 * then we may not need to sleep and should spin again.
		 */
		next = rw_cas(ptr, owner,
		    owner | RW_HAS_WAITERS | RW_WRITE_WANTED);
		if (owner != next) {
			pthread_mutex_unlock(interlock);
			continue;
		}

		/* The waiters bit is set - it's safe to sleep. */
		PTQ_INSERT_TAIL(&ptr->ptr_wblocked, self, pt_sleep);
		self->pt_rwlocked = _RW_WANT_WRITE;
		self->pt_sleepobj = &ptr->ptr_wblocked;
		self->pt_early = pthread__rwlock_early;
		error = pthread__park(self, interlock, &ptr->ptr_wblocked,
		    ts, 0, &ptr->ptr_wblocked);

		/* Did we get the lock? */
		if (self->pt_rwlocked == _RW_LOCKED) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
		if (error != 0)
			return error;

		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "direct handoff failure");
	}
}


int
pthread_rwlock_trywrlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, next;
	pthread_t self;

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	self = pthread__self();

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		if (owner != 0)
			return EBUSY;
		next = rw_cas(ptr, owner, (uintptr_t)self | RW_WRITE_LOCKED);
		if (owner == next) {
			/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
	}
}

int
pthread_rwlock_rdlock(pthread_rwlock_t *ptr)
{

	return pthread__rwlock_rdlock(ptr, NULL);
}

int
pthread_rwlock_timedrdlock(pthread_rwlock_t *ptr,
	    const struct timespec *abs_timeout)
{

	if (abs_timeout == NULL)
		return EINVAL;
	if ((abs_timeout->tv_nsec >= 1000000000) ||
	    (abs_timeout->tv_nsec < 0) ||
	    (abs_timeout->tv_sec < 0))
		return EINVAL;

	return pthread__rwlock_rdlock(ptr, abs_timeout);
}

int
pthread_rwlock_wrlock(pthread_rwlock_t *ptr)
{

	return pthread__rwlock_wrlock(ptr, NULL);
}

int
pthread_rwlock_timedwrlock(pthread_rwlock_t *ptr,
	    const struct timespec *abs_timeout)
{

	if (abs_timeout == NULL)
		return EINVAL;
	if ((abs_timeout->tv_nsec >= 1000000000) ||
	    (abs_timeout->tv_nsec < 0) ||
	    (abs_timeout->tv_sec < 0))
		return EINVAL;

	return pthread__rwlock_wrlock(ptr, abs_timeout);
}

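/*
 * Release one hold on the lock.  If the lock would become unowned and
 * has waiters, hand it off directly: to the longest-waiting writer if
 * there is one, otherwise to all queued readers at once.
 */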
int
pthread_rwlock_unlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, decr, new, next;
	pthread_mutex_t *interlock;
	pthread_t self, thread;

#ifdef ERRORCHECK
	if ((ptr == NULL) || (ptr->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif

#ifndef PTHREAD__ATOMIC_IS_MEMBAR
	membar_exit();
#endif

	/*
	 * Since we used an add operation to set the required lock
	 * bits, we can use a subtract to clear them, which makes
	 * the read-release and write-release path similar.
	 */
	self = pthread__self();
	owner = (uintptr_t)ptr->ptr_owner;
	if ((owner & RW_WRITE_LOCKED) != 0) {
		decr = (uintptr_t)self | RW_WRITE_LOCKED;
		if ((owner & RW_THREAD) != (uintptr_t)self) {
			return EPERM;
		}
	} else {
		decr = RW_READ_INCR;
		if (owner == 0) {
			return EPERM;
		}
	}

	for (;; owner = next) {
		/*
		 * Compute what we expect the new value of the lock to be.
		 * Only proceed to do direct handoff if there are waiters,
		 * and if the lock would become unowned.
		 */
		new = (owner - decr);
		if ((new & (RW_THREAD | RW_HAS_WAITERS)) != RW_HAS_WAITERS) {
			next = rw_cas(ptr, owner, new);
			if (owner == next) {
				/* Released! */
				return 0;
			}
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we can adjust
		 * the waiter bits.  We must check to see if there are
		 * still waiters before proceeding.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);
		owner = (uintptr_t)ptr->ptr_owner;
		if ((owner & RW_HAS_WAITERS) == 0) {
			pthread_mutex_unlock(interlock);
			next = owner;
			continue;
		}

		/*
		 * Give the lock away.  SUSv3 dictates that we must give
		 * preference to writers.
		 */
		if ((thread = PTQ_FIRST(&ptr->ptr_wblocked)) != NULL) {
			new = (uintptr_t)thread | RW_WRITE_LOCKED;

			if (PTQ_NEXT(thread, pt_sleep) != NULL)
				new |= RW_HAS_WAITERS | RW_WRITE_WANTED;
			else if (ptr->ptr_nreaders != 0)
				new |= RW_HAS_WAITERS;

			/*
			 * Set in the new value.  The lock becomes owned
			 * by the writer that we are about to wake.
			 */
			(void)atomic_swap_ptr(&ptr->ptr_owner, (void *)new);

			/* Wake the writer. */
			thread->pt_rwlocked = _RW_LOCKED;
			pthread__unpark(&ptr->ptr_wblocked, self,
			    interlock);
		} else {
			new = 0;
			PTQ_FOREACH(thread, &ptr->ptr_rblocked, pt_sleep) {
				/*
				 * May have already been handed the lock,
				 * since pthread__unpark_all() can release
				 * our interlock before awakening all
				 * threads.
				 */
				if (thread->pt_sleepobj == NULL)
					continue;
				new += RW_READ_INCR;
				thread->pt_rwlocked = _RW_LOCKED;
			}

			/*
			 * Set in the new value.  The lock becomes owned
			 * by the readers that we are about to wake.
			 */
			(void)atomic_swap_ptr(&ptr->ptr_owner, (void *)new);

			/* Wake up all sleeping readers. */
			ptr->ptr_nreaders = 0;
			pthread__unpark_all(&ptr->ptr_rblocked, self,
			    interlock);
		}
		pthread_mutex_unlock(interlock);

		return 0;
	}
}

/*
 * Called when a timedlock awakens early to adjust the waiter bits.
 * The rwlock's interlock is held on entry, and the caller has been
 * removed from the waiters lists.
 */
static void
pthread__rwlock_early(void *obj)
{
	uintptr_t owner, set, new, next;
	pthread_rwlock_t *ptr;
	pthread_t self;
	u_int off;

	self = pthread__self();

	switch (self->pt_rwlocked) {
	case _RW_WANT_READ:
		off = offsetof(pthread_rwlock_t, ptr_rblocked);
		break;
	case _RW_WANT_WRITE:
		off = offsetof(pthread_rwlock_t, ptr_wblocked);
		break;
	default:
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "bad value of pt_rwlocked");
		off = 0;
		/* NOTREACHED */
		break;
	}

	/* LINTED mind your own business */
	ptr = (pthread_rwlock_t *)((uint8_t *)obj - off);
	owner = (uintptr_t)ptr->ptr_owner;

	if ((owner & RW_THREAD) == 0) {
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "lock not held");
	}

	if (!PTQ_EMPTY(&ptr->ptr_wblocked))
		set = RW_HAS_WAITERS | RW_WRITE_WANTED;
	else if (ptr->ptr_nreaders != 0)
		set = RW_HAS_WAITERS;
	else
		set = 0;

	for (;; owner = next) {
		new = (owner & ~(RW_HAS_WAITERS | RW_WRITE_WANTED)) | set;
		next = rw_cas(ptr, owner, new);
		if (owner == next)
			break;
	}
}

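/*
 * Non-portable ownership predicates.  These take an unlocked snapshot
 * of the owner word, so the answer can be stale by the time it is
 * returned; they are suited to assertions and diagnostics.
 */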
int
_pthread_rwlock_held_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	if ((owner & RW_WRITE_LOCKED) != 0)
		return (owner & RW_THREAD) == (uintptr_t)pthread__self();
	return (owner & RW_THREAD) != 0;
}

int
_pthread_rwlock_rdheld_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	return (owner & RW_THREAD) != 0 && (owner & RW_WRITE_LOCKED) == 0;
}

int
_pthread_rwlock_wrheld_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	return (owner & (RW_THREAD | RW_WRITE_LOCKED)) ==
	    ((uintptr_t)pthread__self() | RW_WRITE_LOCKED);
}

int
pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
{

	if (attr == NULL)
		return EINVAL;
	attr->ptra_magic = _PT_RWLOCKATTR_MAGIC;

	return 0;
}


int
pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
{

	if ((attr == NULL) ||
	    (attr->ptra_magic != _PT_RWLOCKATTR_MAGIC))
		return EINVAL;
	attr->ptra_magic = _PT_RWLOCKATTR_DEAD;

	return 0;
}