/*	$NetBSD: pthread_rwlock.c,v 1.28.2.1 2008/05/18 12:30:40 yamt Exp $ */

/*-
 * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, by Jason R. Thorpe, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_rwlock.c,v 1.28.2.1 2008/05/18 12:30:40 yamt Exp $");

#include <errno.h>
#include <stddef.h>

#include "pthread.h"
#include "pthread_int.h"

#define	_RW_LOCKED		0
#define	_RW_WANT_WRITE		1
#define	_RW_WANT_READ		2

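/*
 * A note on the encoding (a reading of the code below; the RW_* bit
 * definitions themselves come in via "pthread_int.h"): ptr_owner is a
 * single word holding the low-order flag bits RW_WRITE_LOCKED,
 * RW_WRITE_WANTED and RW_HAS_WAITERS, with the remaining RW_THREAD
 * bits holding either the owning thread or the reader count:
 *
 *	owner == 0					unowned
 *	owner == ((uintptr_t)self | RW_WRITE_LOCKED)	write held
 *	owner == n * RW_READ_INCR			held by n readers
 *
 * The _RW_* values above are recorded in pt_rwlocked: a thread about
 * to sleep notes what it wants (_RW_WANT_READ or _RW_WANT_WRITE), and
 * an unlocking thread stores _RW_LOCKED there when it hands the lock
 * over directly.
 */
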
static int pthread__rwlock_wrlock(pthread_rwlock_t *, const struct timespec *);
static int pthread__rwlock_rdlock(pthread_rwlock_t *, const struct timespec *);
static void pthread__rwlock_early(void *);

int	_pthread_rwlock_held_np(pthread_rwlock_t *);
int	_pthread_rwlock_rdheld_np(pthread_rwlock_t *);
int	_pthread_rwlock_wrheld_np(pthread_rwlock_t *);

#ifndef lint
__weak_alias(pthread_rwlock_held_np,_pthread_rwlock_held_np);
__weak_alias(pthread_rwlock_rdheld_np,_pthread_rwlock_rdheld_np);
__weak_alias(pthread_rwlock_wrheld_np,_pthread_rwlock_wrheld_np);
#endif

__strong_alias(__libc_rwlock_init,pthread_rwlock_init)
__strong_alias(__libc_rwlock_rdlock,pthread_rwlock_rdlock)
__strong_alias(__libc_rwlock_wrlock,pthread_rwlock_wrlock)
__strong_alias(__libc_rwlock_tryrdlock,pthread_rwlock_tryrdlock)
__strong_alias(__libc_rwlock_trywrlock,pthread_rwlock_trywrlock)
__strong_alias(__libc_rwlock_unlock,pthread_rwlock_unlock)
__strong_alias(__libc_rwlock_destroy,pthread_rwlock_destroy)

/*
 * Atomically compare-and-swap the lock word.  Returns the value found
 * there, which equals 'o' exactly when the swap succeeded.
 */
static inline uintptr_t
rw_cas(pthread_rwlock_t *ptr, uintptr_t o, uintptr_t n)
{

	return (uintptr_t)atomic_cas_ptr(&ptr->ptr_owner, (void *)o,
	    (void *)n);
}
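
/*
 * Every acquire and release path below is built on the same lock-free
 * retry loop around rw_cas(): read the owner word, compute a new value,
 * and re-run the iteration with whatever a failed CAS found.  A minimal
 * sketch of the pattern (the "compute" step is hypothetical):
 *
 *	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
 *		new = compute(owner);
 *		next = rw_cas(ptr, owner, new);
 *		if (owner == next)
 *			break;	// CAS succeeded; 'new' is now visible
 *	}
 */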

int
pthread_rwlock_init(pthread_rwlock_t *ptr,
	    const pthread_rwlockattr_t *attr)
{

	if (attr && (attr->ptra_magic != _PT_RWLOCKATTR_MAGIC))
		return EINVAL;
	ptr->ptr_magic = _PT_RWLOCK_MAGIC;
	pthread_lockinit(&ptr->ptr_interlock);
	PTQ_INIT(&ptr->ptr_rblocked);
	PTQ_INIT(&ptr->ptr_wblocked);
	ptr->ptr_nreaders = 0;
	ptr->ptr_owner = NULL;

	return 0;
}
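
/*
 * A minimal lifecycle sketch from the application's point of view (not
 * part of this file; statically allocated locks may instead use
 * PTHREAD_RWLOCK_INITIALIZER):
 *
 *	pthread_rwlock_t lock;
 *
 *	pthread_rwlock_init(&lock, NULL);	// NULL: default attributes
 *	pthread_rwlock_rdlock(&lock);		// shared (reader) side
 *	pthread_rwlock_unlock(&lock);
 *	pthread_rwlock_wrlock(&lock);		// exclusive (writer) side
 *	pthread_rwlock_unlock(&lock);
 *	pthread_rwlock_destroy(&lock);		// no users may remain
 */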


int
pthread_rwlock_destroy(pthread_rwlock_t *ptr)
{

	if ((ptr->ptr_magic != _PT_RWLOCK_MAGIC) ||
	    (!PTQ_EMPTY(&ptr->ptr_rblocked)) ||
	    (!PTQ_EMPTY(&ptr->ptr_wblocked)) ||
	    (ptr->ptr_nreaders != 0) ||
	    (ptr->ptr_owner != NULL))
		return EINVAL;
	ptr->ptr_magic = _PT_RWLOCK_DEAD;

	return 0;
}

static int
pthread__rwlock_rdlock(pthread_rwlock_t *ptr, const struct timespec *ts)
{
	uintptr_t owner, next;
	pthread_t self;
	int error;

	self = pthread__self();

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear, then try to acquire the lock.
		 */
		if ((owner & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) == 0) {
			next = rw_cas(ptr, owner, owner + RW_READ_INCR);
			if (owner == next) {
				/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			continue;
		}

		if ((owner & RW_THREAD) == (uintptr_t)self)
			return EDEADLK;

		/*
		 * Grab the interlock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		pthread__spinlock(self, &ptr->ptr_interlock);

		/*
		 * Mark the rwlock as having waiters.  If the set fails,
		 * then we may not need to sleep and should spin again.
		 */
		next = rw_cas(ptr, owner, owner | RW_HAS_WAITERS);
		if (owner != next) {
			pthread__spinunlock(self, &ptr->ptr_interlock);
			continue;
		}

		/* The waiters bit is set - it's safe to sleep. */
		PTQ_INSERT_HEAD(&ptr->ptr_rblocked, self, pt_sleep);
		ptr->ptr_nreaders++;
		self->pt_rwlocked = _RW_WANT_READ;
		self->pt_sleeponq = 1;
		self->pt_sleepobj = &ptr->ptr_rblocked;
		self->pt_early = pthread__rwlock_early;
		pthread__spinunlock(self, &ptr->ptr_interlock);

		error = pthread__park(self, &ptr->ptr_interlock,
		    &ptr->ptr_rblocked, ts, 0, &ptr->ptr_rblocked);

		/* Did we get the lock? */
		if (self->pt_rwlocked == _RW_LOCKED) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
		if (error != 0)
			return error;

		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "direct handoff failure");
	}
}
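
/*
 * The EDEADLK check above catches the one self-deadlock that can be
 * detected cheaply: a thread that already holds the write lock asking
 * for a read lock.  For example (a sketch):
 *
 *	pthread_rwlock_wrlock(&lock);		// owner = self
 *	pthread_rwlock_rdlock(&lock);		// returns EDEADLK instead
 *						// of blocking forever
 */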


int
pthread_rwlock_tryrdlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, next;

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	/*
	 * Don't take a read lock if there is a writer or if there are
	 * waiting writers; i.e. prefer writers to readers.  This strategy
	 * is dictated by SUSv3.
	 */
	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		if ((owner & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) != 0)
			return EBUSY;
		next = rw_cas(ptr, owner, owner + RW_READ_INCR);
		if (owner == next) {
			/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
	}
}
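
/*
 * One consequence of the writer-preference rule worth spelling out:
 * tryrdlock fails not only while a writer holds the lock, but also
 * while one is merely queued, even though more readers could be
 * admitted.  A sketch with three threads:
 *
 *	(A) pthread_rwlock_rdlock(&lock);	// granted, 1 reader
 *	(B) pthread_rwlock_wrlock(&lock);	// blocks; RW_WRITE_WANTED
 *	(C) pthread_rwlock_tryrdlock(&lock);	// EBUSY, writer preferred
 */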

static int
pthread__rwlock_wrlock(pthread_rwlock_t *ptr, const struct timespec *ts)
{
	uintptr_t owner, next;
	pthread_t self;
	int error;

	self = pthread__self();

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear, then try to acquire the lock.
		 */
		if ((owner & RW_THREAD) == 0) {
			next = rw_cas(ptr, owner,
			    (uintptr_t)self | RW_WRITE_LOCKED);
			if (owner == next) {
				/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			continue;
		}

		if ((owner & RW_THREAD) == (uintptr_t)self)
			return EDEADLK;

		/*
		 * Grab the interlock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		pthread__spinlock(self, &ptr->ptr_interlock);

		/*
		 * Mark the rwlock as having waiters.  If the set fails,
		 * then we may not need to sleep and should spin again.
		 */
		next = rw_cas(ptr, owner,
		    owner | RW_HAS_WAITERS | RW_WRITE_WANTED);
		if (owner != next) {
			pthread__spinunlock(self, &ptr->ptr_interlock);
			continue;
		}

		/* The waiters bit is set - it's safe to sleep. */
		PTQ_INSERT_TAIL(&ptr->ptr_wblocked, self, pt_sleep);
		self->pt_rwlocked = _RW_WANT_WRITE;
		self->pt_sleeponq = 1;
		self->pt_sleepobj = &ptr->ptr_wblocked;
		self->pt_early = pthread__rwlock_early;
		pthread__spinunlock(self, &ptr->ptr_interlock);

		error = pthread__park(self, &ptr->ptr_interlock,
		    &ptr->ptr_wblocked, ts, 0, &ptr->ptr_wblocked);

		/* Did we get the lock? */
		if (self->pt_rwlocked == _RW_LOCKED) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
		if (error != 0)
			return error;

		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "direct handoff failure");
	}
}


int
pthread_rwlock_trywrlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, next;
	pthread_t self;

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	self = pthread__self();

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		if (owner != 0)
			return EBUSY;
		next = rw_cas(ptr, owner, (uintptr_t)self | RW_WRITE_LOCKED);
		if (owner == next) {
			/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
	}
}

int
pthread_rwlock_rdlock(pthread_rwlock_t *ptr)
{

	return pthread__rwlock_rdlock(ptr, NULL);
}

int
pthread_rwlock_timedrdlock(pthread_rwlock_t *ptr,
			   const struct timespec *abs_timeout)
{

	if (abs_timeout == NULL)
		return EINVAL;
	if ((abs_timeout->tv_nsec >= 1000000000) ||
	    (abs_timeout->tv_nsec < 0) ||
	    (abs_timeout->tv_sec < 0))
		return EINVAL;

	return pthread__rwlock_rdlock(ptr, abs_timeout);
}

int
pthread_rwlock_wrlock(pthread_rwlock_t *ptr)
{

	return pthread__rwlock_wrlock(ptr, NULL);
}

int
pthread_rwlock_timedwrlock(pthread_rwlock_t *ptr,
			   const struct timespec *abs_timeout)
{

	if (abs_timeout == NULL)
		return EINVAL;
	if ((abs_timeout->tv_nsec >= 1000000000) ||
	    (abs_timeout->tv_nsec < 0) ||
	    (abs_timeout->tv_sec < 0))
		return EINVAL;

	return pthread__rwlock_wrlock(ptr, abs_timeout);
}
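
/*
 * Both timed variants take an absolute CLOCK_REALTIME deadline, not a
 * relative interval.  A caller wanting "at most five seconds from now"
 * might do the following (a sketch; error handling omitted):
 *
 *	struct timespec ts;
 *
 *	clock_gettime(CLOCK_REALTIME, &ts);
 *	ts.tv_sec += 5;
 *	if (pthread_rwlock_timedwrlock(&lock, &ts) == ETIMEDOUT) {
 *		// deadline passed without acquiring the write lock
 *	}
 */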


int
pthread_rwlock_unlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, decr, new, next;
	pthread_t self, thread;

#ifdef ERRORCHECK
	if ((ptr == NULL) || (ptr->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif

#ifndef PTHREAD__ATOMIC_IS_MEMBAR
	membar_exit();
#endif

	/*
	 * Since we used an add operation to set the required lock
	 * bits, we can use a subtract to clear them, which makes
	 * the read-release and write-release paths similar.
	 */
	self = pthread__self();
	owner = (uintptr_t)ptr->ptr_owner;
	if ((owner & RW_WRITE_LOCKED) != 0) {
		decr = (uintptr_t)self | RW_WRITE_LOCKED;
		if ((owner & RW_THREAD) != (uintptr_t)self) {
			return EPERM;
		}
	} else {
		decr = RW_READ_INCR;
		if (owner == 0) {
			return EPERM;
		}
	}

	for (;; owner = next) {
		/*
		 * Compute what we expect the new value of the lock to be.
		 * Only proceed to do direct handoff if there are waiters,
		 * and if the lock would become unowned.
		 */
		new = (owner - decr);
		if ((new & (RW_THREAD | RW_HAS_WAITERS)) != RW_HAS_WAITERS) {
			next = rw_cas(ptr, owner, new);
			if (owner == next) {
				/* Released! */
				return 0;
			}
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we can adjust
		 * the waiter bits.  We must check to see if there are
		 * still waiters before proceeding.
		 */
		pthread__spinlock(self, &ptr->ptr_interlock);
		owner = (uintptr_t)ptr->ptr_owner;
		if ((owner & RW_HAS_WAITERS) == 0) {
			pthread__spinunlock(self, &ptr->ptr_interlock);
			next = owner;
			continue;
		}

		/*
		 * Give the lock away.  SUSv3 dictates that we must give
		 * preference to writers.
		 */
		if ((thread = PTQ_FIRST(&ptr->ptr_wblocked)) != NULL) {
			new = (uintptr_t)thread | RW_WRITE_LOCKED;

			if (PTQ_NEXT(thread, pt_sleep) != NULL)
				new |= RW_HAS_WAITERS | RW_WRITE_WANTED;
			else if (ptr->ptr_nreaders != 0)
				new |= RW_HAS_WAITERS;

			/*
			 * Store the new value.  The lock becomes owned
			 * by the writer that we are about to wake.
			 */
			(void)atomic_swap_ptr(&ptr->ptr_owner, (void *)new);

			/* Wake the writer. */
			PTQ_REMOVE(&ptr->ptr_wblocked, thread, pt_sleep);
			thread->pt_rwlocked = _RW_LOCKED;
			pthread__unpark(self, &ptr->ptr_interlock,
			    &ptr->ptr_wblocked, thread);
		} else {
			new = 0;
			PTQ_FOREACH(thread, &ptr->ptr_rblocked, pt_sleep) {
				/*
				 * May have already been handed the lock,
				 * since pthread__unpark_all() can release
				 * our interlock before awakening all
				 * threads.
				 */
				if (thread->pt_sleepobj == NULL)
					continue;
				new += RW_READ_INCR;
				thread->pt_rwlocked = _RW_LOCKED;
			}

			/*
			 * Store the new value.  The lock becomes owned
			 * by the readers that we are about to wake.
			 */
			(void)atomic_swap_ptr(&ptr->ptr_owner, (void *)new);

			/* Wake up all sleeping readers. */
			ptr->ptr_nreaders = 0;
			pthread__unpark_all(self, &ptr->ptr_interlock,
			    &ptr->ptr_rblocked);
		}

		return 0;
	}
}
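
/*
 * A worked instance of the subtraction trick: with two readers and no
 * waiters, owner == 2 * RW_READ_INCR and decr == RW_READ_INCR, so an
 * unlocking reader computes new == RW_READ_INCR (one reader remains)
 * and the fast-path CAS releases without taking the interlock.  For a
 * lone writer, owner and decr are both (self | RW_WRITE_LOCKED), so
 * new == 0 and the lock goes free.  Only when the result would be
 * "unowned but RW_HAS_WAITERS still set" is the handoff path taken.
 */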

/*
 * Called when a timed lock attempt wakes early (e.g. on timeout) so the
 * waiter bits can be readjusted.  The rwlock's interlock is held on
 * entry, and the caller has already been removed from the waiter lists.
 */
static void
pthread__rwlock_early(void *obj)
{
	uintptr_t owner, set, new, next;
	pthread_rwlock_t *ptr;
	pthread_t self;
	u_int off;

	self = pthread__self();

	switch (self->pt_rwlocked) {
	case _RW_WANT_READ:
		off = offsetof(pthread_rwlock_t, ptr_rblocked);
		break;
	case _RW_WANT_WRITE:
		off = offsetof(pthread_rwlock_t, ptr_wblocked);
		break;
	default:
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "bad value of pt_rwlocked");
		off = 0;
		/* NOTREACHED */
		break;
	}

	/* LINTED mind your own business */
	ptr = (pthread_rwlock_t *)((uint8_t *)obj - off);
	owner = (uintptr_t)ptr->ptr_owner;

	if ((owner & RW_THREAD) == 0) {
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "lock not held");
	}

	if (!PTQ_EMPTY(&ptr->ptr_wblocked))
		set = RW_HAS_WAITERS | RW_WRITE_WANTED;
	else if (ptr->ptr_nreaders != 0)
		set = RW_HAS_WAITERS;
	else
		set = 0;

	for (;; owner = next) {
		new = (owner & ~(RW_HAS_WAITERS | RW_WRITE_WANTED)) | set;
		next = rw_cas(ptr, owner, new);
		if (owner == next)
			break;
	}
}
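
/*
 * The offsetof() arithmetic above is the usual "container of" idiom:
 * recover a pointer to the enclosing structure from a pointer to one
 * of its members.  In generic form (a sketch; not defined here):
 *
 *	#define container_of(p, type, member) \
 *	    ((type *)((uint8_t *)(p) - offsetof(type, member)))
 *
 * obj points at either ptr_rblocked or ptr_wblocked, and pt_rwlocked
 * records which one, so the matching offset can be selected.
 */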

int
_pthread_rwlock_held_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	if ((owner & RW_WRITE_LOCKED) != 0)
		return (owner & RW_THREAD) == (uintptr_t)pthread__self();
	return (owner & RW_THREAD) != 0;
}

int
_pthread_rwlock_rdheld_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	return (owner & RW_THREAD) != 0 && (owner & RW_WRITE_LOCKED) == 0;
}

int
_pthread_rwlock_wrheld_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	return (owner & (RW_THREAD | RW_WRITE_LOCKED)) ==
	    ((uintptr_t)pthread__self() | RW_WRITE_LOCKED);
}
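
/*
 * The three predicates above read ptr_owner without synchronization,
 * so a positive answer about other threads can go stale immediately;
 * they are intended for diagnostic assertions about the caller, e.g.
 * (a sketch):
 *
 *	// entered only with the write lock held by this thread
 *	assert(pthread_rwlock_wrheld_np(&lock));
 */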

int
pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
{

	if (attr == NULL)
		return EINVAL;
	attr->ptra_magic = _PT_RWLOCKATTR_MAGIC;

	return 0;
}


int
pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
{

	if ((attr == NULL) ||
	    (attr->ptra_magic != _PT_RWLOCKATTR_MAGIC))
		return EINVAL;
	attr->ptra_magic = _PT_RWLOCKATTR_DEAD;

	return 0;
}