/*	$NetBSD: pthread_rwlock.c,v 1.35 2019/12/15 23:13:33 uwe Exp $ */

/*-
 * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, by Jason R. Thorpe, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_rwlock.c,v 1.35 2019/12/15 23:13:33 uwe Exp $");

#include <sys/types.h>
#include <sys/lwpctl.h>

#include <assert.h>
#include <time.h>
#include <errno.h>
#include <stddef.h>

#include "pthread.h"
#include "pthread_int.h"
#include "reentrant.h"

#define	_RW_LOCKED		0
#define	_RW_WANT_WRITE		1
#define	_RW_WANT_READ		2

#if __GNUC_PREREQ__(3, 0)
#define	NOINLINE		__attribute ((noinline))
#else
#define	NOINLINE		/* nothing */
#endif

static int pthread__rwlock_wrlock(pthread_rwlock_t *, const struct timespec *);
static int pthread__rwlock_rdlock(pthread_rwlock_t *, const struct timespec *);
static void pthread__rwlock_early(void *);

int	_pthread_rwlock_held_np(pthread_rwlock_t *);
int	_pthread_rwlock_rdheld_np(pthread_rwlock_t *);
int	_pthread_rwlock_wrheld_np(pthread_rwlock_t *);

#ifndef lint
__weak_alias(pthread_rwlock_held_np,_pthread_rwlock_held_np)
__weak_alias(pthread_rwlock_rdheld_np,_pthread_rwlock_rdheld_np)
__weak_alias(pthread_rwlock_wrheld_np,_pthread_rwlock_wrheld_np)
#endif

__strong_alias(__libc_rwlock_init,pthread_rwlock_init)
__strong_alias(__libc_rwlock_rdlock,pthread_rwlock_rdlock)
__strong_alias(__libc_rwlock_wrlock,pthread_rwlock_wrlock)
__strong_alias(__libc_rwlock_tryrdlock,pthread_rwlock_tryrdlock)
__strong_alias(__libc_rwlock_trywrlock,pthread_rwlock_trywrlock)
__strong_alias(__libc_rwlock_unlock,pthread_rwlock_unlock)
__strong_alias(__libc_rwlock_destroy,pthread_rwlock_destroy)

static inline uintptr_t
rw_cas(pthread_rwlock_t *ptr, uintptr_t o, uintptr_t n)
{

	return (uintptr_t)atomic_cas_ptr(&ptr->ptr_owner, (void *)o,
	    (void *)n);
}
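
/*
 * A note on the lock word for readers of this file: ptr_owner packs
 * either the owning writer's pthread_t (with RW_WRITE_LOCKED set) or
 * a reader count scaled by RW_READ_INCR, together with the low flag
 * bits RW_HAS_WAITERS and RW_WRITE_WANTED; pthread_t alignment keeps
 * the flag bits clear (see the RW_FLAGMASK assertions below).  The
 * authoritative bit definitions are shared with the kernel's rwlocks
 * (see <sys/rwlock.h>).  Every update of the word below follows the
 * same optimistic retry pattern, sketched here:
 *
 *	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
 *		new = ...value computed from owner...;
 *		next = rw_cas(ptr, owner, new);
 *		if (owner == next)
 *			break;		(the CAS succeeded)
 *	}
 */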

int
pthread_rwlock_init(pthread_rwlock_t *ptr,
	    const pthread_rwlockattr_t *attr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_init_stub(ptr, attr);

	if (attr && (attr->ptra_magic != _PT_RWLOCKATTR_MAGIC))
		return EINVAL;
	ptr->ptr_magic = _PT_RWLOCK_MAGIC;
	PTQ_INIT(&ptr->ptr_rblocked);
	PTQ_INIT(&ptr->ptr_wblocked);
	ptr->ptr_nreaders = 0;
	ptr->ptr_owner = NULL;

	return 0;
}
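
/*
 * Caller-side lifecycle, as a sketch of standard POSIX usage (nothing
 * here is specific to this implementation):
 *
 *	pthread_rwlock_t lock;
 *	int error;
 *
 *	error = pthread_rwlock_init(&lock, NULL);
 *	...
 *	pthread_rwlock_rdlock(&lock);	(shared section: many readers)
 *	pthread_rwlock_unlock(&lock);
 *	pthread_rwlock_wrlock(&lock);	(exclusive section: one writer)
 *	pthread_rwlock_unlock(&lock);
 *	...
 *	pthread_rwlock_destroy(&lock);
 */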


int
pthread_rwlock_destroy(pthread_rwlock_t *ptr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_destroy_stub(ptr);

	if ((ptr->ptr_magic != _PT_RWLOCK_MAGIC) ||
	    (!PTQ_EMPTY(&ptr->ptr_rblocked)) ||
	    (!PTQ_EMPTY(&ptr->ptr_wblocked)) ||
	    (ptr->ptr_nreaders != 0) ||
	    (ptr->ptr_owner != NULL))
		return EINVAL;
	ptr->ptr_magic = _PT_RWLOCK_DEAD;

	return 0;
}

/*
 * We want function call overhead here: keeping the pause out of line
 * makes each spin iteration a real call, adding a little delay beyond
 * pthread__smt_pause() itself.
 */
NOINLINE static void
pthread__rwlock_pause(void)
{

	pthread__smt_pause();
}

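/*
 * Spinning is only profitable when the lock is write held by a thread
 * that is currently running on another CPU, since such an owner can be
 * expected to release soon; a lock held by readers, or by a writer
 * that is off-CPU or itself blocking, will not free up quickly.  The
 * lc_curcpu check below is how "currently running" is detected.
 */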
NOINLINE static int
pthread__rwlock_spin(uintptr_t owner)
{
	pthread_t thread;
	unsigned int i;

	thread = (pthread_t)(owner & RW_THREAD);
	if (thread == NULL || (owner & ~RW_THREAD) != RW_WRITE_LOCKED)
		return 0;
	if (thread->pt_lwpctl->lc_curcpu == LWPCTL_CPU_NONE ||
	    thread->pt_blocking)
		return 0;
	for (i = 128; i != 0; i--)
		pthread__rwlock_pause();
	return 1;
}

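/*
 * Slow path, shared in structure by the read and write acquires that
 * follow: take the hashed interlock, set RW_HAS_WAITERS with a CAS so
 * that a concurrent unlock cannot miss the sleeper, queue (readers at
 * the head, writers FIFO at the tail), and park.  Release is by
 * direct handoff: the waker transfers ownership and stores _RW_LOCKED
 * in pt_rwlocked before waking, so a woken thread checks pt_rwlocked
 * rather than re-contending for the lock word.
 */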
static int
pthread__rwlock_rdlock(pthread_rwlock_t *ptr, const struct timespec *ts)
{
	uintptr_t owner, next;
	pthread_mutex_t *interlock;
	pthread_t self;
	int error;

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear, then try to acquire the lock.
		 */
		if ((owner & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) == 0) {
			next = rw_cas(ptr, owner, owner + RW_READ_INCR);
			if (owner == next) {
				/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			continue;
		}

		self = pthread__self();
		if ((owner & RW_THREAD) == (uintptr_t)self)
			return EDEADLK;

		/* If held write locked and no waiters, spin. */
		if (pthread__rwlock_spin(owner)) {
			while (pthread__rwlock_spin(owner)) {
				owner = (uintptr_t)ptr->ptr_owner;
			}
			next = owner;
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);

		/*
		 * Mark the rwlock as having waiters.  If the set fails,
		 * then we may not need to sleep and should spin again.
		 */
		next = rw_cas(ptr, owner, owner | RW_HAS_WAITERS);
		if (owner != next) {
			pthread_mutex_unlock(interlock);
			continue;
		}

		/* The waiters bit is set -- it's safe to sleep. */
		PTQ_INSERT_HEAD(&ptr->ptr_rblocked, self, pt_sleep);
		ptr->ptr_nreaders++;
		self->pt_rwlocked = _RW_WANT_READ;
		self->pt_sleepobj = &ptr->ptr_rblocked;
		self->pt_early = pthread__rwlock_early;
		error = pthread__park(self, interlock, &ptr->ptr_rblocked,
		    ts, 0, &ptr->ptr_rblocked);

		/* Did we get the lock? */
		if (self->pt_rwlocked == _RW_LOCKED) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
		if (error != 0)
			return error;

		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "direct handoff failure");
	}
}


int
pthread_rwlock_tryrdlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, next;

	if (__predict_false(__uselibcstub))
		return __libc_rwlock_tryrdlock_stub(ptr);

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	/*
	 * Don't get a read lock if there is a writer or if there are
	 * waiting writers; i.e. prefer writers to readers.  This
	 * strategy is dictated by SUSv3.
	 */
	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		if ((owner & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) != 0)
			return EBUSY;
		next = rw_cas(ptr, owner, owner + RW_READ_INCR);
		if (owner == next) {
			/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
	}
}
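
/*
 * Non-blocking usage, as a sketch: a caller that has useful work to
 * do without the lock probes and falls back on EBUSY instead of
 * sleeping:
 *
 *	if ((error = pthread_rwlock_tryrdlock(&lock)) == 0) {
 *		... read the shared state ...
 *		pthread_rwlock_unlock(&lock);
 *	} else if (error == EBUSY)
 *		... take the fallback path ...
 */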

static int
pthread__rwlock_wrlock(pthread_rwlock_t *ptr, const struct timespec *ts)
{
	uintptr_t owner, next;
	pthread_mutex_t *interlock;
	pthread_t self;
	int error;

	self = pthread__self();
	_DIAGASSERT(((uintptr_t)self & RW_FLAGMASK) == 0);

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear, then try to acquire the lock.
		 */
		if ((owner & RW_THREAD) == 0) {
			next = rw_cas(ptr, owner,
			    (uintptr_t)self | RW_WRITE_LOCKED);
			if (owner == next) {
				/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			continue;
		}

		if ((owner & RW_THREAD) == (uintptr_t)self)
			return EDEADLK;

		/* If held write locked and no waiters, spin. */
		if (pthread__rwlock_spin(owner)) {
			while (pthread__rwlock_spin(owner)) {
				owner = (uintptr_t)ptr->ptr_owner;
			}
			next = owner;
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);

		/*
		 * Mark the rwlock as having waiters.  If the set fails,
		 * then we may not need to sleep and should spin again.
		 */
		next = rw_cas(ptr, owner,
		    owner | RW_HAS_WAITERS | RW_WRITE_WANTED);
		if (owner != next) {
			pthread_mutex_unlock(interlock);
			continue;
		}

		/* The waiters bit is set -- it's safe to sleep. */
		PTQ_INSERT_TAIL(&ptr->ptr_wblocked, self, pt_sleep);
		self->pt_rwlocked = _RW_WANT_WRITE;
		self->pt_sleepobj = &ptr->ptr_wblocked;
		self->pt_early = pthread__rwlock_early;
		error = pthread__park(self, interlock, &ptr->ptr_wblocked,
		    ts, 0, &ptr->ptr_wblocked);

		/* Did we get the lock? */
		if (self->pt_rwlocked == _RW_LOCKED) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
		if (error != 0)
			return error;

		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "direct handoff failure");
	}
}


int
pthread_rwlock_trywrlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, next;
	pthread_t self;

	if (__predict_false(__uselibcstub))
		return __libc_rwlock_trywrlock_stub(ptr);

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	self = pthread__self();
	_DIAGASSERT(((uintptr_t)self & RW_FLAGMASK) == 0);

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		if (owner != 0)
			return EBUSY;
		next = rw_cas(ptr, owner, (uintptr_t)self | RW_WRITE_LOCKED);
		if (owner == next) {
			/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
	}
}

int
pthread_rwlock_rdlock(pthread_rwlock_t *ptr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_rdlock_stub(ptr);

	return pthread__rwlock_rdlock(ptr, NULL);
}

int
pthread_rwlock_timedrdlock(pthread_rwlock_t *ptr,
			   const struct timespec *abs_timeout)
{
	if (abs_timeout == NULL)
		return EINVAL;
	if ((abs_timeout->tv_nsec >= 1000000000) ||
	    (abs_timeout->tv_nsec < 0) ||
	    (abs_timeout->tv_sec < 0))
		return EINVAL;

	return pthread__rwlock_rdlock(ptr, abs_timeout);
}
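
/*
 * The timeout is an absolute time on CLOCK_REALTIME, per POSIX.  A
 * caller-side sketch of building one:
 *
 *	struct timespec abs;
 *
 *	clock_gettime(CLOCK_REALTIME, &abs);
 *	abs.tv_sec += 5;		(give up after roughly 5 seconds)
 *	error = pthread_rwlock_timedrdlock(&lock, &abs);
 *	if (error == ETIMEDOUT)
 *		... the lock never became available ...
 */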

int
pthread_rwlock_wrlock(pthread_rwlock_t *ptr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_wrlock_stub(ptr);

	return pthread__rwlock_wrlock(ptr, NULL);
}

int
pthread_rwlock_timedwrlock(pthread_rwlock_t *ptr,
			   const struct timespec *abs_timeout)
{
	if (abs_timeout == NULL)
		return EINVAL;
	if ((abs_timeout->tv_nsec >= 1000000000) ||
	    (abs_timeout->tv_nsec < 0) ||
	    (abs_timeout->tv_sec < 0))
		return EINVAL;

	return pthread__rwlock_wrlock(ptr, abs_timeout);
}


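/*
 * A note on ordering: where atomic operations are not full barriers
 * (PTHREAD__ATOMIC_IS_MEMBAR undefined), each successful acquire above
 * issues membar_enter(), and the release below issues membar_exit()
 * first, so stores made under the lock are visible to the next holder
 * before the lock appears free.
 */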
int
pthread_rwlock_unlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, decr, new, next;
	pthread_mutex_t *interlock;
	pthread_t self, thread;

	if (__predict_false(__uselibcstub))
		return __libc_rwlock_unlock_stub(ptr);

#ifdef ERRORCHECK
	if ((ptr == NULL) || (ptr->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif

#ifndef PTHREAD__ATOMIC_IS_MEMBAR
	membar_exit();
#endif

	/*
	 * Since we used an add operation to set the required lock
	 * bits, we can use a subtract to clear them, which makes
	 * the read-release and write-release path similar.
	 */
	owner = (uintptr_t)ptr->ptr_owner;
	if ((owner & RW_WRITE_LOCKED) != 0) {
		self = pthread__self();
		decr = (uintptr_t)self | RW_WRITE_LOCKED;
		if ((owner & RW_THREAD) != (uintptr_t)self) {
			return EPERM;
		}
	} else {
		decr = RW_READ_INCR;
		if (owner == 0) {
			return EPERM;
		}
	}

	for (;; owner = next) {
		/*
		 * Compute what we expect the new value of the lock to be.
		 * Only proceed to do direct handoff if there are waiters,
		 * and if the lock would become unowned.
		 */
		new = (owner - decr);
		if ((new & (RW_THREAD | RW_HAS_WAITERS)) != RW_HAS_WAITERS) {
			next = rw_cas(ptr, owner, new);
			if (owner == next) {
				/* Released! */
				return 0;
			}
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we can adjust
		 * the waiter bits.  We must check to see if there are
		 * still waiters before proceeding.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);
		owner = (uintptr_t)ptr->ptr_owner;
		if ((owner & RW_HAS_WAITERS) == 0) {
			pthread_mutex_unlock(interlock);
			next = owner;
			continue;
		}

		/*
		 * Give the lock away.  SUSv3 dictates that we must give
		 * preference to writers.
		 */
		self = pthread__self();
		if ((thread = PTQ_FIRST(&ptr->ptr_wblocked)) != NULL) {
			_DIAGASSERT(((uintptr_t)thread & RW_FLAGMASK) == 0);
			new = (uintptr_t)thread | RW_WRITE_LOCKED;

			if (PTQ_NEXT(thread, pt_sleep) != NULL)
				new |= RW_HAS_WAITERS | RW_WRITE_WANTED;
			else if (ptr->ptr_nreaders != 0)
				new |= RW_HAS_WAITERS;

			/*
			 * Set in the new value.  The lock becomes owned
			 * by the writer that we are about to wake.
			 */
			(void)atomic_swap_ptr(&ptr->ptr_owner, (void *)new);

			/* Wake the writer. */
			thread->pt_rwlocked = _RW_LOCKED;
			pthread__unpark(&ptr->ptr_wblocked, self,
			    interlock);
		} else {
			new = 0;
			PTQ_FOREACH(thread, &ptr->ptr_rblocked, pt_sleep) {
				/*
				 * May have already been handed the lock,
				 * since pthread__unpark_all() can release
				 * our interlock before awakening all
				 * threads.
				 */
				if (thread->pt_sleepobj == NULL)
					continue;
				new += RW_READ_INCR;
				thread->pt_rwlocked = _RW_LOCKED;
			}

			/*
			 * Set in the new value.  The lock becomes owned
			 * by the readers that we are about to wake.
			 */
			(void)atomic_swap_ptr(&ptr->ptr_owner, (void *)new);

			/* Wake up all sleeping readers. */
			ptr->ptr_nreaders = 0;
			pthread__unpark_all(&ptr->ptr_rblocked, self,
			    interlock);
		}
		pthread_mutex_unlock(interlock);

		return 0;
	}
}
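
/*
 * To illustrate the add/subtract symmetry above: with three readers
 * in, the word holds 3 * RW_READ_INCR plus any flag bits, and each
 * read-unlock subtracts RW_READ_INCR; a write-unlock subtracts the
 * owner's pthread_t together with RW_WRITE_LOCKED.  Either way the
 * thread/count field of the word reaches zero when the last holder
 * leaves.
 */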

/*
 * Called when a timedlock awakens early to adjust the waiter bits.
 * The rwlock's interlock is held on entry, and the caller has been
 * removed from the waiters lists.
 */
static void
pthread__rwlock_early(void *obj)
{
	uintptr_t owner, set, new, next;
	pthread_rwlock_t *ptr;
	pthread_t self;
	u_int off;

	self = pthread__self();

	switch (self->pt_rwlocked) {
	case _RW_WANT_READ:
		off = offsetof(pthread_rwlock_t, ptr_rblocked);
		break;
	case _RW_WANT_WRITE:
		off = offsetof(pthread_rwlock_t, ptr_wblocked);
		break;
	default:
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "bad value of pt_rwlocked");
		off = 0;
		/* NOTREACHED */
		break;
	}

	/* LINTED mind your own business */
	ptr = (pthread_rwlock_t *)((uint8_t *)obj - off);
	owner = (uintptr_t)ptr->ptr_owner;

	if ((owner & RW_THREAD) == 0) {
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "lock not held");
	}

	if (!PTQ_EMPTY(&ptr->ptr_wblocked))
		set = RW_HAS_WAITERS | RW_WRITE_WANTED;
	else if (ptr->ptr_nreaders != 0)
		set = RW_HAS_WAITERS;
	else
		set = 0;

	for (;; owner = next) {
		new = (owner & ~(RW_HAS_WAITERS | RW_WRITE_WANTED)) | set;
		next = rw_cas(ptr, owner, new);
		if (owner == next)
			break;
	}
}
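
/*
 * The offsetof() arithmetic above is the usual container-of trick:
 * the sleep object passed in is the address of one of the two wait
 * queues embedded in the rwlock, i.e. obj == &ptr->ptr_rblocked or
 * obj == &ptr->ptr_wblocked, so subtracting that member's offset
 * recovers the enclosing pthread_rwlock_t.
 */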

int
_pthread_rwlock_held_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	if ((owner & RW_WRITE_LOCKED) != 0)
		return (owner & RW_THREAD) == (uintptr_t)pthread__self();
	return (owner & RW_THREAD) != 0;
}

int
_pthread_rwlock_rdheld_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	return (owner & RW_THREAD) != 0 && (owner & RW_WRITE_LOCKED) == 0;
}

int
_pthread_rwlock_wrheld_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	return (owner & (RW_THREAD | RW_WRITE_LOCKED)) ==
	    ((uintptr_t)pthread__self() | RW_WRITE_LOCKED);
}
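
/*
 * The three predicates above read the lock word unlocked and are
 * meant for assertions and diagnostics.  Only the "calling thread
 * holds the write lock" answer is stable; the others can change the
 * moment they are returned.
 */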

#ifdef _PTHREAD_PSHARED
int
pthread_rwlockattr_getpshared(const pthread_rwlockattr_t * __restrict attr,
    int * __restrict pshared)
{
	*pshared = PTHREAD_PROCESS_PRIVATE;
	return 0;
}

int
pthread_rwlockattr_setpshared(pthread_rwlockattr_t *attr, int pshared)
{

	switch (pshared) {
	case PTHREAD_PROCESS_PRIVATE:
		return 0;
	case PTHREAD_PROCESS_SHARED:
		return ENOSYS;
	}
	return EINVAL;
}
#endif

int
pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
{

	if (attr == NULL)
		return EINVAL;
	attr->ptra_magic = _PT_RWLOCKATTR_MAGIC;

	return 0;
}


int
pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
{

	if ((attr == NULL) ||
	    (attr->ptra_magic != _PT_RWLOCKATTR_MAGIC))
		return EINVAL;
	attr->ptra_magic = _PT_RWLOCKATTR_DEAD;

	return 0;
}
    692