/* pthread_rwlock.c, NetBSD libpthread, revision 1.38 */
      1 /*	$NetBSD: pthread_rwlock.c,v 1.38 2020/01/31 17:52:14 kamil Exp $ */
      2 
      3 /*-
      4  * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Nathan J. Williams, by Jason R. Thorpe, and by Andrew Doran.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 #include <sys/cdefs.h>
     33 __RCSID("$NetBSD: pthread_rwlock.c,v 1.38 2020/01/31 17:52:14 kamil Exp $");
     34 
     35 #include <sys/types.h>
     36 #include <sys/lwpctl.h>
     37 
     38 #include <assert.h>
     39 #include <time.h>
     40 #include <errno.h>
     41 #include <stddef.h>
     42 
     43 #include "pthread.h"
     44 #include "pthread_int.h"
     45 #include "reentrant.h"
     46 
     47 #define	_RW_LOCKED		0
     48 #define	_RW_WANT_WRITE		1
     49 #define	_RW_WANT_READ		2
     50 
     51 #if __GNUC_PREREQ__(3, 0)
     52 #define	NOINLINE		__attribute ((noinline))
     53 #else
     54 #define	NOINLINE		/* nothing */
     55 #endif
     56 
     57 static int pthread__rwlock_wrlock(pthread_rwlock_t *, const struct timespec *);
     58 static int pthread__rwlock_rdlock(pthread_rwlock_t *, const struct timespec *);
     59 static void pthread__rwlock_early(void *);
     60 
     61 int	_pthread_rwlock_held_np(pthread_rwlock_t *);
     62 int	_pthread_rwlock_rdheld_np(pthread_rwlock_t *);
     63 int	_pthread_rwlock_wrheld_np(pthread_rwlock_t *);
     64 
     65 #ifndef lint
     66 __weak_alias(pthread_rwlock_held_np,_pthread_rwlock_held_np)
     67 __weak_alias(pthread_rwlock_rdheld_np,_pthread_rwlock_rdheld_np)
     68 __weak_alias(pthread_rwlock_wrheld_np,_pthread_rwlock_wrheld_np)
     69 #endif
     70 
     71 __strong_alias(__libc_rwlock_init,pthread_rwlock_init)
     72 __strong_alias(__libc_rwlock_rdlock,pthread_rwlock_rdlock)
     73 __strong_alias(__libc_rwlock_wrlock,pthread_rwlock_wrlock)
     74 __strong_alias(__libc_rwlock_tryrdlock,pthread_rwlock_tryrdlock)
     75 __strong_alias(__libc_rwlock_trywrlock,pthread_rwlock_trywrlock)
     76 __strong_alias(__libc_rwlock_unlock,pthread_rwlock_unlock)
     77 __strong_alias(__libc_rwlock_destroy,pthread_rwlock_destroy)
     78 
     79 static inline uintptr_t
     80 rw_cas(pthread_rwlock_t *ptr, uintptr_t o, uintptr_t n)
     81 {
     82 
     83 	return (uintptr_t)atomic_cas_ptr(&ptr->ptr_owner, (void *)o,
     84 	    (void *)n);
     85 }
     86 
     87 int
     88 pthread_rwlock_init(pthread_rwlock_t *ptr,
     89 	    const pthread_rwlockattr_t *attr)
     90 {
     91 	if (__predict_false(__uselibcstub))
     92 		return __libc_rwlock_init_stub(ptr, attr);
     93 
     94 	pthread__error(EINVAL, "Invalid rwlock attribute",
     95 	    attr == NULL || attr->ptra_magic == _PT_RWLOCKATTR_MAGIC);
     96 
     97 	ptr->ptr_magic = _PT_RWLOCK_MAGIC;
     98 	PTQ_INIT(&ptr->ptr_rblocked);
     99 	PTQ_INIT(&ptr->ptr_wblocked);
    100 	ptr->ptr_nreaders = 0;
    101 	ptr->ptr_owner = NULL;
    102 
    103 	return 0;
    104 }
    105 
    106 
    107 int
    108 pthread_rwlock_destroy(pthread_rwlock_t *ptr)
    109 {
    110 	if (__predict_false(__uselibcstub))
    111 		return __libc_rwlock_destroy_stub(ptr);
    112 
    113 	pthread__error(EINVAL, "Invalid rwlock",
    114 	    ptr->ptr_magic == _PT_RWLOCK_MAGIC);
    115 
    116 	if ((!PTQ_EMPTY(&ptr->ptr_rblocked)) ||
    117 	    (!PTQ_EMPTY(&ptr->ptr_wblocked)) ||
    118 	    (ptr->ptr_nreaders != 0) ||
    119 	    (ptr->ptr_owner != NULL))
    120 		return EINVAL;
    121 	ptr->ptr_magic = _PT_RWLOCK_DEAD;
    122 
    123 	return 0;
    124 }
    125 
/*
 * We want function call overhead: keeping this NOINLINE makes the
 * spin loop in pthread__rwlock_spin() cheap on the bus and easy to
 * spot in a profile.
 */
NOINLINE static void
pthread__rwlock_pause(void)
{

	pthread__smt_pause();
}
    133 
    134 NOINLINE static int
    135 pthread__rwlock_spin(uintptr_t owner)
    136 {
    137 	pthread_t thread;
    138 	unsigned int i;
    139 
    140 	if ((owner & ~RW_THREAD) != RW_WRITE_LOCKED)
    141 		return 0;
    142 
    143 	thread = (pthread_t)(owner & RW_THREAD);
    144 	if (__predict_false(thread == NULL) ||
    145 	    thread->pt_lwpctl->lc_curcpu == LWPCTL_CPU_NONE)
    146 		return 0;
    147 
    148 	for (i = 128; i != 0; i--)
    149 		pthread__rwlock_pause();
    150 	return 1;
    151 }
    152 
/*
 * Slow path shared by pthread_rwlock_rdlock() and
 * pthread_rwlock_timedrdlock().  If 'ts' is non-NULL it is an
 * absolute timeout for the sleep.  Returns 0 on acquisition,
 * EDEADLK if the caller itself write-holds the lock, or the error
 * from pthread__park() (e.g. a timeout).
 */
static int
pthread__rwlock_rdlock(pthread_rwlock_t *ptr, const struct timespec *ts)
{
	uintptr_t owner, next;
	pthread_mutex_t *interlock;
	pthread_t self;
	int error;

#ifdef ERRORCHECK
	pthread__error(EINVAL, "Invalid rwlock",
	    ptr->ptr_magic == _PT_RWLOCK_MAGIC);
#endif

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear, then try to acquire the lock.
		 */
		if ((owner & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) == 0) {
			next = rw_cas(ptr, owner, owner + RW_READ_INCR);
			if (owner == next) {
				/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
				/* Acquire barrier when CAS is not one. */
				membar_enter();
#endif
				return 0;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			continue;
		}

		self = pthread__self();
		/* Write-held by the caller itself: report deadlock. */
		if ((owner & RW_THREAD) == (uintptr_t)self)
			return EDEADLK;

		/* If held write locked and no waiters, spin. */
		if (pthread__rwlock_spin(owner)) {
			while (pthread__rwlock_spin(owner)) {
				owner = (uintptr_t)ptr->ptr_owner;
			}
			next = owner;
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);

		/*
		 * Mark the rwlock as having waiters.  If the set fails,
		 * then we may not need to sleep and should spin again.
		 */
		next = rw_cas(ptr, owner, owner | RW_HAS_WAITERS);
		if (owner != next) {
			pthread_mutex_unlock(interlock);
			continue;
		}

		/*
		 * The waiters bit is set - it's safe to sleep.
		 * pt_early lets a sleeper that wakes without the lock
		 * (e.g. on timeout) repair the waiter bits on the way out.
		 */
	    	PTQ_INSERT_HEAD(&ptr->ptr_rblocked, self, pt_sleep);
	    	ptr->ptr_nreaders++;
		self->pt_rwlocked = _RW_WANT_READ;
		self->pt_sleepobj = &ptr->ptr_rblocked;
		self->pt_early = pthread__rwlock_early;
		error = pthread__park(self, interlock, &ptr->ptr_rblocked,
		    ts, 0, &ptr->ptr_rblocked);

		/* Did we get the lock by direct handoff from unlock? */
		if (self->pt_rwlocked == _RW_LOCKED) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
		if (error != 0)
			return error;

		/* Woken without the lock and without an error: a bug. */
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "direct handoff failure");
	}
}
    241 
    242 
    243 int
    244 pthread_rwlock_tryrdlock(pthread_rwlock_t *ptr)
    245 {
    246 	uintptr_t owner, next;
    247 
    248 	if (__predict_false(__uselibcstub))
    249 		return __libc_rwlock_tryrdlock_stub(ptr);
    250 
    251 #ifdef ERRORCHECK
    252 	pthread__error(EINVAL, "Invalid rwlock",
    253 	    ptr->ptr_magic == _PT_RWLOCK_MAGIC);
    254 #endif
    255 
    256 	/*
    257 	 * Don't get a readlock if there is a writer or if there are waiting
    258 	 * writers; i.e. prefer writers to readers. This strategy is dictated
    259 	 * by SUSv3.
    260 	 */
    261 	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
    262 		if ((owner & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) != 0)
    263 			return EBUSY;
    264 		next = rw_cas(ptr, owner, owner + RW_READ_INCR);
    265 		if (owner == next) {
    266 			/* Got it! */
    267 #ifndef PTHREAD__ATOMIC_IS_MEMBAR
    268 			membar_enter();
    269 #endif
    270 			return 0;
    271 		}
    272 	}
    273 }
    274 
/*
 * Slow path shared by pthread_rwlock_wrlock() and
 * pthread_rwlock_timedwrlock().  If 'ts' is non-NULL it is an
 * absolute timeout for the sleep.  Returns 0 on acquisition,
 * EDEADLK if the caller already holds the lock, or the error from
 * pthread__park() (e.g. a timeout).
 */
static int
pthread__rwlock_wrlock(pthread_rwlock_t *ptr, const struct timespec *ts)
{
	uintptr_t owner, next;
	pthread_mutex_t *interlock;
	pthread_t self;
	int error;

	self = pthread__self();
	/* Thread pointers must not collide with the flag bits. */
	_DIAGASSERT(((uintptr_t)self & RW_FLAGMASK) == 0);

#ifdef ERRORCHECK
	pthread__error(EINVAL, "Invalid rwlock",
	    ptr->ptr_magic == _PT_RWLOCK_MAGIC);
#endif

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear, then try to acquire the lock.
		 */
		if ((owner & RW_THREAD) == 0) {
			next = rw_cas(ptr, owner,
			    (uintptr_t)self | RW_WRITE_LOCKED);
			if (owner == next) {
				/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
				/* Acquire barrier when CAS is not one. */
				membar_enter();
#endif
				return 0;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			continue;
		}

		/* Already held by the caller (read or write): deadlock. */
		if ((owner & RW_THREAD) == (uintptr_t)self)
			return EDEADLK;

		/* If held write locked and no waiters, spin. */
		if (pthread__rwlock_spin(owner)) {
			while (pthread__rwlock_spin(owner)) {
				owner = (uintptr_t)ptr->ptr_owner;
			}
			next = owner;
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);

		/*
		 * Mark the rwlock as having waiters.  If the set fails,
		 * then we may not need to sleep and should spin again.
		 */
		next = rw_cas(ptr, owner,
		    owner | RW_HAS_WAITERS | RW_WRITE_WANTED);
		if (owner != next) {
			pthread_mutex_unlock(interlock);
			continue;
		}

		/*
		 * The waiters bit is set - it's safe to sleep.
		 * Writers queue at the tail, so unlock hands the lock
		 * to the longest waiting writer first.
		 */
	    	PTQ_INSERT_TAIL(&ptr->ptr_wblocked, self, pt_sleep);
		self->pt_rwlocked = _RW_WANT_WRITE;
		self->pt_sleepobj = &ptr->ptr_wblocked;
		self->pt_early = pthread__rwlock_early;
		error = pthread__park(self, interlock, &ptr->ptr_wblocked,
		    ts, 0, &ptr->ptr_wblocked);

		/* Did we get the lock by direct handoff from unlock? */
		if (self->pt_rwlocked == _RW_LOCKED) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
		if (error != 0)
			return error;

		/* Woken without the lock and without an error: a bug. */
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "direct handoff failure");
	}
}
    366 
    367 
    368 int
    369 pthread_rwlock_trywrlock(pthread_rwlock_t *ptr)
    370 {
    371 	uintptr_t owner, next;
    372 	pthread_t self;
    373 
    374 	if (__predict_false(__uselibcstub))
    375 		return __libc_rwlock_trywrlock_stub(ptr);
    376 
    377 #ifdef ERRORCHECK
    378 	pthread__error(EINVAL, "Invalid rwlock",
    379 	    ptr->ptr_magic == _PT_RWLOCK_MAGIC);
    380 #endif
    381 
    382 	self = pthread__self();
    383 	_DIAGASSERT(((uintptr_t)self & RW_FLAGMASK) == 0);
    384 
    385 	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
    386 		if (owner != 0)
    387 			return EBUSY;
    388 		next = rw_cas(ptr, owner, (uintptr_t)self | RW_WRITE_LOCKED);
    389 		if (owner == next) {
    390 			/* Got it! */
    391 #ifndef PTHREAD__ATOMIC_IS_MEMBAR
    392 			membar_enter();
    393 #endif
    394 			return 0;
    395 		}
    396 	}
    397 }
    398 
    399 int
    400 pthread_rwlock_rdlock(pthread_rwlock_t *ptr)
    401 {
    402 	if (__predict_false(__uselibcstub))
    403 		return __libc_rwlock_rdlock_stub(ptr);
    404 
    405 	return pthread__rwlock_rdlock(ptr, NULL);
    406 }
    407 
    408 int
    409 pthread_rwlock_timedrdlock(pthread_rwlock_t *ptr,
    410 			   const struct timespec *abs_timeout)
    411 {
    412 	if (abs_timeout == NULL)
    413 		return EINVAL;
    414 	if ((abs_timeout->tv_nsec >= 1000000000) ||
    415 	    (abs_timeout->tv_nsec < 0) ||
    416 	    (abs_timeout->tv_sec < 0))
    417 		return EINVAL;
    418 
    419 	return pthread__rwlock_rdlock(ptr, abs_timeout);
    420 }
    421 
    422 int
    423 pthread_rwlock_wrlock(pthread_rwlock_t *ptr)
    424 {
    425 	if (__predict_false(__uselibcstub))
    426 		return __libc_rwlock_wrlock_stub(ptr);
    427 
    428 	return pthread__rwlock_wrlock(ptr, NULL);
    429 }
    430 
    431 int
    432 pthread_rwlock_timedwrlock(pthread_rwlock_t *ptr,
    433 			   const struct timespec *abs_timeout)
    434 {
    435 	if (abs_timeout == NULL)
    436 		return EINVAL;
    437 	if ((abs_timeout->tv_nsec >= 1000000000) ||
    438 	    (abs_timeout->tv_nsec < 0) ||
    439 	    (abs_timeout->tv_sec < 0))
    440 		return EINVAL;
    441 
    442 	return pthread__rwlock_wrlock(ptr, abs_timeout);
    443 }
    444 
    445 
/*
 * Release a read or write hold.  In the common case (no waiters, or
 * the lock does not become unowned) a single CAS suffices.  Otherwise
 * take the interlock and hand the lock off directly: to the longest
 * waiting writer if there is one (writer preference per SUSv3), else
 * to all blocked readers at once.  Returns EPERM if the caller does
 * not hold the lock.
 */
int
pthread_rwlock_unlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, decr, new, next;
	pthread_mutex_t *interlock;
	pthread_t self, thread;

	if (__predict_false(__uselibcstub))
		return __libc_rwlock_unlock_stub(ptr);

#ifdef ERRORCHECK
	pthread__error(EINVAL, "Invalid rwlock",
	    ptr->ptr_magic == _PT_RWLOCK_MAGIC);
#endif

#ifndef PTHREAD__ATOMIC_IS_MEMBAR
	/* Release barrier: order prior writes before the release. */
	membar_exit();
#endif

	/*
	 * Since we used an add operation to set the required lock
	 * bits, we can use a subtract to clear them, which makes
	 * the read-release and write-release path similar.
	 */
	owner = (uintptr_t)ptr->ptr_owner;
	if ((owner & RW_WRITE_LOCKED) != 0) {
		self = pthread__self();
		decr = (uintptr_t)self | RW_WRITE_LOCKED;
		if ((owner & RW_THREAD) != (uintptr_t)self) {
			return EPERM;
		}
	} else {
		decr = RW_READ_INCR;
		if (owner == 0) {
			return EPERM;
		}
	}

	for (;; owner = next) {
		/*
		 * Compute what we expect the new value of the lock to be.
		 * Only proceed to do direct handoff if there are waiters,
		 * and if the lock would become unowned.
		 */
		new = (owner - decr);
		if ((new & (RW_THREAD | RW_HAS_WAITERS)) != RW_HAS_WAITERS) {
			next = rw_cas(ptr, owner, new);
			if (owner == next) {
				/* Released! */
				return 0;
			}
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we can adjust
		 * the waiter bits.  We must check to see if there are
		 * still waiters before proceeding.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);
		owner = (uintptr_t)ptr->ptr_owner;
		if ((owner & RW_HAS_WAITERS) == 0) {
			pthread_mutex_unlock(interlock);
			next = owner;
			continue;
		}

		/*
		 * Give the lock away.  SUSv3 dictates that we must give
		 * preference to writers.
		 */
		self = pthread__self();
		if ((thread = PTQ_FIRST(&ptr->ptr_wblocked)) != NULL) {
			_DIAGASSERT(((uintptr_t)thread & RW_FLAGMASK) == 0);
			new = (uintptr_t)thread | RW_WRITE_LOCKED;

			/* Preserve the waiter bits for anyone left behind. */
			if (PTQ_NEXT(thread, pt_sleep) != NULL)
				new |= RW_HAS_WAITERS | RW_WRITE_WANTED;
			else if (ptr->ptr_nreaders != 0)
				new |= RW_HAS_WAITERS;

			/*
			 * Set in the new value.  The lock becomes owned
			 * by the writer that we are about to wake.
			 */
			(void)atomic_swap_ptr(&ptr->ptr_owner, (void *)new);

			/* Wake the writer. */
			thread->pt_rwlocked = _RW_LOCKED;
			pthread__unpark(&ptr->ptr_wblocked, self,
			    interlock);
		} else {
			new = 0;
			PTQ_FOREACH(thread, &ptr->ptr_rblocked, pt_sleep) {
				/*
				 * May have already been handed the lock,
				 * since pthread__unpark_all() can release
				 * our interlock before awakening all
				 * threads.
				 */
				if (thread->pt_sleepobj == NULL)
					continue;
				new += RW_READ_INCR;
				thread->pt_rwlocked = _RW_LOCKED;
			}

			/*
			 * Set in the new value.  The lock becomes owned
			 * by the readers that we are about to wake.
			 */
			(void)atomic_swap_ptr(&ptr->ptr_owner, (void *)new);

			/* Wake up all sleeping readers. */
			ptr->ptr_nreaders = 0;
			pthread__unpark_all(&ptr->ptr_rblocked, self,
			    interlock);
		}
		pthread_mutex_unlock(interlock);

		return 0;
	}
}
    569 
/*
 * Called when a timedlock awakens early to adjust the waiter bits.
 * The rwlock's interlock is held on entry, and the caller has been
 * removed from the waiters lists.  Recomputes RW_HAS_WAITERS /
 * RW_WRITE_WANTED from what remains on the sleep queues.
 */
static void
pthread__rwlock_early(void *obj)
{
	uintptr_t owner, set, new, next;
	pthread_rwlock_t *ptr;
	pthread_t self;
	u_int off;

	self = pthread__self();

	/*
	 * 'obj' is the sleep-queue head we were parked on; recover the
	 * containing rwlock from its offset within the structure.
	 */
	switch (self->pt_rwlocked) {
	case _RW_WANT_READ:
		off = offsetof(pthread_rwlock_t, ptr_rblocked);
		break;
	case _RW_WANT_WRITE:
		off = offsetof(pthread_rwlock_t, ptr_wblocked);
		break;
	default:
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "bad value of pt_rwlocked");
		off = 0;
		/* NOTREACHED */
		break;
	}

	/* LINTED mind your own business */
	ptr = (pthread_rwlock_t *)((uint8_t *)obj - off);
	owner = (uintptr_t)ptr->ptr_owner;

	if ((owner & RW_THREAD) == 0) {
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "lock not held");
	}

	/* Derive the waiter bits from the remaining sleepers. */
	if (!PTQ_EMPTY(&ptr->ptr_wblocked))
		set = RW_HAS_WAITERS | RW_WRITE_WANTED;
	else if (ptr->ptr_nreaders != 0)
		set = RW_HAS_WAITERS;
	else
		set = 0;

	/* Install the recomputed bits, preserving the owner field. */
	for (;; owner = next) {
		new = (owner & ~(RW_HAS_WAITERS | RW_WRITE_WANTED)) | set;
		next = rw_cas(ptr, owner, new);
		if (owner == next)
			break;
	}
}
    623 
    624 int
    625 _pthread_rwlock_held_np(pthread_rwlock_t *ptr)
    626 {
    627 	uintptr_t owner = (uintptr_t)ptr->ptr_owner;
    628 
    629 	if ((owner & RW_WRITE_LOCKED) != 0)
    630 		return (owner & RW_THREAD) == (uintptr_t)pthread__self();
    631 	return (owner & RW_THREAD) != 0;
    632 }
    633 
    634 int
    635 _pthread_rwlock_rdheld_np(pthread_rwlock_t *ptr)
    636 {
    637 	uintptr_t owner = (uintptr_t)ptr->ptr_owner;
    638 
    639 	return (owner & RW_THREAD) != 0 && (owner & RW_WRITE_LOCKED) == 0;
    640 }
    641 
    642 int
    643 _pthread_rwlock_wrheld_np(pthread_rwlock_t *ptr)
    644 {
    645 	uintptr_t owner = (uintptr_t)ptr->ptr_owner;
    646 
    647 	return (owner & (RW_THREAD | RW_WRITE_LOCKED)) ==
    648 	    ((uintptr_t)pthread__self() | RW_WRITE_LOCKED);
    649 }
    650 
    651 #ifdef _PTHREAD_PSHARED
    652 int
    653 pthread_rwlockattr_getpshared(const pthread_rwlockattr_t * __restrict attr,
    654     int * __restrict pshared)
    655 {
    656 
    657 	pthread__error(EINVAL, "Invalid rwlock attribute",
    658 	    ptr->ptra_magic == _PT_RWLOCKATTR_MAGIC);
    659 
    660 	*pshared = PTHREAD_PROCESS_PRIVATE;
    661 	return 0;
    662 }
    663 
    664 int
    665 pthread_rwlockattr_setpshared(pthread_rwlockattr_t *attr, int pshared)
    666 {
    667 
    668 	pthread__error(EINVAL, "Invalid rwlock attribute",
    669 	    ptr->ptra_magic == _PT_RWLOCKATTR_MAGIC);
    670 
    671 	switch(pshared) {
    672 	case PTHREAD_PROCESS_PRIVATE:
    673 		return 0;
    674 	case PTHREAD_PROCESS_SHARED:
    675 		return ENOSYS;
    676 	}
    677 	return EINVAL;
    678 }
    679 #endif
    680 
    681 int
    682 pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
    683 {
    684 
    685 	if (attr == NULL)
    686 		return EINVAL;
    687 	attr->ptra_magic = _PT_RWLOCKATTR_MAGIC;
    688 
    689 	return 0;
    690 }
    691 
    692 
    693 int
    694 pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
    695 {
    696 
    697 	pthread__error(EINVAL, "Invalid rwlock attribute",
    698 	    attr->ptra_magic == _PT_RWLOCKATTR_MAGIC);
    699 
    700 	attr->ptra_magic = _PT_RWLOCKATTR_DEAD;
    701 
    702 	return 0;
    703 }
    704