/*	$NetBSD: pthread_rwlock.c,v 1.35 2019/12/15 23:13:33 uwe Exp $ */

/*-
 * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, by Jason R. Thorpe, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_rwlock.c,v 1.35 2019/12/15 23:13:33 uwe Exp $");

#include <sys/types.h>
#include <sys/lwpctl.h>

#include <assert.h>
#include <time.h>
#include <errno.h>
#include <stddef.h>

#include "pthread.h"
#include "pthread_int.h"
#include "reentrant.h"

#define	_RW_LOCKED		0
#define	_RW_WANT_WRITE		1
#define	_RW_WANT_READ		2

#if __GNUC_PREREQ__(3, 0)
#define	NOINLINE		__attribute ((noinline))
#else
#define	NOINLINE		/* nothing */
#endif

static int pthread__rwlock_wrlock(pthread_rwlock_t *, const struct timespec *);
static int pthread__rwlock_rdlock(pthread_rwlock_t *, const struct timespec *);
static void pthread__rwlock_early(void *);

int	_pthread_rwlock_held_np(pthread_rwlock_t *);
int	_pthread_rwlock_rdheld_np(pthread_rwlock_t *);
int	_pthread_rwlock_wrheld_np(pthread_rwlock_t *);

#ifndef lint
__weak_alias(pthread_rwlock_held_np,_pthread_rwlock_held_np)
__weak_alias(pthread_rwlock_rdheld_np,_pthread_rwlock_rdheld_np)
__weak_alias(pthread_rwlock_wrheld_np,_pthread_rwlock_wrheld_np)
#endif

__strong_alias(__libc_rwlock_init,pthread_rwlock_init)
__strong_alias(__libc_rwlock_rdlock,pthread_rwlock_rdlock)
__strong_alias(__libc_rwlock_wrlock,pthread_rwlock_wrlock)
__strong_alias(__libc_rwlock_tryrdlock,pthread_rwlock_tryrdlock)
__strong_alias(__libc_rwlock_trywrlock,pthread_rwlock_trywrlock)
__strong_alias(__libc_rwlock_unlock,pthread_rwlock_unlock)
__strong_alias(__libc_rwlock_destroy,pthread_rwlock_destroy)

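/*
 * The __strong_alias()es above bind the __libc_rwlock_* names used
 * internally by libc to the implementations in this file, and the
 * __uselibcstub tests below are believed to fall back to libc's own
 * single-threaded stub versions when threading has not been started.
 */
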
static inline uintptr_t
rw_cas(pthread_rwlock_t *ptr, uintptr_t o, uintptr_t n)
{

	return (uintptr_t)atomic_cas_ptr(&ptr->ptr_owner, (void *)o,
	    (void *)n);
}

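/*
 * A sketch of the lock word, as used below (the authoritative bit
 * definitions are the RW_* macros, not this comment): ptr_owner packs
 * the whole lock state into one pointer-sized value updated with
 * rw_cas().  A write-held lock stores the owning thread pointer with
 * RW_WRITE_LOCKED set in the low bits; a read-held lock stores the
 * reader count in units of RW_READ_INCR; RW_HAS_WAITERS and
 * RW_WRITE_WANTED record sleeping waiters.
 */
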
int
pthread_rwlock_init(pthread_rwlock_t *ptr,
	    const pthread_rwlockattr_t *attr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_init_stub(ptr, attr);

	if (attr && (attr->ptra_magic != _PT_RWLOCKATTR_MAGIC))
		return EINVAL;
	ptr->ptr_magic = _PT_RWLOCK_MAGIC;
	PTQ_INIT(&ptr->ptr_rblocked);
	PTQ_INIT(&ptr->ptr_wblocked);
	ptr->ptr_nreaders = 0;
	ptr->ptr_owner = NULL;

	return 0;
}


int
pthread_rwlock_destroy(pthread_rwlock_t *ptr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_destroy_stub(ptr);

	if ((ptr->ptr_magic != _PT_RWLOCK_MAGIC) ||
	    (!PTQ_EMPTY(&ptr->ptr_rblocked)) ||
	    (!PTQ_EMPTY(&ptr->ptr_wblocked)) ||
	    (ptr->ptr_nreaders != 0) ||
	    (ptr->ptr_owner != NULL))
		return EINVAL;
	ptr->ptr_magic = _PT_RWLOCK_DEAD;

	return 0;
}

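/*
 * Usage sketch (illustrative only; the names below are hypothetical and
 * error handling is omitted): the usual POSIX pattern for data that is
 * read far more often than it is written.
 *
 *	#include <pthread.h>
 *
 *	static pthread_rwlock_t tab_lock = PTHREAD_RWLOCK_INITIALIZER;
 *	static int tab_value;
 *
 *	int
 *	tab_read(void)
 *	{
 *		int v;
 *
 *		pthread_rwlock_rdlock(&tab_lock);
 *		v = tab_value;
 *		pthread_rwlock_unlock(&tab_lock);
 *		return v;
 *	}
 *
 *	void
 *	tab_write(int v)
 *	{
 *		pthread_rwlock_wrlock(&tab_lock);
 *		tab_value = v;
 *		pthread_rwlock_unlock(&tab_lock);
 *	}
 */
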
/* We want function call overhead. */
NOINLINE static void
pthread__rwlock_pause(void)
{

	pthread__smt_pause();
}

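/*
 * Adaptive spin heuristic: busy-wait only while the lock is write held
 * and the owning thread is currently running on a CPU and not blocked,
 * since only then is a prompt release likely; each call spins for at
 * most 128 pauses, so a waiter never burns CPU indefinitely.
 */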
NOINLINE static int
pthread__rwlock_spin(uintptr_t owner)
{
	pthread_t thread;
	unsigned int i;

	thread = (pthread_t)(owner & RW_THREAD);
	if (thread == NULL || (owner & ~RW_THREAD) != RW_WRITE_LOCKED)
		return 0;
	if (thread->pt_lwpctl->lc_curcpu == LWPCTL_CPU_NONE ||
	    thread->pt_blocking)
		return 0;
	for (i = 128; i != 0; i--)
		pthread__rwlock_pause();
	return 1;
}

static int
pthread__rwlock_rdlock(pthread_rwlock_t *ptr, const struct timespec *ts)
{
	uintptr_t owner, next;
	pthread_mutex_t *interlock;
	pthread_t self;
	int error;

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear, then try to acquire the lock.
		 */
		if ((owner & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) == 0) {
			next = rw_cas(ptr, owner, owner + RW_READ_INCR);
			if (owner == next) {
				/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			continue;
		}

		self = pthread__self();
		if ((owner & RW_THREAD) == (uintptr_t)self)
			return EDEADLK;

		/* If held write locked and no waiters, spin. */
		if (pthread__rwlock_spin(owner)) {
			while (pthread__rwlock_spin(owner)) {
				owner = (uintptr_t)ptr->ptr_owner;
			}
			next = owner;
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);

		/*
		 * Mark the rwlock as having waiters.  If the set fails,
		 * then we may not need to sleep and should spin again.
		 */
		next = rw_cas(ptr, owner, owner | RW_HAS_WAITERS);
		if (owner != next) {
			pthread_mutex_unlock(interlock);
			continue;
		}

		/* The waiters bit is set - it's safe to sleep. */
		PTQ_INSERT_HEAD(&ptr->ptr_rblocked, self, pt_sleep);
		ptr->ptr_nreaders++;
		self->pt_rwlocked = _RW_WANT_READ;
		self->pt_sleepobj = &ptr->ptr_rblocked;
		self->pt_early = pthread__rwlock_early;
		error = pthread__park(self, interlock, &ptr->ptr_rblocked,
		    ts, 0, &ptr->ptr_rblocked);

		/* Did we get the lock? */
		if (self->pt_rwlocked == _RW_LOCKED) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
		if (error != 0)
			return error;

		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "direct handoff failure");
	}
}


int
pthread_rwlock_tryrdlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, next;

	if (__predict_false(__uselibcstub))
		return __libc_rwlock_tryrdlock_stub(ptr);

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	/*
	 * Don't get a readlock if there is a writer or if there are waiting
	 * writers; i.e. prefer writers to readers. This strategy is dictated
	 * by SUSv3.
	 */
	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		if ((owner & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) != 0)
			return EBUSY;
		next = rw_cas(ptr, owner, owner + RW_READ_INCR);
		if (owner == next) {
			/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
	}
}

static int
pthread__rwlock_wrlock(pthread_rwlock_t *ptr, const struct timespec *ts)
{
	uintptr_t owner, next;
	pthread_mutex_t *interlock;
	pthread_t self;
	int error;

	self = pthread__self();
	_DIAGASSERT(((uintptr_t)self & RW_FLAGMASK) == 0);

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear, then try to acquire the lock.
		 */
		if ((owner & RW_THREAD) == 0) {
			next = rw_cas(ptr, owner,
			    (uintptr_t)self | RW_WRITE_LOCKED);
			if (owner == next) {
				/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			continue;
		}

		if ((owner & RW_THREAD) == (uintptr_t)self)
			return EDEADLK;

		/* If held write locked and no waiters, spin. */
		if (pthread__rwlock_spin(owner)) {
			while (pthread__rwlock_spin(owner)) {
				owner = (uintptr_t)ptr->ptr_owner;
			}
			next = owner;
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);

		/*
		 * Mark the rwlock as having waiters.  If the set fails,
		 * then we may not need to sleep and should spin again.
		 */
		next = rw_cas(ptr, owner,
		    owner | RW_HAS_WAITERS | RW_WRITE_WANTED);
		if (owner != next) {
			pthread_mutex_unlock(interlock);
			continue;
		}

		/* The waiters bit is set - it's safe to sleep. */
		PTQ_INSERT_TAIL(&ptr->ptr_wblocked, self, pt_sleep);
		self->pt_rwlocked = _RW_WANT_WRITE;
		self->pt_sleepobj = &ptr->ptr_wblocked;
		self->pt_early = pthread__rwlock_early;
		error = pthread__park(self, interlock, &ptr->ptr_wblocked,
		    ts, 0, &ptr->ptr_wblocked);

		/* Did we get the lock? */
		if (self->pt_rwlocked == _RW_LOCKED) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
		if (error != 0)
			return error;

		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "direct handoff failure");
	}
}


int
pthread_rwlock_trywrlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, next;
	pthread_t self;

	if (__predict_false(__uselibcstub))
		return __libc_rwlock_trywrlock_stub(ptr);

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	self = pthread__self();
	_DIAGASSERT(((uintptr_t)self & RW_FLAGMASK) == 0);

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		if (owner != 0)
			return EBUSY;
		next = rw_cas(ptr, owner, (uintptr_t)self | RW_WRITE_LOCKED);
		if (owner == next) {
			/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
	}
}

int
pthread_rwlock_rdlock(pthread_rwlock_t *ptr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_rdlock_stub(ptr);

	return pthread__rwlock_rdlock(ptr, NULL);
}

int
pthread_rwlock_timedrdlock(pthread_rwlock_t *ptr,
			   const struct timespec *abs_timeout)
{
	if (abs_timeout == NULL)
		return EINVAL;
	if ((abs_timeout->tv_nsec >= 1000000000) ||
	    (abs_timeout->tv_nsec < 0) ||
	    (abs_timeout->tv_sec < 0))
		return EINVAL;

	return pthread__rwlock_rdlock(ptr, abs_timeout);
}
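
/*
 * Usage sketch (illustrative only; lock_with_timeout is a hypothetical
 * helper): the timed variants take an absolute CLOCK_REALTIME deadline,
 * so a relative wait is formed by adding to the current time.
 *
 *	#include <pthread.h>
 *	#include <time.h>
 *
 *	static int
 *	lock_with_timeout(pthread_rwlock_t *lk, time_t seconds)
 *	{
 *		struct timespec abstime;
 *
 *		clock_gettime(CLOCK_REALTIME, &abstime);
 *		abstime.tv_sec += seconds;
 *		return pthread_rwlock_timedrdlock(lk, &abstime);
 *	}
 *
 * A return value of 0 means the read hold was acquired; ETIMEDOUT means
 * the deadline passed first.
 */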

int
pthread_rwlock_wrlock(pthread_rwlock_t *ptr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_wrlock_stub(ptr);

	return pthread__rwlock_wrlock(ptr, NULL);
}

int
pthread_rwlock_timedwrlock(pthread_rwlock_t *ptr,
			   const struct timespec *abs_timeout)
{
	if (abs_timeout == NULL)
		return EINVAL;
	if ((abs_timeout->tv_nsec >= 1000000000) ||
	    (abs_timeout->tv_nsec < 0) ||
	    (abs_timeout->tv_sec < 0))
		return EINVAL;

	return pthread__rwlock_wrlock(ptr, abs_timeout);
}


int
pthread_rwlock_unlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, decr, new, next;
	pthread_mutex_t *interlock;
	pthread_t self, thread;

	if (__predict_false(__uselibcstub))
		return __libc_rwlock_unlock_stub(ptr);

#ifdef ERRORCHECK
	if ((ptr == NULL) || (ptr->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif

#ifndef PTHREAD__ATOMIC_IS_MEMBAR
	membar_exit();
#endif

	/*
	 * Since we used an add operation to set the required lock
	 * bits, we can use a subtract to clear them, which makes
	 * the read-release and write-release path similar.
	 */
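	/*
	 * Concretely (illustrative): for a write release, owner is
	 * (self | RW_WRITE_LOCKED) plus any waiter bits and decr is
	 * (self | RW_WRITE_LOCKED), so owner - decr leaves only the
	 * waiter bits; for a read release decr is RW_READ_INCR, so the
	 * subtraction drops the hold count by one.
	 */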
	owner = (uintptr_t)ptr->ptr_owner;
	if ((owner & RW_WRITE_LOCKED) != 0) {
		self = pthread__self();
		decr = (uintptr_t)self | RW_WRITE_LOCKED;
		if ((owner & RW_THREAD) != (uintptr_t)self) {
			return EPERM;
		}
	} else {
		decr = RW_READ_INCR;
		if (owner == 0) {
			return EPERM;
		}
	}

	for (;; owner = next) {
		/*
		 * Compute what we expect the new value of the lock to be.
		 * Only proceed to do direct handoff if there are waiters,
		 * and if the lock would become unowned.
		 */
		new = (owner - decr);
		if ((new & (RW_THREAD | RW_HAS_WAITERS)) != RW_HAS_WAITERS) {
			next = rw_cas(ptr, owner, new);
			if (owner == next) {
				/* Released! */
				return 0;
			}
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we can adjust
		 * the waiter bits.  We must check to see if there are
		 * still waiters before proceeding.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);
		owner = (uintptr_t)ptr->ptr_owner;
		if ((owner & RW_HAS_WAITERS) == 0) {
			pthread_mutex_unlock(interlock);
			next = owner;
			continue;
		}

		/*
		 * Give the lock away.  SUSv3 dictates that we must give
		 * preference to writers.
		 */
		self = pthread__self();
		if ((thread = PTQ_FIRST(&ptr->ptr_wblocked)) != NULL) {
			_DIAGASSERT(((uintptr_t)thread & RW_FLAGMASK) == 0);
			new = (uintptr_t)thread | RW_WRITE_LOCKED;

			if (PTQ_NEXT(thread, pt_sleep) != NULL)
				new |= RW_HAS_WAITERS | RW_WRITE_WANTED;
			else if (ptr->ptr_nreaders != 0)
				new |= RW_HAS_WAITERS;

			/*
			 * Set in the new value.  The lock becomes owned
			 * by the writer that we are about to wake.
			 */
			(void)atomic_swap_ptr(&ptr->ptr_owner, (void *)new);

			/* Wake the writer. */
			thread->pt_rwlocked = _RW_LOCKED;
			pthread__unpark(&ptr->ptr_wblocked, self,
			    interlock);
		} else {
			new = 0;
			PTQ_FOREACH(thread, &ptr->ptr_rblocked, pt_sleep) {
				/*
				 * May have already been handed the lock,
				 * since pthread__unpark_all() can release
				 * our interlock before awakening all
				 * threads.
				 */
				if (thread->pt_sleepobj == NULL)
					continue;
				new += RW_READ_INCR;
				thread->pt_rwlocked = _RW_LOCKED;
			}

			/*
			 * Set in the new value.  The lock becomes owned
			 * by the readers that we are about to wake.
			 */
			(void)atomic_swap_ptr(&ptr->ptr_owner, (void *)new);

			/* Wake up all sleeping readers. */
			ptr->ptr_nreaders = 0;
			pthread__unpark_all(&ptr->ptr_rblocked, self,
			    interlock);
		}
		pthread_mutex_unlock(interlock);

		return 0;
	}
}

/*
 * Called when a timedlock awakens early to adjust the waiter bits.
 * The rwlock's interlock is held on entry, and the caller has been
 * removed from the waiters lists.
 */
static void
pthread__rwlock_early(void *obj)
{
	uintptr_t owner, set, new, next;
	pthread_rwlock_t *ptr;
	pthread_t self;
	u_int off;

	self = pthread__self();

	switch (self->pt_rwlocked) {
	case _RW_WANT_READ:
		off = offsetof(pthread_rwlock_t, ptr_rblocked);
		break;
	case _RW_WANT_WRITE:
		off = offsetof(pthread_rwlock_t, ptr_wblocked);
		break;
	default:
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "bad value of pt_rwlocked");
		off = 0;
		/* NOTREACHED */
		break;
	}

	/* LINTED mind your own business */
	ptr = (pthread_rwlock_t *)((uint8_t *)obj - off);
	owner = (uintptr_t)ptr->ptr_owner;

	if ((owner & RW_THREAD) == 0) {
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "lock not held");
	}

	if (!PTQ_EMPTY(&ptr->ptr_wblocked))
		set = RW_HAS_WAITERS | RW_WRITE_WANTED;
	else if (ptr->ptr_nreaders != 0)
		set = RW_HAS_WAITERS;
	else
		set = 0;

	for (;; owner = next) {
		new = (owner & ~(RW_HAS_WAITERS | RW_WRITE_WANTED)) | set;
		next = rw_cas(ptr, owner, new);
		if (owner == next)
			break;
	}
}

int
_pthread_rwlock_held_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	if ((owner & RW_WRITE_LOCKED) != 0)
		return (owner & RW_THREAD) == (uintptr_t)pthread__self();
	return (owner & RW_THREAD) != 0;
}

int
_pthread_rwlock_rdheld_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	return (owner & RW_THREAD) != 0 && (owner & RW_WRITE_LOCKED) == 0;
}

int
_pthread_rwlock_wrheld_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	return (owner & (RW_THREAD | RW_WRITE_LOCKED)) ==
	    ((uintptr_t)pthread__self() | RW_WRITE_LOCKED);
}

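/*
 * Usage sketch (illustrative only; the tab_* names are hypothetical):
 * the *_np predicates above are NetBSD extensions, useful for asserting
 * lock state in debug builds.
 *
 *	#include <assert.h>
 *
 *	void
 *	tab_update_locked(int v)
 *	{
 *		assert(pthread_rwlock_wrheld_np(&tab_lock));
 *		tab_value = v;
 *	}
 */
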
#ifdef _PTHREAD_PSHARED
int
pthread_rwlockattr_getpshared(const pthread_rwlockattr_t * __restrict attr,
    int * __restrict pshared)
{
	*pshared = PTHREAD_PROCESS_PRIVATE;
	return 0;
}

int
pthread_rwlockattr_setpshared(pthread_rwlockattr_t *attr, int pshared)
{

	switch(pshared) {
	case PTHREAD_PROCESS_PRIVATE:
		return 0;
	case PTHREAD_PROCESS_SHARED:
		return ENOSYS;
	}
	return EINVAL;
}
#endif

int
pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
{

	if (attr == NULL)
		return EINVAL;
	attr->ptra_magic = _PT_RWLOCKATTR_MAGIC;

	return 0;
}


int
pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
{

	if ((attr == NULL) ||
	    (attr->ptra_magic != _PT_RWLOCKATTR_MAGIC))
		return EINVAL;
	attr->ptra_magic = _PT_RWLOCKATTR_DEAD;

	return 0;
}
    692