/*	$NetBSD: pthread_rwlock.c,v 1.34.18.1 2020/01/26 10:55:16 martin Exp $ */

/*-
 * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, by Jason R. Thorpe, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_rwlock.c,v 1.34.18.1 2020/01/26 10:55:16 martin Exp $");

#include <sys/types.h>
#include <sys/lwpctl.h>

#include <time.h>
#include <errno.h>
#include <stddef.h>

#include "pthread.h"
#include "pthread_int.h"
#include "reentrant.h"

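/*
 * Values recorded in pt_rwlocked around a sleep: the _RW_WANT_* values
 * are set before parking to note what the thread is waiting for, and
 * the releasing thread overwrites them with _RW_LOCKED when it hands
 * the lock to the sleeper directly.
 */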
#define	_RW_LOCKED		0
#define	_RW_WANT_WRITE		1
#define	_RW_WANT_READ		2

#if __GNUC_PREREQ__(3, 0)
#define	NOINLINE		__attribute ((noinline))
#else
#define	NOINLINE		/* nothing */
#endif

static int pthread__rwlock_wrlock(pthread_rwlock_t *, const struct timespec *);
static int pthread__rwlock_rdlock(pthread_rwlock_t *, const struct timespec *);
static void pthread__rwlock_early(void *);

int	_pthread_rwlock_held_np(pthread_rwlock_t *);
int	_pthread_rwlock_rdheld_np(pthread_rwlock_t *);
int	_pthread_rwlock_wrheld_np(pthread_rwlock_t *);

#ifndef lint
__weak_alias(pthread_rwlock_held_np,_pthread_rwlock_held_np)
__weak_alias(pthread_rwlock_rdheld_np,_pthread_rwlock_rdheld_np)
__weak_alias(pthread_rwlock_wrheld_np,_pthread_rwlock_wrheld_np)
#endif

__strong_alias(__libc_rwlock_init,pthread_rwlock_init)
__strong_alias(__libc_rwlock_rdlock,pthread_rwlock_rdlock)
__strong_alias(__libc_rwlock_wrlock,pthread_rwlock_wrlock)
__strong_alias(__libc_rwlock_tryrdlock,pthread_rwlock_tryrdlock)
__strong_alias(__libc_rwlock_trywrlock,pthread_rwlock_trywrlock)
__strong_alias(__libc_rwlock_unlock,pthread_rwlock_unlock)
__strong_alias(__libc_rwlock_destroy,pthread_rwlock_destroy)

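/*
 * The lock state lives in the single word ptr_owner: when write held,
 * the RW_THREAD bits carry the owning thread pointer; when read held,
 * they carry the reader count (in units of RW_READ_INCR).  The low
 * bits are the RW_WRITE_LOCKED, RW_WRITE_WANTED and RW_HAS_WAITERS
 * flags.  rw_cas() does a compare-and-swap on that word and returns
 * the value actually observed, so callers retry until the returned
 * value matches what they expected.
 */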
static inline uintptr_t
rw_cas(pthread_rwlock_t *ptr, uintptr_t o, uintptr_t n)
{

	return (uintptr_t)atomic_cas_ptr(&ptr->ptr_owner, (void *)o,
	    (void *)n);
}

int
pthread_rwlock_init(pthread_rwlock_t *ptr,
	    const pthread_rwlockattr_t *attr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_init_stub(ptr, attr);

	if (attr && (attr->ptra_magic != _PT_RWLOCKATTR_MAGIC))
		return EINVAL;
	ptr->ptr_magic = _PT_RWLOCK_MAGIC;
	PTQ_INIT(&ptr->ptr_rblocked);
	PTQ_INIT(&ptr->ptr_wblocked);
	ptr->ptr_nreaders = 0;
	ptr->ptr_owner = NULL;

	return 0;
}


int
pthread_rwlock_destroy(pthread_rwlock_t *ptr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_destroy_stub(ptr);

	if ((ptr->ptr_magic != _PT_RWLOCK_MAGIC) ||
	    (!PTQ_EMPTY(&ptr->ptr_rblocked)) ||
	    (!PTQ_EMPTY(&ptr->ptr_wblocked)) ||
	    (ptr->ptr_nreaders != 0) ||
	    (ptr->ptr_owner != NULL))
		return EINVAL;
	ptr->ptr_magic = _PT_RWLOCK_DEAD;

	return 0;
}

/* We want function call overhead. */
NOINLINE static void
pthread__rwlock_pause(void)
{

	pthread__smt_pause();
}

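/*
 * Decide whether spinning is worthwhile: only when the lock is write
 * held and the owning thread is currently running on a CPU
 * (lc_curcpu != LWPCTL_CPU_NONE).  In that case busy-wait for a short,
 * fixed number of pause cycles before falling back to sleeping.
 */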
NOINLINE static int
pthread__rwlock_spin(uintptr_t owner)
{
	pthread_t thread;
	unsigned int i;

	thread = (pthread_t)(owner & RW_THREAD);
	if (thread == NULL || (owner & ~RW_THREAD) != RW_WRITE_LOCKED)
		return 0;
	if (thread->pt_lwpctl->lc_curcpu == LWPCTL_CPU_NONE)
		return 0;
	for (i = 128; i != 0; i--)
		pthread__rwlock_pause();
	return 1;
}

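/*
 * Slow path shared by pthread_rwlock_rdlock() and
 * pthread_rwlock_timedrdlock(): loop trying to CAS another reader into
 * the owner word, spin briefly while a running writer holds the lock,
 * otherwise take the interlock, set RW_HAS_WAITERS and park.  On
 * wakeup, a pt_rwlocked value of _RW_LOCKED means the releasing thread
 * handed the lock to us directly.
 */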
static int
pthread__rwlock_rdlock(pthread_rwlock_t *ptr, const struct timespec *ts)
{
	uintptr_t owner, next;
	pthread_mutex_t *interlock;
	pthread_t self;
	int error;

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear, then try to acquire the lock.
		 */
		if ((owner & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) == 0) {
			next = rw_cas(ptr, owner, owner + RW_READ_INCR);
			if (owner == next) {
				/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			continue;
		}

		self = pthread__self();
		if ((owner & RW_THREAD) == (uintptr_t)self)
			return EDEADLK;

		/* If held write locked and no waiters, spin. */
		if (pthread__rwlock_spin(owner)) {
			while (pthread__rwlock_spin(owner)) {
				owner = (uintptr_t)ptr->ptr_owner;
			}
			next = owner;
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);

		/*
		 * Mark the rwlock as having waiters.  If the set fails,
		 * then we may not need to sleep and should spin again.
		 */
		next = rw_cas(ptr, owner, owner | RW_HAS_WAITERS);
		if (owner != next) {
			pthread_mutex_unlock(interlock);
			continue;
		}

		/* The waiters bit is set - it's safe to sleep. */
		PTQ_INSERT_HEAD(&ptr->ptr_rblocked, self, pt_sleep);
		ptr->ptr_nreaders++;
		self->pt_rwlocked = _RW_WANT_READ;
		self->pt_sleepobj = &ptr->ptr_rblocked;
		self->pt_early = pthread__rwlock_early;
		error = pthread__park(self, interlock, &ptr->ptr_rblocked,
		    ts, 0, &ptr->ptr_rblocked);

		/* Did we get the lock? */
		if (self->pt_rwlocked == _RW_LOCKED) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
		if (error != 0)
			return error;

		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "direct handoff failure");
	}
}


int
pthread_rwlock_tryrdlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, next;

	if (__predict_false(__uselibcstub))
		return __libc_rwlock_tryrdlock_stub(ptr);

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	/*
	 * Don't get a readlock if there is a writer or if there are waiting
	 * writers; i.e. prefer writers to readers. This strategy is dictated
	 * by SUSv3.
	 */
	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		if ((owner & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) != 0)
			return EBUSY;
		next = rw_cas(ptr, owner, owner + RW_READ_INCR);
		if (owner == next) {
			/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
	}
}

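/*
 * Slow path shared by pthread_rwlock_wrlock() and
 * pthread_rwlock_timedwrlock(): same structure as the read path above,
 * except that the lock can only be taken when the owner word is
 * completely free, and RW_WRITE_WANTED is also set before parking so
 * that new readers back off.
 */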
static int
pthread__rwlock_wrlock(pthread_rwlock_t *ptr, const struct timespec *ts)
{
	uintptr_t owner, next;
	pthread_mutex_t *interlock;
	pthread_t self;
	int error;

	self = pthread__self();

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear, then try to acquire the lock.
		 */
		if ((owner & RW_THREAD) == 0) {
			next = rw_cas(ptr, owner,
			    (uintptr_t)self | RW_WRITE_LOCKED);
			if (owner == next) {
				/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			continue;
		}

		if ((owner & RW_THREAD) == (uintptr_t)self)
			return EDEADLK;

		/* If held write locked and no waiters, spin. */
		if (pthread__rwlock_spin(owner)) {
			while (pthread__rwlock_spin(owner)) {
				owner = (uintptr_t)ptr->ptr_owner;
			}
			next = owner;
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);

		/*
		 * Mark the rwlock as having waiters.  If the set fails,
		 * then we may not need to sleep and should spin again.
		 */
		next = rw_cas(ptr, owner,
		    owner | RW_HAS_WAITERS | RW_WRITE_WANTED);
		if (owner != next) {
			pthread_mutex_unlock(interlock);
			continue;
		}

		/* The waiters bit is set - it's safe to sleep. */
		PTQ_INSERT_TAIL(&ptr->ptr_wblocked, self, pt_sleep);
		self->pt_rwlocked = _RW_WANT_WRITE;
		self->pt_sleepobj = &ptr->ptr_wblocked;
		self->pt_early = pthread__rwlock_early;
		error = pthread__park(self, interlock, &ptr->ptr_wblocked,
		    ts, 0, &ptr->ptr_wblocked);

		/* Did we get the lock? */
		if (self->pt_rwlocked == _RW_LOCKED) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
		if (error != 0)
			return error;

		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "direct handoff failure");
	}
}


int
pthread_rwlock_trywrlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, next;
	pthread_t self;

	if (__predict_false(__uselibcstub))
		return __libc_rwlock_trywrlock_stub(ptr);

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	self = pthread__self();

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		if (owner != 0)
			return EBUSY;
		next = rw_cas(ptr, owner, (uintptr_t)self | RW_WRITE_LOCKED);
		if (owner == next) {
			/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
	}
}

int
pthread_rwlock_rdlock(pthread_rwlock_t *ptr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_rdlock_stub(ptr);

	return pthread__rwlock_rdlock(ptr, NULL);
}

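/*
 * The timed variants take an absolute timeout, which is validated here
 * and then passed through to the common slow path; if the deadline
 * passes before the lock is handed to us, the error from
 * pthread__park() (normally ETIMEDOUT) is returned unchanged.
 */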
int
pthread_rwlock_timedrdlock(pthread_rwlock_t *ptr,
			   const struct timespec *abs_timeout)
{
	if (abs_timeout == NULL)
		return EINVAL;
	if ((abs_timeout->tv_nsec >= 1000000000) ||
	    (abs_timeout->tv_nsec < 0) ||
	    (abs_timeout->tv_sec < 0))
		return EINVAL;

	return pthread__rwlock_rdlock(ptr, abs_timeout);
}

int
pthread_rwlock_wrlock(pthread_rwlock_t *ptr)
{
	if (__predict_false(__uselibcstub))
		return __libc_rwlock_wrlock_stub(ptr);

	return pthread__rwlock_wrlock(ptr, NULL);
}

int
pthread_rwlock_timedwrlock(pthread_rwlock_t *ptr,
			   const struct timespec *abs_timeout)
{
	if (abs_timeout == NULL)
		return EINVAL;
	if ((abs_timeout->tv_nsec >= 1000000000) ||
	    (abs_timeout->tv_nsec < 0) ||
	    (abs_timeout->tv_sec < 0))
		return EINVAL;

	return pthread__rwlock_wrlock(ptr, abs_timeout);
}


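/*
 * Release the lock.  The fast path simply subtracts our contribution
 * from the owner word.  If that would leave the lock unowned while
 * RW_HAS_WAITERS is set, take the interlock and hand the lock off
 * directly: to the first waiting writer if there is one, otherwise to
 * all waiting readers at once.
 */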
int
pthread_rwlock_unlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, decr, new, next;
	pthread_mutex_t *interlock;
	pthread_t self, thread;

	if (__predict_false(__uselibcstub))
		return __libc_rwlock_unlock_stub(ptr);

#ifdef ERRORCHECK
	if ((ptr == NULL) || (ptr->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif

#ifndef PTHREAD__ATOMIC_IS_MEMBAR
	membar_exit();
#endif

	/*
	 * Since we used an add operation to set the required lock
	 * bits, we can use a subtract to clear them, which makes
	 * the read-release and write-release path similar.
	 */
	owner = (uintptr_t)ptr->ptr_owner;
	if ((owner & RW_WRITE_LOCKED) != 0) {
		self = pthread__self();
		decr = (uintptr_t)self | RW_WRITE_LOCKED;
		if ((owner & RW_THREAD) != (uintptr_t)self) {
			return EPERM;
		}
	} else {
		decr = RW_READ_INCR;
		if (owner == 0) {
			return EPERM;
		}
	}

	for (;; owner = next) {
		/*
		 * Compute what we expect the new value of the lock to be.
		 * Only proceed to do direct handoff if there are waiters,
		 * and if the lock would become unowned.
		 */
		new = (owner - decr);
		if ((new & (RW_THREAD | RW_HAS_WAITERS)) != RW_HAS_WAITERS) {
			next = rw_cas(ptr, owner, new);
			if (owner == next) {
				/* Released! */
				return 0;
			}
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we can adjust
		 * the waiter bits.  We must check to see if there are
		 * still waiters before proceeding.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);
		owner = (uintptr_t)ptr->ptr_owner;
		if ((owner & RW_HAS_WAITERS) == 0) {
			pthread_mutex_unlock(interlock);
			next = owner;
			continue;
		}

		/*
		 * Give the lock away.  SUSv3 dictates that we must give
		 * preference to writers.
		 */
		self = pthread__self();
		if ((thread = PTQ_FIRST(&ptr->ptr_wblocked)) != NULL) {
			new = (uintptr_t)thread | RW_WRITE_LOCKED;

			if (PTQ_NEXT(thread, pt_sleep) != NULL)
				new |= RW_HAS_WAITERS | RW_WRITE_WANTED;
			else if (ptr->ptr_nreaders != 0)
				new |= RW_HAS_WAITERS;

			/*
			 * Set in the new value.  The lock becomes owned
			 * by the writer that we are about to wake.
			 */
			(void)atomic_swap_ptr(&ptr->ptr_owner, (void *)new);

			/* Wake the writer. */
			thread->pt_rwlocked = _RW_LOCKED;
			pthread__unpark(&ptr->ptr_wblocked, self,
			    interlock);
		} else {
			new = 0;
			PTQ_FOREACH(thread, &ptr->ptr_rblocked, pt_sleep) {
				/*
				 * May have already been handed the lock,
				 * since pthread__unpark_all() can release
				 * our interlock before awakening all
				 * threads.
				 */
				if (thread->pt_sleepobj == NULL)
					continue;
				new += RW_READ_INCR;
				thread->pt_rwlocked = _RW_LOCKED;
			}

			/*
			 * Set in the new value.  The lock becomes owned
			 * by the readers that we are about to wake.
			 */
			(void)atomic_swap_ptr(&ptr->ptr_owner, (void *)new);

			/* Wake up all sleeping readers. */
			ptr->ptr_nreaders = 0;
			pthread__unpark_all(&ptr->ptr_rblocked, self,
			    interlock);
		}
		pthread_mutex_unlock(interlock);

		return 0;
	}
}

/*
 * Called when a timedlock awakens early to adjust the waiter bits.
 * The rwlock's interlock is held on entry, and the caller has been
 * removed from the waiters lists.
 */
static void
pthread__rwlock_early(void *obj)
{
	uintptr_t owner, set, new, next;
	pthread_rwlock_t *ptr;
	pthread_t self;
	u_int off;

	self = pthread__self();

	switch (self->pt_rwlocked) {
	case _RW_WANT_READ:
		off = offsetof(pthread_rwlock_t, ptr_rblocked);
		break;
	case _RW_WANT_WRITE:
		off = offsetof(pthread_rwlock_t, ptr_wblocked);
		break;
	default:
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "bad value of pt_rwlocked");
		off = 0;
		/* NOTREACHED */
		break;
	}

	/* LINTED mind your own business */
	ptr = (pthread_rwlock_t *)((uint8_t *)obj - off);
	owner = (uintptr_t)ptr->ptr_owner;

	if ((owner & RW_THREAD) == 0) {
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "lock not held");
	}

	if (!PTQ_EMPTY(&ptr->ptr_wblocked))
		set = RW_HAS_WAITERS | RW_WRITE_WANTED;
	else if (ptr->ptr_nreaders != 0)
		set = RW_HAS_WAITERS;
	else
		set = 0;

	for (;; owner = next) {
		new = (owner & ~(RW_HAS_WAITERS | RW_WRITE_WANTED)) | set;
		next = rw_cas(ptr, owner, new);
		if (owner == next)
			break;
	}
}

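/*
 * NetBSD-specific diagnostic predicates.  These read ptr_owner without
 * any synchronisation and so only give a snapshot; they are meant for
 * assertions and debugging, not for making locking decisions.
 */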
int
_pthread_rwlock_held_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	if ((owner & RW_WRITE_LOCKED) != 0)
		return (owner & RW_THREAD) == (uintptr_t)pthread__self();
	return (owner & RW_THREAD) != 0;
}

int
_pthread_rwlock_rdheld_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	return (owner & RW_THREAD) != 0 && (owner & RW_WRITE_LOCKED) == 0;
}

int
_pthread_rwlock_wrheld_np(pthread_rwlock_t *ptr)
{
	uintptr_t owner = (uintptr_t)ptr->ptr_owner;

	return (owner & (RW_THREAD | RW_WRITE_LOCKED)) ==
	    ((uintptr_t)pthread__self() | RW_WRITE_LOCKED);
}

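/*
 * Process-shared rwlocks are not supported: getpshared always reports
 * PTHREAD_PROCESS_PRIVATE, and setpshared accepts only that value,
 * returning ENOSYS for PTHREAD_PROCESS_SHARED.
 */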
#ifdef _PTHREAD_PSHARED
int
pthread_rwlockattr_getpshared(const pthread_rwlockattr_t * __restrict attr,
    int * __restrict pshared)
{
	*pshared = PTHREAD_PROCESS_PRIVATE;
	return 0;
}

int
pthread_rwlockattr_setpshared(pthread_rwlockattr_t *attr, int pshared)
{

	switch(pshared) {
	case PTHREAD_PROCESS_PRIVATE:
		return 0;
	case PTHREAD_PROCESS_SHARED:
		return ENOSYS;
	}
	return EINVAL;
}
#endif

int
pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
{

	if (attr == NULL)
		return EINVAL;
	attr->ptra_magic = _PT_RWLOCKATTR_MAGIC;

	return 0;
}


int
pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
{

	if ((attr == NULL) ||
	    (attr->ptra_magic != _PT_RWLOCKATTR_MAGIC))
		return EINVAL;
	attr->ptra_magic = _PT_RWLOCKATTR_DEAD;

	return 0;
}