/*	$NetBSD: mutex.h,v 1.8 2007/11/21 10:19:07 yamt Exp $	*/

/*-
 * Copyright (c) 2002, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _HPPA_MUTEX_H_
#define	_HPPA_MUTEX_H_

/*
 * The HPPA mutex implementation is troublesome, because HPPA lacks
 * a compare-and-set operation, yet there are many SMP HPPA machines
 * in circulation.  SMP for spin mutexes is easy - we don't need to
 * know who owns the lock.  For adaptive mutexes, we need an owner
 * field and an additional interlock.
 */
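
/*
 * Illustrative sketch only (not compiled): the interlock is the
 * __cpu_simple_lock_t, built on HPPA's load-and-clear-word (LDCW)
 * primitive, so acquiring an adaptive mutex is a two-step sequence
 * rather than a single compare-and-set.  Roughly:
 *
 *	if (__cpu_simple_lock_try(&mtx->mtx_lock)) {
 *		mtx->mtx_owner = curthread;	// record the owner
 *		// mutex is now held; the interlock stays held too
 *	}
 *
 * The real definitions are MUTEX_ACQUIRE() and MUTEX_RELEASE() below.
 */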

#ifndef __ASSEMBLER__

#include <machine/lock.h>

struct kmutex {
	union {
		/*
		 * Only the 16-byte-aligned word of the __cpu_simple_lock_t
		 * will actually be used.  It is 16 bytes in size to simplify
		 * the allocation.  See hppa/lock.h.
		 */
#ifdef __MUTEX_PRIVATE
		struct {
			__cpu_simple_lock_t	mtxu_lock;	/* 0-15 */
			volatile uint32_t	mtxs_owner;	/* 16-19 */
			ipl_cookie_t		mtxs_ipl;	/* 20-23 */
			volatile uint8_t	mtxs_waiters;	/* 24 */

			/* For LOCKDEBUG */
			uint8_t			mtxs_dodebug;	/* 25 */
		} s;
#endif
		uint8_t			mtxu_pad[32];	/* 0 - 31 */
	} u;
} __aligned (16);
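
/*
 * The union above is padded to 32 bytes and 16-byte aligned so that
 * mtxu_lock starts on the 16-byte boundary LDCW requires.  A
 * hypothetical compile-time check of that layout (shown as a comment
 * only; the CTASSERT() macro is not available from this header) would
 * look like:
 *
 *	CTASSERT(sizeof(struct kmutex) == 32);
 *	CTASSERT(offsetof(struct kmutex, u.s.mtxs_owner) == 16);
 */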
#endif

#ifdef __MUTEX_PRIVATE

#define	__HAVE_MUTEX_STUBS	1

#define	mtx_lock	u.s.mtxu_lock
#define	mtx_owner	u.s.mtxs_owner
#define	mtx_ipl		u.s.mtxs_ipl
#define	mtx_waiters	u.s.mtxs_waiters
#define	mtx_dodebug	u.s.mtxs_dodebug

/* Magic constants for mtx_owner */
#define	MUTEX_ADAPTIVE_UNOWNED		0xffffff00
#define	MUTEX_SPIN_FLAG			0xffffff10
#define	MUTEX_UNOWNED_OR_SPIN(x)	(((x) & 0xffffffef) == 0xffffff00)
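
/*
 * MUTEX_UNOWNED_OR_SPIN() works because the two constants above differ
 * only in bit 0x10: clearing that bit (& 0xffffffef) maps both
 * 0xffffff00 and 0xffffff10 to 0xffffff00, so a single comparison
 * covers "unowned adaptive" as well as "spin".
 */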

#ifndef __ASSEMBLER__

static inline uintptr_t
MUTEX_OWNER(uintptr_t owner)
{
	return owner;
}

static inline int
MUTEX_OWNED(uintptr_t owner)
{
	return owner != MUTEX_ADAPTIVE_UNOWNED;
}

static inline int
MUTEX_SET_WAITERS(kmutex_t *mtx, uintptr_t owner)
{
	mb_write();
	mtx->mtx_waiters = 1;
	mb_memory();
	return mtx->mtx_owner != MUTEX_ADAPTIVE_UNOWNED;
}
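
/*
 * Note on MUTEX_SET_WAITERS(): the barriers order the store to
 * mtx_waiters before the re-read of mtx_owner, so the return value
 * tells the caller whether the mutex was still owned once the waiters
 * flag was globally visible.  A zero return suggests the owner
 * released the mutex in the meantime, in which case blocking would be
 * unsafe and the acquisition should be retried instead.
 */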

static inline int
MUTEX_HAS_WAITERS(volatile kmutex_t *mtx)
{
	return mtx->mtx_waiters != 0;
}

static inline void
MUTEX_INITIALIZE_SPIN(kmutex_t *mtx, bool dodebug, int ipl)
{
	mtx->mtx_ipl = makeiplcookie(ipl);
	mtx->mtx_dodebug = dodebug;
	mtx->mtx_owner = MUTEX_SPIN_FLAG;
	__cpu_simple_lock_init(&mtx->mtx_lock);
}

static inline void
MUTEX_INITIALIZE_ADAPTIVE(kmutex_t *mtx, bool dodebug)
{
	mtx->mtx_dodebug = dodebug;
	mtx->mtx_owner = MUTEX_ADAPTIVE_UNOWNED;
	__cpu_simple_lock_init(&mtx->mtx_lock);
}

static inline void
MUTEX_DESTROY(kmutex_t *mtx)
{
	mtx->mtx_owner = 0xffffffff;
}

static inline bool
MUTEX_DEBUG_P(kmutex_t *mtx)
{
	return mtx->mtx_dodebug != 0;
}

static inline int
MUTEX_SPIN_P(volatile kmutex_t *mtx)
{
	return mtx->mtx_owner == MUTEX_SPIN_FLAG;
}

static inline int
MUTEX_ADAPTIVE_P(volatile kmutex_t *mtx)
{
	return mtx->mtx_owner != MUTEX_SPIN_FLAG;
}

/* Acquire an adaptive mutex */
static inline int
MUTEX_ACQUIRE(kmutex_t *mtx, uintptr_t curthread)
{
	if (!__cpu_simple_lock_try(&mtx->mtx_lock))
		return 0;
	mtx->mtx_owner = curthread;
	return 1;
}
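
/*
 * Note that the __cpu_simple_lock interlock taken in MUTEX_ACQUIRE()
 * stays held for as long as the adaptive mutex is owned; the matching
 * release is the __cpu_simple_unlock() in MUTEX_RELEASE() below.
 */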

/* Release an adaptive mutex */
static inline void
MUTEX_RELEASE(kmutex_t *mtx)
{
	mtx->mtx_owner = MUTEX_ADAPTIVE_UNOWNED;
	__cpu_simple_unlock(&mtx->mtx_lock);
	mtx->mtx_waiters = 0;
}

static inline void
MUTEX_CLEAR_WAITERS(kmutex_t *mtx)
{
	mtx->mtx_waiters = 0;
}

#endif	/* __ASSEMBLER__ */

#endif	/* __MUTEX_PRIVATE */

#endif /* _HPPA_MUTEX_H_ */