      1 /*	$NetBSD: mutex.h,v 1.2 2019/06/01 12:42:28 maxv Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2002, 2007 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Jason R. Thorpe and Andrew Doran.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 #ifndef _RISCV_MUTEX_H_
     33 #define	_RISCV_MUTEX_H_
     34 
     35 #ifndef __MUTEX_PRIVATE
     36 
/*
 * Public (opaque) view of a kernel mutex.  Consumers outside the mutex
 * implementation only need the correct size/alignment, so a single
 * pad word stands in for the real contents below.
 */
struct kmutex {
	uintptr_t	mtx_pad1;	/* same size as the private mtx_owner */
};
     40 
     41 #else	/* __MUTEX_PRIVATE */
     42 
     43 #include <sys/param.h>
     44 
/*
 * Private view: the whole mutex is one word.  For adaptive mutexes it
 * holds the owner LWP pointer; for spin mutexes the low bits carry the
 * lock bit (MTX_LOCK) and the IPL field (MTX_IPL) defined below.
 * volatile: read/written concurrently by other CPUs and by inline asm.
 */
struct kmutex {
	volatile uintptr_t	mtx_owner;
};
     48 
/*
 * AMO size suffix for the inline assembly below: mtx_owner is a
 * uintptr_t, so the atomic must match the pointer width.
 */
#ifdef _LP64
#define MTX_ASMOP_SFX ".d"		// doubleword atomic op
#else
#define MTX_ASMOP_SFX ".w"		// word atomic op
#endif

/*
 * Bit layout of mtx_owner for spin mutexes.  These bits must not
 * overlap valid LWP pointer bits used by adaptive mutexes.
 */
#define	MTX_LOCK			__BIT(8)	// just one bit
#define	MTX_IPL				__BITS(7,4)	// only need 4 bits
     57 
/*
 * Route the MI spin-mutex hooks from <sys/mutex.h> to the RISC-V
 * inline implementations below.
 */
#undef MUTEX_SPIN_IPL			// override <sys/mutex.h>
#define	MUTEX_SPIN_IPL(a)		riscv_mutex_spin_ipl(a)
#define	MUTEX_INITIALIZE_SPIN_IPL(a,b)	riscv_mutex_initialize_spin_ipl(a,b)
#define MUTEX_SPINBIT_LOCK_INIT(a)	riscv_mutex_spinbit_lock_init(a)
#define MUTEX_SPINBIT_LOCK_TRY(a)	riscv_mutex_spinbit_lock_try(a)
#define MUTEX_SPINBIT_LOCKED_P(a)	riscv_mutex_spinbit_locked_p(a)
#define MUTEX_SPINBIT_LOCK_UNLOCK(a)	riscv_mutex_spinbit_lock_unlock(a)
     65 
     66 static inline ipl_cookie_t
     67 riscv_mutex_spin_ipl(kmutex_t *__mtx)
     68 {
     69 	return (ipl_cookie_t){._spl = __SHIFTOUT(__mtx->mtx_owner, MTX_IPL)};
     70 }
     71 
     72 static inline void
     73 riscv_mutex_initialize_spin_ipl(kmutex_t *__mtx, int ipl)
     74 {
     75 	__mtx->mtx_owner = (__mtx->mtx_owner & ~MTX_IPL)
     76 	    | __SHIFTIN(ipl, MTX_IPL);
     77 }
     78 
     79 static inline void
     80 riscv_mutex_spinbit_lock_init(kmutex_t *__mtx)
     81 {
     82 	__mtx->mtx_owner &= ~MTX_LOCK;
     83 }
     84 
     85 static inline bool
     86 riscv_mutex_spinbit_locked_p(const kmutex_t *__mtx)
     87 {
     88 	return (__mtx->mtx_owner & MTX_LOCK) != 0;
     89 }
     90 
     91 static inline bool
     92 riscv_mutex_spinbit_lock_try(kmutex_t *__mtx)
     93 {
     94 	uintptr_t __old;
     95 	__asm __volatile(
     96 		"amoor" MTX_ASMOP_SFX ".aq\t%0, %1, (%2)"
     97 	   :	"=r"(__old)
     98 	   :	"r"(MTX_LOCK), "r"(__mtx));
     99 	return (__old & MTX_LOCK) == 0;
    100 }
    101 
    102 static inline void
    103 riscv_mutex_spinbit_lock_unlock(kmutex_t *__mtx)
    104 {
    105 	__asm __volatile(
    106 		"amoand" MTX_ASMOP_SFX ".rl\tx0, %0, (%1)"
    107 	   ::	"r"(~MTX_LOCK), "r"(__mtx));
    108 }
    109 
/*
 * Assembly fast-path stubs are not implemented for this port yet;
 * keep the feature macros parked under #if 0 until they are.
 */
#if 0
#define	__HAVE_MUTEX_STUBS		1
#define	__HAVE_SPIN_MUTEX_STUBS		1
#endif
/* Use the MI CAS-based adaptive mutex implementation. */
#define	__HAVE_SIMPLE_MUTEXES		1
    115 
/*
 * MUTEX_RECEIVE: acquire-side barrier issued after obtaining the lock.
 * On MULTIPROCESSOR kernels a read barrier is needed so that reads of
 * data protected by the mutex cannot be satisfied before the lock word
 * is observed held.  On uniprocessor kernels we only synchronize
 * against interrupts, so no barrier is required.
 */
#ifdef MULTIPROCESSOR
#define	MUTEX_RECEIVE(mtx)		membar_consumer()
#else
#define	MUTEX_RECEIVE(mtx)		/* nothing */
#endif
    125 
/*
 * MUTEX_GIVE: release-side barrier issued before dropping the lock.
 * On MULTIPROCESSOR kernels a write barrier ensures stores made inside
 * the critical section are visible before the lock word is released.
 * On uniprocessor kernels no barrier is required.
 */
#ifdef MULTIPROCESSOR
#define	MUTEX_GIVE(mtx)			membar_producer()
#else
#define	MUTEX_GIVE(mtx)			/* nothing */
#endif
    134 
/*
 * Compare-and-swap primitive used by the MI simple-mutex code: swap
 * the owner word from o to n, succeeding iff the old value matched.
 */
#define	MUTEX_CAS(p, o, n)		\
    (atomic_cas_ulong((volatile unsigned long *)(p), (o), (n)) == (o))
    137 
    138 #endif	/* __MUTEX_PRIVATE */
    139 
    140 #endif /* _RISCV_MUTEX_H_ */
    141