/* $NetBSD: lock.h,v 1.3 2022/02/13 13:42:12 riastradh Exp $ */

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Machine-dependent spin lock operations.
 */

#ifndef _OR1K_LOCK_H_
#define	_OR1K_LOCK_H_

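/*
 * Predicates on the lock word: plain, unsynchronized reads that report
 * whether the lock currently appears locked or unlocked.
 */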
static __inline int
__SIMPLELOCK_LOCKED_P(const __cpu_simple_lock_t *__ptr)
{
	return *__ptr != __SIMPLELOCK_UNLOCKED;
}

static __inline int
__SIMPLELOCK_UNLOCKED_P(const __cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_UNLOCKED;
}

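/*
 * __cpu_simple_lock_clear(), __cpu_simple_lock_set(), and
 * __cpu_simple_lock_init() below write the lock word with plain stores;
 * the __atomic builtin alternatives are kept, disabled, under #if 0.
 */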
static __inline void
__cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
{
#if 0
	__atomic_clear(__ptr, __ATOMIC_RELAXED);
#else
	*__ptr = __SIMPLELOCK_UNLOCKED;
#endif
}

static __inline void
__cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
{
#if 0
	(void)__atomic_test_and_set(__ptr, __ATOMIC_RELAXED);
#else
	*__ptr = __SIMPLELOCK_LOCKED;
#endif
}

static __inline void __unused
__cpu_simple_lock_init(__cpu_simple_lock_t *__ptr)
{
#if 0
	__atomic_clear(__ptr, __ATOMIC_RELAXED);
#else
	*__ptr = __SIMPLELOCK_UNLOCKED;
#endif
}

static __inline void __unused
__cpu_simple_lock(__cpu_simple_lock_t *__ptr)
{
#if 0
	while (__atomic_test_and_set(__ptr, __ATOMIC_ACQUIRE)) {
		/* do nothing */
	}
#else
	int tmp;
	/*
	 * No explicit memory barrier needed around ll/sc:
	 *
	 * `In implementations that use a weakly-ordered memory model,
	 *  l.swa and l.lwa will serve as synchronization points,
	 *  similar to lsync.'
	 *
	 * https://openrisc.io/or1k.html#__RefHeading__341344_552419154
	 */
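	/*
	 * Spin with l.lwa until the lock word reads unlocked (l.sfeqi
	 * sets SR[F] when it equals __SIMPLELOCK_UNLOCKED; l.bnf loops
	 * while it does not), then try to store __SIMPLELOCK_LOCKED
	 * with l.swa; if the atomic pair was broken (l.swa clears
	 * SR[F]), start over from the load.
	 */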
	__asm volatile(
		"1:"
	"\t"	"l.lwa	%[tmp],0(%[ptr])"
	"\n\t"	"l.sfeqi\t%[tmp],%[unlocked]"
	"\n\t"	"l.bnf	1b"
	"\n\t"	"l.nop"

	"\n\t"	"l.swa	0(%[ptr]),%[newval]"
	"\n\t"	"l.bnf	1b"
	"\n\t"	"l.nop"
	   :	[tmp] "=&r" (tmp)
	   :	[newval] "r" (__SIMPLELOCK_LOCKED),
		[ptr] "r" (__ptr),
		[unlocked] "n" (__SIMPLELOCK_UNLOCKED)
	   :    "cc", "memory");
#endif
}

static __inline int __unused
__cpu_simple_lock_try(__cpu_simple_lock_t *__ptr)
{
#if 0
	return !__atomic_test_and_set(__ptr, __ATOMIC_ACQUIRE);
#else
	int oldval;
	/* No explicit memory barrier needed, as in __cpu_simple_lock.  */
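	/*
	 * Single l.lwa/l.swa attempt: unconditionally store
	 * __SIMPLELOCK_LOCKED, retrying only if the atomic pair was
	 * broken, and report success iff the old value was unlocked.
	 */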
	__asm volatile(
		"1:"
	"\t"	"l.lwa	%[oldval],0(%[ptr])"
	"\n\t"	"l.swa	0(%[ptr]),%[newval]"
	"\n\t"	"l.bnf	1b"
	"\n\t"	"l.nop"
	   :	[oldval] "=&r" (oldval)
	   :	[newval] "r" (__SIMPLELOCK_LOCKED),
		[ptr] "r" (__ptr)
	   :    "cc", "memory");
	return oldval == __SIMPLELOCK_UNLOCKED;
#endif
}

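/*
 * Release the lock: l.msync orders the critical section's loads and
 * stores before the plain store that marks the lock unlocked.
 */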
static __inline void __unused
__cpu_simple_unlock(__cpu_simple_lock_t *__ptr)
{
#if 0
	__atomic_clear(__ptr, __ATOMIC_RELEASE);
#else
	__asm volatile("l.msync" ::: "");
	*__ptr = __SIMPLELOCK_UNLOCKED;
#endif
}

#endif /* _OR1K_LOCK_H_ */