/*	$NetBSD: lock.h,v 1.3 2022/02/13 13:42:12 riastradh Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Machine-dependent spin lock operations.
 */

#ifndef _OR1K_LOCK_H_
#define	_OR1K_LOCK_H_

static __inline int
__SIMPLELOCK_LOCKED_P(const __cpu_simple_lock_t *__ptr)
{
	return *__ptr != __SIMPLELOCK_UNLOCKED;
}

static __inline int
__SIMPLELOCK_UNLOCKED_P(const __cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
{
#if 0
	__atomic_clear(__ptr, __ATOMIC_RELAXED);
#else
	*__ptr = __SIMPLELOCK_UNLOCKED;
#endif
}

static __inline void
__cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
{
#if 0
	(void)__atomic_test_and_set(__ptr, __ATOMIC_RELAXED);
#else
	*__ptr = __SIMPLELOCK_LOCKED;
#endif
}

static __inline void __unused
__cpu_simple_lock_init(__cpu_simple_lock_t *__ptr)
{
#if 0
	__atomic_clear(__ptr, __ATOMIC_RELAXED);
#else
	*__ptr = __SIMPLELOCK_UNLOCKED;
#endif
}

static __inline void __unused
__cpu_simple_lock(__cpu_simple_lock_t *__ptr)
{
#if 0
	while (__atomic_test_and_set(__ptr, __ATOMIC_ACQUIRE)) {
		/* do nothing */
	}
#else
	int tmp;
	/*
	 * No explicit memory barrier needed around ll/sc:
	 *
	 *	`In implementations that use a weakly-ordered memory model,
	 *	l.swa and l.lwa will serve as synchronization points,
	 *	similar to lsync.'
	 *
	 * https://openrisc.io/or1k.html#__RefHeading__341344_552419154
	 */
	__asm volatile(
	"1:"
		/* spin until the lock word reads unlocked */
	"\t"	"l.lwa	%[tmp],0(%[ptr])"
	"\n\t"	"l.sfeqi\t%[tmp],%[unlocked]"
	"\n\t"	"l.bnf	1b"
	"\n\t"	"l.nop"
		/* then try to claim it; retry from the top if l.swa fails */
	"\n\t"	"l.swa	0(%[ptr]),%[newval]"
	"\n\t"	"l.bnf	1b"
	"\n\t"	"l.nop"
	    :	[tmp] "=&r" (tmp)
	    :	[newval] "r" (__SIMPLELOCK_LOCKED),
		[ptr] "r" (__ptr),
		[unlocked] "n" (__SIMPLELOCK_UNLOCKED)
	    :	"cc", "memory");
#endif
}

static __inline int __unused
__cpu_simple_lock_try(__cpu_simple_lock_t *__ptr)
{
#if 0
	return !__atomic_test_and_set(__ptr, __ATOMIC_ACQUIRE);
#else
	int oldval;
	/* No explicit memory barrier needed, as in __cpu_simple_lock. */
	__asm volatile(
	"1:"
		/* swap LOCKED into the lock word; retry only if l.swa fails */
	"\t"	"l.lwa	%[oldval],0(%[ptr])"
	"\n\t"	"l.swa	0(%[ptr]),%[newval]"
	"\n\t"	"l.bnf	1b"
	"\n\t"	"l.nop"
	    :	[oldval] "=&r" (oldval)
	    :	[newval] "r" (__SIMPLELOCK_LOCKED),
		[ptr] "r" (__ptr)
	    :	"cc", "memory");
	return oldval == __SIMPLELOCK_UNLOCKED;
#endif
}

static __inline void __unused
__cpu_simple_unlock(__cpu_simple_lock_t *__ptr)
{
#if 0
	__atomic_clear(__ptr, __ATOMIC_RELEASE);
#else
	/* release barrier: order critical-section stores before the unlock */
	__asm volatile("l.msync" ::: "memory");
	*__ptr = __SIMPLELOCK_UNLOCKED;
#endif
}

#endif /* _OR1K_LOCK_H_ */
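
/*
 * Illustrative sketch of how the primitives above compose to protect a
 * small critical section.  The names example_lock, example_count, and
 * example_increment are hypothetical and not part of this header;
 * __cpu_simple_lock_t and __SIMPLELOCK_UNLOCKED are assumed to come from
 * the port's <machine/types.h>.  Kept under #if 0, in the same style as
 * the alternative implementations above, so it is never compiled.
 */
#if 0
static __cpu_simple_lock_t example_lock = __SIMPLELOCK_UNLOCKED;
static unsigned int example_count;

static void
example_increment(void)
{
	__cpu_simple_lock(&example_lock);	/* spin until acquired */
	example_count++;			/* critical section */
	__cpu_simple_unlock(&example_lock);	/* l.msync, then store UNLOCKED */
}
#endif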