/*	$NetBSD: lock.h,v 1.39 2021/05/30 02:28:59 joerg Exp $	*/

/*-
 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Machine-dependent spin lock operations.
 *
 * NOTE: The SWP insn used here is available only on ARM architecture
 * version 3 and later (as well as 2a).  What we are going to do is
 * expect that the kernel will trap and emulate the insn.  That will
 * be slow, but give us the atomicity that we need.
 */

#ifndef	_ARM_LOCK_H_
#define	_ARM_LOCK_H_

static __inline int
__SIMPLELOCK_LOCKED_P(const __cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_LOCKED;
}

static __inline int
__SIMPLELOCK_UNLOCKED_P(const __cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_LOCKED;
}

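/*
 * Three implementations of the lock primitive follow, selected at
 * compile time: on ARMv6 and later, LDREX/STREX (or their byte forms)
 * provide the exclusive load/store pair used to build the lock; on
 * older CPUs the kernel uses SWPB directly; and non-kernel code on
 * older CPUs wraps SWP in a code template shaped like the LDREX/STREX
 * loop so the instructions can later be replaced with the exclusive-
 * access sequence (see the comment above the padded __swp() below).
 */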
#if defined(_ARM_ARCH_6)
static __inline unsigned int
__arm_load_exclusive(__cpu_simple_lock_t *__alp)
{
	unsigned int __rv;
	if (/*CONSTCOND*/sizeof(*__alp) == 1) {
		__asm __volatile("ldrexb\t%0,[%1]" : "=r"(__rv) : "r"(__alp));
	} else {
		__asm __volatile("ldrex\t%0,[%1]" : "=r"(__rv) : "r"(__alp));
	}
	return __rv;
}

/* returns 0 on success and 1 on failure */
static __inline unsigned int
__arm_store_exclusive(__cpu_simple_lock_t *__alp, unsigned int __val)
{
	unsigned int __rv;
	if (/*CONSTCOND*/sizeof(*__alp) == 1) {
		__asm __volatile("strexb\t%0,%1,[%2]"
		    : "=&r"(__rv) : "r"(__val), "r"(__alp) : "cc", "memory");
	} else {
		__asm __volatile("strex\t%0,%1,[%2]"
		    : "=&r"(__rv) : "r"(__val), "r"(__alp) : "cc", "memory");
	}
	return __rv;
}
#elif defined(_KERNEL)
static __inline unsigned char
__swp(unsigned char __val, __cpu_simple_lock_t *__ptr)
{
	uint32_t __val32;
	__asm volatile("swpb %0, %1, [%2]"
	    : "=&r" (__val32) : "r" (__val), "r" (__ptr) : "memory");
	return __val32;
}
#else
/*
 * On MP Cortex, SWP no longer guarantees atomic results.  Thus we pad
 * out SWP so that when the cpu generates an undefined exception we can
 * replace the SWP/MOV instructions with the right LDREX/STREX
 * instructions.
 *
 * This is why we force the SWP into the template needed for LDREX/STREX
 * including the extra instructions and extra register for testing the
 * result.
 */
static __inline int
__swp(int __val, __cpu_simple_lock_t *__ptr)
{
	int __tmp, __rv;
	__asm volatile(
#if 1
	"1:\t"	"swp %[__rv], %[__val], [%[__ptr]]"
	"\n\t"	"b 2f"
#else
	"1:\t"	"ldrex %[__rv],[%[__ptr]]"
	"\n\t"	"strex %[__tmp],%[__val],[%[__ptr]]"
#endif
	"\n\t"	"cmp %[__tmp],#0"
	"\n\t"	"bne 1b"
	"\n"	"2:"
	    : [__rv] "=&r" (__rv), [__tmp] "=&r" (__tmp)
	    : [__val] "r" (__val), [__ptr] "r" (__ptr) : "cc", "memory");
	return __rv;
}
#endif /* !_ARM_ARCH_6 */

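/*
 * Memory barrier helpers.  On ARMv7 "dmb ish" is the inner-shareable
 * data memory barrier instruction; on ARMv6 the same operation is the
 * CP15 write "mcr p15,0,Rt,c7,c10,5".  On older architectures these
 * helpers expand to nothing.
 */
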
/* load/dmb implies load-acquire */
static __inline void
__arm_load_dmb(void)
{
#if defined(_ARM_ARCH_7)
	__asm __volatile("dmb ish" ::: "memory");
#elif defined(_ARM_ARCH_6)
	__asm __volatile("mcr\tp15,0,%0,c7,c10,5" :: "r"(0) : "memory");
#endif
}

/* dmb/store implies store-release */
static __inline void
__arm_dmb_store(void)
{
#if defined(_ARM_ARCH_7)
	__asm __volatile("dmb ish" ::: "memory");
#elif defined(_ARM_ARCH_6)
	__asm __volatile("mcr\tp15,0,%0,c7,c10,5" :: "r"(0) : "memory");
#endif
}

static __inline void __unused
__cpu_simple_lock_init(__cpu_simple_lock_t *__alp)
{

	*__alp = __SIMPLELOCK_UNLOCKED;
}

#if !defined(__thumb__) || defined(_ARM_ARCH_T2)
static __inline void __unused
__cpu_simple_lock(__cpu_simple_lock_t *__alp)
{
#if defined(_ARM_ARCH_6)
	do {
		/* spin */
	} while (__arm_load_exclusive(__alp) != __SIMPLELOCK_UNLOCKED
	    || __arm_store_exclusive(__alp, __SIMPLELOCK_LOCKED));
	__arm_load_dmb();
#else
	while (__swp(__SIMPLELOCK_LOCKED, __alp) != __SIMPLELOCK_UNLOCKED)
		continue;
#endif
}
#else
void __cpu_simple_lock(__cpu_simple_lock_t *);
#endif

#if !defined(__thumb__) || defined(_ARM_ARCH_T2)
static __inline int __unused
__cpu_simple_lock_try(__cpu_simple_lock_t *__alp)
{
#if defined(_ARM_ARCH_6)
	do {
		if (__arm_load_exclusive(__alp) != __SIMPLELOCK_UNLOCKED) {
			return 0;
		}
	} while (__arm_store_exclusive(__alp, __SIMPLELOCK_LOCKED));
	__arm_load_dmb();
	return 1;
#else
	return (__swp(__SIMPLELOCK_LOCKED, __alp) == __SIMPLELOCK_UNLOCKED);
#endif
}
#else
int __cpu_simple_lock_try(__cpu_simple_lock_t *);
#endif

static __inline void __unused
__cpu_simple_unlock(__cpu_simple_lock_t *__alp)
{

#if defined(_ARM_ARCH_8) && defined(__LP64__)
	if (sizeof(*__alp) == 1) {
		__asm __volatile("stlrb\t%w0, [%1]"
		    :: "r"(__SIMPLELOCK_UNLOCKED), "r"(__alp) : "memory");
	} else {
		__asm __volatile("stlr\t%0, [%1]"
		    :: "r"(__SIMPLELOCK_UNLOCKED), "r"(__alp) : "memory");
	}
#else
	__arm_dmb_store();
	*__alp = __SIMPLELOCK_UNLOCKED;
#endif
}

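/*
 * Illustrative usage of the interface above (a sketch only; the names
 * "example_lock", "example_init", and "example_enter" are hypothetical
 * and not part of this header):
 *
 *	static __cpu_simple_lock_t example_lock;
 *
 *	void
 *	example_init(void)
 *	{
 *		__cpu_simple_lock_init(&example_lock);
 *	}
 *
 *	void
 *	example_enter(void)
 *	{
 *		__cpu_simple_lock(&example_lock);	// spins until acquired
 *		// ... critical section ...
 *		__cpu_simple_unlock(&example_lock);
 *
 *		if (__cpu_simple_lock_try(&example_lock)) {
 *			// acquired without spinning
 *			__cpu_simple_unlock(&example_lock);
 *		}
 *	}
 */
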
1.36 skrll __asm __volatile("stlrb\t%w0, [%1]" 210 1.26 matt :: "r"(__SIMPLELOCK_UNLOCKED), "r"(__alp) : "memory"); 211 1.26 matt } else { 212 1.36 skrll __asm __volatile("stlr\t%0, [%1]" 213 1.26 matt :: "r"(__SIMPLELOCK_UNLOCKED), "r"(__alp) : "memory"); 214 1.26 matt } 215 1.26 matt #else 216 1.38 skrll __arm_dmb_store(); 217 1.26 matt *__alp = __SIMPLELOCK_UNLOCKED; 218 1.20 matt #endif 219 1.2 thorpej } 220 1.2 thorpej 221 1.2 thorpej #endif /* _ARM_LOCK_H_ */ 222