/*	$NetBSD: lock.h,v 1.37 2021/04/26 16:35:54 skrll Exp $	*/

/*-
 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Machine-dependent spin lock operations.
 *
 * NOTE: The SWP insn used here is available only on ARM architecture
 * version 3 and later (as well as 2a).  What we are going to do is
 * expect that the kernel will trap and emulate the insn.  That will
 * be slow, but give us the atomicity that we need.
 */
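
/*
 * Typical use from machine-independent code (illustrative sketch only;
 * see the definitions below for the exact semantics):
 *
 *	__cpu_simple_lock_t lock;
 *
 *	__cpu_simple_lock_init(&lock);
 *	__cpu_simple_lock(&lock);
 *	... critical section ...
 *	__cpu_simple_unlock(&lock);
 */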

#ifndef _ARM_LOCK_H_
#define _ARM_LOCK_H_

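/*
 * Lock-word accessors and predicates.  These only read or write the lock
 * word directly; they provide no atomicity or memory barriers of their own.
 */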
static __inline int
__SIMPLELOCK_LOCKED_P(const __cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_LOCKED;
}

static __inline int
__SIMPLELOCK_UNLOCKED_P(const __cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_LOCKED;
}

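/*
 * Atomic exchange primitives.  On ARMv6 and later we use the exclusive
 * load/store instructions (LDREX/STREX); in a pre-v6 kernel we use SWPB
 * directly; otherwise SWP is emitted in a template that can be replaced
 * with an LDREX/STREX sequence (see the comment further below).
 */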
#if defined(_ARM_ARCH_6)
static __inline unsigned int
__arm_load_exclusive(__cpu_simple_lock_t *__alp)
{
	unsigned int __rv;
	if (/*CONSTCOND*/sizeof(*__alp) == 1) {
		__asm __volatile("ldrexb\t%0,[%1]" : "=r"(__rv) : "r"(__alp));
	} else {
		__asm __volatile("ldrex\t%0,[%1]" : "=r"(__rv) : "r"(__alp));
	}
	return __rv;
}

/* returns 0 on success and 1 on failure */
static __inline unsigned int
__arm_store_exclusive(__cpu_simple_lock_t *__alp, unsigned int __val)
{
	unsigned int __rv;
	if (/*CONSTCOND*/sizeof(*__alp) == 1) {
		__asm __volatile("strexb\t%0,%1,[%2]"
		    : "=&r"(__rv) : "r"(__val), "r"(__alp) : "cc", "memory");
	} else {
		__asm __volatile("strex\t%0,%1,[%2]"
		    : "=&r"(__rv) : "r"(__val), "r"(__alp) : "cc", "memory");
	}
	return __rv;
}
#elif defined(_KERNEL)
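/* Pre-ARMv6 kernel: SWPB performs the atomic byte exchange directly. */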
static __inline unsigned char
__swp(unsigned char __val, __cpu_simple_lock_t *__ptr)
{
	uint32_t __val32;
	__asm volatile("swpb %0, %1, [%2]"
	    : "=&r" (__val32) : "r" (__val), "r" (__ptr) : "memory");
	return __val32;
}
#else
/*
 * On MP Cortex, SWP no longer guarantees atomic results.  Thus we pad
 * out SWP so that when the cpu generates an undefined exception we can
 * replace the SWP/MOV instructions with the right LDREX/STREX instructions.
 *
 * This is why we force the SWP into the template needed for LDREX/STREX
 * including the extra instructions and extra register for testing the result.
 */
static __inline int
__swp(int __val, __cpu_simple_lock_t *__ptr)
{
	int __tmp, __rv;
	__asm volatile(
#if 1
	"1:\t"	"swp	%[__rv], %[__val], [%[__ptr]]"
	"\n\t"	"b	2f"
#else
	"1:\t"	"ldrex	%[__rv],[%[__ptr]]"
	"\n\t"	"strex	%[__tmp],%[__val],[%[__ptr]]"
#endif
	"\n\t"	"cmp	%[__tmp],#0"
	"\n\t"	"bne	1b"
	"\n"	"2:"
	    : [__rv] "=&r" (__rv), [__tmp] "=&r" (__tmp)
	    : [__val] "r" (__val), [__ptr] "r" (__ptr) : "cc", "memory");
	return __rv;
}
#endif /* !_ARM_ARCH_6 */

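/*
 * Memory barrier helpers.  On ARMv7 these are the DSB/DMB instructions;
 * on ARMv6 the equivalent CP15 c7 operations are used.  On earlier
 * architectures they compile to nothing.
 */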
static __inline void
__arm_membar_producer(void)
{
#if defined(_ARM_ARCH_7)
	__asm __volatile("dsb" ::: "memory");
#elif defined(_ARM_ARCH_6)
	__asm __volatile("mcr\tp15,0,%0,c7,c10,4" :: "r"(0) : "memory");
#endif
}

static __inline void
__arm_membar_consumer(void)
{
#if defined(_ARM_ARCH_7)
	__asm __volatile("dmb" ::: "memory");
#elif defined(_ARM_ARCH_6)
	__asm __volatile("mcr\tp15,0,%0,c7,c10,5" :: "r"(0) : "memory");
#endif
}

static __inline void __unused
__cpu_simple_lock_init(__cpu_simple_lock_t *__alp)
{

	*__alp = __SIMPLELOCK_UNLOCKED;
	__arm_membar_producer();
}

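/*
 * Plain Thumb-1 code cannot encode the instructions used below, so when
 * building for Thumb without Thumb-2 support the lock and try-lock
 * operations are provided as out-of-line functions instead.
 */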
#if !defined(__thumb__) || defined(_ARM_ARCH_T2)
static __inline void __unused
__cpu_simple_lock(__cpu_simple_lock_t *__alp)
{
#if defined(_ARM_ARCH_6)
	__arm_membar_consumer();
	do {
		/* spin */
	} while (__arm_load_exclusive(__alp) != __SIMPLELOCK_UNLOCKED
	    || __arm_store_exclusive(__alp, __SIMPLELOCK_LOCKED));
	__arm_membar_producer();
#else
	while (__swp(__SIMPLELOCK_LOCKED, __alp) != __SIMPLELOCK_UNLOCKED)
		continue;
#endif
}
#else
void __cpu_simple_lock(__cpu_simple_lock_t *);
#endif

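/* Try-lock: returns non-zero if the lock was acquired, zero otherwise. */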
#if !defined(__thumb__) || defined(_ARM_ARCH_T2)
static __inline int __unused
__cpu_simple_lock_try(__cpu_simple_lock_t *__alp)
{
#if defined(_ARM_ARCH_6)
	__arm_membar_consumer();
	do {
		if (__arm_load_exclusive(__alp) != __SIMPLELOCK_UNLOCKED) {
			return 0;
		}
	} while (__arm_store_exclusive(__alp, __SIMPLELOCK_LOCKED));
	__arm_membar_producer();
	return 1;
#else
	return (__swp(__SIMPLELOCK_LOCKED, __alp) == __SIMPLELOCK_UNLOCKED);
#endif
}
#else
int __cpu_simple_lock_try(__cpu_simple_lock_t *);
#endif

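/*
 * Unlock.  On ARMv8 a store-release instruction is used; otherwise the
 * lock word is cleared between a consumer and a producer barrier.
 */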
static __inline void __unused
__cpu_simple_unlock(__cpu_simple_lock_t *__alp)
{

#if defined(_ARM_ARCH_8)
	if (sizeof(*__alp) == 1) {
		__asm __volatile("stlrb\t%w0, [%1]"
		    :: "r"(__SIMPLELOCK_UNLOCKED), "r"(__alp) : "memory");
	} else {
		__asm __volatile("stlr\t%0, [%1]"
		    :: "r"(__SIMPLELOCK_UNLOCKED), "r"(__alp) : "memory");
	}
#else
	__arm_membar_consumer();
	*__alp = __SIMPLELOCK_UNLOCKED;
	__arm_membar_producer();
#endif
}

#endif /* _ARM_LOCK_H_ */