/*	$NetBSD: atomic.h,v 1.36 2021/12/19 11:14:56 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_ATOMIC_H_
#define _LINUX_ATOMIC_H_

#include <sys/atomic.h>

#include <machine/limits.h>

#include <asm/barrier.h>

#define	xchg(P, V)							      \
	(sizeof(*(P)) == 4 ? atomic_swap_32((volatile uint32_t *)(P), (V))   \
	    : sizeof(*(P)) == 8 ? atomic_swap_64((volatile uint64_t *)(P), (V)) \
	    : (__builtin_abort(), 0))

#define	cmpxchg(P, O, N)						      \
	(sizeof(*(P)) == 4 ? atomic_cas_32((volatile uint32_t *)(P), (O), (N)) \
	    : sizeof(*(P)) == 8 ? atomic_cas_64((volatile uint64_t *)(P), (O), (N)) \
	    : (__builtin_abort(), 0))
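
/*
 * Minimal usage sketch (the flags variable is hypothetical): xchg and
 * cmpxchg dispatch on sizeof(*(P)), so 32-bit and 64-bit objects are
 * both handled; any other object size reaches __builtin_abort().
 *
 *	uint32_t flags = 0;
 *	uint32_t prev;
 *
 *	prev = xchg(&flags, 1);		unconditional swap, returns 0 here
 *	prev = cmpxchg(&flags, 1, 2);	store 2 only if still 1, returns 1
 */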

/*
 * atomic (u)int operations
 *
 *	Atomics that return a value, other than atomic_read, imply a
 *	full memory barrier.  Those that do not return a value imply
 *	no memory barrier.
 */
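
/*
 * Illustrative sketch of the contract above (obj, payload, and count
 * are hypothetical): a caller that needs its prior stores ordered
 * before the update must use a value-returning operation.
 *
 *	obj->payload = p;
 *	atomic_inc(&obj->count);		no ordering implied
 *
 *	obj->payload = p;
 *	(void)atomic_inc_return(&obj->count);	prior store ordered first
 */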

struct atomic {
	union {
		volatile int au_int;
		volatile unsigned int au_uint;
	} a_u;
};

#define	ATOMIC_INIT(i)	{ .a_u = { .au_int = (i) } }

typedef struct atomic atomic_t;
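
/*
 * Minimal usage sketch (the counter and limit are hypothetical):
 *
 *	static atomic_t alloc_count = ATOMIC_INIT(0);
 *
 *	atomic_inc(&alloc_count);
 *	if (atomic_read(&alloc_count) > FOO_ALLOC_LIMIT)
 *		...
 */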

static inline int
atomic_read(const atomic_t *atomic)
{
	/* no membar */
	return atomic->a_u.au_int;
}

static inline void
atomic_set(atomic_t *atomic, int value)
{
	/* no membar */
	atomic->a_u.au_int = value;
}

static inline void
atomic_set_release(atomic_t *atomic, int value)
{
	atomic_store_release(&atomic->a_u.au_int, value);
}

static inline void
atomic_add(int addend, atomic_t *atomic)
{
	/* no membar */
	atomic_add_int(&atomic->a_u.au_uint, addend);
}

static inline void
atomic_sub(int subtrahend, atomic_t *atomic)
{
	/* no membar */
	atomic_add_int(&atomic->a_u.au_uint, -subtrahend);
}

static inline int
atomic_add_return(int addend, atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_add_int_nv(&atomic->a_u.au_uint, addend);
	smp_mb__after_atomic();

	return v;
}

static inline int
atomic_sub_return(int subtrahend, atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_add_int_nv(&atomic->a_u.au_uint, -subtrahend);
	smp_mb__after_atomic();

	return v;
}

static inline void
atomic_inc(atomic_t *atomic)
{
	/* no membar */
	atomic_inc_uint(&atomic->a_u.au_uint);
}

static inline void
atomic_dec(atomic_t *atomic)
{
	/* no membar */
	atomic_dec_uint(&atomic->a_u.au_uint);
}

static inline int
atomic_inc_return(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_inc_uint_nv(&atomic->a_u.au_uint);
	smp_mb__after_atomic();

	return v;
}

static inline int
atomic_dec_return(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_dec_uint_nv(&atomic->a_u.au_uint);
	smp_mb__after_atomic();

	return v;
}

static inline int
atomic_dec_and_test(atomic_t *atomic)
{
	/* membar implied by atomic_dec_return */
	return atomic_dec_return(atomic) == 0;
}
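
/*
 * Reference-counting sketch (struct foo and foo_destroy are
 * hypothetical): the full barrier implied by atomic_dec_and_test is
 * what makes it safe to tear the object down once the count reaches
 * zero.
 *
 *	static void
 *	foo_release(struct foo *f)
 *	{
 *
 *		if (atomic_dec_and_test(&f->refcnt))
 *			foo_destroy(f);
 *	}
 */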

static inline int
atomic_dec_if_positive(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	do {
		v = atomic->a_u.au_uint;
		if (v <= 0)
			break;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, v, v - 1) != v);
	smp_mb__after_atomic();

	return v - 1;
}

static inline void
atomic_or(int value, atomic_t *atomic)
{
	/* no membar */
	atomic_or_uint(&atomic->a_u.au_uint, value);
}

static inline void
atomic_andnot(int value, atomic_t *atomic)
{
	/* no membar */
	atomic_and_uint(&atomic->a_u.au_uint, ~value);
}

static inline int
atomic_fetch_xor(int value, atomic_t *atomic)
{
	unsigned old, new;

	smp_mb__before_atomic();
	do {
		old = atomic->a_u.au_uint;
		new = old ^ value;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, old, new) != old);
	smp_mb__after_atomic();

	return old;
}

static inline void
atomic_set_mask(unsigned long mask, atomic_t *atomic)
{
	/* no membar */
	atomic_or_uint(&atomic->a_u.au_uint, mask);
}

static inline void
atomic_clear_mask(unsigned long mask, atomic_t *atomic)
{
	/* no membar */
	atomic_and_uint(&atomic->a_u.au_uint, ~mask);
}

static inline int
atomic_add_unless(atomic_t *atomic, int addend, int zero)
{
	int value;

	/*
	 * Add addend to the atomic unless its current value equals
	 * the zero argument; return nonzero iff the add happened.
	 */
	smp_mb__before_atomic();
	do {
		value = atomic->a_u.au_int;
		if (value == zero)
			break;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, value, (value + addend))
	    != (unsigned)value);
	smp_mb__after_atomic();

	return value != zero;
}

static inline int
atomic_inc_not_zero(atomic_t *atomic)
{
	/* membar implied by atomic_add_unless */
	return atomic_add_unless(atomic, 1, 0);
}
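
/*
 * Sketch of the lookup pattern atomic_inc_not_zero supports (the
 * lock, lookup function, and struct are hypothetical): acquire a new
 * reference only if the object has not already hit zero and begun
 * teardown.
 *
 *	mutex_enter(&foo_lock);
 *	f = foo_lookup(key);
 *	if (f != NULL && !atomic_inc_not_zero(&f->refcnt))
 *		f = NULL;	(lost the race with the final release)
 *	mutex_exit(&foo_lock);
 */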

static inline int
atomic_xchg(atomic_t *atomic, int new)
{
	int old;

	smp_mb__before_atomic();
	old = (int)atomic_swap_uint(&atomic->a_u.au_uint, (unsigned)new);
	smp_mb__after_atomic();

	return old;
}

static inline int
atomic_cmpxchg(atomic_t *atomic, int expect, int new)
{
	int old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = (int)atomic_cas_uint(&atomic->a_u.au_uint, (unsigned)expect,
	    (unsigned)new);
	smp_mb__after_atomic();

	return old;
}
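
/*
 * Sketch of a read-modify-write loop built from atomic_cmpxchg (the
 * transform function and the atomic_t a are hypothetical): retry
 * until the value the update was based on is still the value in
 * memory.
 *
 *	int old, new;
 *
 *	do {
 *		old = atomic_read(&a);
 *		new = transform(old);
 *	} while (atomic_cmpxchg(&a, old, new) != old);
 */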

struct atomic64 {
	volatile uint64_t a_v;
};

typedef struct atomic64 atomic64_t;

#define	ATOMIC64_INIT(v)	{ .a_v = (v) }

int		linux_atomic64_init(void);
void		linux_atomic64_fini(void);
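
/*
 * linux_atomic64_init/linux_atomic64_fini are primarily of interest
 * on platforms without __HAVE_ATOMIC64_OPS, where the atomic64_*
 * operations below are redirected to out-of-line fallbacks; a sketch
 * of the expected bracketing in module attach/detach code (error
 * handling abridged):
 *
 *	error = linux_atomic64_init();
 *	if (error)
 *		goto fail;
 *	...
 *	linux_atomic64_fini();
 */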

#ifdef __HAVE_ATOMIC64_OPS

static inline uint64_t
atomic64_read(const struct atomic64 *a)
{
	/* no membar */
	return a->a_v;
}

static inline void
atomic64_set(struct atomic64 *a, uint64_t v)
{
	/* no membar */
	a->a_v = v;
}

static inline void
atomic64_add(int64_t d, struct atomic64 *a)
{
	/* no membar */
	atomic_add_64(&a->a_v, d);
}

static inline void
atomic64_sub(int64_t d, struct atomic64 *a)
{
	/* no membar */
	atomic_add_64(&a->a_v, -d);
}

static inline int64_t
atomic64_add_return(int64_t d, struct atomic64 *a)
{
	int64_t v;

	smp_mb__before_atomic();
	v = (int64_t)atomic_add_64_nv(&a->a_v, d);
	smp_mb__after_atomic();

	return v;
}

static inline uint64_t
atomic64_xchg(struct atomic64 *a, uint64_t new)
{
	uint64_t old;

	smp_mb__before_atomic();
	old = atomic_swap_64(&a->a_v, new);
	smp_mb__after_atomic();

	return old;
}

static inline uint64_t
atomic64_cmpxchg(struct atomic64 *atomic, uint64_t expect, uint64_t new)
{
	uint64_t old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = atomic_cas_64(&atomic->a_v, expect, new);
	smp_mb__after_atomic();

	return old;
}

#else	/* !defined(__HAVE_ATOMIC64_OPS) */

#define	atomic64_add		linux_atomic64_add
#define	atomic64_add_return	linux_atomic64_add_return
#define	atomic64_cmpxchg	linux_atomic64_cmpxchg
#define	atomic64_read		linux_atomic64_read
#define	atomic64_set		linux_atomic64_set
#define	atomic64_sub		linux_atomic64_sub
#define	atomic64_xchg		linux_atomic64_xchg

uint64_t	atomic64_read(const struct atomic64 *);
void		atomic64_set(struct atomic64 *, uint64_t);
void		atomic64_add(int64_t, struct atomic64 *);
void		atomic64_sub(int64_t, struct atomic64 *);
int64_t		atomic64_add_return(int64_t, struct atomic64 *);
uint64_t	atomic64_xchg(struct atomic64 *, uint64_t);
uint64_t	atomic64_cmpxchg(struct atomic64 *, uint64_t, uint64_t);

#endif

static inline int64_t
atomic64_inc_return(struct atomic64 *a)
{
	return atomic64_add_return(1, a);
}

struct atomic_long {
	volatile unsigned long al_v;
};

typedef struct atomic_long atomic_long_t;

static inline long
atomic_long_read(struct atomic_long *a)
{
	/* no membar */
	return (long)a->al_v;
}

static inline void
atomic_long_set(struct atomic_long *a, long v)
{
	/* no membar */
	a->al_v = v;
}

static inline long
atomic_long_add_unless(struct atomic_long *a, long addend, long zero)
{
	long value;

	smp_mb__before_atomic();
	do {
		value = (long)a->al_v;
		if (value == zero)
			break;
	} while (atomic_cas_ulong(&a->al_v, (unsigned long)value,
	    (unsigned long)(value + addend)) != (unsigned long)value);
	smp_mb__after_atomic();

	return value != zero;
}

static inline long
atomic_long_inc_not_zero(struct atomic_long *a)
{
	/* membar implied by atomic_long_add_unless */
	return atomic_long_add_unless(a, 1, 0);
}

static inline long
atomic_long_xchg(struct atomic_long *a, long new)
{
	long old;

	smp_mb__before_atomic();
	old = (long)atomic_swap_ulong(&a->al_v, (unsigned long)new);
	smp_mb__after_atomic();

	return old;
}

static inline long
atomic_long_cmpxchg(struct atomic_long *a, long expect, long new)
{
	long old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = (long)atomic_cas_ulong(&a->al_v, (unsigned long)expect,
	    (unsigned long)new);
	smp_mb__after_atomic();

	return old;
}

#endif	/* _LINUX_ATOMIC_H_ */