/*	$NetBSD: atomic.h,v 1.39 2021/12/19 11:31:11 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_ATOMIC_H_
#define _LINUX_ATOMIC_H_

#include <sys/atomic.h>

#include <machine/limits.h>

#include <asm/barrier.h>

/* XXX Hope the GCC __sync builtins work everywhere we care about! */
#define	xchg(P, V)		__sync_lock_test_and_set(P, V)
#define	cmpxchg(P, O, N)	__sync_val_compare_and_swap(P, O, N)
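
/*
 * Illustrative sketch (not part of this header): the usual cmpxchg
 * retry loop for an arbitrary read-modify-write on a plain word.
 * The names p and bit here are hypothetical.
 *
 *	unsigned old, new;
 *
 *	do {
 *		old = *p;
 *		new = old | bit;
 *	} while (cmpxchg(p, old, new) != old);
 */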

/*
 * atomic (u)int operations
 *
 *	Atomics that return a value, other than atomic_read, imply a
 *	full memory barrier (membar_sync).  Those that do not return
 *	a value imply no memory barrier.
 */
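
/*
 * Illustrative sketch (not part of this header): atomic_inc by itself
 * implies no barrier, so a caller that needs ordering must mirror what
 * the value-returning operations do internally.  Here count is a
 * hypothetical atomic_t.
 *
 *	smp_mb__before_atomic();
 *	atomic_inc(&count);
 *	smp_mb__after_atomic();
 */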

struct atomic {
	union {
		volatile int au_int;
		volatile unsigned int au_uint;
	} a_u;
};

#define	ATOMIC_INIT(i)	{ .a_u = { .au_int = (i) } }

typedef struct atomic atomic_t;

static inline int
atomic_read(const atomic_t *atomic)
{
	/* no membar */
	return atomic->a_u.au_int;
}

static inline void
atomic_set(atomic_t *atomic, int value)
{
	/* no membar */
	atomic->a_u.au_int = value;
}

static inline void
atomic_set_release(atomic_t *atomic, int value)
{
	atomic_store_release(&atomic->a_u.au_int, value);
}

static inline void
atomic_add(int addend, atomic_t *atomic)
{
	/* no membar */
	atomic_add_int(&atomic->a_u.au_uint, addend);
}

static inline void
atomic_sub(int subtrahend, atomic_t *atomic)
{
	/* no membar */
	atomic_add_int(&atomic->a_u.au_uint, -subtrahend);
}

static inline int
atomic_add_return(int addend, atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_add_int_nv(&atomic->a_u.au_uint, addend);
	smp_mb__after_atomic();

	return v;
}

static inline int
atomic_sub_return(int subtrahend, atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_add_int_nv(&atomic->a_u.au_uint, -subtrahend);
	smp_mb__after_atomic();

	return v;
}

static inline void
atomic_inc(atomic_t *atomic)
{
	/* no membar */
	atomic_inc_uint(&atomic->a_u.au_uint);
}

static inline void
atomic_dec(atomic_t *atomic)
{
	/* no membar */
	atomic_dec_uint(&atomic->a_u.au_uint);
}

static inline int
atomic_inc_return(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_inc_uint_nv(&atomic->a_u.au_uint);
	smp_mb__after_atomic();

	return v;
}

static inline int
atomic_dec_return(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_dec_uint_nv(&atomic->a_u.au_uint);
	smp_mb__after_atomic();

	return v;
}

static inline int
atomic_dec_and_test(atomic_t *atomic)
{
	/* membar implied by atomic_dec_return */
	return atomic_dec_return(atomic) == 0;
}

static inline int
atomic_dec_if_positive(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	do {
		v = atomic->a_u.au_uint;
		if (v <= 0)
			break;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, v, v - 1) != v);
	smp_mb__after_atomic();

	return v - 1;
}
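
/*
 * Usage sketch (hypothetical caller): atomic_dec_if_positive returns
 * the decremented value, so a negative return means the count was not
 * positive and was left untouched.
 *
 *	if (atomic_dec_if_positive(&sc->sc_count) < 0)
 *		return EBUSY;
 */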

static inline void
atomic_or(int value, atomic_t *atomic)
{
	/* no membar */
	atomic_or_uint(&atomic->a_u.au_uint, value);
}

static inline void
atomic_andnot(int value, atomic_t *atomic)
{
	/* no membar */
	atomic_and_uint(&atomic->a_u.au_uint, ~value);
}

static inline int
atomic_fetch_add(int value, atomic_t *atomic)
{
	unsigned old, new;

	smp_mb__before_atomic();
	do {
		old = atomic->a_u.au_uint;
		new = old + value;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, old, new) != old);
	smp_mb__after_atomic();

	return old;
}

static inline int
atomic_fetch_inc(atomic_t *atomic)
{
	return atomic_fetch_add(1, atomic);
}

static inline int
atomic_fetch_xor(int value, atomic_t *atomic)
{
	unsigned old, new;

	smp_mb__before_atomic();
	do {
		old = atomic->a_u.au_uint;
		new = old ^ value;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, old, new) != old);
	smp_mb__after_atomic();

	return old;
}

static inline void
atomic_set_mask(unsigned long mask, atomic_t *atomic)
{
	/* no membar */
	atomic_or_uint(&atomic->a_u.au_uint, mask);
}

static inline void
atomic_clear_mask(unsigned long mask, atomic_t *atomic)
{
	/* no membar */
	atomic_and_uint(&atomic->a_u.au_uint, ~mask);
}

static inline int
atomic_add_unless(atomic_t *atomic, int addend, int zero)
{
	int value;

	smp_mb__before_atomic();
	do {
		value = atomic->a_u.au_int;
		if (value == zero)
			break;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, value, (value + addend))
	    != (unsigned)value);
	smp_mb__after_atomic();

	return value != zero;
}

static inline int
atomic_inc_not_zero(atomic_t *atomic)
{
	/* membar implied by atomic_add_unless */
	return atomic_add_unless(atomic, 1, 0);
}
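
/*
 * Usage sketch (hypothetical caller): the classic lookup pattern that
 * takes a new reference only if the object's refcount has not already
 * dropped to zero.
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;
 *	return obj;
 */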
270 1.2 riastrad
271 1.5 riastrad static inline int
272 1.5 riastrad atomic_xchg(atomic_t *atomic, int new)
273 1.5 riastrad {
274 1.13 riastrad int old;
275 1.13 riastrad
276 1.13 riastrad smp_mb__before_atomic();
277 1.13 riastrad old = (int)atomic_swap_uint(&atomic->a_u.au_uint, (unsigned)new);
278 1.13 riastrad smp_mb__after_atomic();
279 1.13 riastrad
280 1.13 riastrad return old;
281 1.5 riastrad }
282 1.5 riastrad
283 1.5 riastrad static inline int
284 1.13 riastrad atomic_cmpxchg(atomic_t *atomic, int expect, int new)
285 1.5 riastrad {
286 1.13 riastrad int old;
287 1.13 riastrad
288 1.13 riastrad /*
289 1.13 riastrad * XXX As an optimization, under Linux's semantics we are
290 1.13 riastrad * allowed to skip the memory barrier if the comparison fails,
291 1.13 riastrad * but taking advantage of that is not convenient here.
292 1.13 riastrad */
293 1.13 riastrad smp_mb__before_atomic();
294 1.13 riastrad old = (int)atomic_cas_uint(&atomic->a_u.au_uint, (unsigned)expect,
295 1.5 riastrad (unsigned)new);
296 1.13 riastrad smp_mb__after_atomic();
297 1.13 riastrad
298 1.13 riastrad return old;
299 1.5 riastrad }
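
/*
 * Illustrative sketch (not part of this header): atomic_cmpxchg
 * returns the old value, so a retry loop succeeds when the return
 * value matches the value it expected.  Here a is a hypothetical
 * atomic_t.
 *
 *	int old, seen;
 *
 *	old = atomic_read(&a);
 *	while ((seen = atomic_cmpxchg(&a, old, old + 1)) != old)
 *		old = seen;
 */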

struct atomic64 {
	volatile uint64_t a_v;
};

typedef struct atomic64 atomic64_t;

#define	ATOMIC64_INIT(v)	{ .a_v = (v) }

int	linux_atomic64_init(void);
void	linux_atomic64_fini(void);
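
/*
 * Note: when __HAVE_ATOMIC64_OPS is not defined, the atomic64
 * operations below are provided out of line (presumably serialized by
 * a lock), so linux_atomic64_init is expected to run before their
 * first use and linux_atomic64_fini at teardown.
 */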

#ifdef __HAVE_ATOMIC64_OPS

static inline uint64_t
atomic64_read(const struct atomic64 *a)
{
	/* no membar */
	return a->a_v;
}

static inline void
atomic64_set(struct atomic64 *a, uint64_t v)
{
	/* no membar */
	a->a_v = v;
}

static inline void
atomic64_add(int64_t d, struct atomic64 *a)
{
	/* no membar */
	atomic_add_64(&a->a_v, d);
}

static inline void
atomic64_sub(int64_t d, struct atomic64 *a)
{
	/* no membar */
	atomic_add_64(&a->a_v, -d);
}

static inline int64_t
atomic64_add_return(int64_t d, struct atomic64 *a)
{
	int64_t v;

	smp_mb__before_atomic();
	v = (int64_t)atomic_add_64_nv(&a->a_v, d);
	smp_mb__after_atomic();

	return v;
}

static inline uint64_t
atomic64_xchg(struct atomic64 *a, uint64_t new)
{
	uint64_t old;

	smp_mb__before_atomic();
	old = atomic_swap_64(&a->a_v, new);
	smp_mb__after_atomic();

	return old;
}

static inline uint64_t
atomic64_cmpxchg(struct atomic64 *atomic, uint64_t expect, uint64_t new)
{
	uint64_t old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = atomic_cas_64(&atomic->a_v, expect, new);
	smp_mb__after_atomic();

	return old;
}

#else  /* !defined(__HAVE_ATOMIC64_OPS) */

#define	atomic64_add		linux_atomic64_add
#define	atomic64_add_return	linux_atomic64_add_return
#define	atomic64_cmpxchg	linux_atomic64_cmpxchg
#define	atomic64_read		linux_atomic64_read
#define	atomic64_set		linux_atomic64_set
#define	atomic64_sub		linux_atomic64_sub
#define	atomic64_xchg		linux_atomic64_xchg

uint64_t	atomic64_read(const struct atomic64 *);
void		atomic64_set(struct atomic64 *, uint64_t);
void		atomic64_add(int64_t, struct atomic64 *);
void		atomic64_sub(int64_t, struct atomic64 *);
int64_t		atomic64_add_return(int64_t, struct atomic64 *);
uint64_t	atomic64_xchg(struct atomic64 *, uint64_t);
uint64_t	atomic64_cmpxchg(struct atomic64 *, uint64_t, uint64_t);

#endif

static inline int64_t
atomic64_inc_return(struct atomic64 *a)
{
	return atomic64_add_return(1, a);
}

struct atomic_long {
	volatile unsigned long al_v;
};

typedef struct atomic_long atomic_long_t;

static inline long
atomic_long_read(const struct atomic_long *a)
{
	/* no membar */
	return (long)a->al_v;
}

static inline void
atomic_long_set(struct atomic_long *a, long v)
{
	/* no membar */
	a->al_v = v;
}

static inline long
atomic_long_add_unless(struct atomic_long *a, long addend, long zero)
{
	long value;

	smp_mb__before_atomic();
	do {
		value = (long)a->al_v;
		if (value == zero)
			break;
	} while (atomic_cas_ulong(&a->al_v, (unsigned long)value,
	    (unsigned long)(value + addend)) != (unsigned long)value);
	smp_mb__after_atomic();

	return value != zero;
}

static inline long
atomic_long_inc_not_zero(struct atomic_long *a)
{
	/* membar implied by atomic_long_add_unless */
	return atomic_long_add_unless(a, 1, 0);
}

static inline long
atomic_long_xchg(struct atomic_long *a, long new)
{
	long old;

	smp_mb__before_atomic();
	old = (long)atomic_swap_ulong(&a->al_v, (unsigned long)new);
	smp_mb__after_atomic();

	return old;
}

static inline long
atomic_long_cmpxchg(struct atomic_long *a, long expect, long new)
{
	long old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = (long)atomic_cas_ulong(&a->al_v, (unsigned long)expect,
	    (unsigned long)new);
	smp_mb__after_atomic();

	return old;
}

#endif	/* _LINUX_ATOMIC_H_ */