/*	$NetBSD: atomic.h,v 1.14 2018/08/27 13:58:16 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_ATOMIC_H_
#define _LINUX_ATOMIC_H_

#include <sys/atomic.h>

#include <machine/limits.h>

#if defined(MULTIPROCESSOR) && !defined(__HAVE_ATOMIC_AS_MEMBAR)
#  define	smp_mb__before_atomic()		membar_exit()
#  define	smp_mb__after_atomic()		membar_enter()
#else
#  define	smp_mb__before_atomic()		__insn_barrier()
#  define	smp_mb__after_atomic()		__insn_barrier()
#endif

/*
 * atomic (u)int operations
 *
 *	Atomics that return a value, other than atomic_read, imply a
 *	full memory barrier.  Those that do not return a value
 *	imply no memory barrier.
 */
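
/*
 * Illustrative example (not part of this header), assuming a
 * hypothetical atomic_t count: atomic_add() alone implies no
 * ordering, so a caller that wants Linux's fully ordered semantics
 * either uses the value-returning form or brackets the operation
 * with the smp_mb__*() hooks.
 *
 *	atomic_add(1, &count);			// no ordering implied
 *	(void)atomic_add_return(1, &count);	// full barrier around the add
 *
 *	smp_mb__before_atomic();
 *	atomic_inc(&count);
 *	smp_mb__after_atomic();			// same ordering as above
 */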

struct atomic {
	union {
		volatile int au_int;
		volatile unsigned int au_uint;
	} a_u;
};

#define	ATOMIC_INIT(i)	{ .a_u = { .au_int = (i) } }

typedef struct atomic atomic_t;

static inline int
atomic_read(atomic_t *atomic)
{
	/* no membar */
	return atomic->a_u.au_int;
}

static inline void
atomic_set(atomic_t *atomic, int value)
{
	/* no membar */
	atomic->a_u.au_int = value;
}

static inline void
atomic_add(int addend, atomic_t *atomic)
{
	/* no membar */
	atomic_add_int(&atomic->a_u.au_uint, addend);
}

static inline void
atomic_sub(int subtrahend, atomic_t *atomic)
{
	/* no membar */
	atomic_add_int(&atomic->a_u.au_uint, -subtrahend);
}

static inline int
atomic_add_return(int addend, atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_add_int_nv(&atomic->a_u.au_uint, addend);
	smp_mb__after_atomic();

	return v;
}

static inline void
atomic_inc(atomic_t *atomic)
{
	/* no membar */
	atomic_inc_uint(&atomic->a_u.au_uint);
}

static inline void
atomic_dec(atomic_t *atomic)
{
	/* no membar */
	atomic_dec_uint(&atomic->a_u.au_uint);
}

static inline int
atomic_inc_return(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_inc_uint_nv(&atomic->a_u.au_uint);
	smp_mb__after_atomic();

	return v;
}

static inline int
atomic_dec_return(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_dec_uint_nv(&atomic->a_u.au_uint);
	smp_mb__after_atomic();

	return v;
}

static inline int
atomic_dec_and_test(atomic_t *atomic)
{
	/* membar implied by atomic_dec_return */
	return atomic_dec_return(atomic) == 0;
}
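
/*
 * Illustrative example (not part of this header): the usual Linux
 * release pattern, assuming a hypothetical struct foo with an
 * atomic_t f_refcnt member and a foo_destroy() routine.
 *
 *	if (atomic_dec_and_test(&foo->f_refcnt))
 *		foo_destroy(foo);
 *
 * The full barrier implied by atomic_dec_return() orders the caller's
 * prior accesses to *foo before the destruction.
 */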

static inline void
atomic_or(int value, atomic_t *atomic)
{
	/* no membar */
	atomic_or_uint(&atomic->a_u.au_uint, value);
}

static inline void
atomic_set_mask(unsigned long mask, atomic_t *atomic)
{
	/* no membar */
	atomic_or_uint(&atomic->a_u.au_uint, mask);
}

static inline void
atomic_clear_mask(unsigned long mask, atomic_t *atomic)
{
	/* no membar */
	atomic_and_uint(&atomic->a_u.au_uint, ~mask);
}

static inline int
atomic_add_unless(atomic_t *atomic, int addend, int zero)
{
	int value;

	smp_mb__before_atomic();
	do {
		value = atomic->a_u.au_int;
		if (value == zero)
			break;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, value, (value + addend))
	    != value);
	smp_mb__after_atomic();

	return value != zero;
}

static inline int
atomic_inc_not_zero(atomic_t *atomic)
{
	/* membar implied by atomic_add_unless */
	return atomic_add_unless(atomic, 1, 0);
}
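
/*
 * Illustrative example (not part of this header): taking a new
 * reference only while the object is still alive, with a hypothetical
 * struct foo as above.
 *
 *	if (!atomic_inc_not_zero(&foo->f_refcnt))
 *		return NULL;	// object is already being torn down
 *
 * atomic_add_unless() returns nonzero iff the addition was performed,
 * i.e. the counter was not equal to the `zero' argument.
 */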

static inline int
atomic_xchg(atomic_t *atomic, int new)
{
	int old;

	smp_mb__before_atomic();
	old = (int)atomic_swap_uint(&atomic->a_u.au_uint, (unsigned)new);
	smp_mb__after_atomic();

	return old;
}

static inline int
atomic_cmpxchg(atomic_t *atomic, int expect, int new)
{
	int old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = (int)atomic_cas_uint(&atomic->a_u.au_uint, (unsigned)expect,
	    (unsigned)new);
	smp_mb__after_atomic();

	return old;
}
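
/*
 * Illustrative example (not part of this header): a typical
 * read/modify/cmpxchg retry loop, here saturating a hypothetical
 * atomic_t count at INT_MAX.
 *
 *	int old, new;
 *
 *	do {
 *		old = atomic_read(&count);
 *		new = (old == INT_MAX ? old : old + 1);
 *	} while (atomic_cmpxchg(&count, old, new) != old);
 *
 * atomic_cmpxchg() returns the value it observed in the target, so
 * the loop retries until the observed value matches `old'.
 */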

struct atomic64 {
	volatile uint64_t a_v;
};

typedef struct atomic64 atomic64_t;

static inline uint64_t
atomic64_read(const struct atomic64 *a)
{
	/* no membar */
	return a->a_v;
}

static inline void
atomic64_set(struct atomic64 *a, uint64_t v)
{
	/* no membar */
	a->a_v = v;
}

static inline void
atomic64_add(long long d, struct atomic64 *a)
{
	/* no membar */
	atomic_add_64(&a->a_v, d);
}

static inline void
atomic64_sub(long long d, struct atomic64 *a)
{
	/* no membar */
	atomic_add_64(&a->a_v, -d);
}

static inline uint64_t
atomic64_xchg(struct atomic64 *a, uint64_t new)
{
	uint64_t old;

	smp_mb__before_atomic();
	old = atomic_swap_64(&a->a_v, new);
	smp_mb__after_atomic();

	return old;
}

static inline uint64_t
atomic64_cmpxchg(struct atomic64 *atomic, uint64_t expect, uint64_t new)
{
	uint64_t old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = atomic_cas_64(&atomic->a_v, expect, new);
	smp_mb__after_atomic();

	return old;
}

struct atomic_long {
	volatile unsigned long al_v;
};

typedef struct atomic_long atomic_long_t;

static inline long
atomic_long_read(struct atomic_long *a)
{
	/* no membar */
	return (unsigned long)a->al_v;
}

static inline void
atomic_long_set(struct atomic_long *a, long v)
{
	/* no membar */
	a->al_v = v;
}

static inline long
atomic_long_add_unless(struct atomic_long *a, long addend, long zero)
{
	long value;

	smp_mb__before_atomic();
	do {
		value = (long)a->al_v;
		if (value == zero)
			break;
	} while (atomic_cas_ulong(&a->al_v, (unsigned long)value,
		(unsigned long)(value + addend)) != (unsigned long)value);
	smp_mb__after_atomic();

	return value != zero;
}

static inline long
atomic_long_inc_not_zero(struct atomic_long *a)
{
	/* membar implied by atomic_long_add_unless */
	return atomic_long_add_unless(a, 1, 0);
}

static inline long
atomic_long_cmpxchg(struct atomic_long *a, long expect, long new)
{
	long old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = (long)atomic_cas_ulong(&a->al_v, (unsigned long)expect,
	    (unsigned long)new);
	smp_mb__after_atomic();

	return old;
}
350
351 static inline void
352 set_bit(unsigned int bit, volatile unsigned long *ptr)
353 {
354 const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
355
356 /* no memory barrier */
357 atomic_or_ulong(&ptr[bit / units], (1UL << (bit % units)));
358 }
359
360 static inline void
361 clear_bit(unsigned int bit, volatile unsigned long *ptr)
362 {
363 const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
364
365 /* no memory barrier */
366 atomic_and_ulong(&ptr[bit / units], ~(1UL << (bit % units)));
367 }
368
369 static inline void
370 change_bit(unsigned int bit, volatile unsigned long *ptr)
371 {
372 const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
373 volatile unsigned long *const p = &ptr[bit / units];
374 const unsigned long mask = (1UL << (bit % units));
375 unsigned long v;
376
377 /* no memory barrier */
378 do v = *p; while (atomic_cas_ulong(p, v, (v ^ mask)) != v);
379 }
380
381 static inline int
382 test_and_set_bit(unsigned int bit, volatile unsigned long *ptr)
383 {
384 const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
385 volatile unsigned long *const p = &ptr[bit / units];
386 const unsigned long mask = (1UL << (bit % units));
387 unsigned long v;
388
389 smp_mb__before_atomic();
390 do v = *p; while (atomic_cas_ulong(p, v, (v | mask)) != v);
391 smp_mb__after_atomic();
392
393 return ((v & mask) != 0);
394 }
395
396 static inline int
397 test_and_clear_bit(unsigned int bit, volatile unsigned long *ptr)
398 {
399 const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
400 volatile unsigned long *const p = &ptr[bit / units];
401 const unsigned long mask = (1UL << (bit % units));
402 unsigned long v;
403
404 smp_mb__before_atomic();
405 do v = *p; while (atomic_cas_ulong(p, v, (v & ~mask)) != v);
406 smp_mb__after_atomic();
407
408 return ((v & mask) != 0);
409 }
410
411 static inline int
412 test_and_change_bit(unsigned int bit, volatile unsigned long *ptr)
413 {
414 const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
415 volatile unsigned long *const p = &ptr[bit / units];
416 const unsigned long mask = (1UL << (bit % units));
417 unsigned long v;
418
419 smp_mb__before_atomic();
420 do v = *p; while (atomic_cas_ulong(p, v, (v ^ mask)) != v);
421 smp_mb__after_atomic();
422
423 return ((v & mask) != 0);
424 }
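
/*
 * Illustrative example (not part of this header): the bit operations
 * act on an array of unsigned long, with bit/units selecting the word
 * and bit%units the bit within it.  A caller-supplied bitmap might
 * look like the following.
 *
 *	unsigned long flags[2] = { 0, 0 };	// 2*sizeof(long)*CHAR_BIT bits
 *
 *	set_bit(3, flags);			// no barrier
 *	if (test_and_clear_bit(3, flags))	// full barrier
 *		...
 *
 * The test_and_*() forms imply a full memory barrier; set_bit(),
 * clear_bit(), and change_bit() imply none.
 */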

#endif	/* _LINUX_ATOMIC_H_ */