/*	$NetBSD: atomic.h,v 1.18 2018/08/27 15:11:04 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_ATOMIC_H_
#define _LINUX_ATOMIC_H_

#include <sys/atomic.h>

#include <machine/limits.h>

#if defined(MULTIPROCESSOR) && !defined(__HAVE_ATOMIC_AS_MEMBAR)
#  define	smp_mb__before_atomic()		membar_exit()
#  define	smp_mb__after_atomic()		membar_enter()
#else
#  define	smp_mb__before_atomic()		__insn_barrier()
#  define	smp_mb__after_atomic()		__insn_barrier()
#endif
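
/*
 * Illustrative use (editor's sketch, not from the original source):
 * per the note below, atomics that do not return a value imply no
 * memory barrier of their own, so callers that need ordering bracket
 * them explicitly.  The object and field here are hypothetical.
 *
 *	smp_mb__before_atomic();	-- order earlier stores before the update
 *	atomic_dec(&obj->o_refcnt);	-- relaxed by itself
 *	smp_mb__after_atomic();		-- order the update before later accesses
 */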

/*
 * atomic (u)int operations
 *
 *	Atomics that return a value, other than atomic_read, imply a
 *	full memory barrier.  Those that do not return a value imply
 *	no memory barrier.
 */

struct atomic {
	union {
		volatile int au_int;
		volatile unsigned int au_uint;
	} a_u;
};

#define	ATOMIC_INIT(i)	{ .a_u = { .au_int = (i) } }

typedef struct atomic atomic_t;
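
/*
 * Illustrative use (editor's sketch): declaring and statically
 * initializing an atomic_t; the counter name is hypothetical.
 *
 *	static atomic_t v_users = ATOMIC_INIT(1);
 *
 *	atomic_set(&v_users, 2);		-- store, no memory barrier
 *	int n = atomic_read(&v_users);		-- load, no memory barrier
 */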

static inline int
atomic_read(atomic_t *atomic)
{
	/* no membar */
	return atomic->a_u.au_int;
}

static inline void
atomic_set(atomic_t *atomic, int value)
{
	/* no membar */
	atomic->a_u.au_int = value;
}

static inline void
atomic_add(int addend, atomic_t *atomic)
{
	/* no membar */
	atomic_add_int(&atomic->a_u.au_uint, addend);
}

static inline void
atomic_sub(int subtrahend, atomic_t *atomic)
{
	/* no membar */
	atomic_add_int(&atomic->a_u.au_uint, -subtrahend);
}

static inline int
atomic_add_return(int addend, atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_add_int_nv(&atomic->a_u.au_uint, addend);
	smp_mb__after_atomic();

	return v;
}

static inline void
atomic_inc(atomic_t *atomic)
{
	/* no membar */
	atomic_inc_uint(&atomic->a_u.au_uint);
}

static inline void
atomic_dec(atomic_t *atomic)
{
	/* no membar */
	atomic_dec_uint(&atomic->a_u.au_uint);
}

static inline int
atomic_inc_return(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_inc_uint_nv(&atomic->a_u.au_uint);
	smp_mb__after_atomic();

	return v;
}

static inline int
atomic_dec_return(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_dec_uint_nv(&atomic->a_u.au_uint);
	smp_mb__after_atomic();

	return v;
}

static inline int
atomic_dec_and_test(atomic_t *atomic)
{
	/* membar implied by atomic_dec_return */
	return atomic_dec_return(atomic) == 0;
}
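
/*
 * Illustrative use (editor's sketch, hypothetical struct obj and
 * obj_free routine): the full barrier implied by atomic_dec_return
 * orders all prior accesses to the object before the final free.
 *
 *	if (atomic_dec_and_test(&obj->o_refcnt))
 *		obj_free(obj);
 */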

static inline void
atomic_or(int value, atomic_t *atomic)
{
	/* no membar */
	atomic_or_uint(&atomic->a_u.au_uint, value);
}

static inline void
atomic_set_mask(unsigned long mask, atomic_t *atomic)
{
	/* no membar */
	atomic_or_uint(&atomic->a_u.au_uint, mask);
}

static inline void
atomic_clear_mask(unsigned long mask, atomic_t *atomic)
{
	/* no membar */
	atomic_and_uint(&atomic->a_u.au_uint, ~mask);
}

static inline int
atomic_add_unless(atomic_t *atomic, int addend, int zero)
{
	int value;

	smp_mb__before_atomic();
	do {
		value = atomic->a_u.au_int;
		if (value == zero)
			break;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, value, (value + addend))
	    != value);
	smp_mb__after_atomic();

	return value != zero;
}

static inline int
atomic_inc_not_zero(atomic_t *atomic)
{
	/* membar implied by atomic_add_unless */
	return atomic_add_unless(atomic, 1, 0);
}
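
/*
 * Illustrative use (editor's sketch): take a reference only while the
 * object is still live, e.g. during a lookup that races with teardown;
 * the object and field are hypothetical.
 *
 *	if (!atomic_inc_not_zero(&obj->o_refcnt))
 *		obj = NULL;	-- count already hit zero, object is going away
 */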

static inline int
atomic_xchg(atomic_t *atomic, int new)
{
	int old;

	smp_mb__before_atomic();
	old = (int)atomic_swap_uint(&atomic->a_u.au_uint, (unsigned)new);
	smp_mb__after_atomic();

	return old;
}

static inline int
atomic_cmpxchg(atomic_t *atomic, int expect, int new)
{
	int old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = (int)atomic_cas_uint(&atomic->a_u.au_uint, (unsigned)expect,
	    (unsigned)new);
	smp_mb__after_atomic();

	return old;
}
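
/*
 * Illustrative use (editor's sketch, hypothetical helper): the usual
 * compare-exchange loop retries until the value is unchanged between
 * the read and the update, here advancing a maximum monotonically.
 *
 *	static inline void
 *	atomic_max_sketch(atomic_t *a, int n)
 *	{
 *		int old;
 *
 *		do {
 *			old = atomic_read(a);
 *			if (old >= n)
 *				return;
 *		} while (atomic_cmpxchg(a, old, n) != old);
 *	}
 */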

struct atomic64 {
	volatile uint64_t a_v;
};

typedef struct atomic64 atomic64_t;

#define	ATOMIC64_INIT(v)	{ .a_v = (v) }

int		linux_atomic64_init(void);
void		linux_atomic64_fini(void);

#ifdef __HAVE_ATOMIC64_OPS

static inline uint64_t
atomic64_read(const struct atomic64 *a)
{
	/* no membar */
	return a->a_v;
}

static inline void
atomic64_set(struct atomic64 *a, uint64_t v)
{
	/* no membar */
	a->a_v = v;
}

static inline void
atomic64_add(int64_t d, struct atomic64 *a)
{
	/* no membar */
	atomic_add_64(&a->a_v, d);
}

static inline void
atomic64_sub(int64_t d, struct atomic64 *a)
{
	/* no membar */
	atomic_add_64(&a->a_v, -d);
}

static inline uint64_t
atomic64_xchg(struct atomic64 *a, uint64_t new)
{
	uint64_t old;

	smp_mb__before_atomic();
	old = atomic_swap_64(&a->a_v, new);
	smp_mb__after_atomic();

	return old;
}

static inline uint64_t
atomic64_cmpxchg(struct atomic64 *atomic, uint64_t expect, uint64_t new)
{
	uint64_t old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = atomic_cas_64(&atomic->a_v, expect, new);
	smp_mb__after_atomic();

	return old;
}

#else	/* !defined(__HAVE_ATOMIC64_OPS) */

#define	atomic64_add		linux_atomic64_add
#define	atomic64_cmpxchg	linux_atomic64_cmpxchg
#define	atomic64_read		linux_atomic64_read
#define	atomic64_set		linux_atomic64_set
#define	atomic64_sub		linux_atomic64_sub
#define	atomic64_xchg		linux_atomic64_xchg

uint64_t	atomic64_read(const struct atomic64 *);
void		atomic64_set(struct atomic64 *, uint64_t);
void		atomic64_add(int64_t, struct atomic64 *);
void		atomic64_sub(int64_t, struct atomic64 *);
uint64_t	atomic64_xchg(struct atomic64 *, uint64_t);
uint64_t	atomic64_cmpxchg(struct atomic64 *, uint64_t, uint64_t);

#endif
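
/*
 * Illustrative use (editor's sketch): the atomic64_* calls read the
 * same whether the native inlines above or the out-of-line
 * linux_atomic64_* fallbacks are in effect; the byte counter here is
 * hypothetical.
 *
 *	static atomic64_t nbytes = ATOMIC64_INIT(0);
 *
 *	atomic64_add(len, &nbytes);
 *	uint64_t total = atomic64_read(&nbytes);
 */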

struct atomic_long {
	volatile unsigned long al_v;
};

typedef struct atomic_long atomic_long_t;

static inline long
atomic_long_read(struct atomic_long *a)
{
	/* no membar */
	return (unsigned long)a->al_v;
}

static inline void
atomic_long_set(struct atomic_long *a, long v)
{
	/* no membar */
	a->al_v = v;
}

static inline long
atomic_long_add_unless(struct atomic_long *a, long addend, long zero)
{
	long value;

	smp_mb__before_atomic();
	do {
		value = (long)a->al_v;
		if (value == zero)
			break;
	} while (atomic_cas_ulong(&a->al_v, (unsigned long)value,
	    (unsigned long)(value + addend)) != (unsigned long)value);
	smp_mb__after_atomic();

	return value != zero;
}

static inline long
atomic_long_inc_not_zero(struct atomic_long *a)
{
	/* membar implied by atomic_long_add_unless */
	return atomic_long_add_unless(a, 1, 0);
}

static inline long
atomic_long_cmpxchg(struct atomic_long *a, long expect, long new)
{
	long old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = (long)atomic_cas_ulong(&a->al_v, (unsigned long)expect,
	    (unsigned long)new);
	smp_mb__after_atomic();

	return old;
}
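
/*
 * Illustrative use (editor's sketch): atomic_long_t mirrors atomic_t
 * at the width of a long; here a hypothetical softc reference count
 * is taken only while the softc is still live.
 *
 *	if (!atomic_long_inc_not_zero(&sc->sc_refs))
 *		return ENOENT;	-- hypothetical error path
 */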

static inline void
set_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);

	/* no memory barrier */
	atomic_or_ulong(&ptr[bit / units], (1UL << (bit % units)));
}

static inline void
clear_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);

	/* no memory barrier */
	atomic_and_ulong(&ptr[bit / units], ~(1UL << (bit % units)));
}

static inline void
change_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	/* no memory barrier */
	do v = *p; while (atomic_cas_ulong(p, v, (v ^ mask)) != v);
}
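
/*
 * Illustrative arithmetic (editor's sketch): with 64-bit longs,
 * bit 70 of a multi-word bitmap lands in word 70 / 64 = 1 under mask
 * 1UL << (70 % 64) = 1UL << 6.  The bitmap below is hypothetical.
 *
 *	unsigned long map[2] = { 0, 0 };
 *
 *	set_bit(70, map);	-- map[1] |= 1UL << 6, no barrier
 *	change_bit(70, map);	-- map[1] ^= 1UL << 6, clearing it again
 */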

static inline int
test_and_set_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	smp_mb__before_atomic();
	do v = *p; while (atomic_cas_ulong(p, v, (v | mask)) != v);
	smp_mb__after_atomic();

	return ((v & mask) != 0);
}

static inline int
test_and_clear_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	smp_mb__before_atomic();
	do v = *p; while (atomic_cas_ulong(p, v, (v & ~mask)) != v);
	smp_mb__after_atomic();

	return ((v & mask) != 0);
}

static inline int
test_and_change_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	smp_mb__before_atomic();
	do v = *p; while (atomic_cas_ulong(p, v, (v ^ mask)) != v);
	smp_mb__after_atomic();

	return ((v & mask) != 0);
}
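
/*
 * Illustrative use (editor's sketch): test_and_set_bit serves as a
 * one-shot claim on a flag word, and the barriers it implies order
 * the claim against the work that follows.  Note that clear_bit
 * implies no barrier, so releasing such a claim needs
 * smp_mb__before_atomic() first.  The flags word and bit are
 * hypothetical.
 *
 *	if (!test_and_set_bit(0, &sc->sc_flags)) {
 *		-- first claimant does the one-time work here
 *		smp_mb__before_atomic();
 *		clear_bit(0, &sc->sc_flags);
 *	}
 */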

#endif	/* _LINUX_ATOMIC_H_ */