/*	$NetBSD: atomic.h,v 1.13 2018/08/27 13:41:08 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_ATOMIC_H_
#define _LINUX_ATOMIC_H_

#include <sys/atomic.h>

#include <machine/limits.h>

#if defined(MULTIPROCESSOR) && !defined(__HAVE_ATOMIC_AS_MEMBAR)
#  define	smp_mb__before_atomic()		membar_exit()
#  define	smp_mb__after_atomic()		membar_enter()
#else
#  define	smp_mb__before_atomic()		__insn_barrier()
#  define	smp_mb__after_atomic()		__insn_barrier()
#endif
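
/*
 * Usage sketch (illustrative, not part of the original header): in the
 * Linux API these macros bracket an atomic operation that does not
 * itself imply a barrier.  The name obj->pending below is a
 * hypothetical field used only for the example:
 *
 *	smp_mb__before_atomic();
 *	atomic_inc(&obj->pending);
 *	smp_mb__after_atomic();
 */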

/*
 * atomic (u)int operations
 *
 *	Atomics that return a value, other than atomic_read, imply a
 *	full memory barrier.  Those that do not return a value imply
 *	no memory barrier.
 */
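
/*
 * Illustrative sketch (an addition, not from the original header):
 * under the rule above, atomic_inc() implies no memory barrier while
 * atomic_dec_and_test() implies a full one, so a reference count can
 * be dropped and the object freed as soon as the test succeeds.  The
 * names obj, refcnt, and obj_free() are hypothetical:
 *
 *	atomic_inc(&obj->refcnt);
 *	...
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		obj_free(obj);
 */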

struct atomic {
	union {
		volatile int au_int;
		volatile unsigned int au_uint;
	} a_u;
};

#define	ATOMIC_INIT(i)	{ .a_u = { .au_int = (i) } }

typedef struct atomic atomic_t;

static inline int
atomic_read(atomic_t *atomic)
{
	/* no membar */
	return atomic->a_u.au_int;
}

static inline void
atomic_set(atomic_t *atomic, int value)
{
	/* no membar */
	atomic->a_u.au_int = value;
}

static inline void
atomic_add(int addend, atomic_t *atomic)
{
	/* no membar */
	atomic_add_int(&atomic->a_u.au_uint, addend);
}

static inline void
atomic_sub(int subtrahend, atomic_t *atomic)
{
	/* no membar */
	atomic_add_int(&atomic->a_u.au_uint, -subtrahend);
}

static inline int
atomic_add_return(int addend, atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_add_int_nv(&atomic->a_u.au_uint, addend);
	smp_mb__after_atomic();

	return v;
}

static inline void
atomic_inc(atomic_t *atomic)
{
	/* no membar */
	atomic_inc_uint(&atomic->a_u.au_uint);
}

static inline void
atomic_dec(atomic_t *atomic)
{
	/* no membar */
	atomic_dec_uint(&atomic->a_u.au_uint);
}

static inline int
atomic_inc_return(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_inc_uint_nv(&atomic->a_u.au_uint);
	smp_mb__after_atomic();

	return v;
}

static inline int
atomic_dec_return(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_dec_uint_nv(&atomic->a_u.au_uint);
	smp_mb__after_atomic();

	return v;
}

static inline int
atomic_dec_and_test(atomic_t *atomic)
{
	/* membar implied by atomic_dec_return */
	return atomic_dec_return(atomic) == 0;
}

static inline void
atomic_or(int value, atomic_t *atomic)
{
	/* no membar */
	atomic_or_uint(&atomic->a_u.au_uint, value);
}

static inline void
atomic_set_mask(unsigned long mask, atomic_t *atomic)
{
	/* no membar */
	atomic_or_uint(&atomic->a_u.au_uint, mask);
}

static inline void
atomic_clear_mask(unsigned long mask, atomic_t *atomic)
{
	/* no membar */
	atomic_and_uint(&atomic->a_u.au_uint, ~mask);
}

static inline int
atomic_add_unless(atomic_t *atomic, int addend, int zero)
{
	int value;

	smp_mb__before_atomic();
	do {
		value = atomic->a_u.au_int;
		if (value == zero)
			break;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, value, (value + addend))
	    != value);
	smp_mb__after_atomic();

	return value != zero;
}

static inline int
atomic_inc_not_zero(atomic_t *atomic)
{
	/* membar implied by atomic_add_unless */
	return atomic_add_unless(atomic, 1, 0);
}

static inline int
atomic_xchg(atomic_t *atomic, int new)
{
	int old;

	smp_mb__before_atomic();
	old = (int)atomic_swap_uint(&atomic->a_u.au_uint, (unsigned)new);
	smp_mb__after_atomic();

	return old;
}

static inline int
atomic_cmpxchg(atomic_t *atomic, int expect, int new)
{
	int old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = (int)atomic_cas_uint(&atomic->a_u.au_uint, (unsigned)expect,
	    (unsigned)new);
	smp_mb__after_atomic();

	return old;
}
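
/*
 * Usage sketch (an assumption about typical callers, not from the
 * original header): Linux code generally retries atomic_cmpxchg()
 * until it returns the value it expected, e.g. to increment a counter
 * only while it stays below some hypothetical LIMIT:
 *
 *	int old, new;
 *	do {
 *		old = atomic_read(&v);
 *		if (old == LIMIT)
 *			break;
 *		new = old + 1;
 *	} while (atomic_cmpxchg(&v, old, new) != old);
 */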

struct atomic64 {
	volatile uint64_t a_v;
};

typedef struct atomic64 atomic64_t;

static inline uint64_t
atomic64_read(const struct atomic64 *a)
{
	/* no membar */
	return a->a_v;
}

static inline void
atomic64_set(struct atomic64 *a, uint64_t v)
{
	/* no membar */
	a->a_v = v;
}

static inline void
atomic64_add(long long d, struct atomic64 *a)
{
	/* no membar */
	atomic_add_64(&a->a_v, d);
}

static inline void
atomic64_sub(long long d, struct atomic64 *a)
{
	/* no membar */
	atomic_add_64(&a->a_v, -d);
}

static inline uint64_t
atomic64_xchg(struct atomic64 *a, uint64_t new)
{
	uint64_t old;

	smp_mb__before_atomic();
	old = atomic_swap_64(&a->a_v, new);
	smp_mb__after_atomic();

	return old;
}

static inline uint64_t
atomic64_cmpxchg(struct atomic64 *atomic, uint64_t expect, uint64_t new)
{
	uint64_t old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = atomic_cas_64(&atomic->a_v, expect, new);
	smp_mb__after_atomic();

	return old;
}

static inline void
set_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);

	/* no memory barrier */
	atomic_or_ulong(&ptr[bit / units], (1UL << (bit % units)));
}

static inline void
clear_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);

	/* no memory barrier */
	atomic_and_ulong(&ptr[bit / units], ~(1UL << (bit % units)));
}

static inline void
change_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	/* no memory barrier */
	do v = *p; while (atomic_cas_ulong(p, v, (v ^ mask)) != v);
}

static inline int
test_and_set_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	smp_mb__before_atomic();
	do v = *p; while (atomic_cas_ulong(p, v, (v | mask)) != v);
	smp_mb__after_atomic();

	return ((v & mask) != 0);
}

static inline int
test_and_clear_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	smp_mb__before_atomic();
	do v = *p; while (atomic_cas_ulong(p, v, (v & ~mask)) != v);
	smp_mb__after_atomic();

	return ((v & mask) != 0);
}

static inline int
test_and_change_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	smp_mb__before_atomic();
	do v = *p; while (atomic_cas_ulong(p, v, (v ^ mask)) != v);
	smp_mb__after_atomic();

	return ((v & mask) != 0);
}

#endif	/* _LINUX_ATOMIC_H_ */