/*	$NetBSD: atomic.h,v 1.33 2021/12/19 11:02:46 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_ATOMIC_H_
#define _LINUX_ATOMIC_H_

#include <sys/atomic.h>

#include <machine/limits.h>

#include <asm/barrier.h>

#define xchg(P, V)                                                            \
        (sizeof(*(P)) == 4 ? atomic_swap_32((volatile uint32_t *)(P), (V))   \
        : sizeof(*(P)) == 8 ? atomic_swap_64((volatile uint64_t *)(P), (V))  \
        : (__builtin_abort(), 0))
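
/*
 * Usage sketch, with hypothetical variable names: xchg() dispatches on
 * the size of the object pointed to, so it works on 32-bit and 64-bit
 * objects alike; any other size aborts at run time via __builtin_abort().
 *
 *      uint32_t count = 5;
 *      uint64_t cookie = 17;
 *
 *      uint32_t old_count = xchg(&count, 0);     old_count == 5
 *      uint64_t old_cookie = xchg(&cookie, 42);  old_cookie == 17
 */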

/*
 * atomic (u)int operations
 *
 * Atomics that return a value, other than atomic_read, imply a full
 * memory barrier (membar_sync).  Those that do not return a value
 * imply no memory barrier.
 */

struct atomic {
        union {
                volatile int au_int;
                volatile unsigned int au_uint;
        } a_u;
};

#define ATOMIC_INIT(i) { .a_u = { .au_int = (i) } }

typedef struct atomic atomic_t;
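
/*
 * Usage sketch, with hypothetical names (obj, o_refcnt, obj_free): a
 * reference count built on these operations, relying on the barrier
 * rule above.  atomic_inc returns no value and so implies no barrier;
 * atomic_dec_and_test returns a value and so implies a full barrier,
 * ordering the final decrement after all prior accesses to the object.
 *
 *      struct obj {
 *              atomic_t        o_refcnt;
 *      };
 *
 *      static void
 *      obj_get(struct obj *o)
 *      {
 *              atomic_inc(&o->o_refcnt);
 *      }
 *
 *      static void
 *      obj_put(struct obj *o)
 *      {
 *              if (atomic_dec_and_test(&o->o_refcnt))
 *                      obj_free(o);
 *      }
 */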

static inline int
atomic_read(const atomic_t *atomic)
{
        /* no membar */
        return atomic->a_u.au_int;
}

static inline void
atomic_set(atomic_t *atomic, int value)
{
        /* no membar */
        atomic->a_u.au_int = value;
}

static inline void
atomic_set_release(atomic_t *atomic, int value)
{
        atomic_store_release(&atomic->a_u.au_int, value);
}

static inline void
atomic_add(int addend, atomic_t *atomic)
{
        /* no membar */
        atomic_add_int(&atomic->a_u.au_uint, addend);
}

static inline void
atomic_sub(int subtrahend, atomic_t *atomic)
{
        /* no membar */
        atomic_add_int(&atomic->a_u.au_uint, -subtrahend);
}

static inline int
atomic_add_return(int addend, atomic_t *atomic)
{
        int v;

        smp_mb__before_atomic();
        v = (int)atomic_add_int_nv(&atomic->a_u.au_uint, addend);
        smp_mb__after_atomic();

        return v;
}

static inline void
atomic_inc(atomic_t *atomic)
{
        /* no membar */
        atomic_inc_uint(&atomic->a_u.au_uint);
}

static inline void
atomic_dec(atomic_t *atomic)
{
        /* no membar */
        atomic_dec_uint(&atomic->a_u.au_uint);
}

static inline int
atomic_inc_return(atomic_t *atomic)
{
        int v;

        smp_mb__before_atomic();
        v = (int)atomic_inc_uint_nv(&atomic->a_u.au_uint);
        smp_mb__after_atomic();

        return v;
}

static inline int
atomic_dec_return(atomic_t *atomic)
{
        int v;

        smp_mb__before_atomic();
        v = (int)atomic_dec_uint_nv(&atomic->a_u.au_uint);
        smp_mb__after_atomic();

        return v;
}

static inline int
atomic_dec_and_test(atomic_t *atomic)
{
        /* membar implied by atomic_dec_return */
        return atomic_dec_return(atomic) == 0;
}

static inline int
atomic_dec_if_positive(atomic_t *atomic)
{
        int v;

        smp_mb__before_atomic();
        do {
                v = atomic->a_u.au_uint;
                if (v <= 0)
                        break;
        } while (atomic_cas_uint(&atomic->a_u.au_uint, v, v - 1) != v);
        smp_mb__after_atomic();

        return v - 1;
}
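
/*
 * Usage sketch, with hypothetical names (credits, try_take_credit):
 * atomic_dec_if_positive as a try-acquire on a credit counter.  The
 * count is decremented only while it is positive; a negative return
 * value means nothing was taken.
 *
 *      static atomic_t credits = ATOMIC_INIT(4);
 *
 *      static int
 *      try_take_credit(void)
 *      {
 *              return atomic_dec_if_positive(&credits) >= 0;
 *      }
 */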

static inline void
atomic_or(int value, atomic_t *atomic)
{
        /* no membar */
        atomic_or_uint(&atomic->a_u.au_uint, value);
}

static inline void
atomic_andnot(int value, atomic_t *atomic)
{
        /* no membar */
        atomic_and_uint(&atomic->a_u.au_uint, ~value);
}

static inline int
atomic_fetch_xor(int value, atomic_t *atomic)
{
        unsigned old, new;

        smp_mb__before_atomic();
        do {
                old = atomic->a_u.au_uint;
                new = old ^ value;
        } while (atomic_cas_uint(&atomic->a_u.au_uint, old, new) != old);
        smp_mb__after_atomic();

        return old;
}

static inline void
atomic_set_mask(unsigned long mask, atomic_t *atomic)
{
        /* no membar */
        atomic_or_uint(&atomic->a_u.au_uint, mask);
}

static inline void
atomic_clear_mask(unsigned long mask, atomic_t *atomic)
{
        /* no membar */
        atomic_and_uint(&atomic->a_u.au_uint, ~mask);
}

static inline int
atomic_add_unless(atomic_t *atomic, int addend, int zero)
{
        int value;

        smp_mb__before_atomic();
        do {
                value = atomic->a_u.au_int;
                if (value == zero)
                        break;
        } while (atomic_cas_uint(&atomic->a_u.au_uint, value, (value + addend))
            != (unsigned)value);
        smp_mb__after_atomic();

        return value != zero;
}

static inline int
atomic_inc_not_zero(atomic_t *atomic)
{
        /* membar implied by atomic_add_unless */
        return atomic_add_unless(atomic, 1, 0);
}
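
/*
 * Usage sketch, with hypothetical names (obj, o_refcnt):
 * atomic_inc_not_zero is the usual guard against taking a new
 * reference to an object whose count has already reached zero and
 * which is therefore being torn down, e.g. after an unlocked lookup.
 *
 *      if (!atomic_inc_not_zero(&obj->o_refcnt))
 *              return NULL;            count was already zero
 *      return obj;
 */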

static inline int
atomic_xchg(atomic_t *atomic, int new)
{
        int old;

        smp_mb__before_atomic();
        old = (int)atomic_swap_uint(&atomic->a_u.au_uint, (unsigned)new);
        smp_mb__after_atomic();

        return old;
}

static inline int
atomic_cmpxchg(atomic_t *atomic, int expect, int new)
{
        int old;

        /*
         * XXX As an optimization, under Linux's semantics we are
         * allowed to skip the memory barrier if the comparison fails,
         * but taking advantage of that is not convenient here.
         */
        smp_mb__before_atomic();
        old = (int)atomic_cas_uint(&atomic->a_u.au_uint, (unsigned)expect,
            (unsigned)new);
        smp_mb__after_atomic();

        return old;
}
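
/*
 * Usage sketch, with a hypothetical helper (atomic_max): the typical
 * atomic_cmpxchg retry loop.  cmpxchg returns the previous value, so
 * the loop repeats until the value it read is the value it replaced.
 *
 *      static void
 *      atomic_max(atomic_t *a, int value)
 *      {
 *              int old, prev;
 *
 *              old = atomic_read(a);
 *              while (old < value) {
 *                      prev = atomic_cmpxchg(a, old, value);
 *                      if (prev == old)
 *                              break;
 *                      old = prev;
 *              }
 *      }
 */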

struct atomic64 {
        volatile uint64_t a_v;
};

typedef struct atomic64 atomic64_t;

#define ATOMIC64_INIT(v) { .a_v = (v) }

int linux_atomic64_init(void);
void linux_atomic64_fini(void);

#ifdef __HAVE_ATOMIC64_OPS

static inline uint64_t
atomic64_read(const struct atomic64 *a)
{
        /* no membar */
        return a->a_v;
}

static inline void
atomic64_set(struct atomic64 *a, uint64_t v)
{
        /* no membar */
        a->a_v = v;
}

static inline void
atomic64_add(int64_t d, struct atomic64 *a)
{
        /* no membar */
        atomic_add_64(&a->a_v, d);
}

static inline void
atomic64_sub(int64_t d, struct atomic64 *a)
{
        /* no membar */
        atomic_add_64(&a->a_v, -d);
}

static inline int64_t
atomic64_add_return(int64_t d, struct atomic64 *a)
{
        int64_t v;

        smp_mb__before_atomic();
        v = (int64_t)atomic_add_64_nv(&a->a_v, d);
        smp_mb__after_atomic();

        return v;
}

static inline uint64_t
atomic64_xchg(struct atomic64 *a, uint64_t new)
{
        uint64_t old;

        smp_mb__before_atomic();
        old = atomic_swap_64(&a->a_v, new);
        smp_mb__after_atomic();

        return old;
}

static inline uint64_t
atomic64_cmpxchg(struct atomic64 *atomic, uint64_t expect, uint64_t new)
{
        uint64_t old;

        /*
         * XXX As an optimization, under Linux's semantics we are
         * allowed to skip the memory barrier if the comparison fails,
         * but taking advantage of that is not convenient here.
         */
        smp_mb__before_atomic();
        old = atomic_cas_64(&atomic->a_v, expect, new);
        smp_mb__after_atomic();

        return old;
}

#else   /* !defined(__HAVE_ATOMIC64_OPS) */

#define atomic64_add            linux_atomic64_add
#define atomic64_add_return     linux_atomic64_add_return
#define atomic64_cmpxchg        linux_atomic64_cmpxchg
#define atomic64_read           linux_atomic64_read
#define atomic64_set            linux_atomic64_set
#define atomic64_sub            linux_atomic64_sub
#define atomic64_xchg           linux_atomic64_xchg

uint64_t atomic64_read(const struct atomic64 *);
void atomic64_set(struct atomic64 *, uint64_t);
void atomic64_add(int64_t, struct atomic64 *);
void atomic64_sub(int64_t, struct atomic64 *);
int64_t atomic64_add_return(int64_t, struct atomic64 *);
uint64_t atomic64_xchg(struct atomic64 *, uint64_t);
uint64_t atomic64_cmpxchg(struct atomic64 *, uint64_t, uint64_t);

#endif

static inline int64_t
atomic64_inc_return(struct atomic64 *a)
{
        return atomic64_add_return(1, a);
}
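
/*
 * Usage sketch, with hypothetical names (bytes_transferred,
 * account_io): a 64-bit statistics counter.  Callers are written the
 * same way whether the inline operations above or the
 * linux_atomic64_* fallbacks are in effect, since both are reached
 * through the same names.
 *
 *      static atomic64_t bytes_transferred = ATOMIC64_INIT(0);
 *
 *      static void
 *      account_io(size_t len)
 *      {
 *              atomic64_add(len, &bytes_transferred);
 *      }
 */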

struct atomic_long {
        volatile unsigned long al_v;
};

typedef struct atomic_long atomic_long_t;

static inline long
atomic_long_read(struct atomic_long *a)
{
        /* no membar */
        return (unsigned long)a->al_v;
}

static inline void
atomic_long_set(struct atomic_long *a, long v)
{
        /* no membar */
        a->al_v = v;
}

static inline long
atomic_long_add_unless(struct atomic_long *a, long addend, long zero)
{
        long value;

        smp_mb__before_atomic();
        do {
                value = (long)a->al_v;
                if (value == zero)
                        break;
        } while (atomic_cas_ulong(&a->al_v, (unsigned long)value,
            (unsigned long)(value + addend)) != (unsigned long)value);
        smp_mb__after_atomic();

        return value != zero;
}

static inline long
atomic_long_inc_not_zero(struct atomic_long *a)
{
        /* membar implied by atomic_long_add_unless */
        return atomic_long_add_unless(a, 1, 0);
}

static inline long
atomic_long_xchg(struct atomic_long *a, long new)
{
        long old;

        smp_mb__before_atomic();
        old = (long)atomic_swap_ulong(&a->al_v, (unsigned long)new);
        smp_mb__after_atomic();

        return old;
}

static inline long
atomic_long_cmpxchg(struct atomic_long *a, long expect, long new)
{
        long old;

        /*
         * XXX As an optimization, under Linux's semantics we are
         * allowed to skip the memory barrier if the comparison fails,
         * but taking advantage of that is not convenient here.
         */
        smp_mb__before_atomic();
        old = (long)atomic_cas_ulong(&a->al_v, (unsigned long)expect,
            (unsigned long)new);
        smp_mb__after_atomic();

        return old;
}

static inline void
set_bit(unsigned int bit, volatile unsigned long *ptr)
{
        const unsigned int units = (sizeof(*ptr) * CHAR_BIT);

        /* no memory barrier */
        atomic_or_ulong(&ptr[bit / units], (1UL << (bit % units)));
}

static inline void
clear_bit(unsigned int bit, volatile unsigned long *ptr)
{
        const unsigned int units = (sizeof(*ptr) * CHAR_BIT);

        /* no memory barrier */
        atomic_and_ulong(&ptr[bit / units], ~(1UL << (bit % units)));
}

static inline void
clear_bit_unlock(unsigned int bit, volatile unsigned long *ptr)
{
        const unsigned int units = (sizeof(*ptr) * CHAR_BIT);

        /* store-release */
        smp_mb__before_atomic();
        atomic_and_ulong(&ptr[bit / units], ~(1UL << (bit % units)));
}

static inline void
change_bit(unsigned int bit, volatile unsigned long *ptr)
{
        const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
        volatile unsigned long *const p = &ptr[bit / units];
        const unsigned long mask = (1UL << (bit % units));
        unsigned long v;

        /* no memory barrier */
        do v = *p; while (atomic_cas_ulong(p, v, (v ^ mask)) != v);
}

static inline int
test_and_set_bit(unsigned int bit, volatile unsigned long *ptr)
{
        const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
        volatile unsigned long *const p = &ptr[bit / units];
        const unsigned long mask = (1UL << (bit % units));
        unsigned long v;

        smp_mb__before_atomic();
        do v = *p; while (atomic_cas_ulong(p, v, (v | mask)) != v);
        smp_mb__after_atomic();

        return ((v & mask) != 0);
}

static inline int
test_and_clear_bit(unsigned int bit, volatile unsigned long *ptr)
{
        const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
        volatile unsigned long *const p = &ptr[bit / units];
        const unsigned long mask = (1UL << (bit % units));
        unsigned long v;

        smp_mb__before_atomic();
        do v = *p; while (atomic_cas_ulong(p, v, (v & ~mask)) != v);
        smp_mb__after_atomic();

        return ((v & mask) != 0);
}

static inline int
test_and_change_bit(unsigned int bit, volatile unsigned long *ptr)
{
        const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
        volatile unsigned long *const p = &ptr[bit / units];
        const unsigned long mask = (1UL << (bit % units));
        unsigned long v;

        smp_mb__before_atomic();
        do v = *p; while (atomic_cas_ulong(p, v, (v ^ mask)) != v);
        smp_mb__after_atomic();

        return ((v & mask) != 0);
}
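
/*
 * Usage sketch, with hypothetical names (FLAG_BUSY, flags, try_claim,
 * release): a simple lock bit in a flags word.  test_and_set_bit
 * implies a full barrier, so a successful claim acts as an acquire;
 * clear_bit_unlock provides the matching release.
 *
 *      #define FLAG_BUSY       0
 *      static volatile unsigned long flags;
 *
 *      static int
 *      try_claim(void)
 *      {
 *              return !test_and_set_bit(FLAG_BUSY, &flags);
 *      }
 *
 *      static void
 *      release(void)
 *      {
 *              clear_bit_unlock(FLAG_BUSY, &flags);
 *      }
 */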

#endif  /* _LINUX_ATOMIC_H_ */