/*	$NetBSD: atomic.h,v 1.7.30.2 2020/04/08 14:08:27 martin Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_ATOMIC_H_
#define _LINUX_ATOMIC_H_

#include <sys/atomic.h>

#include <machine/limits.h>

#include <asm/barrier.h>

#if defined(MULTIPROCESSOR) && !defined(__HAVE_ATOMIC_AS_MEMBAR)
#	define	smp_mb__before_atomic()		membar_exit()
#	define	smp_mb__after_atomic()		membar_enter()
#else
#	define	smp_mb__before_atomic()		__insn_barrier()
#	define	smp_mb__after_atomic()		__insn_barrier()
#endif

/*
 * atomic (u)int operations
 *
 *	Atomics that return a value, other than atomic_read, imply a
 *	full memory barrier.  Those that do not return a value imply
 *	no memory barrier.
 */

struct atomic {
	union {
		volatile int au_int;
		volatile unsigned int au_uint;
	} a_u;
};

#define	ATOMIC_INIT(i)	{ .a_u = { .au_int = (i) } }

typedef struct atomic atomic_t;

static inline int
atomic_read(atomic_t *atomic)
{
	/* no membar */
	return atomic->a_u.au_int;
}

static inline void
atomic_set(atomic_t *atomic, int value)
{
	/* no membar */
	atomic->a_u.au_int = value;
}

static inline void
atomic_add(int addend, atomic_t *atomic)
{
	/* no membar */
	atomic_add_int(&atomic->a_u.au_uint, addend);
}

static inline void
atomic_sub(int subtrahend, atomic_t *atomic)
{
	/* no membar */
	atomic_add_int(&atomic->a_u.au_uint, -subtrahend);
}

static inline int
atomic_add_return(int addend, atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_add_int_nv(&atomic->a_u.au_uint, addend);
	smp_mb__after_atomic();

	return v;
}

static inline void
atomic_inc(atomic_t *atomic)
{
	/* no membar */
	atomic_inc_uint(&atomic->a_u.au_uint);
}

static inline void
atomic_dec(atomic_t *atomic)
{
	/* no membar */
	atomic_dec_uint(&atomic->a_u.au_uint);
}

static inline int
atomic_inc_return(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_inc_uint_nv(&atomic->a_u.au_uint);
	smp_mb__after_atomic();

	return v;
}

static inline int
atomic_dec_return(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_dec_uint_nv(&atomic->a_u.au_uint);
	smp_mb__after_atomic();

	return v;
}

static inline int
atomic_dec_and_test(atomic_t *atomic)
{
	/* membar implied by atomic_dec_return */
	return atomic_dec_return(atomic) == 0;
}

static inline void
atomic_or(int value, atomic_t *atomic)
{
	/* no membar */
	atomic_or_uint(&atomic->a_u.au_uint, value);
}

static inline void
atomic_set_mask(unsigned long mask, atomic_t *atomic)
{
	/* no membar */
	atomic_or_uint(&atomic->a_u.au_uint, mask);
}

static inline void
atomic_clear_mask(unsigned long mask, atomic_t *atomic)
{
	/* no membar */
	atomic_and_uint(&atomic->a_u.au_uint, ~mask);
}

static inline int
atomic_add_unless(atomic_t *atomic, int addend, int zero)
{
	int value;

	smp_mb__before_atomic();
	do {
		value = atomic->a_u.au_int;
		if (value == zero)
			break;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, value, (value + addend))
	    != value);
	smp_mb__after_atomic();

	return value != zero;
}

static inline int
atomic_inc_not_zero(atomic_t *atomic)
{
	/* membar implied by atomic_add_unless */
	return atomic_add_unless(atomic, 1, 0);
}
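
/*
 * Typical use of atomic_inc_not_zero, sketched for illustration only
 * (obj and its refcnt member are hypothetical): take a reference only
 * while the count is still nonzero, i.e. while the object has not yet
 * begun to be destroyed.
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;
 */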

static inline int
atomic_xchg(atomic_t *atomic, int new)
{
	int old;

	smp_mb__before_atomic();
	old = (int)atomic_swap_uint(&atomic->a_u.au_uint, (unsigned)new);
	smp_mb__after_atomic();

	return old;
}

static inline int
atomic_cmpxchg(atomic_t *atomic, int expect, int new)
{
	int old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = (int)atomic_cas_uint(&atomic->a_u.au_uint, (unsigned)expect,
	    (unsigned)new);
	smp_mb__after_atomic();

	return old;
}

struct atomic64 {
	volatile uint64_t a_v;
};

typedef struct atomic64 atomic64_t;

#define	ATOMIC64_INIT(v)	{ .a_v = (v) }

int	linux_atomic64_init(void);
void	linux_atomic64_fini(void);

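/*
 * 64-bit atomics: where the port provides native 64-bit primitives
 * (__HAVE_ATOMIC64_OPS) the operations are inlined below; otherwise
 * they are routed to out-of-line linux_atomic64_* implementations, and
 * linux_atomic64_init()/linux_atomic64_fini() set up and tear down
 * whatever state that fallback needs.
 */
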
#ifdef __HAVE_ATOMIC64_OPS

static inline uint64_t
atomic64_read(const struct atomic64 *a)
{
	/* no membar */
	return a->a_v;
}

static inline void
atomic64_set(struct atomic64 *a, uint64_t v)
{
	/* no membar */
	a->a_v = v;
}

static inline void
atomic64_add(int64_t d, struct atomic64 *a)
{
	/* no membar */
	atomic_add_64(&a->a_v, d);
}

static inline void
atomic64_sub(int64_t d, struct atomic64 *a)
{
	/* no membar */
	atomic_add_64(&a->a_v, -d);
}

static inline int64_t
atomic64_add_return(int64_t d, struct atomic64 *a)
{
	int64_t v;

	smp_mb__before_atomic();
	v = (int64_t)atomic_add_64_nv(&a->a_v, d);
	smp_mb__after_atomic();

	return v;
}

static inline uint64_t
atomic64_xchg(struct atomic64 *a, uint64_t new)
{
	uint64_t old;

	smp_mb__before_atomic();
	old = atomic_swap_64(&a->a_v, new);
	smp_mb__after_atomic();

	return old;
}

static inline uint64_t
atomic64_cmpxchg(struct atomic64 *atomic, uint64_t expect, uint64_t new)
{
	uint64_t old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = atomic_cas_64(&atomic->a_v, expect, new);
	smp_mb__after_atomic();

	return old;
}

#else	/* !defined(__HAVE_ATOMIC64_OPS) */

#define	atomic64_add		linux_atomic64_add
#define	atomic64_add_return	linux_atomic64_add_return
#define	atomic64_cmpxchg	linux_atomic64_cmpxchg
#define	atomic64_read		linux_atomic64_read
#define	atomic64_set		linux_atomic64_set
#define	atomic64_sub		linux_atomic64_sub
#define	atomic64_xchg		linux_atomic64_xchg

uint64_t	atomic64_read(const struct atomic64 *);
void		atomic64_set(struct atomic64 *, uint64_t);
void		atomic64_add(int64_t, struct atomic64 *);
void		atomic64_sub(int64_t, struct atomic64 *);
int64_t		atomic64_add_return(int64_t, struct atomic64 *);
uint64_t	atomic64_xchg(struct atomic64 *, uint64_t);
uint64_t	atomic64_cmpxchg(struct atomic64 *, uint64_t, uint64_t);

#endif	/* __HAVE_ATOMIC64_OPS */

static inline int64_t
atomic64_inc_return(struct atomic64 *a)
{
	return atomic64_add_return(1, a);
}

struct atomic_long {
	volatile unsigned long al_v;
};

typedef struct atomic_long atomic_long_t;

static inline long
atomic_long_read(struct atomic_long *a)
{
	/* no membar */
	return (unsigned long)a->al_v;
}

static inline void
atomic_long_set(struct atomic_long *a, long v)
{
	/* no membar */
	a->al_v = v;
}

static inline long
atomic_long_add_unless(struct atomic_long *a, long addend, long zero)
{
	long value;

	smp_mb__before_atomic();
	do {
		value = (long)a->al_v;
		if (value == zero)
			break;
	} while (atomic_cas_ulong(&a->al_v, (unsigned long)value,
	    (unsigned long)(value + addend)) != (unsigned long)value);
	smp_mb__after_atomic();

	return value != zero;
}

static inline long
atomic_long_inc_not_zero(struct atomic_long *a)
{
	/* membar implied by atomic_long_add_unless */
	return atomic_long_add_unless(a, 1, 0);
}

static inline long
atomic_long_cmpxchg(struct atomic_long *a, long expect, long new)
{
	long old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = (long)atomic_cas_ulong(&a->al_v, (unsigned long)expect,
	    (unsigned long)new);
	smp_mb__after_atomic();

	return old;
}

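/*
 * Bit operations on arrays of unsigned long.  A sketch of the layout
 * assumed by the index arithmetic below: bit N lives in word
 * ptr[N / units] under mask 1UL << (N % units), where
 * units = sizeof(unsigned long) * CHAR_BIT.  Hypothetical example
 * (map is not part of this header):
 *
 *	unsigned long map[4] = { 0 };
 *	set_bit(65, map);	sets word 65 / units, mask 1UL << (65 % units)
 */
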
static inline void
set_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);

	/* no memory barrier */
	atomic_or_ulong(&ptr[bit / units], (1UL << (bit % units)));
}

static inline void
clear_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);

	/* no memory barrier */
	atomic_and_ulong(&ptr[bit / units], ~(1UL << (bit % units)));
}

static inline void
change_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	/* no memory barrier */
	do v = *p; while (atomic_cas_ulong(p, v, (v ^ mask)) != v);
}

static inline int
test_and_set_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	smp_mb__before_atomic();
	do v = *p; while (atomic_cas_ulong(p, v, (v | mask)) != v);
	smp_mb__after_atomic();

	return ((v & mask) != 0);
}

static inline int
test_and_clear_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	smp_mb__before_atomic();
	do v = *p; while (atomic_cas_ulong(p, v, (v & ~mask)) != v);
	smp_mb__after_atomic();

	return ((v & mask) != 0);
}

static inline int
test_and_change_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	smp_mb__before_atomic();
	do v = *p; while (atomic_cas_ulong(p, v, (v ^ mask)) != v);
	smp_mb__after_atomic();

	return ((v & mask) != 0);
}

#endif	/* _LINUX_ATOMIC_H_ */