/*	$NetBSD: atomic.h,v 1.42 2021/12/19 12:21:30 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_ATOMIC_H_
#define _LINUX_ATOMIC_H_

#include <sys/atomic.h>

#include <machine/limits.h>

#include <asm/barrier.h>

/* XXX Hope the GCC __sync builtins work everywhere we care about! */
#define	xchg(P, V)		__sync_lock_test_and_set(P, V)
#define	cmpxchg(P, O, N)	__sync_val_compare_and_swap(P, O, N)
#define	try_cmpxchg(P, V, N)						      \
({									      \
	__typeof__(*(V)) *__tcx_v = (V), __tcx_expected = *__tcx_v;	      \
	(*__tcx_v = cmpxchg((P), __tcx_expected, (N))) == __tcx_expected;    \
})
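
/*
 * cmpxchg(p, o, n) returns the value that was previously at *p; the
 * swap happened iff that value equals o.  try_cmpxchg(p, v, n) instead
 * returns true/false for success and writes the value it observed back
 * through v, so on failure the caller can retry without reloading.
 * Illustrative sketch only (p and limit are hypothetical):
 *
 *	unsigned old = *p;
 *
 *	do {
 *		if (old == limit)
 *			break;
 *	} while (!try_cmpxchg(p, &old, old + 1));
 */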

/*
 * atomic (u)int operations
 *
 * Atomics that return a value, other than atomic_read, imply a full
 * memory barrier before and after the operation.  Those that do not
 * return a value imply no memory barrier.
 */
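
/*
 * Example usage (illustrative sketch only; the names are hypothetical):
 *
 *	struct thing {
 *		atomic_t	t_refcnt;
 *	};
 *
 *	void
 *	thing_acquire(struct thing *t)
 *	{
 *		atomic_inc(&t->t_refcnt);
 *	}
 *
 *	void
 *	thing_release(struct thing *t)
 *	{
 *		if (atomic_dec_and_test(&t->t_refcnt))
 *			thing_destroy(t);
 *	}
 */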

struct atomic {
	union {
		volatile int au_int;
		volatile unsigned int au_uint;
	} a_u;
};

#define	ATOMIC_INIT(i)	{ .a_u = { .au_int = (i) } }

typedef struct atomic atomic_t;

static inline int
atomic_read(const atomic_t *atomic)
{
	/* no membar */
	return atomic->a_u.au_int;
}

static inline void
atomic_set(atomic_t *atomic, int value)
{
	/* no membar */
	atomic->a_u.au_int = value;
}

static inline void
atomic_set_release(atomic_t *atomic, int value)
{
	atomic_store_release(&atomic->a_u.au_int, value);
}

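/*
 * Note the Linux-style argument order: the operand comes first and the
 * atomic_t pointer second (atomic_add(1, &a)), the reverse of the
 * native NetBSD atomic_add_int(&a, 1).
 */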
static inline void
atomic_add(int addend, atomic_t *atomic)
{
	/* no membar */
	atomic_add_int(&atomic->a_u.au_uint, addend);
}

static inline void
atomic_sub(int subtrahend, atomic_t *atomic)
{
	/* no membar */
	atomic_add_int(&atomic->a_u.au_uint, -subtrahend);
}

static inline int
atomic_add_return(int addend, atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_add_int_nv(&atomic->a_u.au_uint, addend);
	smp_mb__after_atomic();

	return v;
}

static inline int
atomic_sub_return(int subtrahend, atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_add_int_nv(&atomic->a_u.au_uint, -subtrahend);
	smp_mb__after_atomic();

	return v;
}

static inline void
atomic_inc(atomic_t *atomic)
{
	/* no membar */
	atomic_inc_uint(&atomic->a_u.au_uint);
}

static inline void
atomic_dec(atomic_t *atomic)
{
	/* no membar */
	atomic_dec_uint(&atomic->a_u.au_uint);
}

static inline int
atomic_inc_return(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_inc_uint_nv(&atomic->a_u.au_uint);
	smp_mb__after_atomic();

	return v;
}

static inline int
atomic_dec_return(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_dec_uint_nv(&atomic->a_u.au_uint);
	smp_mb__after_atomic();

	return v;
}

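/*
 * Returns true (nonzero) iff the decremented value is zero.  A full
 * memory barrier is implied by atomic_dec_return.
 */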
static inline int
atomic_dec_and_test(atomic_t *atomic)
{
	/* membar implied by atomic_dec_return */
	return atomic_dec_return(atomic) == 0;
}

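/*
 * Decrements the value only if it is currently positive; in either
 * case returns the old value minus one, so a nonnegative result means
 * the decrement happened.  Mirrors Linux's atomic_dec_if_positive.
 */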
static inline int
atomic_dec_if_positive(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	do {
		v = atomic->a_u.au_uint;
		if (v <= 0)
			break;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, v, v - 1) != v);
	smp_mb__after_atomic();

	return v - 1;
}

static inline void
atomic_or(int value, atomic_t *atomic)
{
	/* no membar */
	atomic_or_uint(&atomic->a_u.au_uint, value);
}

static inline void
atomic_and(int value, atomic_t *atomic)
{
	/* no membar */
	atomic_and_uint(&atomic->a_u.au_uint, value);
}

static inline void
atomic_andnot(int value, atomic_t *atomic)
{
	/* no membar */
	atomic_and_uint(&atomic->a_u.au_uint, ~value);
}

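/*
 * atomic_fetch_* operations return the old value (before the update)
 * and imply a full memory barrier before and after.
 */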
static inline int
atomic_fetch_add(int value, atomic_t *atomic)
{
	unsigned old, new;

	smp_mb__before_atomic();
	do {
		old = atomic->a_u.au_uint;
		new = old + value;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, old, new) != old);
	smp_mb__after_atomic();

	return old;
}

static inline int
atomic_fetch_inc(atomic_t *atomic)
{
	return atomic_fetch_add(1, atomic);
}

static inline int
atomic_fetch_xor(int value, atomic_t *atomic)
{
	unsigned old, new;

	smp_mb__before_atomic();
	do {
		old = atomic->a_u.au_uint;
		new = old ^ value;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, old, new) != old);
	smp_mb__after_atomic();

	return old;
}

static inline void
atomic_set_mask(unsigned long mask, atomic_t *atomic)
{
	/* no membar */
	atomic_or_uint(&atomic->a_u.au_uint, mask);
}

static inline void
atomic_clear_mask(unsigned long mask, atomic_t *atomic)
{
	/* no membar */
	atomic_and_uint(&atomic->a_u.au_uint, ~mask);
}

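/*
 * Adds addend to the atomic unless its current value equals zero (the
 * third argument); returns nonzero iff the addition was performed.
 */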
static inline int
atomic_add_unless(atomic_t *atomic, int addend, int zero)
{
	int value;

	smp_mb__before_atomic();
	do {
		value = atomic->a_u.au_int;
		if (value == zero)
			break;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, value, (value + addend))
	    != (unsigned)value);
	smp_mb__after_atomic();

	return value != zero;
}

static inline int
atomic_inc_not_zero(atomic_t *atomic)
{
	/* membar implied by atomic_add_unless */
	return atomic_add_unless(atomic, 1, 0);
}

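/*
 * atomic_xchg and atomic_cmpxchg return the old value and imply a full
 * memory barrier before and after.  atomic_cmpxchg stores new only if
 * the old value equals expect.
 */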
static inline int
atomic_xchg(atomic_t *atomic, int new)
{
	int old;

	smp_mb__before_atomic();
	old = (int)atomic_swap_uint(&atomic->a_u.au_uint, (unsigned)new);
	smp_mb__after_atomic();

	return old;
}

static inline int
atomic_cmpxchg(atomic_t *atomic, int expect, int new)
{
	int old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = (int)atomic_cas_uint(&atomic->a_u.au_uint, (unsigned)expect,
	    (unsigned)new);
	smp_mb__after_atomic();

	return old;
}

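/*
 * Compare-and-swap that reports success as a boolean, like the
 * try_cmpxchg macro above: returns true if the atomic held *valuep and
 * was set to new; otherwise *valuep is updated with the value actually
 * observed so the caller can retry without reloading.
 */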
static inline bool
atomic_try_cmpxchg(atomic_t *atomic, int *valuep, int new)
{
	int expect = *valuep;

	*valuep = atomic_cmpxchg(atomic, expect, new);

	return *valuep == expect;
}

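/*
 * atomic 64-bit integer operations
 *
 * Same barrier convention as atomic_t above: operations that return a
 * value (other than atomic64_read) imply a full memory barrier before
 * and after; those that do not return a value imply none.
 */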
struct atomic64 {
	volatile uint64_t a_v;
};

typedef struct atomic64 atomic64_t;

#define	ATOMIC64_INIT(v)	{ .a_v = (v) }

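/*
 * On platforms without __HAVE_ATOMIC64_OPS the operations below are
 * supplied out of line as linux_atomic64_* (presumably a lock-based
 * fallback); linux_atomic64_init and linux_atomic64_fini set up and
 * tear down whatever state that requires.
 */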
int	linux_atomic64_init(void);
void	linux_atomic64_fini(void);

#ifdef __HAVE_ATOMIC64_OPS

static inline uint64_t
atomic64_read(const struct atomic64 *a)
{
	/* no membar */
	return a->a_v;
}

static inline void
atomic64_set(struct atomic64 *a, uint64_t v)
{
	/* no membar */
	a->a_v = v;
}

static inline void
atomic64_add(int64_t d, struct atomic64 *a)
{
	/* no membar */
	atomic_add_64(&a->a_v, d);
}

static inline void
atomic64_sub(int64_t d, struct atomic64 *a)
{
	/* no membar */
	atomic_add_64(&a->a_v, -d);
}

static inline int64_t
atomic64_add_return(int64_t d, struct atomic64 *a)
{
	int64_t v;

	smp_mb__before_atomic();
	v = (int64_t)atomic_add_64_nv(&a->a_v, d);
	smp_mb__after_atomic();

	return v;
}

static inline uint64_t
atomic64_xchg(struct atomic64 *a, uint64_t new)
{
	uint64_t old;

	smp_mb__before_atomic();
	old = atomic_swap_64(&a->a_v, new);
	smp_mb__after_atomic();

	return old;
}

static inline uint64_t
atomic64_cmpxchg(struct atomic64 *atomic, uint64_t expect, uint64_t new)
{
	uint64_t old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = atomic_cas_64(&atomic->a_v, expect, new);
	smp_mb__after_atomic();

	return old;
}

#else  /* !defined(__HAVE_ATOMIC64_OPS) */

#define	atomic64_add		linux_atomic64_add
#define	atomic64_add_return	linux_atomic64_add_return
#define	atomic64_cmpxchg	linux_atomic64_cmpxchg
#define	atomic64_read		linux_atomic64_read
#define	atomic64_set		linux_atomic64_set
#define	atomic64_sub		linux_atomic64_sub
#define	atomic64_xchg		linux_atomic64_xchg

uint64_t	atomic64_read(const struct atomic64 *);
void		atomic64_set(struct atomic64 *, uint64_t);
void		atomic64_add(int64_t, struct atomic64 *);
void		atomic64_sub(int64_t, struct atomic64 *);
int64_t		atomic64_add_return(int64_t, struct atomic64 *);
uint64_t	atomic64_xchg(struct atomic64 *, uint64_t);
uint64_t	atomic64_cmpxchg(struct atomic64 *, uint64_t, uint64_t);

#endif

static inline void
atomic64_inc(struct atomic64 *a)
{
	atomic64_add(1, a);
}

static inline int64_t
atomic64_inc_return(struct atomic64 *a)
{
	return atomic64_add_return(1, a);
}

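/*
 * atomic long operations
 *
 * Same conventions as atomic_t above, at the width of the native
 * unsigned long.
 */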
struct atomic_long {
	volatile unsigned long al_v;
};

typedef struct atomic_long atomic_long_t;

static inline long
atomic_long_read(struct atomic_long *a)
{
	/* no membar */
	return (unsigned long)a->al_v;
}

static inline void
atomic_long_set(struct atomic_long *a, long v)
{
	/* no membar */
	a->al_v = v;
}

static inline long
atomic_long_add_unless(struct atomic_long *a, long addend, long zero)
{
	long value;

	smp_mb__before_atomic();
	do {
		value = (long)a->al_v;
		if (value == zero)
			break;
	} while (atomic_cas_ulong(&a->al_v, (unsigned long)value,
	    (unsigned long)(value + addend)) != (unsigned long)value);
	smp_mb__after_atomic();

	return value != zero;
}

static inline long
atomic_long_inc_not_zero(struct atomic_long *a)
{
	/* membar implied by atomic_long_add_unless */
	return atomic_long_add_unless(a, 1, 0);
}

static inline long
atomic_long_xchg(struct atomic_long *a, long new)
{
	long old;

	smp_mb__before_atomic();
	old = (long)atomic_swap_ulong(&a->al_v, (unsigned long)new);
	smp_mb__after_atomic();

	return old;
}

static inline long
atomic_long_cmpxchg(struct atomic_long *a, long expect, long new)
{
	long old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = (long)atomic_cas_ulong(&a->al_v, (unsigned long)expect,
	    (unsigned long)new);
	smp_mb__after_atomic();

	return old;
}

#endif	/* _LINUX_ATOMIC_H_ */