/*	$NetBSD: atomic.h,v 1.42 2021/12/19 12:21:30 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_ATOMIC_H_
#define _LINUX_ATOMIC_H_

#include <sys/atomic.h>

#include <machine/limits.h>

#include <asm/barrier.h>

/* XXX Hope the GCC __sync builtins work everywhere we care about!  */
#define	xchg(P, V)		__sync_lock_test_and_set(P, V)
#define	cmpxchg(P, O, N)	__sync_val_compare_and_swap(P, O, N)
#define	try_cmpxchg(P, V, N)						      \
({									      \
	__typeof__(*(V)) *__tcx_v = (V), __tcx_expected = *__tcx_v;	      \
	(*__tcx_v = cmpxchg((P), __tcx_expected, (N))) == __tcx_expected;    \
})
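
/*
 * Example (illustrative sketch, not part of the upstream header):
 * try_cmpxchg updates the expected value through V on failure, so a
 * retry loop need not reload the target itself.  The store_max helper
 * below is hypothetical.
 *
 *	static inline void
 *	store_max(volatile unsigned *p, unsigned new)
 *	{
 *		unsigned old = *p;
 *
 *		while (old < new) {
 *			if (try_cmpxchg(p, &old, new))
 *				break;
 *		}
 *	}
 */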

/*
 * atomic (u)int operations
 *
 *	Atomics that return a value, other than atomic_read, imply a
 *	full memory barrier (membar_sync).  Those that do not return a
 *	value imply no memory barrier.
 */

struct atomic {
	union {
		volatile int au_int;
		volatile unsigned int au_uint;
	} a_u;
};

#define	ATOMIC_INIT(i)	{ .a_u = { .au_int = (i) } }

typedef struct atomic atomic_t;

static inline int
atomic_read(const atomic_t *atomic)
{
	/* no membar */
	return atomic->a_u.au_int;
}

static inline void
atomic_set(atomic_t *atomic, int value)
{
	/* no membar */
	atomic->a_u.au_int = value;
}

static inline void
atomic_set_release(atomic_t *atomic, int value)
{
	atomic_store_release(&atomic->a_u.au_int, value);
}

static inline void
atomic_add(int addend, atomic_t *atomic)
{
	/* no membar */
	atomic_add_int(&atomic->a_u.au_uint, addend);
}

static inline void
atomic_sub(int subtrahend, atomic_t *atomic)
{
	/* no membar */
	atomic_add_int(&atomic->a_u.au_uint, -subtrahend);
}

static inline int
atomic_add_return(int addend, atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_add_int_nv(&atomic->a_u.au_uint, addend);
	smp_mb__after_atomic();

	return v;
}

static inline int
atomic_sub_return(int subtrahend, atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_add_int_nv(&atomic->a_u.au_uint, -subtrahend);
	smp_mb__after_atomic();

	return v;
}
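
/*
 * Example (illustrative; the atomic_t `pending` is hypothetical):
 * the void operations above give no ordering on their own.  Pair them
 * with the smp_mb__{before,after}_atomic() hooks from <asm/barrier.h>
 * when ordering is needed, or use the *_return forms, which already
 * imply a full barrier.
 *
 *	atomic_inc(&pending);			implies no ordering
 *	smp_mb__after_atomic();			orders the inc before later accesses
 *
 *	if (atomic_add_return(1, &pending) == 1)	full barrier implied
 *		...
 */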

static inline void
atomic_inc(atomic_t *atomic)
{
	/* no membar */
	atomic_inc_uint(&atomic->a_u.au_uint);
}

static inline void
atomic_dec(atomic_t *atomic)
{
	/* no membar */
	atomic_dec_uint(&atomic->a_u.au_uint);
}

static inline int
atomic_inc_return(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_inc_uint_nv(&atomic->a_u.au_uint);
	smp_mb__after_atomic();

	return v;
}

static inline int
atomic_dec_return(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_dec_uint_nv(&atomic->a_u.au_uint);
	smp_mb__after_atomic();

	return v;
}

static inline int
atomic_dec_and_test(atomic_t *atomic)
{
	/* membar implied by atomic_dec_return */
	return atomic_dec_return(atomic) == 0;
}
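
/*
 * Example (illustrative sketch; struct foo and foo_destroy are
 * hypothetical): the usual Linux reference-count release idiom, where
 * the thread that drops the count to zero frees the object.
 *
 *	static void
 *	foo_put(struct foo *f)
 *	{
 *		if (atomic_dec_and_test(&f->refcnt))
 *			foo_destroy(f);
 *	}
 */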

static inline int
atomic_dec_if_positive(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	do {
		v = atomic->a_u.au_uint;
		if (v <= 0)
			break;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, v, v - 1) != v);
	smp_mb__after_atomic();

	return v - 1;
}
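
/*
 * atomic_dec_if_positive returns the observed value minus one: a
 * non-negative result means the counter was decremented, a negative
 * result means it was already zero or negative and was left
 * unchanged.  Example (sem_count is a hypothetical atomic_t):
 *
 *	if (atomic_dec_if_positive(&sem_count) >= 0)
 *		... got a slot ...
 */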

static inline void
atomic_or(int value, atomic_t *atomic)
{
	/* no membar */
	atomic_or_uint(&atomic->a_u.au_uint, value);
}

static inline void
atomic_and(int value, atomic_t *atomic)
{
	/* no membar */
	atomic_and_uint(&atomic->a_u.au_uint, value);
}

static inline void
atomic_andnot(int value, atomic_t *atomic)
{
	/* no membar */
	atomic_and_uint(&atomic->a_u.au_uint, ~value);
}

static inline int
atomic_fetch_add(int value, atomic_t *atomic)
{
	unsigned old, new;

	smp_mb__before_atomic();
	do {
		old = atomic->a_u.au_uint;
		new = old + value;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, old, new) != old);
	smp_mb__after_atomic();

	return old;
}

static inline int
atomic_fetch_inc(atomic_t *atomic)
{
	return atomic_fetch_add(1, atomic);
}
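
/*
 * Example (illustrative; the ticket-lock structure `lk` and its
 * fields are hypothetical): the fetch_* operations return the value
 * observed before the update, which is what a ticket-style allocator
 * wants.
 *
 *	int my_ticket = atomic_fetch_add(1, &lk->next_ticket);
 *
 *	while (atomic_read(&lk->now_serving) != my_ticket)
 *		continue;
 */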

static inline int
atomic_fetch_xor(int value, atomic_t *atomic)
{
	unsigned old, new;

	smp_mb__before_atomic();
	do {
		old = atomic->a_u.au_uint;
		new = old ^ value;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, old, new) != old);
	smp_mb__after_atomic();

	return old;
}

static inline void
atomic_set_mask(unsigned long mask, atomic_t *atomic)
{
	/* no membar */
	atomic_or_uint(&atomic->a_u.au_uint, mask);
}

static inline void
atomic_clear_mask(unsigned long mask, atomic_t *atomic)
{
	/* no membar */
	atomic_and_uint(&atomic->a_u.au_uint, ~mask);
}

static inline int
atomic_add_unless(atomic_t *atomic, int addend, int zero)
{
	int value;

	smp_mb__before_atomic();
	do {
		value = atomic->a_u.au_int;
		if (value == zero)
			break;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, value, (value + addend))
	    != (unsigned)value);
	smp_mb__after_atomic();

	return value != zero;
}

static inline int
atomic_inc_not_zero(atomic_t *atomic)
{
	/* membar implied by atomic_add_unless */
	return atomic_add_unless(atomic, 1, 0);
}
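
/*
 * Example (illustrative; struct foo is hypothetical): taking a new
 * reference only if the object is not already on its way to being
 * destroyed.
 *
 *	static struct foo *
 *	foo_tryget(struct foo *f)
 *	{
 *		if (!atomic_inc_not_zero(&f->refcnt))
 *			return NULL;
 *		return f;
 *	}
 */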

static inline int
atomic_xchg(atomic_t *atomic, int new)
{
	int old;

	smp_mb__before_atomic();
	old = (int)atomic_swap_uint(&atomic->a_u.au_uint, (unsigned)new);
	smp_mb__after_atomic();

	return old;
}

static inline int
atomic_cmpxchg(atomic_t *atomic, int expect, int new)
{
	int old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = (int)atomic_cas_uint(&atomic->a_u.au_uint, (unsigned)expect,
	    (unsigned)new);
	smp_mb__after_atomic();

	return old;
}

static inline bool
atomic_try_cmpxchg(atomic_t *atomic, int *valuep, int new)
{
	int expect = *valuep;

	*valuep = atomic_cmpxchg(atomic, expect, new);

	return *valuep == expect;
}
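
/*
 * Example (illustrative; the BUSY bit and the atomic_t `state` are
 * hypothetical): atomic_try_cmpxchg writes the observed value back
 * through valuep on failure, so a compare-and-swap loop only reads
 * the variable once up front.
 *
 *	int v = atomic_read(&state);
 *
 *	do {
 *		if (v & BUSY)
 *			return false;
 *	} while (!atomic_try_cmpxchg(&state, &v, v | BUSY));
 */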

struct atomic64 {
	volatile uint64_t	a_v;
};

typedef struct atomic64 atomic64_t;

#define	ATOMIC64_INIT(v)	{ .a_v = (v) }

int		linux_atomic64_init(void);
void		linux_atomic64_fini(void);

#ifdef __HAVE_ATOMIC64_OPS

static inline uint64_t
atomic64_read(const struct atomic64 *a)
{
	/* no membar */
	return a->a_v;
}

static inline void
atomic64_set(struct atomic64 *a, uint64_t v)
{
	/* no membar */
	a->a_v = v;
}

static inline void
atomic64_add(int64_t d, struct atomic64 *a)
{
	/* no membar */
	atomic_add_64(&a->a_v, d);
}

static inline void
atomic64_sub(int64_t d, struct atomic64 *a)
{
	/* no membar */
	atomic_add_64(&a->a_v, -d);
}

static inline int64_t
atomic64_add_return(int64_t d, struct atomic64 *a)
{
	int64_t v;

	smp_mb__before_atomic();
	v = (int64_t)atomic_add_64_nv(&a->a_v, d);
	smp_mb__after_atomic();

	return v;
}

static inline uint64_t
atomic64_xchg(struct atomic64 *a, uint64_t new)
{
	uint64_t old;

	smp_mb__before_atomic();
	old = atomic_swap_64(&a->a_v, new);
	smp_mb__after_atomic();

	return old;
}

static inline uint64_t
atomic64_cmpxchg(struct atomic64 *atomic, uint64_t expect, uint64_t new)
{
	uint64_t old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = atomic_cas_64(&atomic->a_v, expect, new);
	smp_mb__after_atomic();

	return old;
}

#else  /* !defined(__HAVE_ATOMIC64_OPS) */

#define	atomic64_add		linux_atomic64_add
#define	atomic64_add_return	linux_atomic64_add_return
#define	atomic64_cmpxchg	linux_atomic64_cmpxchg
#define	atomic64_read		linux_atomic64_read
#define	atomic64_set		linux_atomic64_set
#define	atomic64_sub		linux_atomic64_sub
#define	atomic64_xchg		linux_atomic64_xchg

uint64_t	atomic64_read(const struct atomic64 *);
void		atomic64_set(struct atomic64 *, uint64_t);
void		atomic64_add(int64_t, struct atomic64 *);
void		atomic64_sub(int64_t, struct atomic64 *);
int64_t		atomic64_add_return(int64_t, struct atomic64 *);
uint64_t	atomic64_xchg(struct atomic64 *, uint64_t);
uint64_t	atomic64_cmpxchg(struct atomic64 *, uint64_t, uint64_t);

#endif

static inline void
atomic64_inc(struct atomic64 *a)
{
	atomic64_add(1, a);
}

static inline int64_t
atomic64_inc_return(struct atomic64 *a)
{
	return atomic64_add_return(1, a);
}
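
/*
 * Example (illustrative; `next_seqno` is a hypothetical atomic64_t):
 * atomic64_inc_return can hand out unique, monotonically increasing
 * 64-bit sequence numbers, and the inline implementation above pairs
 * the update with full memory barriers.
 *
 *	static atomic64_t next_seqno = ATOMIC64_INIT(0);
 *	...
 *	uint64_t seqno = atomic64_inc_return(&next_seqno);
 */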

struct atomic_long {
	volatile unsigned long	al_v;
};

typedef struct atomic_long atomic_long_t;

static inline long
atomic_long_read(struct atomic_long *a)
{
	/* no membar */
	return (unsigned long)a->al_v;
}

static inline void
atomic_long_set(struct atomic_long *a, long v)
{
	/* no membar */
	a->al_v = v;
}

static inline long
atomic_long_add_unless(struct atomic_long *a, long addend, long zero)
{
	long value;

	smp_mb__before_atomic();
	do {
		value = (long)a->al_v;
		if (value == zero)
			break;
	} while (atomic_cas_ulong(&a->al_v, (unsigned long)value,
		(unsigned long)(value + addend)) != (unsigned long)value);
	smp_mb__after_atomic();

	return value != zero;
}

static inline long
atomic_long_inc_not_zero(struct atomic_long *a)
{
	/* membar implied by atomic_long_add_unless */
	return atomic_long_add_unless(a, 1, 0);
}

static inline long
atomic_long_xchg(struct atomic_long *a, long new)
{
	long old;

	smp_mb__before_atomic();
	old = (long)atomic_swap_ulong(&a->al_v, (unsigned long)new);
	smp_mb__after_atomic();

	return old;
}

static inline long
atomic_long_cmpxchg(struct atomic_long *a, long expect, long new)
{
	long old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = (long)atomic_cas_ulong(&a->al_v, (unsigned long)expect,
	    (unsigned long)new);
	smp_mb__after_atomic();

	return old;
}
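
/*
 * Example (illustrative; the atomic_long_t `owner` is hypothetical):
 * claiming a pointer-sized slot exactly once with
 * atomic_long_cmpxchg, which returns the previous value.
 *
 *	if (atomic_long_cmpxchg(&owner, 0, (long)curlwp) != 0)
 *		return EBUSY;
 */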

#endif  /* _LINUX_ATOMIC_H_ */