/*	$NetBSD: atomic.h,v 1.38 2021/12/19 11:26:42 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_ATOMIC_H_
#define _LINUX_ATOMIC_H_

#include <sys/atomic.h>

#include <machine/limits.h>

#include <asm/barrier.h>

#define	xchg(P, V)							      \
	(sizeof(*(P)) == 4 ? atomic_swap_32((volatile uint32_t *)(P),	      \
		(uint32_t)(V))						      \
	    : sizeof(*(P)) == 8 ? atomic_swap_64((volatile uint64_t *)(P),   \
		(uint64_t)(V))						      \
	    : (__builtin_abort(), 0))

#define	cmpxchg(P, O, N)						      \
	(sizeof(*(P)) == 4 ? atomic_cas_32((volatile uint32_t *)(P),	      \
		(uint32_t)(O), (uint32_t)(N))				      \
	    : sizeof(*(P)) == 8 ? atomic_cas_64((volatile uint64_t *)(P),    \
		(uint64_t)(O), (uint64_t)(N))				      \
	    : (__builtin_abort(), 0))
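
/*
 * Illustrative sketch of the macros above (the variable is
 * hypothetical): both expand to the 32-bit or 64-bit NetBSD atomic
 * depending on sizeof(*(P)), and both return the value observed
 * before the operation, so a cmpxchg succeeded iff its return value
 * equals the old value that was passed in.
 *
 *	static volatile uint32_t state;
 *
 *	uint32_t prev = xchg(&state, 1);
 *	if (cmpxchg(&state, 1, 2) == 1)
 *		... state went from 1 to 2 ...
 */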

/*
 * atomic (u)int operations
 *
 *	Atomics that return a value, other than atomic_read, imply a
 *	full memory barrier.  Those that do not return a value imply
 *	no memory barrier.
 */

struct atomic {
	union {
		volatile int au_int;
		volatile unsigned int au_uint;
	} a_u;
};

#define	ATOMIC_INIT(i)	{ .a_u = { .au_int = (i) } }

typedef struct atomic atomic_t;

static inline int
atomic_read(const atomic_t *atomic)
{
	/* no membar */
	return atomic->a_u.au_int;
}

static inline void
atomic_set(atomic_t *atomic, int value)
{
	/* no membar */
	atomic->a_u.au_int = value;
}

static inline void
atomic_set_release(atomic_t *atomic, int value)
{
	atomic_store_release(&atomic->a_u.au_int, value);
}
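
/*
 * Illustrative sketch of the plain accessors (the counter is
 * hypothetical): reads and writes of an atomic_t imply no memory
 * barrier; atomic_set_release publishes the store with release
 * ordering.
 *
 *	static atomic_t count = ATOMIC_INIT(0);
 *
 *	atomic_set(&count, 42);			no barrier
 *	int v = atomic_read(&count);		no barrier
 *	atomic_set_release(&count, 0);		store-release
 */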

static inline void
atomic_add(int addend, atomic_t *atomic)
{
	/* no membar */
	atomic_add_int(&atomic->a_u.au_uint, addend);
}

static inline void
atomic_sub(int subtrahend, atomic_t *atomic)
{
	/* no membar */
	atomic_add_int(&atomic->a_u.au_uint, -subtrahend);
}

static inline int
atomic_add_return(int addend, atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_add_int_nv(&atomic->a_u.au_uint, addend);
	smp_mb__after_atomic();

	return v;
}

static inline int
atomic_sub_return(int subtrahend, atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_add_int_nv(&atomic->a_u.au_uint, -subtrahend);
	smp_mb__after_atomic();

	return v;
}
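
/*
 * Illustrative sketch (values and variable are hypothetical): the
 * *_return variants return the value after the update and are
 * bracketed by smp_mb__{before,after}_atomic, unlike atomic_add and
 * atomic_sub, which imply no barrier.
 *
 *	static atomic_t a = ATOMIC_INIT(5);
 *
 *	int x = atomic_add_return(3, &a);	x == 8
 *	int y = atomic_sub_return(8, &a);	y == 0
 */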

static inline void
atomic_inc(atomic_t *atomic)
{
	/* no membar */
	atomic_inc_uint(&atomic->a_u.au_uint);
}

static inline void
atomic_dec(atomic_t *atomic)
{
	/* no membar */
	atomic_dec_uint(&atomic->a_u.au_uint);
}

static inline int
atomic_inc_return(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_inc_uint_nv(&atomic->a_u.au_uint);
	smp_mb__after_atomic();

	return v;
}

static inline int
atomic_dec_return(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_dec_uint_nv(&atomic->a_u.au_uint);
	smp_mb__after_atomic();

	return v;
}

static inline int
atomic_dec_and_test(atomic_t *atomic)
{
	/* membar implied by atomic_dec_return */
	return atomic_dec_return(atomic) == 0;
}
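
/*
 * Illustrative refcounting sketch built on atomic_dec_and_test (the
 * object and its free routine are hypothetical); the implied full
 * barrier orders all prior accesses to the object before the final
 * free.
 *
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		obj_free(obj);		last reference just dropped
 */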

static inline int
atomic_dec_if_positive(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	do {
		v = atomic->a_u.au_uint;
		if (v <= 0)
			break;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, v, v - 1) != v);
	smp_mb__after_atomic();

	return v - 1;
}
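
/*
 * Illustrative sketch: atomic_dec_if_positive only decrements when the
 * current value is positive and always returns the old value minus
 * one, so a negative result means no decrement took place (the
 * semaphore-style counter is hypothetical).
 *
 *	if (atomic_dec_if_positive(&sem->count) < 0)
 *		... no token available, do not proceed ...
 */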

static inline void
atomic_or(int value, atomic_t *atomic)
{
	/* no membar */
	atomic_or_uint(&atomic->a_u.au_uint, value);
}

static inline void
atomic_andnot(int value, atomic_t *atomic)
{
	/* no membar */
	atomic_and_uint(&atomic->a_u.au_uint, ~value);
}

static inline int
atomic_fetch_add(int value, atomic_t *atomic)
{
	unsigned old, new;

	smp_mb__before_atomic();
	do {
		old = atomic->a_u.au_uint;
		new = old + value;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, old, new) != old);
	smp_mb__after_atomic();

	return old;
}

static inline int
atomic_fetch_inc(atomic_t *atomic)
{
	return atomic_fetch_add(1, atomic);
}

static inline int
atomic_fetch_xor(int value, atomic_t *atomic)
{
	unsigned old, new;

	smp_mb__before_atomic();
	do {
		old = atomic->a_u.au_uint;
		new = old ^ value;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, old, new) != old);
	smp_mb__after_atomic();

	return old;
}
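
/*
 * Illustrative sketch (the variable is hypothetical): the fetch_*
 * variants return the value observed before the update.
 *
 *	static atomic_t a = ATOMIC_INIT(1);
 *
 *	int old = atomic_fetch_add(2, &a);	old == 1, a is now 3
 *	old = atomic_fetch_xor(3, &a);		old == 3, a is now 0
 *	old = atomic_fetch_inc(&a);		old == 0, a is now 1
 */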

static inline void
atomic_set_mask(unsigned long mask, atomic_t *atomic)
{
	/* no membar */
	atomic_or_uint(&atomic->a_u.au_uint, mask);
}

static inline void
atomic_clear_mask(unsigned long mask, atomic_t *atomic)
{
	/* no membar */
	atomic_and_uint(&atomic->a_u.au_uint, ~mask);
}

static inline int
atomic_add_unless(atomic_t *atomic, int addend, int zero)
{
	int value;

	smp_mb__before_atomic();
	do {
		value = atomic->a_u.au_int;
		if (value == zero)
			break;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, value, (value + addend))
	    != (unsigned)value);
	smp_mb__after_atomic();

	return value != zero;
}

static inline int
atomic_inc_not_zero(atomic_t *atomic)
{
	/* membar implied by atomic_add_unless */
	return atomic_add_unless(atomic, 1, 0);
}
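
/*
 * Illustrative sketch: atomic_inc_not_zero is the usual way to take a
 * reference only while at least one other reference is still held (the
 * object is hypothetical).  It returns nonzero iff the counter was
 * nonzero and was therefore incremented.
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		... object is already being torn down, back off ...
 */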

static inline int
atomic_xchg(atomic_t *atomic, int new)
{
	int old;

	smp_mb__before_atomic();
	old = (int)atomic_swap_uint(&atomic->a_u.au_uint, (unsigned)new);
	smp_mb__after_atomic();

	return old;
}

static inline int
atomic_cmpxchg(atomic_t *atomic, int expect, int new)
{
	int old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = (int)atomic_cas_uint(&atomic->a_u.au_uint, (unsigned)expect,
	    (unsigned)new);
	smp_mb__after_atomic();

	return old;
}
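
/*
 * Illustrative sketch of a compare-and-swap update loop: atomic_cmpxchg
 * returns the value it found, so the loop retries until no other
 * thread intervenes between the read and the CAS (the variable and the
 * transform are hypothetical).
 *
 *	int old, new;
 *
 *	do {
 *		old = atomic_read(&a);
 *		new = transform(old);
 *	} while (atomic_cmpxchg(&a, old, new) != old);
 */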

struct atomic64 {
	volatile uint64_t	a_v;
};

typedef struct atomic64 atomic64_t;

#define	ATOMIC64_INIT(v)	{ .a_v = (v) }

int		linux_atomic64_init(void);
void		linux_atomic64_fini(void);

#ifdef __HAVE_ATOMIC64_OPS

static inline uint64_t
atomic64_read(const struct atomic64 *a)
{
	/* no membar */
	return a->a_v;
}

static inline void
atomic64_set(struct atomic64 *a, uint64_t v)
{
	/* no membar */
	a->a_v = v;
}

static inline void
atomic64_add(int64_t d, struct atomic64 *a)
{
	/* no membar */
	atomic_add_64(&a->a_v, d);
}

static inline void
atomic64_sub(int64_t d, struct atomic64 *a)
{
	/* no membar */
	atomic_add_64(&a->a_v, -d);
}

static inline int64_t
atomic64_add_return(int64_t d, struct atomic64 *a)
{
	int64_t v;

	smp_mb__before_atomic();
	v = (int64_t)atomic_add_64_nv(&a->a_v, d);
	smp_mb__after_atomic();

	return v;
}

static inline uint64_t
atomic64_xchg(struct atomic64 *a, uint64_t new)
{
	uint64_t old;

	smp_mb__before_atomic();
	old = atomic_swap_64(&a->a_v, new);
	smp_mb__after_atomic();

	return old;
}

static inline uint64_t
atomic64_cmpxchg(struct atomic64 *atomic, uint64_t expect, uint64_t new)
{
	uint64_t old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = atomic_cas_64(&atomic->a_v, expect, new);
	smp_mb__after_atomic();

	return old;
}

#else  /* !defined(__HAVE_ATOMIC64_OPS) */

#define	atomic64_add		linux_atomic64_add
#define	atomic64_add_return	linux_atomic64_add_return
#define	atomic64_cmpxchg	linux_atomic64_cmpxchg
#define	atomic64_read		linux_atomic64_read
#define	atomic64_set		linux_atomic64_set
#define	atomic64_sub		linux_atomic64_sub
#define	atomic64_xchg		linux_atomic64_xchg

uint64_t	atomic64_read(const struct atomic64 *);
void		atomic64_set(struct atomic64 *, uint64_t);
void		atomic64_add(int64_t, struct atomic64 *);
void		atomic64_sub(int64_t, struct atomic64 *);
int64_t		atomic64_add_return(int64_t, struct atomic64 *);
uint64_t	atomic64_xchg(struct atomic64 *, uint64_t);
uint64_t	atomic64_cmpxchg(struct atomic64 *, uint64_t, uint64_t);

#endif

static inline int64_t
atomic64_inc_return(struct atomic64 *a)
{
	return atomic64_add_return(1, a);
}
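
/*
 * Illustrative sketch of a 64-bit statistics counter (the variable and
 * len are hypothetical).  The same calls work whether the port defines
 * __HAVE_ATOMIC64_OPS and uses the inline versions above or falls back
 * to the out-of-line linux_atomic64_* implementations.
 *
 *	static atomic64_t nbytes = ATOMIC64_INIT(0);
 *
 *	atomic64_add(len, &nbytes);
 *	uint64_t total = atomic64_read(&nbytes);
 */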

struct atomic_long {
	volatile unsigned long	al_v;
};

typedef struct atomic_long atomic_long_t;

static inline long
atomic_long_read(struct atomic_long *a)
{
	/* no membar */
	return (unsigned long)a->al_v;
}

static inline void
atomic_long_set(struct atomic_long *a, long v)
{
	/* no membar */
	a->al_v = v;
}

static inline long
atomic_long_add_unless(struct atomic_long *a, long addend, long zero)
{
	long value;

	smp_mb__before_atomic();
	do {
		value = (long)a->al_v;
		if (value == zero)
			break;
	} while (atomic_cas_ulong(&a->al_v, (unsigned long)value,
		(unsigned long)(value + addend)) != (unsigned long)value);
	smp_mb__after_atomic();

	return value != zero;
}

static inline long
atomic_long_inc_not_zero(struct atomic_long *a)
{
	/* membar implied by atomic_long_add_unless */
	return atomic_long_add_unless(a, 1, 0);
}

static inline long
atomic_long_xchg(struct atomic_long *a, long new)
{
	long old;

	smp_mb__before_atomic();
	old = (long)atomic_swap_ulong(&a->al_v, (unsigned long)new);
	smp_mb__after_atomic();

	return old;
}

static inline long
atomic_long_cmpxchg(struct atomic_long *a, long expect, long new)
{
	long old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = (long)atomic_cas_ulong(&a->al_v, (unsigned long)expect,
	    (unsigned long)new);
	smp_mb__after_atomic();

	return old;
}
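
/*
 * Illustrative sketch (the variable is hypothetical): atomic_long_t
 * mirrors the int interface at native word size; atomic_long_xchg and
 * atomic_long_cmpxchg return the previous value, fully ordered.
 *
 *	static atomic_long_t cookie;
 *
 *	atomic_long_set(&cookie, 1);
 *	if (atomic_long_cmpxchg(&cookie, 1, 2) == 1)
 *		... cookie went from 1 to 2 ...
 */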

#endif  /* _LINUX_ATOMIC_H_ */