/*	$NetBSD: atomic.h,v 1.37 2021/12/19 11:16:00 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_ATOMIC_H_
#define _LINUX_ATOMIC_H_

#include <sys/atomic.h>

#include <machine/limits.h>

#include <asm/barrier.h>

#define	xchg(P, V)							      \
	(sizeof(*(P)) == 4 ? atomic_swap_32((volatile uint32_t *)(P), (V))   \
	    : sizeof(*(P)) == 8 ? atomic_swap_64((volatile uint64_t *)(P), (V)) \
	    : (__builtin_abort(), 0))

#define	cmpxchg(P, O, N)						      \
	(sizeof(*(P)) == 4 ? atomic_cas_32((volatile uint32_t *)(P), (O), (N)) \
	    : sizeof(*(P)) == 8 ? atomic_cas_64((volatile uint64_t *)(P), (O), (N)) \
	    : (__builtin_abort(), 0))

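/*
 * Usage sketch for the xchg/cmpxchg macros above (illustrative only,
 * not part of this header; the names "state", "prev", and "seen" are
 * hypothetical).  xchg stores the new value unconditionally and
 * returns the old one; cmpxchg stores the new value only if the
 * current value equals the expected one, and in either case returns
 * the old value, so the exchange succeeded iff the return value
 * equals the expected value:
 *
 *	static volatile uint32_t state;
 *
 *	uint32_t prev = xchg(&state, 1);
 *	uint32_t seen = cmpxchg(&state, 1, 2);
 *	int succeeded = (seen == 1);
 */
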
/*
 * atomic (u)int operations
 *
 *	Atomics that return a value, other than atomic_read, imply a
 *	full memory barrier.  Those that do not return a value imply
 *	no memory barrier.
 */
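
/*
 * Example of the barrier convention described above (an illustrative
 * sketch, not part of this header; "example_refs", "example_acquire",
 * "example_release", and "example_destroy" are hypothetical names).
 * The bare atomic_inc implies no memory barrier, while
 * atomic_dec_return implies a full barrier, which is what a
 * reference-counting release path wants:
 *
 *	static atomic_t example_refs = ATOMIC_INIT(1);
 *
 *	static void
 *	example_acquire(void)
 *	{
 *		atomic_inc(&example_refs);
 *	}
 *
 *	static void
 *	example_release(void)
 *	{
 *		if (atomic_dec_return(&example_refs) == 0)
 *			example_destroy();
 *	}
 */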

struct atomic {
	union {
		volatile int au_int;
		volatile unsigned int au_uint;
	} a_u;
};

#define	ATOMIC_INIT(i)	{ .a_u = { .au_int = (i) } }

typedef struct atomic atomic_t;

static inline int
atomic_read(const atomic_t *atomic)
{
	/* no membar */
	return atomic->a_u.au_int;
}

static inline void
atomic_set(atomic_t *atomic, int value)
{
	/* no membar */
	atomic->a_u.au_int = value;
}

static inline void
atomic_set_release(atomic_t *atomic, int value)
{
	atomic_store_release(&atomic->a_u.au_int, value);
}

static inline void
atomic_add(int addend, atomic_t *atomic)
{
	/* no membar */
	atomic_add_int(&atomic->a_u.au_uint, addend);
}

static inline void
atomic_sub(int subtrahend, atomic_t *atomic)
{
	/* no membar */
	atomic_add_int(&atomic->a_u.au_uint, -subtrahend);
}

static inline int
atomic_add_return(int addend, atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_add_int_nv(&atomic->a_u.au_uint, addend);
	smp_mb__after_atomic();

	return v;
}

static inline int
atomic_sub_return(int subtrahend, atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_add_int_nv(&atomic->a_u.au_uint, -subtrahend);
	smp_mb__after_atomic();

	return v;
}

static inline void
atomic_inc(atomic_t *atomic)
{
	/* no membar */
	atomic_inc_uint(&atomic->a_u.au_uint);
}

static inline void
atomic_dec(atomic_t *atomic)
{
	/* no membar */
	atomic_dec_uint(&atomic->a_u.au_uint);
}

static inline int
atomic_inc_return(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_inc_uint_nv(&atomic->a_u.au_uint);
	smp_mb__after_atomic();

	return v;
}

static inline int
atomic_dec_return(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_dec_uint_nv(&atomic->a_u.au_uint);
	smp_mb__after_atomic();

	return v;
}

static inline int
atomic_dec_and_test(atomic_t *atomic)
{
	/* membar implied by atomic_dec_return */
	return atomic_dec_return(atomic) == 0;
}

static inline int
atomic_dec_if_positive(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	do {
		v = atomic->a_u.au_uint;
		if (v <= 0)
			break;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, v, v - 1) != v);
	smp_mb__after_atomic();

	return v - 1;
}

static inline void
atomic_or(int value, atomic_t *atomic)
{
	/* no membar */
	atomic_or_uint(&atomic->a_u.au_uint, value);
}

static inline void
atomic_andnot(int value, atomic_t *atomic)
{
	/* no membar */
	atomic_and_uint(&atomic->a_u.au_uint, ~value);
}

static inline int
atomic_fetch_add(int value, atomic_t *atomic)
{
	unsigned old, new;

	smp_mb__before_atomic();
	do {
		old = atomic->a_u.au_uint;
		new = old + value;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, old, new) != old);
	smp_mb__after_atomic();

	return old;
}

static inline int
atomic_fetch_inc(atomic_t *atomic)
{
	return atomic_fetch_add(1, atomic);
}

static inline int
atomic_fetch_xor(int value, atomic_t *atomic)
{
	unsigned old, new;

	smp_mb__before_atomic();
	do {
		old = atomic->a_u.au_uint;
		new = old ^ value;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, old, new) != old);
	smp_mb__after_atomic();

	return old;
}

static inline void
atomic_set_mask(unsigned long mask, atomic_t *atomic)
{
	/* no membar */
	atomic_or_uint(&atomic->a_u.au_uint, mask);
}

static inline void
atomic_clear_mask(unsigned long mask, atomic_t *atomic)
{
	/* no membar */
	atomic_and_uint(&atomic->a_u.au_uint, ~mask);
}

static inline int
atomic_add_unless(atomic_t *atomic, int addend, int zero)
{
	int value;

	smp_mb__before_atomic();
	do {
		value = atomic->a_u.au_int;
		if (value == zero)
			break;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, value, (value + addend))
	    != (unsigned)value);
	smp_mb__after_atomic();

	return value != zero;
}

static inline int
atomic_inc_not_zero(atomic_t *atomic)
{
	/* membar implied by atomic_add_unless */
	return atomic_add_unless(atomic, 1, 0);
}

static inline int
atomic_xchg(atomic_t *atomic, int new)
{
	int old;

	smp_mb__before_atomic();
	old = (int)atomic_swap_uint(&atomic->a_u.au_uint, (unsigned)new);
	smp_mb__after_atomic();

	return old;
}

static inline int
atomic_cmpxchg(atomic_t *atomic, int expect, int new)
{
	int old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = (int)atomic_cas_uint(&atomic->a_u.au_uint, (unsigned)expect,
	    (unsigned)new);
	smp_mb__after_atomic();

	return old;
}

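/*
 * Illustrative compare-and-swap retry loop built on atomic_cmpxchg (a
 * sketch, not part of this header; "example_saturating_inc" is a
 * hypothetical helper).  atomic_cmpxchg returns the value observed
 * before the swap, so the update succeeded iff that value equals the
 * expected one:
 *
 *	static int
 *	example_saturating_inc(atomic_t *a)
 *	{
 *		int old, new;
 *
 *		do {
 *			old = atomic_read(a);
 *			if (old == INT_MAX)
 *				return old;
 *			new = old + 1;
 *		} while (atomic_cmpxchg(a, old, new) != old);
 *
 *		return new;
 *	}
 */
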
struct atomic64 {
	volatile uint64_t	a_v;
};

typedef struct atomic64 atomic64_t;

#define	ATOMIC64_INIT(v)	{ .a_v = (v) }

int		linux_atomic64_init(void);
void		linux_atomic64_fini(void);

#ifdef __HAVE_ATOMIC64_OPS

static inline uint64_t
atomic64_read(const struct atomic64 *a)
{
	/* no membar */
	return a->a_v;
}

static inline void
atomic64_set(struct atomic64 *a, uint64_t v)
{
	/* no membar */
	a->a_v = v;
}

static inline void
atomic64_add(int64_t d, struct atomic64 *a)
{
	/* no membar */
	atomic_add_64(&a->a_v, d);
}

static inline void
atomic64_sub(int64_t d, struct atomic64 *a)
{
	/* no membar */
	atomic_add_64(&a->a_v, -d);
}

static inline int64_t
atomic64_add_return(int64_t d, struct atomic64 *a)
{
	int64_t v;

	smp_mb__before_atomic();
	v = (int64_t)atomic_add_64_nv(&a->a_v, d);
	smp_mb__after_atomic();

	return v;
}

static inline uint64_t
atomic64_xchg(struct atomic64 *a, uint64_t new)
{
	uint64_t old;

	smp_mb__before_atomic();
	old = atomic_swap_64(&a->a_v, new);
	smp_mb__after_atomic();

	return old;
}

static inline uint64_t
atomic64_cmpxchg(struct atomic64 *atomic, uint64_t expect, uint64_t new)
{
	uint64_t old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = atomic_cas_64(&atomic->a_v, expect, new);
	smp_mb__after_atomic();

	return old;
}

#else  /* !defined(__HAVE_ATOMIC64_OPS) */

#define	atomic64_add		linux_atomic64_add
#define	atomic64_add_return	linux_atomic64_add_return
#define	atomic64_cmpxchg	linux_atomic64_cmpxchg
#define	atomic64_read		linux_atomic64_read
#define	atomic64_set		linux_atomic64_set
#define	atomic64_sub		linux_atomic64_sub
#define	atomic64_xchg		linux_atomic64_xchg

uint64_t	atomic64_read(const struct atomic64 *);
void		atomic64_set(struct atomic64 *, uint64_t);
void		atomic64_add(int64_t, struct atomic64 *);
void		atomic64_sub(int64_t, struct atomic64 *);
int64_t		atomic64_add_return(int64_t, struct atomic64 *);
uint64_t	atomic64_xchg(struct atomic64 *, uint64_t);
uint64_t	atomic64_cmpxchg(struct atomic64 *, uint64_t, uint64_t);

#endif	/* __HAVE_ATOMIC64_OPS */

static inline int64_t
atomic64_inc_return(struct atomic64 *a)
{
	return atomic64_add_return(1, a);
}

struct atomic_long {
	volatile unsigned long	al_v;
};

typedef struct atomic_long atomic_long_t;

static inline long
atomic_long_read(const struct atomic_long *a)
{
	/* no membar */
	return (long)a->al_v;
}

static inline void
atomic_long_set(struct atomic_long *a, long v)
{
	/* no membar */
	a->al_v = v;
}

static inline long
atomic_long_add_unless(struct atomic_long *a, long addend, long zero)
{
	long value;

	smp_mb__before_atomic();
	do {
		value = (long)a->al_v;
		if (value == zero)
			break;
	} while (atomic_cas_ulong(&a->al_v, (unsigned long)value,
		(unsigned long)(value + addend)) != (unsigned long)value);
	smp_mb__after_atomic();

	return value != zero;
}

static inline long
atomic_long_inc_not_zero(struct atomic_long *a)
{
	/* membar implied by atomic_long_add_unless */
	return atomic_long_add_unless(a, 1, 0);
}

static inline long
atomic_long_xchg(struct atomic_long *a, long new)
{
	long old;

	smp_mb__before_atomic();
	old = (long)atomic_swap_ulong(&a->al_v, (unsigned long)new);
	smp_mb__after_atomic();

	return old;
}

static inline long
atomic_long_cmpxchg(struct atomic_long *a, long expect, long new)
{
	long old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = (long)atomic_cas_ulong(&a->al_v, (unsigned long)expect,
	    (unsigned long)new);
	smp_mb__after_atomic();

	return old;
}

#endif  /* _LINUX_ATOMIC_H_ */