/*	$NetBSD: atomic.h,v 1.31 2021/12/19 11:01:44 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_ATOMIC_H_
#define _LINUX_ATOMIC_H_

#include <sys/atomic.h>

#include <machine/limits.h>

#include <asm/barrier.h>

#if defined(MULTIPROCESSOR) && !defined(__HAVE_ATOMIC_AS_MEMBAR)
#  define	smp_mb__before_atomic()		membar_exit()
#  define	smp_mb__after_atomic()		membar_enter()
#else
#  define	smp_mb__before_atomic()		__insn_barrier()
#  define	smp_mb__after_atomic()		__insn_barrier()
#endif
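/*
 * Usage note (illustrative, not part of the Linux API surface): the
 * non-value-returning atomics below imply no memory barrier, so a
 * caller that needs ordering is expected to supply it explicitly,
 * e.g. (sc_refcnt being a hypothetical atomic_t member):
 *
 *	smp_mb__before_atomic();
 *	atomic_inc(&sc->sc_refcnt);
 *	smp_mb__after_atomic();
 *
 * The value-returning atomics below already provide these barriers.
 */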

#define	xchg(P, V)							      \
	(sizeof(*(P)) == 4 ? atomic_swap_32((volatile uint32_t *)(P), (V))   \
	    : sizeof(*(P)) == 8 ? atomic_swap_64((volatile uint64_t *)(P), (V)) \
	    : (__builtin_abort(), 0))
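/*
 * Example (illustrative only): xchg() may be applied to any 4- or
 * 8-byte object; other sizes abort at run time.  A hypothetical use,
 * assuming a variable `state':
 *
 *	static volatile uint32_t state;
 *	uint32_t old = xchg(&state, 1);
 */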

/*
 * atomic (u)int operations
 *
 *	Atomics that return a value, other than atomic_read, imply a
 *	full memory barrier.  Those that do not return a value imply
 *	no memory barrier.
 */

struct atomic {
	union {
		volatile int au_int;
		volatile unsigned int au_uint;
	} a_u;
};

#define	ATOMIC_INIT(i)	{ .a_u = { .au_int = (i) } }

typedef struct atomic atomic_t;
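/*
 * Usage sketch (example_count is hypothetical): an atomic_t may be
 * statically initialized with ATOMIC_INIT and accessed only through
 * the atomic_* operations defined below, e.g.
 *
 *	static atomic_t example_count = ATOMIC_INIT(0);
 *	...
 *	atomic_inc(&example_count);
 *	int n = atomic_read(&example_count);	(no barrier implied)
 */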

static inline int
atomic_read(const atomic_t *atomic)
{
	/* no membar */
	return atomic->a_u.au_int;
}

static inline void
atomic_set(atomic_t *atomic, int value)
{
	/* no membar */
	atomic->a_u.au_int = value;
}

static inline void
atomic_set_release(atomic_t *atomic, int value)
{
	atomic_store_release(&atomic->a_u.au_int, value);
}

static inline void
atomic_add(int addend, atomic_t *atomic)
{
	/* no membar */
	atomic_add_int(&atomic->a_u.au_uint, addend);
}

static inline void
atomic_sub(int subtrahend, atomic_t *atomic)
{
	/* no membar */
	atomic_add_int(&atomic->a_u.au_uint, -subtrahend);
}

static inline int
atomic_add_return(int addend, atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_add_int_nv(&atomic->a_u.au_uint, addend);
	smp_mb__after_atomic();

	return v;
}

static inline void
atomic_inc(atomic_t *atomic)
{
	/* no membar */
	atomic_inc_uint(&atomic->a_u.au_uint);
}

static inline void
atomic_dec(atomic_t *atomic)
{
	/* no membar */
	atomic_dec_uint(&atomic->a_u.au_uint);
}

static inline int
atomic_inc_return(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_inc_uint_nv(&atomic->a_u.au_uint);
	smp_mb__after_atomic();

	return v;
}

static inline int
atomic_dec_return(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_dec_uint_nv(&atomic->a_u.au_uint);
	smp_mb__after_atomic();

	return v;
}

static inline int
atomic_dec_and_test(atomic_t *atomic)
{
	/* membar implied by atomic_dec_return */
	return atomic_dec_return(atomic) == 0;
}
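/*
 * Sketch of the usual release pattern (obj and obj_destroy are
 * hypothetical): the full barrier implied by atomic_dec_return makes
 * the last user's prior stores visible before the object is freed.
 *
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		obj_destroy(obj);
 */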

static inline int
atomic_dec_if_positive(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	do {
		v = atomic->a_u.au_uint;
		if (v <= 0)
			break;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, v, v - 1) != v);
	smp_mb__after_atomic();

	return v - 1;
}
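/*
 * Sketch (sc_credits is a hypothetical atomic_t): the decrement
 * happens only while the value is positive, and the return value is
 * the old value minus one, so a negative result means nothing was
 * taken.
 *
 *	if (atomic_dec_if_positive(&sc->sc_credits) < 0)
 *		return -EBUSY;
 */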

static inline void
atomic_or(int value, atomic_t *atomic)
{
	/* no membar */
	atomic_or_uint(&atomic->a_u.au_uint, value);
}

static inline void
atomic_andnot(int value, atomic_t *atomic)
{
	/* no membar */
	atomic_and_uint(&atomic->a_u.au_uint, ~value);
}

static inline int
atomic_fetch_xor(int value, atomic_t *atomic)
{
	unsigned old, new;

	smp_mb__before_atomic();
	do {
		old = atomic->a_u.au_uint;
		new = old ^ value;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, old, new) != old);
	smp_mb__after_atomic();

	return old;
}

static inline void
atomic_set_mask(unsigned long mask, atomic_t *atomic)
{
	/* no membar */
	atomic_or_uint(&atomic->a_u.au_uint, mask);
}

static inline void
atomic_clear_mask(unsigned long mask, atomic_t *atomic)
{
	/* no membar */
	atomic_and_uint(&atomic->a_u.au_uint, ~mask);
}

static inline int
atomic_add_unless(atomic_t *atomic, int addend, int zero)
{
	int value;

	smp_mb__before_atomic();
	do {
		value = atomic->a_u.au_int;
		if (value == zero)
			break;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, value, (value + addend))
	    != (unsigned)value);
	smp_mb__after_atomic();

	return value != zero;
}

static inline int
atomic_inc_not_zero(atomic_t *atomic)
{
	/* membar implied by atomic_add_unless */
	return atomic_add_unless(atomic, 1, 0);
}
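/*
 * Sketch of the usual lookup-and-get pattern (obj is hypothetical):
 * the reference is taken only if the count has not already dropped
 * to zero, e.g. for an object still visible in a lookup table while
 * its last reference is being released.
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		obj = NULL;
 */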

static inline int
atomic_xchg(atomic_t *atomic, int new)
{
	int old;

	smp_mb__before_atomic();
	old = (int)atomic_swap_uint(&atomic->a_u.au_uint, (unsigned)new);
	smp_mb__after_atomic();

	return old;
}

static inline int
atomic_cmpxchg(atomic_t *atomic, int expect, int new)
{
	int old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = (int)atomic_cas_uint(&atomic->a_u.au_uint, (unsigned)expect,
	    (unsigned)new);
	smp_mb__after_atomic();

	return old;
}
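/*
 * Sketch of a compare-and-swap update loop (record_max is a
 * hypothetical helper): retry until the stored value is either
 * already at least n or has been replaced by n.
 *
 *	static void
 *	record_max(atomic_t *maximum, int n)
 *	{
 *		int old;
 *
 *		do {
 *			old = atomic_read(maximum);
 *			if (old >= n)
 *				return;
 *		} while (atomic_cmpxchg(maximum, old, n) != old);
 *	}
 */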

struct atomic64 {
	volatile uint64_t	a_v;
};

typedef struct atomic64 atomic64_t;

#define	ATOMIC64_INIT(v)	{ .a_v = (v) }

int		linux_atomic64_init(void);
void		linux_atomic64_fini(void);

#ifdef __HAVE_ATOMIC64_OPS

static inline uint64_t
atomic64_read(const struct atomic64 *a)
{
	/* no membar */
	return a->a_v;
}

static inline void
atomic64_set(struct atomic64 *a, uint64_t v)
{
	/* no membar */
	a->a_v = v;
}

static inline void
atomic64_add(int64_t d, struct atomic64 *a)
{
	/* no membar */
	atomic_add_64(&a->a_v, d);
}

static inline void
atomic64_sub(int64_t d, struct atomic64 *a)
{
	/* no membar */
	atomic_add_64(&a->a_v, -d);
}

static inline int64_t
atomic64_add_return(int64_t d, struct atomic64 *a)
{
	int64_t v;

	smp_mb__before_atomic();
	v = (int64_t)atomic_add_64_nv(&a->a_v, d);
	smp_mb__after_atomic();

	return v;
}

static inline uint64_t
atomic64_xchg(struct atomic64 *a, uint64_t new)
{
	uint64_t old;

	smp_mb__before_atomic();
	old = atomic_swap_64(&a->a_v, new);
	smp_mb__after_atomic();

	return old;
}

static inline uint64_t
atomic64_cmpxchg(struct atomic64 *atomic, uint64_t expect, uint64_t new)
{
	uint64_t old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = atomic_cas_64(&atomic->a_v, expect, new);
	smp_mb__after_atomic();

	return old;
}

#else  /* !defined(__HAVE_ATOMIC64_OPS) */

#define	atomic64_add		linux_atomic64_add
#define	atomic64_add_return	linux_atomic64_add_return
#define	atomic64_cmpxchg	linux_atomic64_cmpxchg
#define	atomic64_read		linux_atomic64_read
#define	atomic64_set		linux_atomic64_set
#define	atomic64_sub		linux_atomic64_sub
#define	atomic64_xchg		linux_atomic64_xchg

uint64_t	atomic64_read(const struct atomic64 *);
void		atomic64_set(struct atomic64 *, uint64_t);
void		atomic64_add(int64_t, struct atomic64 *);
void		atomic64_sub(int64_t, struct atomic64 *);
int64_t		atomic64_add_return(int64_t, struct atomic64 *);
uint64_t	atomic64_xchg(struct atomic64 *, uint64_t);
uint64_t	atomic64_cmpxchg(struct atomic64 *, uint64_t, uint64_t);
#endif	/* __HAVE_ATOMIC64_OPS */

static inline int64_t
atomic64_inc_return(struct atomic64 *a)
{
	return atomic64_add_return(1, a);
}
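/*
 * Sketch (sc_bytes_rx is a hypothetical statistics counter): the
 * atomic64 operations follow the same barrier rules as the 32-bit
 * ones, whether __HAVE_ATOMIC64_OPS selects the inline versions
 * above or the linux_atomic64_* fallbacks.
 *
 *	static struct atomic64 sc_bytes_rx = ATOMIC64_INIT(0);
 *	...
 *	atomic64_add(len, &sc_bytes_rx);
 *	uint64_t total = atomic64_read(&sc_bytes_rx);
 */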

struct atomic_long {
	volatile unsigned long	al_v;
};

typedef struct atomic_long atomic_long_t;

static inline long
atomic_long_read(struct atomic_long *a)
{
	/* no membar */
	return (unsigned long)a->al_v;
}

static inline void
atomic_long_set(struct atomic_long *a, long v)
{
	/* no membar */
	a->al_v = v;
}

static inline long
atomic_long_add_unless(struct atomic_long *a, long addend, long zero)
{
	long value;

	smp_mb__before_atomic();
	do {
		value = (long)a->al_v;
		if (value == zero)
			break;
	} while (atomic_cas_ulong(&a->al_v, (unsigned long)value,
		(unsigned long)(value + addend)) != (unsigned long)value);
	smp_mb__after_atomic();

	return value != zero;
}

static inline long
atomic_long_inc_not_zero(struct atomic_long *a)
{
	/* membar implied by atomic_long_add_unless */
	return atomic_long_add_unless(a, 1, 0);
}

static inline long
atomic_long_xchg(struct atomic_long *a, long new)
{
	long old;

	smp_mb__before_atomic();
	old = (long)atomic_swap_ulong(&a->al_v, (unsigned long)new);
	smp_mb__after_atomic();

	return old;
}

static inline long
atomic_long_cmpxchg(struct atomic_long *a, long expect, long new)
{
	long old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = (long)atomic_cas_ulong(&a->al_v, (unsigned long)expect,
	    (unsigned long)new);
	smp_mb__after_atomic();

	return old;
}
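/*
 * Sketch (pending is a hypothetical atomic_long_t of queued work):
 * atomic_long_t holds a value of pointer width, so it suits counters
 * that must not wrap on LP64 as well as pointer-sized cookies.
 *
 *	static atomic_long_t pending;
 *	...
 *	long n = atomic_long_xchg(&pending, 0);	(drain and reset)
 */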

static inline void
set_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);

	/* no memory barrier */
	atomic_or_ulong(&ptr[bit / units], (1UL << (bit % units)));
}

static inline void
clear_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);

	/* no memory barrier */
	atomic_and_ulong(&ptr[bit / units], ~(1UL << (bit % units)));
}

static inline void
change_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	/* no memory barrier */
	do v = *p; while (atomic_cas_ulong(p, v, (v ^ mask)) != v);
}

static inline int
test_and_set_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	smp_mb__before_atomic();
	do v = *p; while (atomic_cas_ulong(p, v, (v | mask)) != v);
	smp_mb__after_atomic();

	return ((v & mask) != 0);
}
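/*
 * Sketch of a simple busy-flag protocol (EXAMPLE_BUSY and sc_flags
 * are hypothetical): the test_and_* operations return the previous
 * state of the bit and imply full barriers, so test_and_set_bit can
 * serve as a crude try-lock; plain clear_bit implies no barrier, so
 * the release side adds one explicitly.
 *
 *	if (test_and_set_bit(EXAMPLE_BUSY, &sc->sc_flags))
 *		return -EBUSY;
 *	... do the work ...
 *	smp_mb__before_atomic();
 *	clear_bit(EXAMPLE_BUSY, &sc->sc_flags);
 */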

static inline int
test_and_clear_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	smp_mb__before_atomic();
	do v = *p; while (atomic_cas_ulong(p, v, (v & ~mask)) != v);
	smp_mb__after_atomic();

	return ((v & mask) != 0);
}

static inline int
test_and_change_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	smp_mb__before_atomic();
	do v = *p; while (atomic_cas_ulong(p, v, (v ^ mask)) != v);
	smp_mb__after_atomic();

	return ((v & mask) != 0);
}

#endif  /* _LINUX_ATOMIC_H_ */