/*	$NetBSD: atomic.h,v 1.30 2021/12/19 10:57:12 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_ATOMIC_H_
#define _LINUX_ATOMIC_H_

#include <sys/atomic.h>

#include <machine/limits.h>

#include <asm/barrier.h>

#if defined(MULTIPROCESSOR) && !defined(__HAVE_ATOMIC_AS_MEMBAR)
#  define	smp_mb__before_atomic()		membar_exit()
#  define	smp_mb__after_atomic()		membar_enter()
#else
#  define	smp_mb__before_atomic()		__insn_barrier()
#  define	smp_mb__after_atomic()		__insn_barrier()
#endif

#define	xchg(P, V)							      \
	(sizeof(*(P)) == 4 ? atomic_swap_32((volatile uint32_t *)(P), (V))   \
	    : sizeof(*(P)) == 8 ? atomic_swap_64((volatile uint64_t *)(P), (V)) \
	    : (__builtin_abort(), 0))
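
/*
 * Illustrative sketch of xchg() usage (the identifiers are
 * hypothetical): atomically store a new value and return the old one.
 *
 *	uint32_t state = 0;
 *	uint32_t prev;
 *
 *	prev = xchg(&state, 1);
 *
 * Only 4- and 8-byte objects are supported; any other object size
 * compiles to __builtin_abort().
 */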

/*
 * atomic (u)int operations
 *
 *	Atomics that return a value, other than atomic_read, imply a
 *	full memory barrier.  Those that do not return a value imply
 *	no memory barrier.
 */

struct atomic {
	union {
		volatile int au_int;
		volatile unsigned int au_uint;
	} a_u;
};

#define	ATOMIC_INIT(i)	{ .a_u = { .au_int = (i) } }

typedef struct atomic atomic_t;
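
/*
 * Illustrative sketch of the barrier contract above (the identifiers
 * are hypothetical):
 *
 *	atomic_t count = ATOMIC_INIT(0);
 *
 *	atomic_inc(&count);		(no memory barrier implied)
 *	smp_mb__after_atomic();		(order it explicitly if needed)
 *
 *	if (atomic_inc_return(&count) == 2)	(full barrier implied)
 *		do_work();
 */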

static inline int
atomic_read(const atomic_t *atomic)
{
	/* no membar */
	return atomic->a_u.au_int;
}

static inline void
atomic_set(atomic_t *atomic, int value)
{
	/* no membar */
	atomic->a_u.au_int = value;
}

static inline void
atomic_add(int addend, atomic_t *atomic)
{
	/* no membar */
	atomic_add_int(&atomic->a_u.au_uint, addend);
}

static inline void
atomic_sub(int subtrahend, atomic_t *atomic)
{
	/* no membar */
	atomic_add_int(&atomic->a_u.au_uint, -subtrahend);
}

static inline int
atomic_add_return(int addend, atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_add_int_nv(&atomic->a_u.au_uint, addend);
	smp_mb__after_atomic();

	return v;
}

static inline void
atomic_inc(atomic_t *atomic)
{
	/* no membar */
	atomic_inc_uint(&atomic->a_u.au_uint);
}

static inline void
atomic_dec(atomic_t *atomic)
{
	/* no membar */
	atomic_dec_uint(&atomic->a_u.au_uint);
}

static inline int
atomic_inc_return(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_inc_uint_nv(&atomic->a_u.au_uint);
	smp_mb__after_atomic();

	return v;
}

static inline int
atomic_dec_return(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_dec_uint_nv(&atomic->a_u.au_uint);
	smp_mb__after_atomic();

	return v;
}

static inline int
atomic_dec_and_test(atomic_t *atomic)
{
	/* membar implied by atomic_dec_return */
	return atomic_dec_return(atomic) == 0;
}

static inline int
atomic_dec_if_positive(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	do {
		v = atomic->a_u.au_uint;
		if (v <= 0)
			break;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, v, v - 1) != v);
	smp_mb__after_atomic();

	return v - 1;
}

static inline void
atomic_or(int value, atomic_t *atomic)
{
	/* no membar */
	atomic_or_uint(&atomic->a_u.au_uint, value);
}

static inline void
atomic_andnot(int value, atomic_t *atomic)
{
	/* no membar */
	atomic_and_uint(&atomic->a_u.au_uint, ~value);
}

static inline int
atomic_fetch_xor(int value, atomic_t *atomic)
{
	unsigned old, new;

	smp_mb__before_atomic();
	do {
		old = atomic->a_u.au_uint;
		new = old ^ value;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, old, new) != old);
	smp_mb__after_atomic();

	return old;
}

static inline void
atomic_set_mask(unsigned long mask, atomic_t *atomic)
{
	/* no membar */
	atomic_or_uint(&atomic->a_u.au_uint, mask);
}

static inline void
atomic_clear_mask(unsigned long mask, atomic_t *atomic)
{
	/* no membar */
	atomic_and_uint(&atomic->a_u.au_uint, ~mask);
}

static inline int
atomic_add_unless(atomic_t *atomic, int addend, int zero)
{
	int value;

	smp_mb__before_atomic();
	do {
		value = atomic->a_u.au_int;
		if (value == zero)
			break;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, value, (value + addend))
	    != (unsigned)value);
	smp_mb__after_atomic();

	return value != zero;
}

static inline int
atomic_inc_not_zero(atomic_t *atomic)
{
	/* membar implied by atomic_add_unless */
	return atomic_add_unless(atomic, 1, 0);
}
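
/*
 * Sketch of the usual atomic_inc_not_zero() pattern: take a new
 * reference only while the object still holds at least one (the
 * identifiers are hypothetical):
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;		(already on its way out)
 */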

static inline int
atomic_xchg(atomic_t *atomic, int new)
{
	int old;

	smp_mb__before_atomic();
	old = (int)atomic_swap_uint(&atomic->a_u.au_uint, (unsigned)new);
	smp_mb__after_atomic();

	return old;
}

static inline int
atomic_cmpxchg(atomic_t *atomic, int expect, int new)
{
	int old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = (int)atomic_cas_uint(&atomic->a_u.au_uint, (unsigned)expect,
	    (unsigned)new);
	smp_mb__after_atomic();

	return old;
}
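
/*
 * Sketch of a typical atomic_cmpxchg() update loop; the same shape
 * works for atomic64_cmpxchg() and atomic_long_cmpxchg() below (the
 * identifiers are hypothetical):
 *
 *	int old, new;
 *
 *	do {
 *		old = atomic_read(&a);
 *		new = compute_next(old);
 *	} while (atomic_cmpxchg(&a, old, new) != old);
 */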

struct atomic64 {
	volatile uint64_t	a_v;
};

typedef struct atomic64 atomic64_t;

#define	ATOMIC64_INIT(v)	{ .a_v = (v) }

int		linux_atomic64_init(void);
void		linux_atomic64_fini(void);

#ifdef __HAVE_ATOMIC64_OPS

static inline uint64_t
atomic64_read(const struct atomic64 *a)
{
	/* no membar */
	return a->a_v;
}

static inline void
atomic64_set(struct atomic64 *a, uint64_t v)
{
	/* no membar */
	a->a_v = v;
}

static inline void
atomic64_add(int64_t d, struct atomic64 *a)
{
	/* no membar */
	atomic_add_64(&a->a_v, d);
}

static inline void
atomic64_sub(int64_t d, struct atomic64 *a)
{
	/* no membar */
	atomic_add_64(&a->a_v, -d);
}

static inline int64_t
atomic64_add_return(int64_t d, struct atomic64 *a)
{
	int64_t v;

	smp_mb__before_atomic();
	v = (int64_t)atomic_add_64_nv(&a->a_v, d);
	smp_mb__after_atomic();

	return v;
}

static inline uint64_t
atomic64_xchg(struct atomic64 *a, uint64_t new)
{
	uint64_t old;

	smp_mb__before_atomic();
	old = atomic_swap_64(&a->a_v, new);
	smp_mb__after_atomic();

	return old;
}

static inline uint64_t
atomic64_cmpxchg(struct atomic64 *atomic, uint64_t expect, uint64_t new)
{
	uint64_t old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = atomic_cas_64(&atomic->a_v, expect, new);
	smp_mb__after_atomic();

	return old;
}

#else  /* !defined(__HAVE_ATOMIC64_OPS) */

#define	atomic64_add		linux_atomic64_add
#define	atomic64_add_return	linux_atomic64_add_return
#define	atomic64_cmpxchg	linux_atomic64_cmpxchg
#define	atomic64_read		linux_atomic64_read
#define	atomic64_set		linux_atomic64_set
#define	atomic64_sub		linux_atomic64_sub
#define	atomic64_xchg		linux_atomic64_xchg

uint64_t	atomic64_read(const struct atomic64 *);
void		atomic64_set(struct atomic64 *, uint64_t);
void		atomic64_add(int64_t, struct atomic64 *);
void		atomic64_sub(int64_t, struct atomic64 *);
int64_t		atomic64_add_return(int64_t, struct atomic64 *);
uint64_t	atomic64_xchg(struct atomic64 *, uint64_t);
uint64_t	atomic64_cmpxchg(struct atomic64 *, uint64_t, uint64_t);

#endif

static inline int64_t
atomic64_inc_return(struct atomic64 *a)
{
	return atomic64_add_return(1, a);
}
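
/*
 * Sketch of a 64-bit event counter built on the operations above; it
 * works the same way whether the inline definitions or the
 * !__HAVE_ATOMIC64_OPS fallbacks are in use (the identifiers are
 * hypothetical):
 *
 *	static atomic64_t seq = ATOMIC64_INIT(0);
 *
 *	uint64_t ticket = atomic64_inc_return(&seq);
 *	uint64_t latest = atomic64_read(&seq);
 */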

struct atomic_long {
	volatile unsigned long	al_v;
};

typedef struct atomic_long atomic_long_t;

static inline long
atomic_long_read(struct atomic_long *a)
{
	/* no membar */
	return (unsigned long)a->al_v;
}

static inline void
atomic_long_set(struct atomic_long *a, long v)
{
	/* no membar */
	a->al_v = v;
}

static inline long
atomic_long_add_unless(struct atomic_long *a, long addend, long zero)
{
	long value;

	smp_mb__before_atomic();
	do {
		value = (long)a->al_v;
		if (value == zero)
			break;
	} while (atomic_cas_ulong(&a->al_v, (unsigned long)value,
		(unsigned long)(value + addend)) != (unsigned long)value);
	smp_mb__after_atomic();

	return value != zero;
}

static inline long
atomic_long_inc_not_zero(struct atomic_long *a)
{
	/* membar implied by atomic_long_add_unless */
	return atomic_long_add_unless(a, 1, 0);
}

static inline long
atomic_long_xchg(struct atomic_long *a, long new)
{
	long old;

	smp_mb__before_atomic();
	old = (long)atomic_swap_ulong(&a->al_v, (unsigned long)new);
	smp_mb__after_atomic();

	return old;
}

static inline long
atomic_long_cmpxchg(struct atomic_long *a, long expect, long new)
{
	long old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = (long)atomic_cas_ulong(&a->al_v, (unsigned long)expect,
	    (unsigned long)new);
	smp_mb__after_atomic();

	return old;
}

static inline void
set_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);

	/* no memory barrier */
	atomic_or_ulong(&ptr[bit / units], (1UL << (bit % units)));
}

static inline void
clear_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);

	/* no memory barrier */
	atomic_and_ulong(&ptr[bit / units], ~(1UL << (bit % units)));
}

static inline void
change_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	/* no memory barrier */
	do v = *p; while (atomic_cas_ulong(p, v, (v ^ mask)) != v);
}
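
/*
 * Sketch of the bit operations above applied to a multi-word mask; bit
 * numbers past the first word index into later elements of the array
 * (the identifiers are hypothetical):
 *
 *	unsigned long pending[4];	(zero it before sharing)
 *
 *	set_bit(3, pending);
 *	change_bit(70, pending);	(lands in a later array element)
 *	clear_bit(3, pending);
 */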

static inline int
test_and_set_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	smp_mb__before_atomic();
	do v = *p; while (atomic_cas_ulong(p, v, (v | mask)) != v);
	smp_mb__after_atomic();

	return ((v & mask) != 0);
}

static inline int
test_and_clear_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	smp_mb__before_atomic();
	do v = *p; while (atomic_cas_ulong(p, v, (v & ~mask)) != v);
	smp_mb__after_atomic();

	return ((v & mask) != 0);
}

static inline int
test_and_change_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	smp_mb__before_atomic();
	do v = *p; while (atomic_cas_ulong(p, v, (v ^ mask)) != v);
	smp_mb__after_atomic();

	return ((v & mask) != 0);
}
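
/*
 * Sketch of test_and_set_bit() as a claim flag, relying on the
 * barriers implied by the value-returning bit operations above (the
 * identifiers are hypothetical):
 *
 *	if (!test_and_set_bit(0, &w->w_flags))
 *		schedule_work_once(w);
 *	...
 *	clear_bit(0, &w->w_flags);	(no barrier implied)
 */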

#endif  /* _LINUX_ATOMIC_H_ */