/*	$NetBSD: atomic.h,v 1.33 2021/12/19 11:02:46 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_ATOMIC_H_
#define _LINUX_ATOMIC_H_

#include <sys/atomic.h>

#include <machine/limits.h>

#include <asm/barrier.h>

#define	xchg(P, V)							      \
	(sizeof(*(P)) == 4 ? atomic_swap_32((volatile uint32_t *)(P), (V))   \
	    : sizeof(*(P)) == 8 ? atomic_swap_64((volatile uint64_t *)(P), (V)) \
	    : (__builtin_abort(), 0))

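/*
 * Illustrative sketch, not part of this header: xchg() dispatches on
 * the size of the object, so it works on 32-bit and 64-bit variables;
 * any other size hits __builtin_abort().
 *
 *	static volatile uint32_t state;
 *	...
 *	uint32_t old = xchg(&state, 1);
 */
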
/*
 * atomic (u)int operations
 *
 *	Atomics that return a value, other than atomic_read, imply a
 *	full memory barrier.  Those that do not return a value imply
 *	no memory barrier.
 */

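/*
 * Illustrative sketch, not part of this header, of what that rule
 * means in practice:
 *
 *	atomic_inc(&a);			no ordering implied
 *	v = atomic_inc_return(&a);	full barrier before and after
 *	v = atomic_read(&a);		no ordering implied
 *
 * Callers who need ordering around the void-returning operations must
 * add smp_mb__before_atomic()/smp_mb__after_atomic() themselves.
 */
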
struct atomic {
	union {
		volatile int au_int;
		volatile unsigned int au_uint;
	} a_u;
};

#define	ATOMIC_INIT(i)	{ .a_u = { .au_int = (i) } }

typedef struct atomic atomic_t;

static inline int
atomic_read(const atomic_t *atomic)
{
	/* no membar */
	return atomic->a_u.au_int;
}

static inline void
atomic_set(atomic_t *atomic, int value)
{
	/* no membar */
	atomic->a_u.au_int = value;
}

static inline void
atomic_set_release(atomic_t *atomic, int value)
{
	atomic_store_release(&atomic->a_u.au_int, value);
}

static inline void
atomic_add(int addend, atomic_t *atomic)
{
	/* no membar */
	atomic_add_int(&atomic->a_u.au_uint, addend);
}

static inline void
atomic_sub(int subtrahend, atomic_t *atomic)
{
	/* no membar */
	atomic_add_int(&atomic->a_u.au_uint, -subtrahend);
}

static inline int
atomic_add_return(int addend, atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_add_int_nv(&atomic->a_u.au_uint, addend);
	smp_mb__after_atomic();

	return v;
}

static inline void
atomic_inc(atomic_t *atomic)
{
	/* no membar */
	atomic_inc_uint(&atomic->a_u.au_uint);
}

static inline void
atomic_dec(atomic_t *atomic)
{
	/* no membar */
	atomic_dec_uint(&atomic->a_u.au_uint);
}

static inline int
atomic_inc_return(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_inc_uint_nv(&atomic->a_u.au_uint);
	smp_mb__after_atomic();

	return v;
}

static inline int
atomic_dec_return(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_dec_uint_nv(&atomic->a_u.au_uint);
	smp_mb__after_atomic();

	return v;
}

static inline int
atomic_dec_and_test(atomic_t *atomic)
{
	/* membar implied by atomic_dec_return */
	return atomic_dec_return(atomic) == 0;
}

static inline int
atomic_dec_if_positive(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	do {
		v = atomic->a_u.au_int;
		if (v <= 0)
			break;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, v, v - 1) != v);
	smp_mb__after_atomic();

	return v - 1;
}

static inline void
atomic_or(int value, atomic_t *atomic)
{
	/* no membar */
	atomic_or_uint(&atomic->a_u.au_uint, value);
}

static inline void
atomic_andnot(int value, atomic_t *atomic)
{
	/* no membar */
	atomic_and_uint(&atomic->a_u.au_uint, ~value);
}

static inline int
atomic_fetch_xor(int value, atomic_t *atomic)
{
	unsigned old, new;

	smp_mb__before_atomic();
	do {
		old = atomic->a_u.au_uint;
		new = old ^ value;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, old, new) != old);
	smp_mb__after_atomic();

	return old;
}

static inline void
atomic_set_mask(unsigned long mask, atomic_t *atomic)
{
	/* no membar */
	atomic_or_uint(&atomic->a_u.au_uint, mask);
}

static inline void
atomic_clear_mask(unsigned long mask, atomic_t *atomic)
{
	/* no membar */
	atomic_and_uint(&atomic->a_u.au_uint, ~mask);
}

static inline int
atomic_add_unless(atomic_t *atomic, int addend, int zero)
{
	int value;

	smp_mb__before_atomic();
	do {
		value = atomic->a_u.au_int;
		if (value == zero)
			break;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, value, (value + addend))
	    != (unsigned)value);
	smp_mb__after_atomic();

	return value != zero;
}

static inline int
atomic_inc_not_zero(atomic_t *atomic)
{
	/* membar implied by atomic_add_unless */
	return atomic_add_unless(atomic, 1, 0);
}

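/*
 * Illustrative sketch, not part of this header: a typical
 * reference-count pattern built on the operations above.  The names
 * obj, o_refcnt, and obj_destroy are hypothetical.
 *
 *	static bool
 *	obj_tryget(struct obj *o)
 *	{
 *		return atomic_inc_not_zero(&o->o_refcnt);
 *	}
 *
 *	static void
 *	obj_put(struct obj *o)
 *	{
 *		if (atomic_dec_and_test(&o->o_refcnt))
 *			obj_destroy(o);
 *	}
 *
 * Both operations return a value, so both imply full memory barriers.
 */
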
static inline int
atomic_xchg(atomic_t *atomic, int new)
{
	int old;

	smp_mb__before_atomic();
	old = (int)atomic_swap_uint(&atomic->a_u.au_uint, (unsigned)new);
	smp_mb__after_atomic();

	return old;
}

static inline int
atomic_cmpxchg(atomic_t *atomic, int expect, int new)
{
	int old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = (int)atomic_cas_uint(&atomic->a_u.au_uint, (unsigned)expect,
	    (unsigned)new);
	smp_mb__after_atomic();

	return old;
}

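/*
 * Illustrative sketch, not part of this header: atomic_cmpxchg
 * returns the value actually observed, so a lock-free update loop
 * retries with that value until the CAS succeeds.  compute() is a
 * hypothetical helper.
 *
 *	int old, new, found;
 *
 *	old = atomic_read(&a);
 *	for (;;) {
 *		new = compute(old);
 *		found = atomic_cmpxchg(&a, old, new);
 *		if (found == old)
 *			break;
 *		old = found;
 *	}
 */
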
struct atomic64 {
	volatile uint64_t	a_v;
};

typedef struct atomic64 atomic64_t;

#define	ATOMIC64_INIT(v)	{ .a_v = (v) }

int		linux_atomic64_init(void);
void		linux_atomic64_fini(void);

#ifdef __HAVE_ATOMIC64_OPS

static inline uint64_t
atomic64_read(const struct atomic64 *a)
{
	/* no membar */
	return a->a_v;
}

static inline void
atomic64_set(struct atomic64 *a, uint64_t v)
{
	/* no membar */
	a->a_v = v;
}

static inline void
atomic64_add(int64_t d, struct atomic64 *a)
{
	/* no membar */
	atomic_add_64(&a->a_v, d);
}

static inline void
atomic64_sub(int64_t d, struct atomic64 *a)
{
	/* no membar */
	atomic_add_64(&a->a_v, -d);
}

static inline int64_t
atomic64_add_return(int64_t d, struct atomic64 *a)
{
	int64_t v;

	smp_mb__before_atomic();
	v = (int64_t)atomic_add_64_nv(&a->a_v, d);
	smp_mb__after_atomic();

	return v;
}

static inline uint64_t
atomic64_xchg(struct atomic64 *a, uint64_t new)
{
	uint64_t old;

	smp_mb__before_atomic();
	old = atomic_swap_64(&a->a_v, new);
	smp_mb__after_atomic();

	return old;
}

static inline uint64_t
atomic64_cmpxchg(struct atomic64 *atomic, uint64_t expect, uint64_t new)
{
	uint64_t old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = atomic_cas_64(&atomic->a_v, expect, new);
	smp_mb__after_atomic();

	return old;
}

#else  /* !defined(__HAVE_ATOMIC64_OPS) */

#define	atomic64_add		linux_atomic64_add
#define	atomic64_add_return	linux_atomic64_add_return
#define	atomic64_cmpxchg	linux_atomic64_cmpxchg
#define	atomic64_read		linux_atomic64_read
#define	atomic64_set		linux_atomic64_set
#define	atomic64_sub		linux_atomic64_sub
#define	atomic64_xchg		linux_atomic64_xchg

uint64_t	atomic64_read(const struct atomic64 *);
void		atomic64_set(struct atomic64 *, uint64_t);
void		atomic64_add(int64_t, struct atomic64 *);
void		atomic64_sub(int64_t, struct atomic64 *);
int64_t		atomic64_add_return(int64_t, struct atomic64 *);
uint64_t	atomic64_xchg(struct atomic64 *, uint64_t);
uint64_t	atomic64_cmpxchg(struct atomic64 *, uint64_t, uint64_t);

#endif

static inline int64_t
atomic64_inc_return(struct atomic64 *a)
{
	return atomic64_add_return(1, a);
}

struct atomic_long {
	volatile unsigned long	al_v;
};

typedef struct atomic_long atomic_long_t;

static inline long
atomic_long_read(struct atomic_long *a)
{
	/* no membar */
	return (long)a->al_v;
}

static inline void
atomic_long_set(struct atomic_long *a, long v)
{
	/* no membar */
	a->al_v = v;
}

static inline long
atomic_long_add_unless(struct atomic_long *a, long addend, long zero)
{
	long value;

	smp_mb__before_atomic();
	do {
		value = (long)a->al_v;
		if (value == zero)
			break;
	} while (atomic_cas_ulong(&a->al_v, (unsigned long)value,
		(unsigned long)(value + addend)) != (unsigned long)value);
	smp_mb__after_atomic();

	return value != zero;
}

static inline long
atomic_long_inc_not_zero(struct atomic_long *a)
{
	/* membar implied by atomic_long_add_unless */
	return atomic_long_add_unless(a, 1, 0);
}

static inline long
atomic_long_xchg(struct atomic_long *a, long new)
{
	long old;

	smp_mb__before_atomic();
	old = (long)atomic_swap_ulong(&a->al_v, (unsigned long)new);
	smp_mb__after_atomic();

	return old;
}

static inline long
atomic_long_cmpxchg(struct atomic_long *a, long expect, long new)
{
	long old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = (long)atomic_cas_ulong(&a->al_v, (unsigned long)expect,
	    (unsigned long)new);
	smp_mb__after_atomic();

	return old;
}

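/*
 * Bit operations on arrays of unsigned long (Linux-style bitmaps):
 * bit N lives at bit position N % (CHAR_BIT * sizeof(unsigned long))
 * within word ptr[N / (CHAR_BIT * sizeof(unsigned long))].
 */
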
static inline void
set_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);

	/* no memory barrier */
	atomic_or_ulong(&ptr[bit / units], (1UL << (bit % units)));
}

static inline void
clear_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);

	/* no memory barrier */
	atomic_and_ulong(&ptr[bit / units], ~(1UL << (bit % units)));
}

static inline void
clear_bit_unlock(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);

	/* store-release */
	smp_mb__before_atomic();
	atomic_and_ulong(&ptr[bit / units], ~(1UL << (bit % units)));
}

static inline void
change_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	/* no memory barrier */
	do v = *p; while (atomic_cas_ulong(p, v, (v ^ mask)) != v);
}

static inline int
test_and_set_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	smp_mb__before_atomic();
	do v = *p; while (atomic_cas_ulong(p, v, (v | mask)) != v);
	smp_mb__after_atomic();

	return ((v & mask) != 0);
}

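/*
 * Illustrative sketch, not part of this header: test_and_set_bit
 * implies a full barrier and clear_bit_unlock a release, so together
 * they can serve as a crude bit lock.  LOCK_BIT and flags are
 * hypothetical; real code should prefer a proper lock.
 *
 *	while (test_and_set_bit(LOCK_BIT, &flags))
 *		continue;		(spin until we own the bit)
 *	...critical section...
 *	clear_bit_unlock(LOCK_BIT, &flags);
 */
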
static inline int
test_and_clear_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	smp_mb__before_atomic();
	do v = *p; while (atomic_cas_ulong(p, v, (v & ~mask)) != v);
	smp_mb__after_atomic();

	return ((v & mask) != 0);
}

static inline int
test_and_change_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	smp_mb__before_atomic();
	do v = *p; while (atomic_cas_ulong(p, v, (v ^ mask)) != v);
	smp_mb__after_atomic();

	return ((v & mask) != 0);
}

#endif  /* _LINUX_ATOMIC_H_ */