/*	$NetBSD: atomic.h,v 1.35 2021/12/19 11:04:28 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_ATOMIC_H_
#define _LINUX_ATOMIC_H_

#include <sys/atomic.h>

#include <machine/limits.h>

#include <asm/barrier.h>

#define	xchg(P, V)							      \
	(sizeof(*(P)) == 4 ? atomic_swap_32((volatile uint32_t *)(P), (V))   \
	    : sizeof(*(P)) == 8 ? atomic_swap_64((volatile uint64_t *)(P), (V)) \
	    : (__builtin_abort(), 0))

#define	cmpxchg(P, O, N)						      \
	(sizeof(*(P)) == 4 ? atomic_cas_32((volatile uint32_t *)(P), (O), (N)) \
	    : sizeof(*(P)) == 8 ? atomic_cas_64((volatile uint64_t *)(P), (O), (N)) \
	    : (__builtin_abort(), 0))
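
/*
 * Example (illustrative sketch, not part of the original header): both
 * macros dispatch on sizeof(*(P)), so the same spelling serves 32-bit and
 * 64-bit objects; other sizes abort.  The helper below is hypothetical and
 * only demonstrates intended usage -- claim a 32-bit slot if it is
 * currently 0 and return the previously observed owner.
 */
static inline unsigned int
example_cmpxchg_claim(volatile unsigned int *slot, unsigned int owner)
{
	/* cmpxchg returns the value seen before the attempted swap. */
	return cmpxchg(slot, 0, owner);
}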

/*
 * atomic (u)int operations
 *
 *	Atomics that return a value, other than atomic_read, imply a
 *	full memory barrier.  Those that do not return a value imply no
 *	memory barrier.
 */

struct atomic {
	union {
		volatile int au_int;
		volatile unsigned int au_uint;
	} a_u;
};

#define	ATOMIC_INIT(i)	{ .a_u = { .au_int = (i) } }

typedef struct atomic atomic_t;

static inline int
atomic_read(const atomic_t *atomic)
{
	/* no membar */
	return atomic->a_u.au_int;
}

static inline void
atomic_set(atomic_t *atomic, int value)
{
	/* no membar */
	atomic->a_u.au_int = value;
}

static inline void
atomic_set_release(atomic_t *atomic, int value)
{
	atomic_store_release(&atomic->a_u.au_int, value);
}

static inline void
atomic_add(int addend, atomic_t *atomic)
{
	/* no membar */
	atomic_add_int(&atomic->a_u.au_uint, addend);
}

static inline void
atomic_sub(int subtrahend, atomic_t *atomic)
{
	/* no membar */
	atomic_add_int(&atomic->a_u.au_uint, -subtrahend);
}

static inline int
atomic_add_return(int addend, atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_add_int_nv(&atomic->a_u.au_uint, addend);
	smp_mb__after_atomic();

	return v;
}
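
/*
 * Example (illustrative sketch, not part of the original header): per the
 * barrier contract above, atomic_add() implies no ordering, while
 * atomic_add_return() implies a full barrier around the update.  The
 * helper and its parameters are hypothetical.
 */
static inline int
example_account(atomic_t *counter, int n, int need_ordering)
{
	if (!need_ordering) {
		/* Plain counter bump; no memory barrier. */
		atomic_add(n, counter);
		return 0;
	}

	/* Fully ordered update; returns the new value. */
	return atomic_add_return(n, counter);
}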

static inline void
atomic_inc(atomic_t *atomic)
{
	/* no membar */
	atomic_inc_uint(&atomic->a_u.au_uint);
}

static inline void
atomic_dec(atomic_t *atomic)
{
	/* no membar */
	atomic_dec_uint(&atomic->a_u.au_uint);
}

static inline int
atomic_inc_return(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_inc_uint_nv(&atomic->a_u.au_uint);
	smp_mb__after_atomic();

	return v;
}

static inline int
atomic_dec_return(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_dec_uint_nv(&atomic->a_u.au_uint);
	smp_mb__after_atomic();

	return v;
}

static inline int
atomic_dec_and_test(atomic_t *atomic)
{
	/* membar implied by atomic_dec_return */
	return atomic_dec_return(atomic) == 0;
}
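
/*
 * Example (illustrative sketch, not part of the original header): the
 * usual Linux refcounting idiom -- drop a reference and destroy the
 * object once the count reaches zero.  The helper and its destructor
 * callback are hypothetical.
 */
static inline void
example_put(atomic_t *refcnt, void (*destroy)(void *), void *obj)
{
	/* atomic_dec_and_test implies a full barrier via atomic_dec_return. */
	if (atomic_dec_and_test(refcnt))
		(*destroy)(obj);
}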

static inline int
atomic_dec_if_positive(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	do {
		v = atomic->a_u.au_uint;
		if (v <= 0)
			break;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, v, v - 1) != v);
	smp_mb__after_atomic();

	return v - 1;
}
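
/*
 * Example (illustrative sketch, not part of the original header):
 * atomic_dec_if_positive() returns the decremented value, or a value
 * below zero without decrementing when nothing is left, so it can back a
 * simple "try to take a token" primitive.  The helper is hypothetical.
 */
static inline int
example_trydown(atomic_t *tokens)
{
	/* Nonzero iff a token was actually taken. */
	return atomic_dec_if_positive(tokens) >= 0;
}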

static inline void
atomic_or(int value, atomic_t *atomic)
{
	/* no membar */
	atomic_or_uint(&atomic->a_u.au_uint, value);
}

static inline void
atomic_andnot(int value, atomic_t *atomic)
{
	/* no membar */
	atomic_and_uint(&atomic->a_u.au_uint, ~value);
}

static inline int
atomic_fetch_xor(int value, atomic_t *atomic)
{
	unsigned old, new;

	smp_mb__before_atomic();
	do {
		old = atomic->a_u.au_uint;
		new = old ^ value;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, old, new) != old);
	smp_mb__after_atomic();

	return old;
}

static inline void
atomic_set_mask(unsigned long mask, atomic_t *atomic)
{
	/* no membar */
	atomic_or_uint(&atomic->a_u.au_uint, mask);
}

static inline void
atomic_clear_mask(unsigned long mask, atomic_t *atomic)
{
	/* no membar */
	atomic_and_uint(&atomic->a_u.au_uint, ~mask);
}

static inline int
atomic_add_unless(atomic_t *atomic, int addend, int zero)
{
	int value;

	smp_mb__before_atomic();
	do {
		value = atomic->a_u.au_int;
		if (value == zero)
			break;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, value, (value + addend))
	    != (unsigned)value);
	smp_mb__after_atomic();

	return value != zero;
}

static inline int
atomic_inc_not_zero(atomic_t *atomic)
{
	/* membar implied by atomic_add_unless */
	return atomic_add_unless(atomic, 1, 0);
}
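
/*
 * Example (illustrative sketch, not part of the original header):
 * atomic_inc_not_zero() is the "get a reference unless the object is
 * already being torn down" idiom, analogous to Linux's
 * kref_get_unless_zero().  The helper is hypothetical.
 */
static inline int
example_tryget(atomic_t *refcnt)
{
	/* Nonzero iff a new reference was taken. */
	return atomic_inc_not_zero(refcnt);
}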

static inline int
atomic_xchg(atomic_t *atomic, int new)
{
	int old;

	smp_mb__before_atomic();
	old = (int)atomic_swap_uint(&atomic->a_u.au_uint, (unsigned)new);
	smp_mb__after_atomic();

	return old;
}

static inline int
atomic_cmpxchg(atomic_t *atomic, int expect, int new)
{
	int old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = (int)atomic_cas_uint(&atomic->a_u.au_uint, (unsigned)expect,
	    (unsigned)new);
	smp_mb__after_atomic();

	return old;
}
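
/*
 * Example (illustrative sketch, not part of the original header):
 * atomic_cmpxchg() returns the value observed before the exchange, and
 * the exchange succeeded iff that value equals the expected one.  A
 * saturating increment can be built from it; the helper and its limit
 * parameter are hypothetical.
 */
static inline int
example_inc_saturating(atomic_t *a, int limit)
{
	int old, cur;

	cur = atomic_read(a);
	do {
		if (cur >= limit)
			return cur;	/* already saturated */
		old = cur;
		cur = atomic_cmpxchg(a, old, old + 1);
	} while (cur != old);

	return old + 1;
}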

struct atomic64 {
	volatile uint64_t	a_v;
};

typedef struct atomic64 atomic64_t;

#define	ATOMIC64_INIT(v)	{ .a_v = (v) }

int		linux_atomic64_init(void);
void		linux_atomic64_fini(void);

#ifdef __HAVE_ATOMIC64_OPS

static inline uint64_t
atomic64_read(const struct atomic64 *a)
{
	/* no membar */
	return a->a_v;
}

static inline void
atomic64_set(struct atomic64 *a, uint64_t v)
{
	/* no membar */
	a->a_v = v;
}

static inline void
atomic64_add(int64_t d, struct atomic64 *a)
{
	/* no membar */
	atomic_add_64(&a->a_v, d);
}

static inline void
atomic64_sub(int64_t d, struct atomic64 *a)
{
	/* no membar */
	atomic_add_64(&a->a_v, -d);
}

static inline int64_t
atomic64_add_return(int64_t d, struct atomic64 *a)
{
	int64_t v;

	smp_mb__before_atomic();
	v = (int64_t)atomic_add_64_nv(&a->a_v, d);
	smp_mb__after_atomic();

	return v;
}

static inline uint64_t
atomic64_xchg(struct atomic64 *a, uint64_t new)
{
	uint64_t old;

	smp_mb__before_atomic();
	old = atomic_swap_64(&a->a_v, new);
	smp_mb__after_atomic();

	return old;
}

static inline uint64_t
atomic64_cmpxchg(struct atomic64 *atomic, uint64_t expect, uint64_t new)
{
	uint64_t old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = atomic_cas_64(&atomic->a_v, expect, new);
	smp_mb__after_atomic();

	return old;
}

#else  /* !defined(__HAVE_ATOMIC64_OPS) */

#define	atomic64_add		linux_atomic64_add
#define	atomic64_add_return	linux_atomic64_add_return
#define	atomic64_cmpxchg	linux_atomic64_cmpxchg
#define	atomic64_read		linux_atomic64_read
#define	atomic64_set		linux_atomic64_set
#define	atomic64_sub		linux_atomic64_sub
#define	atomic64_xchg		linux_atomic64_xchg

uint64_t	atomic64_read(const struct atomic64 *);
void		atomic64_set(struct atomic64 *, uint64_t);
void		atomic64_add(int64_t, struct atomic64 *);
void		atomic64_sub(int64_t, struct atomic64 *);
int64_t		atomic64_add_return(int64_t, struct atomic64 *);
uint64_t	atomic64_xchg(struct atomic64 *, uint64_t);
uint64_t	atomic64_cmpxchg(struct atomic64 *, uint64_t, uint64_t);

#endif
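
/*
 * Example (illustrative sketch, not part of the original header): the
 * atomic64 API reads the same whether the platform has native 64-bit
 * atomics (__HAVE_ATOMIC64_OPS) or uses the linux_atomic64_* fallbacks
 * declared above.  The helper is hypothetical.
 */
static inline uint64_t
example_advance_timestamp(atomic64_t *ts, int64_t delta)
{
	/* Returns the new value and implies a full memory barrier. */
	return (uint64_t)atomic64_add_return(delta, ts);
}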

static inline int64_t
atomic64_inc_return(struct atomic64 *a)
{
	return atomic64_add_return(1, a);
}

struct atomic_long {
	volatile unsigned long	al_v;
};

typedef struct atomic_long atomic_long_t;

static inline long
atomic_long_read(struct atomic_long *a)
{
	/* no membar */
	return (unsigned long)a->al_v;
}

static inline void
atomic_long_set(struct atomic_long *a, long v)
{
	/* no membar */
	a->al_v = v;
}

static inline long
atomic_long_add_unless(struct atomic_long *a, long addend, long zero)
{
	long value;

	smp_mb__before_atomic();
	do {
		value = (long)a->al_v;
		if (value == zero)
			break;
	} while (atomic_cas_ulong(&a->al_v, (unsigned long)value,
		(unsigned long)(value + addend)) != (unsigned long)value);
	smp_mb__after_atomic();

	return value != zero;
}

static inline long
atomic_long_inc_not_zero(struct atomic_long *a)
{
	/* membar implied by atomic_long_add_unless */
	return atomic_long_add_unless(a, 1, 0);
}

static inline long
atomic_long_xchg(struct atomic_long *a, long new)
{
	long old;

	smp_mb__before_atomic();
	old = (long)atomic_swap_ulong(&a->al_v, (unsigned long)new);
	smp_mb__after_atomic();

	return old;
}

static inline long
atomic_long_cmpxchg(struct atomic_long *a, long expect, long new)
{
	long old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = (long)atomic_cas_ulong(&a->al_v, (unsigned long)expect,
	    (unsigned long)new);
	smp_mb__after_atomic();

	return old;
}

#endif  /* _LINUX_ATOMIC_H_ */