/*	$NetBSD: atomic.h,v 1.21.2.1 2020/02/29 20:20:17 ad Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_ATOMIC_H_
#define _LINUX_ATOMIC_H_

#include <sys/atomic.h>

#include <machine/limits.h>

#include <asm/barrier.h>

#if defined(MULTIPROCESSOR) && !defined(__HAVE_ATOMIC_AS_MEMBAR)
#  define	smp_mb__before_atomic()		membar_exit()
#  define	smp_mb__after_atomic()		membar_enter()
#else
#  define	smp_mb__before_atomic()		__insn_barrier()
#  define	smp_mb__after_atomic()		__insn_barrier()
#endif
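
/*
 * smp_mb__before_atomic(), smp_mb__after_atomic()
 *
 *	On MULTIPROCESSOR kernels whose atomic operations do not already
 *	act as memory barriers (!__HAVE_ATOMIC_AS_MEMBAR), these expand
 *	to NetBSD membars bracketing the atomic operation; otherwise a
 *	compiler barrier suffices.  Illustrative caller-side sketch (the
 *	counter `count' is hypothetical):
 *
 *		smp_mb__before_atomic();
 *		atomic_inc(&count);	-- void atomics imply no barrier
 *		smp_mb__after_atomic();
 */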

/*
 * atomic (u)int operations
 *
 *	Atomics that return a value, other than atomic_read, imply a
 *	full memory barrier.  Those that do not return a value
 *	imply no memory barrier.
 */

struct atomic {
	union {
		volatile int au_int;
		volatile unsigned int au_uint;
	} a_u;
};

#define	ATOMIC_INIT(i)	{ .a_u = { .au_int = (i) } }

typedef struct atomic atomic_t;
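
/*
 * Illustrative usage sketch (the reference count `refcnt' and the
 * destroy step are hypothetical):
 *
 *	static atomic_t refcnt = ATOMIC_INIT(1);
 *
 *	atomic_inc(&refcnt);			-- no barrier implied
 *	if (atomic_dec_and_test(&refcnt))	-- full barrier implied
 *		destroy the object
 */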

static inline int
atomic_read(atomic_t *atomic)
{
	/* no membar */
	return atomic->a_u.au_int;
}

static inline void
atomic_set(atomic_t *atomic, int value)
{
	/* no membar */
	atomic->a_u.au_int = value;
}

static inline void
atomic_add(int addend, atomic_t *atomic)
{
	/* no membar */
	atomic_add_int(&atomic->a_u.au_uint, addend);
}

static inline void
atomic_sub(int subtrahend, atomic_t *atomic)
{
	/* no membar */
	atomic_add_int(&atomic->a_u.au_uint, -subtrahend);
}

static inline int
atomic_add_return(int addend, atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_add_int_nv(&atomic->a_u.au_uint, addend);
	smp_mb__after_atomic();

	return v;
}

static inline void
atomic_inc(atomic_t *atomic)
{
	/* no membar */
	atomic_inc_uint(&atomic->a_u.au_uint);
}

static inline void
atomic_dec(atomic_t *atomic)
{
	/* no membar */
	atomic_dec_uint(&atomic->a_u.au_uint);
}

static inline int
atomic_inc_return(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_inc_uint_nv(&atomic->a_u.au_uint);
	smp_mb__after_atomic();

	return v;
}

static inline int
atomic_dec_return(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_dec_uint_nv(&atomic->a_u.au_uint);
	smp_mb__after_atomic();

	return v;
}

static inline int
atomic_dec_and_test(atomic_t *atomic)
{
	/* membar implied by atomic_dec_return */
	return atomic_dec_return(atomic) == 0;
}

static inline void
atomic_or(int value, atomic_t *atomic)
{
	/* no membar */
	atomic_or_uint(&atomic->a_u.au_uint, value);
}

static inline void
atomic_set_mask(unsigned long mask, atomic_t *atomic)
{
	/* no membar */
	atomic_or_uint(&atomic->a_u.au_uint, mask);
}

static inline void
atomic_clear_mask(unsigned long mask, atomic_t *atomic)
{
	/* no membar */
	atomic_and_uint(&atomic->a_u.au_uint, ~mask);
}

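/*
 * atomic_add_unless(atomic, addend, zero)
 *
 *	Add addend to the atomic unless its current value equals zero.
 *	Returns nonzero iff the addition was performed, i.e. iff the
 *	old value was not equal to zero.  Implies a full memory barrier.
 */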
static inline int
atomic_add_unless(atomic_t *atomic, int addend, int zero)
{
	int value;

	smp_mb__before_atomic();
	do {
		value = atomic->a_u.au_int;
		if (value == zero)
			break;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, value, (value + addend))
	    != (unsigned)value);
	smp_mb__after_atomic();

	return value != zero;
}

static inline int
atomic_inc_not_zero(atomic_t *atomic)
{
	/* membar implied by atomic_add_unless */
	return atomic_add_unless(atomic, 1, 0);
}

static inline int
atomic_xchg(atomic_t *atomic, int new)
{
	int old;

	smp_mb__before_atomic();
	old = (int)atomic_swap_uint(&atomic->a_u.au_uint, (unsigned)new);
	smp_mb__after_atomic();

	return old;
}

static inline int
atomic_cmpxchg(atomic_t *atomic, int expect, int new)
{
	int old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = (int)atomic_cas_uint(&atomic->a_u.au_uint, (unsigned)expect,
	    (unsigned)new);
	smp_mb__after_atomic();

	return old;
}
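
/*
 * Illustrative cmpxchg retry loop (caller-side sketch; the atomic `a'
 * and the flag FLAG are hypothetical):
 *
 *	int old, new;
 *
 *	do {
 *		old = atomic_read(&a);
 *		new = old | FLAG;
 *	} while (atomic_cmpxchg(&a, old, new) != old);
 */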

struct atomic64 {
	volatile uint64_t	a_v;
};

typedef struct atomic64 atomic64_t;

#define	ATOMIC64_INIT(v)	{ .a_v = (v) }

int		linux_atomic64_init(void);
void		linux_atomic64_fini(void);

#ifdef __HAVE_ATOMIC64_OPS

static inline uint64_t
atomic64_read(const struct atomic64 *a)
{
	/* no membar */
	return a->a_v;
}

static inline void
atomic64_set(struct atomic64 *a, uint64_t v)
{
	/* no membar */
	a->a_v = v;
}

static inline void
atomic64_add(int64_t d, struct atomic64 *a)
{
	/* no membar */
	atomic_add_64(&a->a_v, d);
}

static inline void
atomic64_sub(int64_t d, struct atomic64 *a)
{
	/* no membar */
	atomic_add_64(&a->a_v, -d);
}

static inline int64_t
atomic64_add_return(int64_t d, struct atomic64 *a)
{
	int64_t v;

	smp_mb__before_atomic();
	v = (int64_t)atomic_add_64_nv(&a->a_v, d);
	smp_mb__after_atomic();

	return v;
}

static inline uint64_t
atomic64_xchg(struct atomic64 *a, uint64_t new)
{
	uint64_t old;

	smp_mb__before_atomic();
	old = atomic_swap_64(&a->a_v, new);
	smp_mb__after_atomic();

	return old;
}

static inline uint64_t
atomic64_cmpxchg(struct atomic64 *atomic, uint64_t expect, uint64_t new)
{
	uint64_t old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = atomic_cas_64(&atomic->a_v, expect, new);
	smp_mb__after_atomic();

	return old;
}

#else  /* !defined(__HAVE_ATOMIC64_OPS) */

#define	atomic64_add		linux_atomic64_add
#define	atomic64_add_return	linux_atomic64_add_return
#define	atomic64_cmpxchg	linux_atomic64_cmpxchg
#define	atomic64_read		linux_atomic64_read
#define	atomic64_set		linux_atomic64_set
#define	atomic64_sub		linux_atomic64_sub
#define	atomic64_xchg		linux_atomic64_xchg

uint64_t	atomic64_read(const struct atomic64 *);
void		atomic64_set(struct atomic64 *, uint64_t);
void		atomic64_add(int64_t, struct atomic64 *);
void		atomic64_sub(int64_t, struct atomic64 *);
int64_t		atomic64_add_return(int64_t, struct atomic64 *);
uint64_t	atomic64_xchg(struct atomic64 *, uint64_t);
uint64_t	atomic64_cmpxchg(struct atomic64 *, uint64_t, uint64_t);

#endif
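
/*
 * On platforms without __HAVE_ATOMIC64_OPS, the atomic64_* operations
 * are provided out of line as linux_atomic64_*; linux_atomic64_init()
 * and linux_atomic64_fini() set up and tear down whatever per-platform
 * state (e.g. locks) that fallback requires.  The inline definitions
 * above follow the same barrier convention as the 32-bit atomics.
 */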

static inline int64_t
atomic64_inc_return(struct atomic64 *a)
{
	return atomic64_add_return(1, a);
}

struct atomic_long {
	volatile unsigned long	al_v;
};

typedef struct atomic_long atomic_long_t;

static inline long
atomic_long_read(struct atomic_long *a)
{
	/* no membar */
	return (unsigned long)a->al_v;
}

static inline void
atomic_long_set(struct atomic_long *a, long v)
{
	/* no membar */
	a->al_v = v;
}

static inline long
atomic_long_add_unless(struct atomic_long *a, long addend, long zero)
{
	long value;

	smp_mb__before_atomic();
	do {
		value = (long)a->al_v;
		if (value == zero)
			break;
	} while (atomic_cas_ulong(&a->al_v, (unsigned long)value,
		(unsigned long)(value + addend)) != (unsigned long)value);
	smp_mb__after_atomic();

	return value != zero;
}

static inline long
atomic_long_inc_not_zero(struct atomic_long *a)
{
	/* membar implied by atomic_long_add_unless */
	return atomic_long_add_unless(a, 1, 0);
}

static inline long
atomic_long_cmpxchg(struct atomic_long *a, long expect, long new)
{
	long old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = (long)atomic_cas_ulong(&a->al_v, (unsigned long)expect,
	    (unsigned long)new);
	smp_mb__after_atomic();

	return old;
}

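/*
 * Linux-style bit operations.
 *
 *	The bit number indexes an array of unsigned longs: bit B lives
 *	in word ptr[B / (sizeof(unsigned long) * CHAR_BIT)].  The
 *	test_and_* variants imply a full memory barrier; the plain
 *	set/clear/change variants imply none.  Illustrative sketch (the
 *	bitmap `flags' is hypothetical):
 *
 *		unsigned long flags[4] = { 0 };
 *
 *		set_bit(67, flags);			-- no barrier
 *		if (test_and_clear_bit(67, flags))	-- full barrier
 *			handle the event
 */
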
static inline void
set_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);

	/* no memory barrier */
	atomic_or_ulong(&ptr[bit / units], (1UL << (bit % units)));
}

static inline void
clear_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);

	/* no memory barrier */
	atomic_and_ulong(&ptr[bit / units], ~(1UL << (bit % units)));
}

static inline void
change_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	/* no memory barrier */
	do v = *p; while (atomic_cas_ulong(p, v, (v ^ mask)) != v);
}

static inline int
test_and_set_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	smp_mb__before_atomic();
	do v = *p; while (atomic_cas_ulong(p, v, (v | mask)) != v);
	smp_mb__after_atomic();

	return ((v & mask) != 0);
}
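
/*
 * Illustrative use of test_and_set_bit as a claim/busy flag
 * (caller-side sketch; the flag word `busy' is hypothetical):
 *
 *	static unsigned long busy;
 *
 *	if (!test_and_set_bit(0, &busy)) {
 *		... we own the resource ...
 *		smp_mb__before_atomic();
 *		clear_bit(0, &busy);
 *	}
 */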

static inline int
test_and_clear_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	smp_mb__before_atomic();
	do v = *p; while (atomic_cas_ulong(p, v, (v & ~mask)) != v);
	smp_mb__after_atomic();

	return ((v & mask) != 0);
}

static inline int
test_and_change_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	smp_mb__before_atomic();
	do v = *p; while (atomic_cas_ulong(p, v, (v ^ mask)) != v);
	smp_mb__after_atomic();

	return ((v & mask) != 0);
}

#endif  /* _LINUX_ATOMIC_H_ */