/*	$NetBSD: atomic.h,v 1.7.4.2 2014/08/20 00:04:21 tls Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_ATOMIC_H_
#define _LINUX_ATOMIC_H_

#include <sys/atomic.h>

#include <machine/limits.h>

/*
 * Linux atomic_t, implemented with the native NetBSD atomic operations
 * from <sys/atomic.h>.  The union provides both signed and unsigned
 * views of the same cell, since the Linux API is signed while NetBSD's
 * primitives operate on unsigned int.
 */
struct atomic {
	union {
		volatile int au_int;
		volatile unsigned int au_uint;
	} a_u;
};

#define	ATOMIC_INIT(i)	{ .a_u = { .au_int = (i) } }

typedef struct atomic atomic_t;

/*
 * atomic_read and atomic_set are plain, unordered accesses: like their
 * Linux counterparts, they imply no memory barrier.
 */
static inline int
atomic_read(atomic_t *atomic)
{
	return atomic->a_u.au_int;
}

static inline void
atomic_set(atomic_t *atomic, int value)
{
	atomic->a_u.au_int = value;
}

static inline void
atomic_add(int addend, atomic_t *atomic)
{
	atomic_add_int(&atomic->a_u.au_uint, addend);
}

static inline void
atomic_sub(int subtrahend, atomic_t *atomic)
{
	atomic_add_int(&atomic->a_u.au_uint, -subtrahend);
}

static inline int
atomic_add_return(int addend, atomic_t *atomic)
{
	return (int)atomic_add_int_nv(&atomic->a_u.au_uint, addend);
}

static inline void
atomic_inc(atomic_t *atomic)
{
	atomic_inc_uint(&atomic->a_u.au_uint);
}

static inline void
atomic_dec(atomic_t *atomic)
{
	atomic_dec_uint(&atomic->a_u.au_uint);
}

static inline int
atomic_inc_return(atomic_t *atomic)
{
	return (int)atomic_inc_uint_nv(&atomic->a_u.au_uint);
}

static inline int
atomic_dec_return(atomic_t *atomic)
{
	return (int)atomic_dec_uint_nv(&atomic->a_u.au_uint);
}

static inline int
atomic_dec_and_test(atomic_t *atomic)
{
	return (0 == (int)atomic_dec_uint_nv(&atomic->a_u.au_uint));
}
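
/*
 * Illustrative usage sketch, not part of the original header: a
 * reference count in the Linux style, built on the operations above.
 * The obj struct and the obj_free destructor are hypothetical names.
 *
 *	struct obj {
 *		atomic_t o_refcnt;	// initialized with ATOMIC_INIT(1)
 *	};
 *
 *	static void
 *	obj_acquire(struct obj *o)
 *	{
 *		atomic_inc(&o->o_refcnt);
 *	}
 *
 *	static void
 *	obj_release(struct obj *o)
 *	{
 *		// atomic_dec_and_test returns nonzero for the last reference
 *		if (atomic_dec_and_test(&o->o_refcnt))
 *			obj_free(o);
 *	}
 */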

/*
 * Note: the mask parameter is declared unsigned long, but it is
 * truncated to the width of unsigned int by the NetBSD primitives
 * underneath, so on LP64 platforms the upper bits are ignored.
 */
static inline void
atomic_set_mask(unsigned long mask, atomic_t *atomic)
{
	atomic_or_uint(&atomic->a_u.au_uint, mask);
}

static inline void
atomic_clear_mask(unsigned long mask, atomic_t *atomic)
{
	atomic_and_uint(&atomic->a_u.au_uint, ~mask);
}

/*
 * atomic_add_unless: add addend to the atomic unless its current value
 * equals the third argument (named zero here, after its usual use).
 * Returns 1 if the addition was performed, 0 otherwise.  Implemented
 * as a compare-and-swap retry loop.
 */
static inline int
atomic_add_unless(atomic_t *atomic, int addend, int zero)
{
	int value;

	do {
		value = atomic->a_u.au_int;
		if (value == zero)
			return 0;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, value, (value + addend))
	    != value);

	return 1;
}

static inline int
atomic_inc_not_zero(atomic_t *atomic)
{
	return atomic_add_unless(atomic, 1, 0);
}
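
/*
 * Illustrative usage sketch, not part of the original header:
 * atomic_inc_not_zero is the classic way to take a new reference only
 * if the object is not already being torn down (hypothetical names).
 *
 *	static struct obj *
 *	obj_tryget(struct obj *o)
 *	{
 *		if (!atomic_inc_not_zero(&o->o_refcnt))
 *			return NULL;	// refcount already hit zero
 *		return o;
 *	}
 */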

static inline int
atomic_xchg(atomic_t *atomic, int new)
{
	return (int)atomic_swap_uint(&atomic->a_u.au_uint, (unsigned)new);
}

/*
 * atomic_cmpxchg returns the value observed before the exchange; the
 * swap took effect iff the return value equals old.
 */
static inline int
atomic_cmpxchg(atomic_t *atomic, int old, int new)
{
	return (int)atomic_cas_uint(&atomic->a_u.au_uint, (unsigned)old,
	    (unsigned)new);
}
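
/*
 * Illustrative usage sketch, not part of the original header: a
 * bounded increment built from atomic_cmpxchg.  The function and
 * parameter names are hypothetical.
 *
 *	static int
 *	counter_inc_saturating(atomic_t *a, int limit)
 *	{
 *		int old, prev;
 *
 *		old = atomic_read(a);
 *		for (;;) {
 *			if (old >= limit)
 *				return old;	// saturated, no change
 *			prev = atomic_cmpxchg(a, old, old + 1);
 *			if (prev == old)
 *				return old + 1;	// our swap won
 *			old = prev;	// lost the race; retry
 *		}
 *	}
 */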

/*
 * Linux atomic64_t, backed by NetBSD's 64-bit atomic operations.
 */
struct atomic64 {
	volatile uint64_t	a_v;
};

typedef struct atomic64 atomic64_t;

static inline uint64_t
atomic64_read(const struct atomic64 *a)
{
	return a->a_v;
}

static inline void
atomic64_set(struct atomic64 *a, uint64_t v)
{
	a->a_v = v;
}

static inline void
atomic64_add(long long d, struct atomic64 *a)
{
	atomic_add_64(&a->a_v, d);
}

static inline void
atomic64_sub(long long d, struct atomic64 *a)
{
	atomic_add_64(&a->a_v, -d);
}

static inline uint64_t
atomic64_xchg(struct atomic64 *a, uint64_t v)
{
	return atomic_swap_64(&a->a_v, v);
}
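
/*
 * Illustrative usage sketch, not part of the original header: a 64-bit
 * statistics counter that will not wrap in practice (hypothetical name).
 *
 *	static struct atomic64 bytes_out;
 *
 *	atomic64_add(len, &bytes_out);
 *	total = atomic64_read(&bytes_out);
 */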

/*
 * Linux bit operations over arrays of unsigned long.  The bit number
 * may exceed the width of one word; it is split into a word index and
 * a bit index within that word.  The test_and_* variants return the
 * previous state of the bit.
 */
static inline void
set_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);

	atomic_or_ulong(&ptr[bit / units], (1UL << (bit % units)));
}

static inline void
clear_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);

	atomic_and_ulong(&ptr[bit / units], ~(1UL << (bit % units)));
}

static inline void
change_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	do v = *p; while (atomic_cas_ulong(p, v, (v ^ mask)) != v);
}

static inline unsigned long
test_and_set_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	do v = *p; while (atomic_cas_ulong(p, v, (v | mask)) != v);

	return ((v & mask) != 0);
}

static inline unsigned long
test_and_clear_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	do v = *p; while (atomic_cas_ulong(p, v, (v & ~mask)) != v);

	return ((v & mask) != 0);
}

static inline unsigned long
test_and_change_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	do v = *p; while (atomic_cas_ulong(p, v, (v ^ mask)) != v);

	return ((v & mask) != 0);
}
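
/*
 * Illustrative usage sketch, not part of the original header: a small
 * ID allocator on top of the bit operations (hypothetical names).
 *
 *	#define ID_WORDS	4
 *	static volatile unsigned long id_map[ID_WORDS];
 *
 *	static int
 *	id_alloc(void)
 *	{
 *		unsigned int i;
 *
 *		for (i = 0; i < ID_WORDS * sizeof(unsigned long) * CHAR_BIT; i++)
 *			if (!test_and_set_bit(i, id_map))
 *				return i;	// bit was clear; we own it
 *		return -1;	// map full
 *	}
 *
 *	static void
 *	id_free(int id)
 *	{
 *		clear_bit(id, id_map);
 *	}
 */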

#if defined(MULTIPROCESSOR) && !defined(__HAVE_ATOMIC_AS_MEMBAR)
/*
 * XXX These memory barriers are doubtless overkill, but I am having
 * trouble understanding the intent and use of the Linux atomic membar
 * API.  I think that for reference counting purposes, the sequences
 * should be insn/inc/enter and exit/dec/insn, but the use of the
 * before/after memory barriers is not consistent throughout Linux.
 */
#  define	smp_mb__before_atomic_inc()	membar_sync()
#  define	smp_mb__after_atomic_inc()	membar_sync()
#  define	smp_mb__before_atomic_dec()	membar_sync()
#  define	smp_mb__after_atomic_dec()	membar_sync()
#else
#  define	smp_mb__before_atomic_inc()	__insn_barrier()
#  define	smp_mb__after_atomic_inc()	__insn_barrier()
#  define	smp_mb__before_atomic_dec()	__insn_barrier()
#  define	smp_mb__after_atomic_dec()	__insn_barrier()
#endif
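
/*
 * Illustrative usage sketch, not part of the original header: in Linux,
 * these barriers give ordering to the non-value-returning atomic_inc
 * and atomic_dec, e.g. making all prior stores to an object visible
 * before its reference count drops (hypothetical names).
 *
 *	o->o_state = DONE;		// publish final state...
 *	smp_mb__before_atomic_dec();	// ...before the count drops
 *	atomic_dec(&o->o_refcnt);
 */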

#endif  /* _LINUX_ATOMIC_H_ */