/*	$NetBSD: atomic.h,v 1.24 2021/12/19 01:33:51 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_ATOMIC_H_
#define _LINUX_ATOMIC_H_

#include <sys/atomic.h>

#include <machine/limits.h>

#include <asm/barrier.h>

#if defined(MULTIPROCESSOR) && !defined(__HAVE_ATOMIC_AS_MEMBAR)
#  define	smp_mb__before_atomic()		membar_exit()
#  define	smp_mb__after_atomic()		membar_enter()
#else
#  define	smp_mb__before_atomic()		__insn_barrier()
#  define	smp_mb__after_atomic()		__insn_barrier()
#endif

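/*
 * Illustrative use (a sketch of the usual Linux idiom, not code from
 * this file): the smp_mb__* macros supply ordering around atomics that
 * do not imply a barrier on their own, such as atomic_inc below.
 *
 *	smp_mb__before_atomic();
 *	atomic_inc(&v);		(no implied barrier by itself)
 *	smp_mb__after_atomic();
 */
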
#define	xchg(P, V)							      \
	(sizeof(*(P)) == 4 ? atomic_swap_32((volatile uint32_t *)(P), (V))   \
	    : sizeof(*(P)) == 8 ? atomic_swap_64((volatile uint64_t *)(P), (V)) \
	    : (__builtin_abort(), 0))

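/*
 * Example (illustrative sketch): xchg() dispatches on the size of the
 * pointed-to object, so it works on 32- and 64-bit variables alike.
 *
 *	volatile uint32_t flag;
 *	uint32_t old = xchg(&flag, 1);	(atomically store 1, return old)
 */
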
/*
 * atomic (u)int operations
 *
 *	Atomics that return a value, other than atomic_read, imply a
 *	full memory barrier.  Those that do not return a value imply
 *	no memory barrier.
 */

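/*
 * For example (illustrative): atomic_inc(&v) gives no ordering, while
 * atomic_inc_return(&v) is a full barrier, so code that publishes data
 * before bumping a counter other threads read should use the _return
 * form, or bracket the plain form with smp_mb__before_atomic() and
 * smp_mb__after_atomic() as sketched above.
 */
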
struct atomic {
	union {
		volatile int au_int;
		volatile unsigned int au_uint;
	} a_u;
};

#define	ATOMIC_INIT(i)	{ .a_u = { .au_int = (i) } }

typedef struct atomic atomic_t;

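/*
 * Example (illustrative): static initialization with ATOMIC_INIT.
 *
 *	static atomic_t nusers = ATOMIC_INIT(0);
 */
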
static inline int
atomic_read(atomic_t *atomic)
{
	/* no membar */
	return atomic->a_u.au_int;
}

static inline void
atomic_set(atomic_t *atomic, int value)
{
	/* no membar */
	atomic->a_u.au_int = value;
}

static inline void
atomic_add(int addend, atomic_t *atomic)
{
	/* no membar */
	atomic_add_int(&atomic->a_u.au_uint, addend);
}

static inline void
atomic_sub(int subtrahend, atomic_t *atomic)
{
	/* no membar */
	atomic_add_int(&atomic->a_u.au_uint, -subtrahend);
}

static inline int
atomic_add_return(int addend, atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_add_int_nv(&atomic->a_u.au_uint, addend);
	smp_mb__after_atomic();

	return v;
}

static inline void
atomic_inc(atomic_t *atomic)
{
	/* no membar */
	atomic_inc_uint(&atomic->a_u.au_uint);
}

static inline void
atomic_dec(atomic_t *atomic)
{
	/* no membar */
	atomic_dec_uint(&atomic->a_u.au_uint);
}

static inline int
atomic_inc_return(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_inc_uint_nv(&atomic->a_u.au_uint);
	smp_mb__after_atomic();

	return v;
}

static inline int
atomic_dec_return(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_dec_uint_nv(&atomic->a_u.au_uint);
	smp_mb__after_atomic();

	return v;
}

static inline int
atomic_dec_and_test(atomic_t *atomic)
{
	/* membar implied by atomic_dec_return */
	return atomic_dec_return(atomic) == 0;
}

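/*
 * Example (illustrative): the classic refcount-release idiom.  The full
 * barrier implied by atomic_dec_and_test orders prior accesses to the
 * object before it is destroyed; obj_destroy() is hypothetical.
 *
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		obj_destroy(obj);
 */
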
static inline void
atomic_or(int value, atomic_t *atomic)
{
	/* no membar */
	atomic_or_uint(&atomic->a_u.au_uint, value);
}

static inline void
atomic_andnot(int value, atomic_t *atomic)
{
	/* no membar */
	atomic_and_uint(&atomic->a_u.au_uint, ~value);
}

static inline void
atomic_set_mask(unsigned long mask, atomic_t *atomic)
{
	/* no membar */
	atomic_or_uint(&atomic->a_u.au_uint, mask);
}

static inline void
atomic_clear_mask(unsigned long mask, atomic_t *atomic)
{
	/* no membar */
	atomic_and_uint(&atomic->a_u.au_uint, ~mask);
}

static inline int
atomic_add_unless(atomic_t *atomic, int addend, int zero)
{
	int value;

	smp_mb__before_atomic();
	do {
		value = atomic->a_u.au_int;
		if (value == zero)
			break;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, value, (value + addend))
	    != (unsigned)value);
	smp_mb__after_atomic();

	return value != zero;
}

static inline int
atomic_inc_not_zero(atomic_t *atomic)
{
	/* membar implied by atomic_add_unless */
	return atomic_add_unless(atomic, 1, 0);
}

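/*
 * Example (illustrative): atomic_inc_not_zero is the usual way to take
 * a reference only while the object is still live; a zero count means
 * teardown has already begun.
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;	(hypothetical failed-lookup path)
 */
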
static inline int
atomic_xchg(atomic_t *atomic, int new)
{
	int old;

	smp_mb__before_atomic();
	old = (int)atomic_swap_uint(&atomic->a_u.au_uint, (unsigned)new);
	smp_mb__after_atomic();

	return old;
}

static inline int
atomic_cmpxchg(atomic_t *atomic, int expect, int new)
{
	int old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = (int)atomic_cas_uint(&atomic->a_u.au_uint, (unsigned)expect,
	    (unsigned)new);
	smp_mb__after_atomic();

	return old;
}

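/*
 * Example (illustrative sketch): a saturating increment built from the
 * usual cmpxchg retry loop.  atomic_cmpxchg returns the value it found,
 * so the loop retries until the CAS succeeds or the counter saturates.
 *
 *	int old = atomic_read(&a);
 *	while (old != INT_MAX) {
 *		int seen = atomic_cmpxchg(&a, old, old + 1);
 *		if (seen == old)
 *			break;		(CAS succeeded)
 *		old = seen;		(lost a race; retry)
 *	}
 */
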
struct atomic64 {
	volatile uint64_t	a_v;
};

typedef struct atomic64 atomic64_t;

#define	ATOMIC64_INIT(v)	{ .a_v = (v) }

int		linux_atomic64_init(void);
void		linux_atomic64_fini(void);

#ifdef __HAVE_ATOMIC64_OPS

static inline uint64_t
atomic64_read(const struct atomic64 *a)
{
	/* no membar */
	return a->a_v;
}

static inline void
atomic64_set(struct atomic64 *a, uint64_t v)
{
	/* no membar */
	a->a_v = v;
}

static inline void
atomic64_add(int64_t d, struct atomic64 *a)
{
	/* no membar */
	atomic_add_64(&a->a_v, d);
}

static inline void
atomic64_sub(int64_t d, struct atomic64 *a)
{
	/* no membar */
	atomic_add_64(&a->a_v, -d);
}

static inline int64_t
atomic64_add_return(int64_t d, struct atomic64 *a)
{
	int64_t v;

	smp_mb__before_atomic();
	v = (int64_t)atomic_add_64_nv(&a->a_v, d);
	smp_mb__after_atomic();

	return v;
}

static inline uint64_t
atomic64_xchg(struct atomic64 *a, uint64_t new)
{
	uint64_t old;

	smp_mb__before_atomic();
	old = atomic_swap_64(&a->a_v, new);
	smp_mb__after_atomic();

	return old;
}

static inline uint64_t
atomic64_cmpxchg(struct atomic64 *atomic, uint64_t expect, uint64_t new)
{
	uint64_t old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = atomic_cas_64(&atomic->a_v, expect, new);
	smp_mb__after_atomic();

	return old;
}

#else  /* !defined(__HAVE_ATOMIC64_OPS) */

#define	atomic64_add		linux_atomic64_add
#define	atomic64_add_return	linux_atomic64_add_return
#define	atomic64_cmpxchg	linux_atomic64_cmpxchg
#define	atomic64_read		linux_atomic64_read
#define	atomic64_set		linux_atomic64_set
#define	atomic64_sub		linux_atomic64_sub
#define	atomic64_xchg		linux_atomic64_xchg

uint64_t	atomic64_read(const struct atomic64 *);
void		atomic64_set(struct atomic64 *, uint64_t);
void		atomic64_add(int64_t, struct atomic64 *);
void		atomic64_sub(int64_t, struct atomic64 *);
int64_t		atomic64_add_return(int64_t, struct atomic64 *);
uint64_t	atomic64_xchg(struct atomic64 *, uint64_t);
uint64_t	atomic64_cmpxchg(struct atomic64 *, uint64_t, uint64_t);

#endif

static inline int64_t
atomic64_inc_return(struct atomic64 *a)
{
	return atomic64_add_return(1, a);
}

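/*
 * Note (an assumption inferred from the declarations above, not stated
 * in this file): on platforms without __HAVE_ATOMIC64_OPS, the
 * atomic64_* operations are out-of-line functions whose backing state
 * is set up by linux_atomic64_init() and released by
 * linux_atomic64_fini(), so that pair must bracket any use of 64-bit
 * atomics there.
 */
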
struct atomic_long {
	volatile unsigned long	al_v;
};

typedef struct atomic_long atomic_long_t;

static inline long
atomic_long_read(struct atomic_long *a)
{
	/* no membar */
	return (long)a->al_v;
}

static inline void
atomic_long_set(struct atomic_long *a, long v)
{
	/* no membar */
	a->al_v = v;
}

static inline long
atomic_long_add_unless(struct atomic_long *a, long addend, long zero)
{
	long value;

	smp_mb__before_atomic();
	do {
		value = (long)a->al_v;
		if (value == zero)
			break;
	} while (atomic_cas_ulong(&a->al_v, (unsigned long)value,
		(unsigned long)(value + addend)) != (unsigned long)value);
	smp_mb__after_atomic();

	return value != zero;
}

static inline long
atomic_long_inc_not_zero(struct atomic_long *a)
{
	/* membar implied by atomic_long_add_unless */
	return atomic_long_add_unless(a, 1, 0);
}

static inline long
atomic_long_cmpxchg(struct atomic_long *a, long expect, long new)
{
	long old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = (long)atomic_cas_ulong(&a->al_v, (unsigned long)expect,
	    (unsigned long)new);
	smp_mb__after_atomic();

	return old;
}

static inline void
set_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);

	/* no memory barrier */
	atomic_or_ulong(&ptr[bit / units], (1UL << (bit % units)));
}

static inline void
clear_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);

	/* no memory barrier */
	atomic_and_ulong(&ptr[bit / units], ~(1UL << (bit % units)));
}

static inline void
change_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	/* no memory barrier */
	do v = *p; while (atomic_cas_ulong(p, v, (v ^ mask)) != v);
}

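/*
 * Example (illustrative): the bit operations index into an array of
 * longs, so a multi-word bitmap works transparently; with units == 64
 * on LP64, bit 70 lands in word 1, bit 6.
 *
 *	static volatile unsigned long bitmap[2];	(128 bits on LP64)
 *	set_bit(70, bitmap);
 */
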
static inline int
test_and_set_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	smp_mb__before_atomic();
	do v = *p; while (atomic_cas_ulong(p, v, (v | mask)) != v);
	smp_mb__after_atomic();

	return ((v & mask) != 0);
}

static inline int
test_and_clear_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	smp_mb__before_atomic();
	do v = *p; while (atomic_cas_ulong(p, v, (v & ~mask)) != v);
	smp_mb__after_atomic();

	return ((v & mask) != 0);
}

static inline int
test_and_change_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	smp_mb__before_atomic();
	do v = *p; while (atomic_cas_ulong(p, v, (v ^ mask)) != v);
	smp_mb__after_atomic();

	return ((v & mask) != 0);
}
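
/*
 * Example (illustrative): test_and_set_bit returns the bit's previous
 * value with full-barrier semantics, so it can serve as a simple
 * try-lock flag; LOCKED_BIT and sc_flags are hypothetical.
 *
 *	if (!test_and_set_bit(LOCKED_BIT, &sc->sc_flags)) {
 *		... we own the flag; release it later with
 *		clear_bit(LOCKED_BIT, &sc->sc_flags) ...
 *	}
 */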

#endif  /* _LINUX_ATOMIC_H_ */