/*	$NetBSD: atomic.h,v 1.22 2020/02/14 14:34:59 maya Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_ATOMIC_H_
#define _LINUX_ATOMIC_H_

#include <sys/atomic.h>

#include <machine/limits.h>

#include <asm/barrier.h>

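/*
 * smp_mb__before_atomic, smp_mb__after_atomic
 *
 *	On multiprocessor kernels whose native atomic operations do
 *	not already imply memory barriers, these expand to
 *	membar_exit()/membar_enter() so that the pair brackets the
 *	atomic operation; otherwise a compiler-level barrier suffices.
 */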
#if defined(MULTIPROCESSOR) && !defined(__HAVE_ATOMIC_AS_MEMBAR)
#  define	smp_mb__before_atomic()		membar_exit()
#  define	smp_mb__after_atomic()		membar_enter()
#else
#  define	smp_mb__before_atomic()		__insn_barrier()
#  define	smp_mb__after_atomic()		__insn_barrier()
#endif

/*
 * atomic (u)int operations
 *
 *	Atomics that return a value, other than atomic_read, imply a
 *	full memory barrier.  Those that do not return a value imply
 *	no memory barrier.
 */
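
/*
 * Example (illustrative sketch, not part of the emulated Linux API):
 * a minimal reference count built on these operations.  The names
 * `obj' and `obj_destroy' are hypothetical.
 *
 *	struct obj {
 *		atomic_t	o_refcnt;
 *	};
 *
 *	void
 *	obj_acquire(struct obj *o)
 *	{
 *		atomic_inc(&o->o_refcnt);	   (no memory barrier)
 *	}
 *
 *	void
 *	obj_release(struct obj *o)
 *	{
 *		if (atomic_dec_and_test(&o->o_refcnt))	   (full barrier)
 *			obj_destroy(o);
 *	}
 */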

struct atomic {
	union {
		volatile int au_int;
		volatile unsigned int au_uint;
	} a_u;
};

#define	ATOMIC_INIT(i)	{ .a_u = { .au_int = (i) } }

typedef struct atomic atomic_t;

static inline int
atomic_read(atomic_t *atomic)
{
	/* no membar */
	return atomic->a_u.au_int;
}

static inline void
atomic_set(atomic_t *atomic, int value)
{
	/* no membar */
	atomic->a_u.au_int = value;
}

static inline void
atomic_add(int addend, atomic_t *atomic)
{
	/* no membar */
	atomic_add_int(&atomic->a_u.au_uint, addend);
}

static inline void
atomic_sub(int subtrahend, atomic_t *atomic)
{
	/* no membar */
	atomic_add_int(&atomic->a_u.au_uint, -subtrahend);
}

static inline int
atomic_add_return(int addend, atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_add_int_nv(&atomic->a_u.au_uint, addend);
	smp_mb__after_atomic();

	return v;
}

static inline void
atomic_inc(atomic_t *atomic)
{
	/* no membar */
	atomic_inc_uint(&atomic->a_u.au_uint);
}

static inline void
atomic_dec(atomic_t *atomic)
{
	/* no membar */
	atomic_dec_uint(&atomic->a_u.au_uint);
}

static inline int
atomic_inc_return(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_inc_uint_nv(&atomic->a_u.au_uint);
	smp_mb__after_atomic();

	return v;
}

static inline int
atomic_dec_return(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_dec_uint_nv(&atomic->a_u.au_uint);
	smp_mb__after_atomic();

	return v;
}

static inline int
atomic_dec_and_test(atomic_t *atomic)
{
	/* membar implied by atomic_dec_return */
	return atomic_dec_return(atomic) == 0;
}

static inline void
atomic_or(int value, atomic_t *atomic)
{
	/* no membar */
	atomic_or_uint(&atomic->a_u.au_uint, value);
}

static inline void
atomic_set_mask(unsigned long mask, atomic_t *atomic)
{
	/* no membar */
	atomic_or_uint(&atomic->a_u.au_uint, mask);
}

static inline void
atomic_clear_mask(unsigned long mask, atomic_t *atomic)
{
	/* no membar */
	atomic_and_uint(&atomic->a_u.au_uint, ~mask);
}

static inline int
atomic_add_unless(atomic_t *atomic, int addend, int zero)
{
	int value;

	smp_mb__before_atomic();
	do {
		value = atomic->a_u.au_int;
		if (value == zero)
			break;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, value, (value + addend))
	    != (unsigned)value);
	smp_mb__after_atomic();

	return value != zero;
}

static inline int
atomic_inc_not_zero(atomic_t *atomic)
{
	/* membar implied by atomic_add_unless */
	return atomic_add_unless(atomic, 1, 0);
}

static inline int
atomic_xchg(atomic_t *atomic, int new)
{
	int old;

	smp_mb__before_atomic();
	old = (int)atomic_swap_uint(&atomic->a_u.au_uint, (unsigned)new);
	smp_mb__after_atomic();

	return old;
}

static inline int
atomic_cmpxchg(atomic_t *atomic, int expect, int new)
{
	int old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = (int)atomic_cas_uint(&atomic->a_u.au_uint, (unsigned)expect,
	    (unsigned)new);
	smp_mb__after_atomic();

	return old;
}

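/*
 * Example (illustrative sketch): the canonical compare-and-swap
 * retry loop, here a saturating increment that never wraps past
 * INT_MAX.  The name `counter_inc_sat' is hypothetical.
 *
 *	void
 *	counter_inc_sat(atomic_t *a)
 *	{
 *		int old, new;
 *
 *		do {
 *			old = atomic_read(a);
 *			if (old == INT_MAX)
 *				return;
 *			new = old + 1;
 *		} while (atomic_cmpxchg(a, old, new) != old);
 *	}
 */
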
struct atomic64 {
	volatile uint64_t	a_v;
};

typedef struct atomic64 atomic64_t;

#define	ATOMIC64_INIT(v)	{ .a_v = (v) }

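/*
 * linux_atomic64_init, linux_atomic64_fini
 *
 *	Module init/fini hooks; presumably these back the out-of-line
 *	fallback implementation declared below for platforms without
 *	__HAVE_ATOMIC64_OPS.
 */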
int		linux_atomic64_init(void);
void		linux_atomic64_fini(void);

#ifdef __HAVE_ATOMIC64_OPS

static inline uint64_t
atomic64_read(const struct atomic64 *a)
{
	/* no membar */
	return a->a_v;
}

static inline void
atomic64_set(struct atomic64 *a, uint64_t v)
{
	/* no membar */
	a->a_v = v;
}

static inline void
atomic64_add(int64_t d, struct atomic64 *a)
{
	/* no membar */
	atomic_add_64(&a->a_v, d);
}

static inline void
atomic64_sub(int64_t d, struct atomic64 *a)
{
	/* no membar */
	atomic_add_64(&a->a_v, -d);
}

static inline int64_t
atomic64_add_return(int64_t d, struct atomic64 *a)
{
	int64_t v;

	smp_mb__before_atomic();
	v = (int64_t)atomic_add_64_nv(&a->a_v, d);
	smp_mb__after_atomic();

	return v;
}

static inline uint64_t
atomic64_xchg(struct atomic64 *a, uint64_t new)
{
	uint64_t old;

	smp_mb__before_atomic();
	old = atomic_swap_64(&a->a_v, new);
	smp_mb__after_atomic();

	return old;
}

static inline uint64_t
atomic64_cmpxchg(struct atomic64 *atomic, uint64_t expect, uint64_t new)
{
	uint64_t old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = atomic_cas_64(&atomic->a_v, expect, new);
	smp_mb__after_atomic();

	return old;
}

#else  /* !defined(__HAVE_ATOMIC64_OPS) */

#define	atomic64_add		linux_atomic64_add
#define	atomic64_add_return	linux_atomic64_add_return
#define	atomic64_cmpxchg	linux_atomic64_cmpxchg
#define	atomic64_read		linux_atomic64_read
#define	atomic64_set		linux_atomic64_set
#define	atomic64_sub		linux_atomic64_sub
#define	atomic64_xchg		linux_atomic64_xchg

uint64_t	atomic64_read(const struct atomic64 *);
void		atomic64_set(struct atomic64 *, uint64_t);
void		atomic64_add(int64_t, struct atomic64 *);
void		atomic64_sub(int64_t, struct atomic64 *);
int64_t		atomic64_add_return(int64_t, struct atomic64 *);
uint64_t	atomic64_xchg(struct atomic64 *, uint64_t);
uint64_t	atomic64_cmpxchg(struct atomic64 *, uint64_t, uint64_t);

#endif

static inline int64_t
atomic64_inc_return(struct atomic64 *a)
{
	return atomic64_add_return(1, a);
}

struct atomic_long {
	volatile unsigned long	al_v;
};

typedef struct atomic_long atomic_long_t;

static inline long
atomic_long_read(struct atomic_long *a)
{
	/* no membar */
	return (long)a->al_v;
}

static inline void
atomic_long_set(struct atomic_long *a, long v)
{
	/* no membar */
	a->al_v = v;
}

static inline long
atomic_long_add_unless(struct atomic_long *a, long addend, long zero)
{
	long value;

	smp_mb__before_atomic();
	do {
		value = (long)a->al_v;
		if (value == zero)
			break;
	} while (atomic_cas_ulong(&a->al_v, (unsigned long)value,
		(unsigned long)(value + addend)) != (unsigned long)value);
	smp_mb__after_atomic();

	return value != zero;
}

static inline long
atomic_long_inc_not_zero(struct atomic_long *a)
{
	/* membar implied by atomic_long_add_unless */
	return atomic_long_add_unless(a, 1, 0);
}

static inline long
atomic_long_cmpxchg(struct atomic_long *a, long expect, long new)
{
	long old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = (long)atomic_cas_ulong(&a->al_v, (unsigned long)expect,
	    (unsigned long)new);
	smp_mb__after_atomic();

	return old;
}

static inline void
set_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);

	/* no memory barrier */
	atomic_or_ulong(&ptr[bit / units], (1UL << (bit % units)));
}

static inline void
clear_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);

	/* no memory barrier */
	atomic_and_ulong(&ptr[bit / units], ~(1UL << (bit % units)));
}

static inline void
change_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	/* no memory barrier */
	do v = *p; while (atomic_cas_ulong(p, v, (v ^ mask)) != v);
}

static inline int
test_and_set_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	smp_mb__before_atomic();
	do v = *p; while (atomic_cas_ulong(p, v, (v | mask)) != v);
	smp_mb__after_atomic();

	return ((v & mask) != 0);
}

static inline int
test_and_clear_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	smp_mb__before_atomic();
	do v = *p; while (atomic_cas_ulong(p, v, (v & ~mask)) != v);
	smp_mb__after_atomic();

	return ((v & mask) != 0);
}

static inline int
test_and_change_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	smp_mb__before_atomic();
	do v = *p; while (atomic_cas_ulong(p, v, (v ^ mask)) != v);
	smp_mb__after_atomic();

	return ((v & mask) != 0);
}
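
/*
 * Example (illustrative sketch): test_and_set_bit doubles as a
 * one-shot claim on a slot in a bitmap.  The bitmap size and the
 * names `slots' and `slot_claim' are hypothetical.
 *
 *	static volatile unsigned long slots[2];	   (128 slots on LP64)
 *
 *	int
 *	slot_claim(unsigned int i)
 *	{
 *		(returns 0 if we claimed slot i, 1 if already taken)
 *		return test_and_set_bit(i, slots);
 *	}
 */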

#endif  /* _LINUX_ATOMIC_H_ */