/*	$NetBSD: atomic.h,v 1.1.14.2 2006/06/21 14:52:48 yamt Exp $	*/

/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/ia64/include/atomic.h,v 1.10 2005/09/27 17:39:10 jhb Exp $
 */

#ifndef _MACHINE_ATOMIC_H_
#define	_MACHINE_ATOMIC_H_

/*
 * Various simple arithmetic operations on memory, each atomic in the
 * presence of interrupts and safe on SMP systems.
 */

/*
 * Everything is built out of cmpxchg.
 */
#define	IA64_CMPXCHG(sz, sem, p, cmpval, newval, ret)			\
	__asm __volatile (						\
		"mov ar.ccv=%2;;\n\t"					\
		"cmpxchg" #sz "." #sem " %0=%4,%3,ar.ccv\n\t"		\
		: "=r" (ret), "=m" (*p)					\
		: "r" (cmpval), "r" (newval), "m" (*p)			\
		: "memory")

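/*
 * Illustrative note (editorial, not part of the original header): a
 * call such as IA64_CMPXCHG(4, acq, p, old, new, ret) emits roughly
 *
 *	mov ar.ccv=old;;
 *	cmpxchg4.acq ret=[p],new,ar.ccv
 *
 * which stores new into *p only if *p equalled old, and leaves the
 * previous contents of *p in ret either way.
 */
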
/*
 * Some common forms of cmpxchg.
 */
static __inline uint32_t
ia64_cmpxchg_acq_32(volatile uint32_t* p, uint32_t cmpval, uint32_t newval)
{
	uint32_t ret;
	IA64_CMPXCHG(4, acq, p, cmpval, newval, ret);
	return (ret);
}

static __inline uint32_t
ia64_cmpxchg_rel_32(volatile uint32_t* p, uint32_t cmpval, uint32_t newval)
{
	uint32_t ret;
	IA64_CMPXCHG(4, rel, p, cmpval, newval, ret);
	return (ret);
}

static __inline uint64_t
ia64_cmpxchg_acq_64(volatile uint64_t* p, uint64_t cmpval, uint64_t newval)
{
	uint64_t ret;
	IA64_CMPXCHG(8, acq, p, cmpval, newval, ret);
	return (ret);
}

static __inline uint64_t
ia64_cmpxchg_rel_64(volatile uint64_t* p, uint64_t cmpval, uint64_t newval)
{
	uint64_t ret;
	IA64_CMPXCHG(8, rel, p, cmpval, newval, ret);
	return (ret);
}

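/*
 * Usage sketch (illustrative only; "flags" is a hypothetical caller
 * variable, not part of this header): atomically OR a bit into a
 * 64-bit word with a compare-exchange retry loop.
 *
 *	volatile uint64_t flags;
 *	uint64_t old;
 *	do {
 *		old = flags;
 *	} while (ia64_cmpxchg_acq_64(&flags, old, old | 0x1) != old);
 */
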
#define	ATOMIC_STORE_LOAD(type, width, size)				\
	static __inline uint##width##_t					\
	ia64_ld_acq_##width(volatile uint##width##_t* p)		\
	{								\
		uint##width##_t v;					\
		__asm __volatile ("ld" size ".acq %0=%1" : "=r" (v)	\
		    : "m" (*p) : "memory");				\
		return (v);						\
	}								\
									\
	static __inline uint##width##_t					\
	atomic_load_acq_##width(volatile uint##width##_t* p)		\
	{								\
		uint##width##_t v;					\
		__asm __volatile ("ld" size ".acq %0=%1" : "=r" (v)	\
		    : "m" (*p) : "memory");				\
		return (v);						\
	}								\
									\
	static __inline uint##width##_t					\
	atomic_load_acq_##type(volatile uint##width##_t* p)		\
	{								\
		uint##width##_t v;					\
		__asm __volatile ("ld" size ".acq %0=%1" : "=r" (v)	\
		    : "m" (*p) : "memory");				\
		return (v);						\
	}								\
									\
	static __inline void						\
	ia64_st_rel_##width(volatile uint##width##_t* p, uint##width##_t v) \
	{								\
		__asm __volatile ("st" size ".rel %0=%1" : "=m" (*p)	\
		    : "r" (v) : "memory");				\
	}								\
									\
	static __inline void						\
	atomic_store_rel_##width(volatile uint##width##_t* p,		\
	    uint##width##_t v)						\
	{								\
		__asm __volatile ("st" size ".rel %0=%1" : "=m" (*p)	\
		    : "r" (v) : "memory");				\
	}								\
									\
	static __inline void						\
	atomic_store_rel_##type(volatile uint##width##_t* p,		\
	    uint##width##_t v)						\
	{								\
		__asm __volatile ("st" size ".rel %0=%1" : "=m" (*p)	\
		    : "r" (v) : "memory");				\
	}

ATOMIC_STORE_LOAD(char,	 8,  "1")
ATOMIC_STORE_LOAD(short, 16, "2")
ATOMIC_STORE_LOAD(int,	 32, "4")
ATOMIC_STORE_LOAD(long,	 64, "8")

#undef ATOMIC_STORE_LOAD

#define	atomic_load_acq_ptr	atomic_load_acq_64
#define	atomic_store_rel_ptr	atomic_store_rel_64

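/*
 * Usage sketch (illustrative only; "data" and "ready" are hypothetical
 * caller variables): a release store publishes data, an acquire load
 * consumes it.  The st4.rel keeps the store to data visible before the
 * store to ready; the ld4.acq keeps the load of data from being hoisted
 * above the load of ready.
 *
 *	producer:
 *		data = 42;
 *		atomic_store_rel_32(&ready, 1);
 *
 *	consumer:
 *		while (atomic_load_acq_32(&ready) == 0)
 *			continue;
 *		consume(data);		now guaranteed to observe 42
 */
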
#define	IA64_ATOMIC(sz, type, name, width, op)				\
	static __inline type						\
	atomic_##name##_acq_##width(volatile type *p, type v)		\
	{								\
		type old, ret;						\
		do {							\
			old = *p;					\
			IA64_CMPXCHG(sz, acq, p, old, old op v, ret);	\
		} while (ret != old);					\
		return (old);						\
	}								\
									\
	static __inline type						\
	atomic_##name##_rel_##width(volatile type *p, type v)		\
	{								\
		type old, ret;						\
		do {							\
			old = *p;					\
			IA64_CMPXCHG(sz, rel, p, old, old op v, ret);	\
		} while (ret != old);					\
		return (old);						\
	}

IA64_ATOMIC(1, uint8_t,	 set, 8,  |)
IA64_ATOMIC(2, uint16_t, set, 16, |)
IA64_ATOMIC(4, uint32_t, set, 32, |)
IA64_ATOMIC(8, uint64_t, set, 64, |)

IA64_ATOMIC(1, uint8_t,  clear,	8,  &~)
IA64_ATOMIC(2, uint16_t, clear,	16, &~)
IA64_ATOMIC(4, uint32_t, clear,	32, &~)
IA64_ATOMIC(8, uint64_t, clear,	64, &~)

IA64_ATOMIC(1, uint8_t,  add, 8,  +)
IA64_ATOMIC(2, uint16_t, add, 16, +)
IA64_ATOMIC(4, uint32_t, add, 32, +)
IA64_ATOMIC(8, uint64_t, add, 64, +)

IA64_ATOMIC(1, uint8_t,  subtract, 8,  -)
IA64_ATOMIC(2, uint16_t, subtract, 16, -)
IA64_ATOMIC(4, uint32_t, subtract, 32, -)
IA64_ATOMIC(8, uint64_t, subtract, 64, -)

#undef IA64_ATOMIC

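/*
 * Usage sketch (illustrative only; "refs" is a hypothetical counter):
 * the generated read-modify-write routines return the value the word
 * held before the update.
 *
 *	volatile uint32_t refs = 1;
 *	uint32_t was = atomic_add_acq_32(&refs, 1);	was == 1, refs == 2
 *	if (atomic_subtract_rel_32(&refs, 1) == 1)
 *		...the last reference just went away...
 */
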
#define	atomic_set_8			atomic_set_acq_8
#define	atomic_clear_8			atomic_clear_acq_8
#define	atomic_add_8			atomic_add_acq_8
#define	atomic_subtract_8		atomic_subtract_acq_8

#define	atomic_set_16			atomic_set_acq_16
#define	atomic_clear_16			atomic_clear_acq_16
#define	atomic_add_16			atomic_add_acq_16
#define	atomic_subtract_16		atomic_subtract_acq_16

#define	atomic_set_32			atomic_set_acq_32
#define	atomic_clear_32			atomic_clear_acq_32
#define	atomic_add_32			atomic_add_acq_32
#define	atomic_subtract_32		atomic_subtract_acq_32

#define	atomic_set_64			atomic_set_acq_64
#define	atomic_clear_64			atomic_clear_acq_64
#define	atomic_add_64			atomic_add_acq_64
#define	atomic_subtract_64		atomic_subtract_acq_64

#define	atomic_set_char			atomic_set_8
#define	atomic_clear_char		atomic_clear_8
#define	atomic_add_char			atomic_add_8
#define	atomic_subtract_char		atomic_subtract_8
#define	atomic_set_acq_char		atomic_set_acq_8
#define	atomic_clear_acq_char		atomic_clear_acq_8
#define	atomic_add_acq_char		atomic_add_acq_8
#define	atomic_subtract_acq_char	atomic_subtract_acq_8
#define	atomic_set_rel_char		atomic_set_rel_8
#define	atomic_clear_rel_char		atomic_clear_rel_8
#define	atomic_add_rel_char		atomic_add_rel_8
#define	atomic_subtract_rel_char	atomic_subtract_rel_8

#define	atomic_set_short		atomic_set_16
#define	atomic_clear_short		atomic_clear_16
#define	atomic_add_short		atomic_add_16
#define	atomic_subtract_short		atomic_subtract_16
#define	atomic_set_acq_short		atomic_set_acq_16
#define	atomic_clear_acq_short		atomic_clear_acq_16
#define	atomic_add_acq_short		atomic_add_acq_16
#define	atomic_subtract_acq_short	atomic_subtract_acq_16
#define	atomic_set_rel_short		atomic_set_rel_16
#define	atomic_clear_rel_short		atomic_clear_rel_16
#define	atomic_add_rel_short		atomic_add_rel_16
#define	atomic_subtract_rel_short	atomic_subtract_rel_16

#define	atomic_set_int			atomic_set_32
#define	atomic_clear_int		atomic_clear_32
#define	atomic_add_int			atomic_add_32
#define	atomic_subtract_int		atomic_subtract_32
#define	atomic_set_acq_int		atomic_set_acq_32
#define	atomic_clear_acq_int		atomic_clear_acq_32
#define	atomic_add_acq_int		atomic_add_acq_32
#define	atomic_subtract_acq_int		atomic_subtract_acq_32
#define	atomic_set_rel_int		atomic_set_rel_32
#define	atomic_clear_rel_int		atomic_clear_rel_32
#define	atomic_add_rel_int		atomic_add_rel_32
#define	atomic_subtract_rel_int		atomic_subtract_rel_32

#define	atomic_set_long			atomic_set_64
#define	atomic_clear_long		atomic_clear_64
#define	atomic_add_long			atomic_add_64
#define	atomic_subtract_long		atomic_subtract_64
#define	atomic_set_acq_long		atomic_set_acq_64
#define	atomic_clear_acq_long		atomic_clear_acq_64
#define	atomic_add_acq_long		atomic_add_acq_64
#define	atomic_subtract_acq_long	atomic_subtract_acq_64
#define	atomic_set_rel_long		atomic_set_rel_64
#define	atomic_clear_rel_long		atomic_clear_rel_64
#define	atomic_add_rel_long		atomic_add_rel_64
#define	atomic_subtract_rel_long	atomic_subtract_rel_64

#define	atomic_set_ptr			atomic_set_64
#define	atomic_clear_ptr		atomic_clear_64
#define	atomic_add_ptr			atomic_add_64
#define	atomic_subtract_ptr		atomic_subtract_64
#define	atomic_set_acq_ptr		atomic_set_acq_64
#define	atomic_clear_acq_ptr		atomic_clear_acq_64
#define	atomic_add_acq_ptr		atomic_add_acq_64
#define	atomic_subtract_acq_ptr		atomic_subtract_acq_64
#define	atomic_set_rel_ptr		atomic_set_rel_64
#define	atomic_clear_rel_ptr		atomic_clear_rel_64
#define	atomic_add_rel_ptr		atomic_add_rel_64
#define	atomic_subtract_rel_ptr		atomic_subtract_rel_64

#undef IA64_CMPXCHG

/*
 * Atomically compare the value stored at *p with cmpval and if the
 * two values are equal, update the value of *p with newval. Returns
 * zero if the compare failed, nonzero otherwise.
 */
static __inline int
atomic_cmpset_acq_32(volatile uint32_t* p, uint32_t cmpval, uint32_t newval)
{
	return (ia64_cmpxchg_acq_32(p, cmpval, newval) == cmpval);
}

static __inline int
atomic_cmpset_rel_32(volatile uint32_t* p, uint32_t cmpval, uint32_t newval)
{
	return (ia64_cmpxchg_rel_32(p, cmpval, newval) == cmpval);
}

/*
 * The same compare-and-set operation, on 64-bit values.
 */
static __inline int
atomic_cmpset_acq_64(volatile uint64_t* p, uint64_t cmpval, uint64_t newval)
{
	return (ia64_cmpxchg_acq_64(p, cmpval, newval) == cmpval);
}

static __inline int
atomic_cmpset_rel_64(volatile uint64_t* p, uint64_t cmpval, uint64_t newval)
{
	return (ia64_cmpxchg_rel_64(p, cmpval, newval) == cmpval);
}

#define	atomic_cmpset_32		atomic_cmpset_acq_32
#define	atomic_cmpset_64		atomic_cmpset_acq_64
#define	atomic_cmpset_int		atomic_cmpset_32
#define	atomic_cmpset_long		atomic_cmpset_64
#define	atomic_cmpset_ptr		atomic_cmpset_64
#define	atomic_cmpset_acq_int		atomic_cmpset_acq_32
#define	atomic_cmpset_rel_int		atomic_cmpset_rel_32
#define	atomic_cmpset_acq_long		atomic_cmpset_acq_64
#define	atomic_cmpset_rel_long		atomic_cmpset_rel_64
#define	atomic_cmpset_acq_ptr		atomic_cmpset_acq_64
#define	atomic_cmpset_rel_ptr		atomic_cmpset_rel_64

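/*
 * Usage sketch (illustrative only; "lk" is a hypothetical caller-owned
 * word): a minimal test-and-set spin lock built from cmpset.
 *
 *	volatile uint32_t lk = 0;
 *
 *	while (!atomic_cmpset_acq_32(&lk, 0, 1))	lock
 *		continue;
 *	...critical section...
 *	atomic_store_rel_32(&lk, 0);			unlock
 */
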
static __inline uint32_t
atomic_readandclear_32(volatile uint32_t* p)
{
	uint32_t val;
	do {
		val = *p;
	} while (!atomic_cmpset_32(p, val, 0));
	return (val);
}

static __inline uint64_t
atomic_readandclear_64(volatile uint64_t* p)
{
	uint64_t val;
	do {
		val = *p;
	} while (!atomic_cmpset_64(p, val, 0));
	return (val);
}

#define	atomic_readandclear_int		atomic_readandclear_32
#define	atomic_readandclear_long	atomic_readandclear_64

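/*
 * Usage sketch (illustrative only; "pending" is a hypothetical shared
 * bitmask): drain all pending-work bits in one atomic step.  Bits set
 * by other CPUs during the drain either force a retry or land in the
 * freshly zeroed word, so none are lost.
 *
 *	volatile uint32_t pending;
 *	uint32_t work = atomic_readandclear_32(&pending);
 *	...process every bit set in work...
 */
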
/*
 * Atomically add the value of v to the integer pointed to by p and
 * return the previous value of *p.
 *
 * XXX: Should we use the fetchadd instruction here?
 */
static __inline uint32_t
atomic_fetchadd_32(volatile uint32_t *p, uint32_t v)
{
	uint32_t value;

	do {
		value = *p;
	} while (!atomic_cmpset_32(p, value, value + v));
	return (value);
}

#define	atomic_fetchadd_int		atomic_fetchadd_32

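/*
 * Usage sketch (illustrative only; "next" is a hypothetical shared
 * counter): fetchadd hands out unique, monotonically increasing
 * tickets, because each caller receives the pre-increment value.
 *
 *	volatile uint32_t next = 0;
 *	uint32_t mine = atomic_fetchadd_32(&next, 1);
 */
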
#endif /* ! _MACHINE_ATOMIC_H_ */