/*	$NetBSD: atomic.h,v 1.11 2018/08/27 13:40:41 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_ATOMIC_H_
#define _LINUX_ATOMIC_H_

#include <sys/atomic.h>

#include <machine/limits.h>

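/*
 * struct atomic (atomic_t) wraps a single int-sized counter in a
 * union: au_int gives the signed view Linux callers expect, while
 * au_uint lets the operations below use NetBSD's unsigned atomic
 * primitives, whose wraparound is well-defined.
 */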
struct atomic {
	union {
		volatile int au_int;
		volatile unsigned int au_uint;
	} a_u;
};

#define ATOMIC_INIT(i) { .a_u = { .au_int = (i) } }

typedef struct atomic atomic_t;

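/*
 * atomic_read and atomic_set are plain, unordered accesses, matching
 * the relaxed semantics of their Linux counterparts; callers that
 * need ordering must supply barriers themselves.
 */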
static inline int
atomic_read(atomic_t *atomic)
{
	return atomic->a_u.au_int;
}

static inline void
atomic_set(atomic_t *atomic, int value)
{
	atomic->a_u.au_int = value;
}

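/*
 * Arithmetic is performed on the unsigned view so that wraparound is
 * well-defined; the *_return variants convert the new value back to
 * the signed result Linux callers expect.
 */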
static inline void
atomic_add(int addend, atomic_t *atomic)
{
	atomic_add_int(&atomic->a_u.au_uint, addend);
}

static inline void
atomic_sub(int subtrahend, atomic_t *atomic)
{
	atomic_add_int(&atomic->a_u.au_uint, -subtrahend);
}

static inline int
atomic_add_return(int addend, atomic_t *atomic)
{
	return (int)atomic_add_int_nv(&atomic->a_u.au_uint, addend);
}

static inline void
atomic_inc(atomic_t *atomic)
{
	atomic_inc_uint(&atomic->a_u.au_uint);
}

static inline void
atomic_dec(atomic_t *atomic)
{
	atomic_dec_uint(&atomic->a_u.au_uint);
}

static inline int
atomic_inc_return(atomic_t *atomic)
{
	return (int)atomic_inc_uint_nv(&atomic->a_u.au_uint);
}

static inline int
atomic_dec_return(atomic_t *atomic)
{
	return (int)atomic_dec_uint_nv(&atomic->a_u.au_uint);
}

static inline int
atomic_dec_and_test(atomic_t *atomic)
{
	return (0 == (int)atomic_dec_uint_nv(&atomic->a_u.au_uint));
}

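/*
 * atomic_set_mask and atomic_clear_mask are the older Linux names for
 * atomically OR-ing a mask in and AND-ing a mask out; note that the
 * unsigned long masks are silently narrowed to the int-sized counter.
 */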
static inline void
atomic_or(int value, atomic_t *atomic)
{
	atomic_or_uint(&atomic->a_u.au_uint, value);
}

static inline void
atomic_set_mask(unsigned long mask, atomic_t *atomic)
{
	atomic_or_uint(&atomic->a_u.au_uint, mask);
}

static inline void
atomic_clear_mask(unsigned long mask, atomic_t *atomic)
{
	atomic_and_uint(&atomic->a_u.au_uint, ~mask);
}

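/*
 * Add addend to the counter unless its current value is `zero';
 * return 1 if the add was performed, 0 if the counter already held
 * `zero'.  The compare-and-swap loop rereads and retries until it
 * installs the update against a stable value, so concurrent updates
 * are never lost.
 */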
static inline int
atomic_add_unless(atomic_t *atomic, int addend, int zero)
{
	int value;

	do {
		value = atomic->a_u.au_int;
		if (value == zero)
			return 0;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, value, (value + addend))
	    != value);

	return 1;
}

static inline int
atomic_inc_not_zero(atomic_t *atomic)
{
	return atomic_add_unless(atomic, 1, 0);
}

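/*
 * Both exchange operations return the value the counter held before
 * the operation, so an atomic_cmpxchg succeeded iff its return value
 * equals `old'.
 */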
static inline int
atomic_xchg(atomic_t *atomic, int new)
{
	return (int)atomic_swap_uint(&atomic->a_u.au_uint, (unsigned)new);
}

static inline int
atomic_cmpxchg(atomic_t *atomic, int old, int new)
{
	return (int)atomic_cas_uint(&atomic->a_u.au_uint, (unsigned)old,
	    (unsigned)new);
}

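/*
 * 64-bit counterpart of atomic_t.  There is no signed/unsigned union
 * here; the value is stored as uint64_t, and, as above, atomic64_read
 * and atomic64_set are plain unordered accesses.
 */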
struct atomic64 {
	volatile uint64_t a_v;
};

typedef struct atomic64 atomic64_t;

static inline uint64_t
atomic64_read(const struct atomic64 *a)
{
	return a->a_v;
}

static inline void
atomic64_set(struct atomic64 *a, uint64_t v)
{
	a->a_v = v;
}

static inline void
atomic64_add(long long d, struct atomic64 *a)
{
	atomic_add_64(&a->a_v, d);
}

static inline void
atomic64_sub(long long d, struct atomic64 *a)
{
	atomic_add_64(&a->a_v, -d);
}

static inline uint64_t
atomic64_xchg(struct atomic64 *a, uint64_t v)
{
	return atomic_swap_64(&a->a_v, v);
}

static inline uint64_t
atomic64_cmpxchg(struct atomic64 *atomic, uint64_t old, uint64_t new)
{
	return atomic_cas_64(&atomic->a_v, old, new);
}

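/*
 * Linux-style bit operations on an array of unsigned long.  The bit
 * number indexes across the whole array: the word is ptr[bit / units]
 * and the mask is 1UL << (bit % units), so with 64-bit longs bit 70
 * names bit 6 of ptr[1].
 */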
static inline void
set_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);

	atomic_or_ulong(&ptr[bit / units], (1UL << (bit % units)));
}

static inline void
clear_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);

	atomic_and_ulong(&ptr[bit / units], ~(1UL << (bit % units)));
}

static inline void
change_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	do v = *p; while (atomic_cas_ulong(p, v, (v ^ mask)) != v);
}

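/*
 * The test_and_* variants update the bit with a compare-and-swap loop
 * and report whether the bit was set before the update (nonzero if it
 * was, zero if not).
 */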
static inline int
test_and_set_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	do v = *p; while (atomic_cas_ulong(p, v, (v | mask)) != v);

	return ((v & mask) != 0);
}

static inline int
test_and_clear_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	do v = *p; while (atomic_cas_ulong(p, v, (v & ~mask)) != v);

	return ((v & mask) != 0);
}

static inline int
test_and_change_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	do v = *p; while (atomic_cas_ulong(p, v, (v ^ mask)) != v);

	return ((v & mask) != 0);
}

#if defined(MULTIPROCESSOR) && !defined(__HAVE_ATOMIC_AS_MEMBAR)
/*
 * XXX These memory barriers are doubtless overkill, but I am having
 * trouble understanding the intent and use of the Linux atomic membar
 * API.  I think that for reference counting purposes, the sequences
 * should be insn/inc/enter and exit/dec/insn, but the use of the
 * before/after memory barriers is not consistent throughout Linux.
 */
#  define smp_mb__before_atomic()	membar_sync()
#  define smp_mb__before_atomic_inc()	membar_sync()
#  define smp_mb__after_atomic_inc()	membar_sync()
#  define smp_mb__before_atomic_dec()	membar_sync()
#  define smp_mb__after_atomic_dec()	membar_sync()
#else
#  define smp_mb__before_atomic()	__insn_barrier()
#  define smp_mb__before_atomic_inc()	__insn_barrier()
#  define smp_mb__after_atomic_inc()	__insn_barrier()
#  define smp_mb__before_atomic_dec()	__insn_barrier()
#  define smp_mb__after_atomic_dec()	__insn_barrier()
#endif
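
/*
 * Illustrative sketch only (not part of the original API surface):
 * the classic release pattern for a reference count, with `obj' and
 * `obj_destroy' purely hypothetical names.
 *
 *	smp_mb__before_atomic_dec();
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		obj_destroy(obj);
 *
 * The barrier orders the caller's prior accesses before the
 * decrement; atomic_dec_and_test returns nonzero only for the final
 * reference.
 */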

#endif /* _LINUX_ATOMIC_H_ */