/*	$NetBSD: atomic.h,v 1.3.4.2 2014/05/18 17:46:01 rmind Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_ATOMIC_H_
#define _LINUX_ATOMIC_H_

#include <sys/atomic.h>

#include <machine/limits.h>
struct atomic {
	union {
		volatile int au_int;
		volatile unsigned int au_uint;
	} a_u;
};

#define	ATOMIC_INIT(i)	{ .a_u = { .au_int = (i) } }

typedef struct atomic atomic_t;

static inline int
atomic_read(atomic_t *atomic)
{
	return atomic->a_u.au_int;
}

static inline void
atomic_set(atomic_t *atomic, int value)
{
	atomic->a_u.au_int = value;
}
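
/*
 * Example (illustrative sketch, not part of this header's API): static
 * initialization with ATOMIC_INIT and plain access with atomic_read and
 * atomic_set.  As in Linux, these are unordered accesses with no memory
 * barrier semantics of their own.  The variable name is hypothetical.
 *
 *	static atomic_t example_count = ATOMIC_INIT(0);
 *
 *	atomic_set(&example_count, 42);
 *	KASSERT(atomic_read(&example_count) == 42);
 */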

static inline void
atomic_add(int addend, atomic_t *atomic)
{
	atomic_add_int(&atomic->a_u.au_uint, addend);
}

static inline void
atomic_sub(int subtrahend, atomic_t *atomic)
{
	atomic_add_int(&atomic->a_u.au_uint, -subtrahend);
}

static inline int
atomic_add_return(int addend, atomic_t *atomic)
{
	return (int)atomic_add_int_nv(&atomic->a_u.au_uint, addend);
}
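
/*
 * Example (illustrative sketch): atomic_add_return yields the new
 * value after the addition, so it can hand out unique, monotonically
 * increasing tokens.  The variable name is hypothetical.
 *
 *	static atomic_t example_next_id = ATOMIC_INIT(0);
 *
 *	int id = atomic_add_return(1, &example_next_id);
 */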

static inline void
atomic_inc(atomic_t *atomic)
{
	atomic_inc_uint(&atomic->a_u.au_uint);
}

static inline void
atomic_dec(atomic_t *atomic)
{
	atomic_dec_uint(&atomic->a_u.au_uint);
}

static inline int
atomic_inc_return(atomic_t *atomic)
{
	return (int)atomic_inc_uint_nv(&atomic->a_u.au_uint);
}

static inline int
atomic_dec_return(atomic_t *atomic)
{
	return (int)atomic_dec_uint_nv(&atomic->a_u.au_uint);
}

static inline int
atomic_dec_and_test(atomic_t *atomic)
{
	/* Linux semantics: true iff the decremented value is zero.  */
	return (0 == (int)atomic_dec_uint_nv(&atomic->a_u.au_uint));
}
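
/*
 * Example (illustrative sketch): the usual Linux reference-counting
 * release pattern built on atomic_dec_and_test.  The structure and
 * free routine are hypothetical.
 *
 *	struct example_obj {
 *		atomic_t	eo_refcnt;
 *	};
 *
 *	static void
 *	example_obj_put(struct example_obj *obj)
 *	{
 *		if (atomic_dec_and_test(&obj->eo_refcnt))
 *			kmem_free(obj, sizeof(*obj));
 *	}
 */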

static inline void
atomic_set_mask(unsigned long mask, atomic_t *atomic)
{
	atomic_or_uint(&atomic->a_u.au_uint, mask);
}

static inline void
atomic_clear_mask(unsigned long mask, atomic_t *atomic)
{
	atomic_and_uint(&atomic->a_u.au_uint, ~mask);
}
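
/*
 * Example (illustrative sketch): atomically setting and clearing flag
 * bits in an atomic_t.  The flag name and field are hypothetical.
 *
 *	#define	EXAMPLE_F_BUSY	0x01UL
 *
 *	atomic_set_mask(EXAMPLE_F_BUSY, &obj->eo_flags);
 *	atomic_clear_mask(EXAMPLE_F_BUSY, &obj->eo_flags);
 */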

static inline int
atomic_add_unless(atomic_t *atomic, int addend, int zero)
{
	int value;

	do {
		value = atomic->a_u.au_int;
		if (value == zero)
			return 0;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, value, (value + addend))
	    != value);

	return 1;
}
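
/*
 * Example (illustrative sketch): atomic_add_unless is a
 * compare-and-swap retry loop that adds `addend' and returns 1, unless
 * the counter already holds `zero', in which case it returns 0.  This
 * gives, e.g., a decrement that never drops below zero:
 *
 *	atomic_add_unless(&obj->eo_count, -1, 0);
 */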

static inline int
atomic_inc_not_zero(atomic_t *atomic)
{
	return atomic_add_unless(atomic, 1, 0);
}
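
/*
 * Example (illustrative sketch): taking a new reference only while the
 * object is still alive, e.g. in a lookup that races with the last
 * reference being dropped.
 *
 *	if (!atomic_inc_not_zero(&obj->eo_refcnt))
 *		return NULL;	(object already being destroyed)
 */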

static inline void
set_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);

	atomic_or_ulong(&ptr[bit / units], (1UL << (bit % units)));
}

static inline void
clear_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);

	atomic_and_ulong(&ptr[bit / units], ~(1UL << (bit % units)));
}
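
/*
 * Example (illustrative sketch): as in Linux, `ptr' is treated as an
 * array of words, so bit numbers may exceed the width of a single
 * unsigned long.  The bitmap below is hypothetical.
 *
 *	static volatile unsigned long example_bitmap[2];
 *
 *	set_bit(37, example_bitmap);	(word 0 on LP64, word 1 on ILP32)
 *	clear_bit(37, example_bitmap);
 */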

static inline void
change_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	do v = *p; while (atomic_cas_ulong(p, v, (v ^ mask)) != v);
}

static inline unsigned long
test_and_set_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	do v = *p; while (atomic_cas_ulong(p, v, (v | mask)) != v);

	return (v & mask);
}

static inline unsigned long
test_and_clear_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	do v = *p; while (atomic_cas_ulong(p, v, (v & ~mask)) != v);

	return (v & mask);
}

static inline unsigned long
test_and_change_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	do v = *p; while (atomic_cas_ulong(p, v, (v ^ mask)) != v);

	return (v & mask);
}
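
/*
 * Example (illustrative sketch): the test_and_* routines return the
 * old value of the bit (nonzero if it was set), so test_and_set_bit
 * can serve as a run-once flag.  The names below are hypothetical.
 *
 *	static volatile unsigned long example_once;
 *
 *	if (!test_and_set_bit(0, &example_once))
 *		example_initialize();	(first caller only)
 */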

#if defined(MULTIPROCESSOR) && !defined(__HAVE_ATOMIC_AS_MEMBAR)
/*
 * XXX These memory barriers are doubtless overkill, but I am having
 * trouble understanding the intent and use of the Linux atomic membar
 * API.  I think that for reference counting purposes, the sequences
 * should be insn/inc/enter and exit/dec/insn, but the use of the
 * before/after memory barriers is not consistent throughout Linux.
 */
#  define	smp_mb__before_atomic_inc()	membar_sync()
#  define	smp_mb__after_atomic_inc()	membar_sync()
#  define	smp_mb__before_atomic_dec()	membar_sync()
#  define	smp_mb__after_atomic_dec()	membar_sync()
#else
#  define	smp_mb__before_atomic_inc()	__insn_barrier()
#  define	smp_mb__after_atomic_inc()	__insn_barrier()
#  define	smp_mb__before_atomic_dec()	__insn_barrier()
#  define	smp_mb__after_atomic_dec()	__insn_barrier()
#endif
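
/*
 * Example (illustrative sketch; as noted above, Linux's own usage of
 * these barriers is not consistent): a barrier is typically paired
 * with a bare atomic operation when ordering is needed, e.g. before
 * dropping a reference.
 *
 *	smp_mb__before_atomic_dec();
 *	atomic_dec(&obj->eo_refcnt);
 */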

#endif	/* _LINUX_ATOMIC_H_ */