/* $NetBSD: bitops.h,v 1.14 2021/12/19 09:44:27 riastradh Exp $ */

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_BITOPS_H_
#define _LINUX_BITOPS_H_

#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/bitops.h>

#include <machine/limits.h>

#include <lib/libkern/libkern.h>
#include <linux/bits.h>

/*
 * Linux __ffs/__ffs64 is zero-based; zero input is undefined.  Our
 * ffs/ffs64 is one-based; zero input yields zero.
 */
static inline unsigned long
__ffs(unsigned long x)
{

        KASSERT(x != 0);
        return ffs64(x) - 1;
}

static inline unsigned long
__ffs64(uint64_t x)
{

        KASSERT(x != 0);
        return ffs64(x) - 1;
}
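
/*
 * For illustration, a few concrete values contrasting the two
 * conventions:
 *
 *      __ffs(0x18UL) == 3      (lowest set bit, zero-based)
 *      ffs(0x18)     == 4      (one-based ffs, libkern/POSIX style)
 *
 * Passing zero to __ffs/__ffs64 is a caller bug; the KASSERTs above
 * catch it on DIAGNOSTIC kernels.
 */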

/*
 * Linux fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32, so it matches
 * our fls semantics.
 */
static inline int
fls(int x)
{
        return fls32(x);
}

static inline unsigned int
hweight8(uint8_t w)
{
        return popcount(w & 0xff);
}

static inline unsigned int
hweight16(uint16_t n)
{
        return popcount32(n);
}

static inline unsigned int
hweight32(uint32_t n)
{
        return popcount32(n);
}

static inline unsigned int
hweight64(uint64_t n)
{
        return popcount64(n);
}
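
/*
 * For illustration, hweight<N> is the population count (number of set
 * bits) of its argument:
 *
 *      hweight8(0xff) == 8
 *      hweight32(0xf0f0f0f0) == 16
 *      hweight64(__BIT(63) | 1) == 2
 */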

static inline int64_t
sign_extend64(uint64_t x, unsigned n)
{
        return (int64_t)(x << (63 - n)) >> (63 - n);
}
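
/*
 * For illustration, n is the zero-based index of the sign bit:
 *
 *      sign_extend64(0x80, 7) == -128
 *      sign_extend64(0x7f, 7) == 127
 *      sign_extend64(0xfff, 11) == -1
 */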

/* Number of longs needed to hold a bitmap of n bits. */
#define BITS_TO_LONGS(n)                                                \
        howmany((n), (sizeof(unsigned long) * CHAR_BIT))

#define BITS_PER_BYTE   NBBY
#define BITS_PER_LONG   (__SIZEOF_LONG__ * CHAR_BIT)

#define BIT(n)          ((unsigned long)__BIT(n))
#define BIT_ULL(n)      ((unsigned long long)__BIT(n))
#define GENMASK(h,l)    ((unsigned long)__BITS(h,l))
#define GENMASK_ULL(h,l) ((unsigned long long)__BITS(h,l))
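
/*
 * For illustration: BIT(n) is a single-bit mask and GENMASK(h,l) is a
 * contiguous mask covering bits h down to l, inclusive:
 *
 *      BIT(3) == 0x08
 *      GENMASK(7,4) == 0xf0
 *      GENMASK_ULL(39,32) == 0xff00000000
 */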

static inline int
test_bit(unsigned int n, const volatile unsigned long *p)
{
        const unsigned units = (sizeof(unsigned long) * CHAR_BIT);

        return ((p[n / units] & (1UL << (n % units))) != 0);
}

static inline void
__set_bit(unsigned int n, volatile unsigned long *p)
{
        const unsigned units = (sizeof(unsigned long) * CHAR_BIT);

        p[n / units] |= (1UL << (n % units));
}

static inline void
__clear_bit(unsigned int n, volatile unsigned long *p)
{
        const unsigned units = (sizeof(unsigned long) * CHAR_BIT);

        p[n / units] &= ~(1UL << (n % units));
}

static inline void
__change_bit(unsigned int n, volatile unsigned long *p)
{
        const unsigned units = (sizeof(unsigned long) * CHAR_BIT);

        p[n / units] ^= (1UL << (n % units));
}
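
/*
 * The double-underscore operations are the non-atomic variants: plain
 * read-modify-write accesses that must not be used on a bitmap other
 * CPUs or interrupt handlers may modify concurrently.
 *
 * A minimal usage sketch, for illustration (the bitmap name is
 * hypothetical):
 *
 *      unsigned long map[BITS_TO_LONGS(128)] = { 0 };
 *
 *      __set_bit(5, map);
 *      __change_bit(5, map);           bit 5 is clear again
 *      KASSERT(!test_bit(5, map));
 */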

static inline unsigned long
__test_and_set_bit(unsigned int bit, volatile unsigned long *ptr)
{
        const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
        volatile unsigned long *const p = &ptr[bit / units];
        const unsigned long mask = (1UL << (bit % units));
        unsigned long v;

        v = *p;
        *p |= mask;

        return ((v & mask) != 0);
}

static inline unsigned long
__test_and_clear_bit(unsigned int bit, volatile unsigned long *ptr)
{
        const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
        volatile unsigned long *const p = &ptr[bit / units];
        const unsigned long mask = (1UL << (bit % units));
        unsigned long v;

        v = *p;
        *p &= ~mask;

        return ((v & mask) != 0);
}

static inline unsigned long
__test_and_change_bit(unsigned int bit, volatile unsigned long *ptr)
{
        const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
        volatile unsigned long *const p = &ptr[bit / units];
        const unsigned long mask = (1UL << (bit % units));
        unsigned long v;

        v = *p;
        *p ^= mask;

        return ((v & mask) != 0);
}
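
/*
 * For illustration, the __test_and_* forms return the previous state of
 * the bit (nonzero if it was set), again without any atomicity:
 *
 *      unsigned long map[BITS_TO_LONGS(64)] = { 0 };
 *
 *      __test_and_set_bit(9, map)      returns 0 (was clear, now set)
 *      __test_and_set_bit(9, map)      returns nonzero (already set)
 *      __test_and_clear_bit(9, map)    returns nonzero (was set, now clear)
 */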

static inline unsigned long
__find_next_bit(const unsigned long *ptr, unsigned long nbits,
    unsigned long startbit, unsigned long toggle)
{
        const size_t bpl = (CHAR_BIT * sizeof(*ptr));
        const unsigned long *p = ptr + startbit/bpl;
        size_t n = howmany(nbits, bpl) - startbit/bpl; /* words left from *p */
        unsigned long result;
        uint64_t word;

        /*
         * We use ffs64 because NetBSD doesn't have a handy ffsl that
         * works on unsigned long.  This is a waste on 32-bit systems
         * but I'd rather not maintain multiple copies of this -- the
         * first version had enough bugs already.
         */

        /* Do we need to examine a partial starting word? */
        if (startbit % bpl) {
                /* Toggle the bits and convert to 64 bits for ffs64. */
                word = *p ^ toggle;

                /* Clear the low startbit%bpl bits. */
                word &= (~0UL << (startbit % bpl));

                /* Are any of these bits set now? */
                if (word)
                        goto out;

                /* Move past it. */
                p++;
                n--;
        }

        /* Find the first word with any bits set. */
        for (; n --> 0; p++) {
                /* Toggle the bits and convert to 64 bits for ffs64. */
                word = *p ^ toggle;

                /* Are any of these bits set now? */
                if (word)
                        goto out;
        }

        /* Nada. */
        return nbits;

out:
        /* Count how many words we've skipped. */
        result = bpl*(p - ptr);

        /* Find the first set bit in this word, zero-based. */
        result += ffs64(word) - 1;

        /* We may have overshot, so clamp down to at most nbits. */
        return MIN(result, nbits);
}

static inline unsigned long
find_next_bit(const unsigned long *ptr, unsigned long nbits,
    unsigned long startbit)
{
        return __find_next_bit(ptr, nbits, startbit, 0);
}

static inline unsigned long
find_first_bit(const unsigned long *ptr, unsigned long nbits)
{
        return find_next_bit(ptr, nbits, 0);
}

static inline unsigned long
find_next_zero_bit(const unsigned long *ptr, unsigned long nbits,
    unsigned long startbit)
{
        return __find_next_bit(ptr, nbits, startbit, ~0UL);
}

static inline unsigned long
find_first_zero_bit(const unsigned long *ptr, unsigned long nbits)
{
        return find_next_zero_bit(ptr, nbits, 0);
}
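
/*
 * For illustration (assuming a 64-bit unsigned long for the values
 * below), a search that finds nothing returns nbits:
 *
 *      static const unsigned long map[1] = { 0x31 };   bits 0, 4, 5 set
 *
 *      find_first_bit(map, 64)         returns 0
 *      find_next_bit(map, 64, 1)       returns 4
 *      find_next_bit(map, 64, 6)       returns 64 (nothing left)
 *      find_first_zero_bit(map, 64)    returns 1
 */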

#define for_each_set_bit(BIT, PTR, NBITS)                               \
        for ((BIT) = find_first_bit((PTR), (NBITS));                    \
             (BIT) < (NBITS);                                           \
             (BIT) = find_next_bit((PTR), (NBITS), (BIT) + 1))

#define for_each_clear_bit(BIT, PTR, NBITS)                             \
        for ((BIT) = find_first_zero_bit((PTR), (NBITS));               \
             (BIT) < (NBITS);                                           \
             (BIT) = find_next_zero_bit((PTR), (NBITS), (BIT) + 1))
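
/*
 * Typical use, for illustration (map and nbits are hypothetical caller
 * variables):
 *
 *      unsigned long bit;
 *
 *      for_each_set_bit(bit, map, nbits) {
 *              ... handle set bit `bit' ...
 *      }
 *
 * The loop variable must be an lvalue; when the loop runs to
 * completion it is >= nbits.
 */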

#endif  /* _LINUX_BITOPS_H_ */