/*	$NetBSD: bitops.h,v 1.12 2021/12/19 01:54:12 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_BITOPS_H_
#define _LINUX_BITOPS_H_

#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/bitops.h>

#include <machine/limits.h>

#include <lib/libkern/libkern.h>
/*
 * Linux __ffs/__ffs64 is zero-based; zero input is undefined.  Our
 * ffs/ffs64 is one-based; zero input yields zero.
 */
static inline unsigned long
__ffs(unsigned long x)
{

	KASSERT(x != 0);
	return ffs64(x) - 1;
}

static inline unsigned long
__ffs64(uint64_t x)
{

	KASSERT(x != 0);
	return ffs64(x) - 1;
}
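/*
 * For example, __ffs(0x10) == 4 and __ffs64(0x10) == 4 (bit 4 is the
 * lowest set bit), whereas the native one-based ffs64(0x10) is 5.
 * Zero input is a caller error here; the KASSERTs catch it in
 * DIAGNOSTIC kernels.
 */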

/*
 * Linux fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32, so it matches
 * our fls32 semantics.
 */
static inline int
fls(int x)
{
	return fls32(x);
}

static inline unsigned int
hweight8(uint8_t w)
{
	return popcount(w & 0xff);
}

static inline unsigned int
hweight16(uint16_t n)
{
	return popcount32(n);
}

static inline unsigned int
hweight32(uint32_t n)
{
	return popcount32(n);
}

static inline unsigned int
hweight64(uint64_t n)
{
	return popcount64(n);
}
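/*
 * In Linux terms, the hweight* functions return the Hamming weight
 * (population count) of their argument, e.g. hweight8(0xf0) == 4 and
 * hweight64(~0ULL) == 64; here they simply wrap the native popcount
 * routines.
 */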

static inline int64_t
sign_extend64(uint64_t x, unsigned n)
{
	return (int64_t)(x << (63 - n)) >> (63 - n);
}
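/*
 * sign_extend64 treats bit n as the sign bit.  For example,
 * sign_extend64(0x80, 7) == -128 and sign_extend64(0x7f, 7) == 127:
 * shifting left by 63 - n puts bit n in the sign position, and the
 * arithmetic shift right replicates it back down.
 */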

/*
 * XXX Don't define BITS_PER_LONG as sizeof(unsigned long)*CHAR_BIT
 * because that won't work in preprocessor conditionals, where it often
 * turns up.
 */

/* Number of unsigned longs needed to hold n bits.  */
#define	BITS_TO_LONGS(n)						\
	howmany((n), (sizeof(unsigned long) * CHAR_BIT))
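/*
 * For example, on an LP64 platform BITS_TO_LONGS(1) == 1,
 * BITS_TO_LONGS(64) == 1, and BITS_TO_LONGS(65) == 2, so
 * `unsigned long bitmap[BITS_TO_LONGS(n)]' holds n bits.
 */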

#define	BITS_PER_BYTE	NBBY

#define	BIT(n)		((unsigned long)__BIT(n))
#define	BIT_ULL(n)	((unsigned long long)__BIT(n))
#define	GENMASK(h,l)	((unsigned long)__BITS(h,l))
#define	GENMASK_ULL(h,l)	((unsigned long long)__BITS(h,l))
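/*
 * These follow the Linux conventions: BIT(n) is the nth bit as an
 * unsigned long, and GENMASK(h,l) is the mask with bits h..l
 * (inclusive) set, e.g. GENMASK(7,4) == 0xf0 and
 * GENMASK_ULL(63,0) == ~0ULL.
 */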

static inline int
test_bit(unsigned int n, const volatile unsigned long *p)
{
	const unsigned units = (sizeof(unsigned long) * CHAR_BIT);

	return ((p[n / units] & (1UL << (n % units))) != 0);
}

static inline void
__set_bit(unsigned int n, volatile unsigned long *p)
{
	const unsigned units = (sizeof(unsigned long) * CHAR_BIT);

	p[n / units] |= (1UL << (n % units));
}

static inline void
__clear_bit(unsigned int n, volatile unsigned long *p)
{
	const unsigned units = (sizeof(unsigned long) * CHAR_BIT);

	p[n / units] &= ~(1UL << (n % units));
}

static inline void
__change_bit(unsigned int n, volatile unsigned long *p)
{
	const unsigned units = (sizeof(unsigned long) * CHAR_BIT);

	p[n / units] ^= (1UL << (n % units));
}

static inline unsigned long
__test_and_set_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	v = *p;
	*p |= mask;

	return ((v & mask) != 0);
}

static inline unsigned long
__test_and_clear_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	v = *p;
	*p &= ~mask;

	return ((v & mask) != 0);
}

static inline unsigned long
__test_and_change_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	v = *p;
	*p ^= mask;

	return ((v & mask) != 0);
}
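/*
 * The double-underscore routines above follow the Linux convention
 * for non-atomic bit operations: each indexes the bitmap one unsigned
 * long at a time, so concurrent updates to the same word need
 * external synchronization.  A minimal usage sketch (`flags' is a
 * hypothetical caller-owned bitmap, not part of this header):
 *
 *	unsigned long flags[BITS_TO_LONGS(32)] = { 0 };
 *
 *	__set_bit(5, flags);
 *	if (__test_and_clear_bit(5, flags))
 *		... bit 5 was set and has now been cleared ...
 */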

static inline unsigned long
__find_next_bit(const unsigned long *ptr, unsigned long nbits,
    unsigned long startbit, unsigned long toggle)
{
	const size_t bpl = (CHAR_BIT * sizeof(*ptr));
	const unsigned long *p = ptr + startbit/bpl;
	/* Words remaining, counted from the word containing startbit. */
	size_t n = howmany(nbits, bpl) - startbit/bpl;
	unsigned long result;
	uint64_t word;

	/*
	 * We use ffs64 because NetBSD doesn't have a handy ffsl that
	 * works on unsigned long.  This is a waste on 32-bit systems
	 * but I'd rather not maintain multiple copies of this -- the
	 * first version had enough bugs already.
	 */

	/* Do we need to examine a partial starting word?  */
	if (startbit % bpl) {
		/* Toggle the bits and convert to 64 bits for ffs64.  */
		word = *p ^ toggle;

		/* Clear the low startbit%bpl bits.  */
		word &= (~0UL << (startbit % bpl));

		/* Are any of these bits set now?  */
		if (word)
			goto out;

		/* Move past it.  */
		p++;
		n--;
	}

	/* Find the first word with any bits set.  */
	for (; n --> 0; p++) {
		/* Toggle the bits and convert to 64 bits for ffs64.  */
		word = *p ^ toggle;

		/* Are any of these bits set now?  */
		if (word)
			goto out;
	}

	/* Nada.  */
	return nbits;

out:
	/* Count how many words we've skipped.  */
	result = bpl*(p - ptr);

	/* Find the first set bit in this word, zero-based.  */
	result += ffs64(word) - 1;

	/* We may have overshot, so clamp down to at most nbits.  */
	return MIN(result, nbits);
}
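/*
 * The `toggle' argument is XORed into each word before scanning, so
 * passing 0 searches for set bits and passing ~0UL searches for clear
 * bits; the wrappers below use exactly those two values.  On success
 * the zero-based index of the matching bit is returned, otherwise
 * nbits.
 */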

static inline unsigned long
find_next_bit(const unsigned long *ptr, unsigned long nbits,
    unsigned long startbit)
{
	return __find_next_bit(ptr, nbits, startbit, 0);
}

static inline unsigned long
find_first_bit(const unsigned long *ptr, unsigned long nbits)
{
	return find_next_bit(ptr, nbits, 0);
}

static inline unsigned long
find_next_zero_bit(const unsigned long *ptr, unsigned long nbits,
    unsigned long startbit)
{
	return __find_next_bit(ptr, nbits, startbit, ~0UL);
}

static inline unsigned long
find_first_zero_bit(const unsigned long *ptr, unsigned long nbits)
{
	return find_next_zero_bit(ptr, nbits, 0);
}

#define	for_each_set_bit(BIT, PTR, NBITS)				\
	for ((BIT) = find_first_bit((PTR), (NBITS));			\
	     (BIT) < (NBITS);						\
	     (BIT) = find_next_bit((PTR), (NBITS), (BIT) + 1))

#define	for_each_clear_bit(BIT, PTR, NBITS)				\
	for ((BIT) = find_first_zero_bit((PTR), (NBITS));		\
	     (BIT) < (NBITS);						\
	     (BIT) = find_next_zero_bit((PTR), (NBITS), (BIT) + 1))
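/*
 * Typical use of the iteration macros, as a sketch (the bitmap `map',
 * the size 128, and do_something() are hypothetical caller state, not
 * part of this header):
 *
 *	unsigned long map[BITS_TO_LONGS(128)] = { 0 };
 *	unsigned long bit;
 *
 *	__set_bit(3, map);
 *	__set_bit(90, map);
 *	for_each_set_bit(bit, map, 128)
 *		do_something(bit);	(visits bit 3, then bit 90)
 */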

#endif	/* _LINUX_BITOPS_H_ */