/*	$NetBSD: subr_kcpuset.c,v 1.19 2023/09/12 16:17:21 ad Exp $	*/

/*-
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Kernel CPU set implementation.
 *
 * The interface can be used by kernel subsystems as a unified dynamic
 * CPU bitset implementation handling many CPUs.  The facility also
 * supports early use by MD code on boot, as it fixes up the bitsets
 * later during boot.
 *
 * TODO:
 * - Handle "reverse" bitset on fixup/grow.
 */
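
/*
 * An illustrative sketch of typical dynamic use, once kcpuset_sysinit()
 * has run ("ci" denotes some struct cpu_info pointer):
 *
 *	kcpuset_t *kcp;
 *
 *	kcpuset_create(&kcp, true);
 *	kcpuset_set(kcp, cpu_index(ci));
 *	KASSERT(kcpuset_isset(kcp, cpu_index(ci)));
 *	kcpuset_destroy(kcp);
 */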

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_kcpuset.c,v 1.19 2023/09/12 16:17:21 ad Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/intr.h>
#include <sys/sched.h>
#include <sys/kcpuset.h>
#include <sys/pool.h>

/* Number of CPUs to support. */
#define	KC_MAXCPUS		roundup2(MAXCPUS, 32)

/*
 * Structure of dynamic CPU set in the kernel.
 */
struct kcpuset {
	uint32_t		bits[0];
};

typedef struct kcpuset_impl {
	/* Reference count. */
	u_int			kc_refcnt;
	/* Next to free, if non-NULL (used when multiple references). */
	struct kcpuset *	kc_next;
	/* Actual variable-sized field of bits. */
	struct kcpuset		kc_field;
} kcpuset_impl_t;

#define	KC_BITS_OFF		(offsetof(struct kcpuset_impl, kc_field))
#define	KC_GETSTRUCT(b)		((kcpuset_impl_t *)((char *)(b) - KC_BITS_OFF))
#define	KC_GETCSTRUCT(b)	((const kcpuset_impl_t *)((const char *)(b) - KC_BITS_OFF))

/* Sizes of a single bitset. */
#define	KC_SHIFT		5
#define	KC_MASK			31
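
/*
 * A CPU index "i" thus maps to word "i >> KC_SHIFT" and bit "i & KC_MASK"
 * within that word; for example, CPU 37 lives in bits[1], bit 5.
 */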

/* An array of noted early kcpuset creations and data. */
#define	KC_SAVE_NITEMS		8

/* Structures for early boot mechanism (must be statically initialised). */
static kcpuset_t **		kc_noted_early[KC_SAVE_NITEMS];
static uint32_t			kc_bits_early[KC_SAVE_NITEMS];
static int			kc_last_idx = 0;
static bool			kc_initialised = false;

#define	KC_BITSIZE_EARLY	sizeof(kc_bits_early[0])
#define	KC_NFIELDS_EARLY	1

/*
 * The size of the whole bitset field and the number of fields.
 * The whole size must be statically initialised for the early boot case.
 */
static size_t			kc_bitsize __read_mostly = KC_BITSIZE_EARLY;
static size_t			kc_nfields __read_mostly = KC_NFIELDS_EARLY;

static pool_cache_t		kc_cache __read_mostly;

static kcpuset_t *		kcpuset_create_raw(bool);

/*
 * kcpuset_sysinit: initialize the subsystem, transfer early boot cases
 * to dynamically allocated sets.
 */
void
kcpuset_sysinit(void)
{
	kcpuset_t *kc_dynamic[KC_SAVE_NITEMS], *kcp;
	int i, s;

	/* Set the kcpuset_t sizes. */
	kc_nfields = (KC_MAXCPUS >> KC_SHIFT);
	kc_bitsize = sizeof(uint32_t) * kc_nfields;
	KASSERT(kc_nfields != 0);
	KASSERT(kc_bitsize != 0);

	kc_cache = pool_cache_init(sizeof(kcpuset_impl_t) + kc_bitsize,
	    coherency_unit, 0, 0, "kcpuset", NULL, IPL_NONE, NULL, NULL, NULL);

	/* First, pre-allocate kcpuset entries. */
	for (i = 0; i < kc_last_idx; i++) {
		kcp = kcpuset_create_raw(true);
		kc_dynamic[i] = kcp;
	}

	/*
	 * Prepare to convert all early noted kcpuset uses to dynamic sets.
	 * All processors, except the one we are currently running on (the
	 * primary), must not be spun up yet.  Since MD facilities can use
	 * kcpuset, raise the IPL to high.
	 */
	KASSERT(mp_online == false);

	s = splhigh();
	for (i = 0; i < kc_last_idx; i++) {
		/*
		 * Transfer the bits from early static storage to the kcpuset.
		 */
		KASSERT(kc_bitsize >= KC_BITSIZE_EARLY);
		memcpy(kc_dynamic[i], &kc_bits_early[i], KC_BITSIZE_EARLY);

		/*
		 * Store the new pointer, pointing to the allocated kcpuset.
		 * Note: we are not in an interrupt context and this is the
		 * only CPU running - thus the store is safe (e.g. there is
		 * no need for the pointer variable to be volatile).
		 */
		*kc_noted_early[i] = kc_dynamic[i];
	}
	kc_initialised = true;
	kc_last_idx = 0;
	splx(s);
}

/*
 * kcpuset_early_ptr: note an early boot use by saving the pointer and
 * returning a pointer to a static, temporary bit field.
 */
static kcpuset_t *
kcpuset_early_ptr(kcpuset_t **kcptr)
{
	kcpuset_t *kcp;
	int s;

	s = splhigh();
	if (kc_last_idx < KC_SAVE_NITEMS) {
		/*
		 * Save the pointer, return pointer to static early field.
		 * Need to zero it out.
		 */
		kc_noted_early[kc_last_idx] = kcptr;
		kcp = (kcpuset_t *)&kc_bits_early[kc_last_idx];
		kc_last_idx++;
		memset(kcp, 0, KC_BITSIZE_EARLY);
		KASSERT(kc_bitsize == KC_BITSIZE_EARLY);
	} else {
		panic("kcpuset(9): all early-use entries exhausted; "
		    "increase KC_SAVE_NITEMS\n");
	}
	splx(s);

	return kcp;
}
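
/*
 * For instance, MD code may use a set before kcpuset_sysinit() runs
 * (an illustrative sketch; "kc_early_set" and "ci" are placeholders):
 *
 *	static kcpuset_t *kc_early_set;
 *	...
 *	kcpuset_create(&kc_early_set, true);
 *	kcpuset_set(kc_early_set, cpu_index(ci));
 *
 * The address of "kc_early_set" is noted here, and kcpuset_sysinit()
 * later redirects it to a dynamically allocated set.
 */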

/*
 * Routines to create or destroy the CPU set.
 * Early boot case is handled.
 */

static kcpuset_t *
kcpuset_create_raw(bool zero)
{
	kcpuset_impl_t *kc;

	kc = pool_cache_get(kc_cache, PR_WAITOK);
	kc->kc_refcnt = 1;
	kc->kc_next = NULL;

	if (zero) {
		memset(&kc->kc_field, 0, kc_bitsize);
	}

	/* Note: return pointer to the actual field of bits. */
	KASSERT((uint8_t *)kc + KC_BITS_OFF == (uint8_t *)&kc->kc_field);
	return &kc->kc_field;
}

void
kcpuset_create(kcpuset_t **retkcp, bool zero)
{
	if (__predict_false(!kc_initialised)) {
		/* Early boot use - special case. */
		*retkcp = kcpuset_early_ptr(retkcp);
		return;
	}
	*retkcp = kcpuset_create_raw(zero);
}

void
kcpuset_clone(kcpuset_t **retkcp, const kcpuset_t *kcp)
{
	kcpuset_create(retkcp, false);
	memcpy(*retkcp, kcp, kc_bitsize);
}

void
kcpuset_destroy(kcpuset_t *kcp)
{
	kcpuset_impl_t *kc;

	KASSERT(kc_initialised);
	KASSERT(kcp != NULL);

	do {
		kc = KC_GETSTRUCT(kcp);
		kcp = kc->kc_next;
		pool_cache_put(kc_cache, kc);
	} while (kcp);
}

/*
 * Routines to reference/unreference the CPU set.
 * Note: early boot case is not supported by these routines.
 */

void
kcpuset_use(kcpuset_t *kcp)
{
	kcpuset_impl_t *kc = KC_GETSTRUCT(kcp);

	KASSERT(kc_initialised);
	atomic_inc_uint(&kc->kc_refcnt);
}

void
kcpuset_unuse(kcpuset_t *kcp, kcpuset_t **lst)
{
	kcpuset_impl_t *kc = KC_GETSTRUCT(kcp);

	KASSERT(kc_initialised);
	KASSERT(kc->kc_refcnt > 0);

	membar_release();
	if (atomic_dec_uint_nv(&kc->kc_refcnt) != 0) {
		return;
	}
	membar_acquire();
	KASSERT(kc->kc_next == NULL);
	if (lst == NULL) {
		kcpuset_destroy(kcp);
		return;
	}
	kc->kc_next = *lst;
	*lst = kcp;
}
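
/*
 * If the last reference may be dropped while a spin lock is held, the
 * caller can pass "lst" to defer the free, roughly as follows (an
 * illustrative sketch; "lock" is a placeholder).  kcpuset_destroy()
 * then frees the whole deferred kc_next chain:
 *
 *	kcpuset_t *lst = NULL;
 *
 *	mutex_spin_enter(lock);
 *	kcpuset_unuse(kcp, &lst);
 *	mutex_spin_exit(lock);
 *	if (lst != NULL) {
 *		kcpuset_destroy(lst);
 *	}
 */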

/*
 * Routines to transfer the CPU set from / to userspace.
 * Note: early boot case is not supported by these routines.
 */

int
kcpuset_copyin(const cpuset_t *ucp, kcpuset_t *kcp, size_t len)
{
	kcpuset_impl_t *kc __diagused = KC_GETSTRUCT(kcp);

	KASSERT(kc_initialised);
	KASSERT(kc->kc_refcnt > 0);
	KASSERT(kc->kc_next == NULL);

	if (len > kc_bitsize) { /* XXX */
		return EINVAL;
	}
	return copyin(ucp, kcp, len);
}

int
kcpuset_copyout(kcpuset_t *kcp, cpuset_t *ucp, size_t len)
{
	kcpuset_impl_t *kc __diagused = KC_GETSTRUCT(kcp);

	KASSERT(kc_initialised);
	KASSERT(kc->kc_refcnt > 0);
	KASSERT(kc->kc_next == NULL);

	if (len > kc_bitsize) { /* XXX */
		return EINVAL;
	}
	return copyout(kcp, ucp, len);
}
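
/*
 * These routines back, for example, the scheduler affinity system calls;
 * "len" is supplied by userland, hence the check against kc_bitsize.
 */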

void
kcpuset_export_u32(const kcpuset_t *kcp, uint32_t *bitfield, size_t len)
{
	size_t rlen = MIN(kc_bitsize, len);

	KASSERT(kcp != NULL);
	memcpy(bitfield, kcp->bits, rlen);
}

/*
 * Routines to change the bit field - zero, fill, copy, set, unset, etc.
 */

void
kcpuset_zero(kcpuset_t *kcp)
{

	KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_refcnt > 0);
	KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_next == NULL);
	memset(kcp, 0, kc_bitsize);
}

void
kcpuset_fill(kcpuset_t *kcp)
{

	KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_refcnt > 0);
	KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_next == NULL);
	memset(kcp, ~0, kc_bitsize);
}

void
kcpuset_copy(kcpuset_t *dkcp, const kcpuset_t *skcp)
{

	KASSERT(!kc_initialised || KC_GETSTRUCT(dkcp)->kc_refcnt > 0);
	KASSERT(!kc_initialised || KC_GETSTRUCT(dkcp)->kc_next == NULL);
	memcpy(dkcp, skcp, kc_bitsize);
}

void
kcpuset_set(kcpuset_t *kcp, cpuid_t i)
{
	const size_t j = i >> KC_SHIFT;

	KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_next == NULL);
	KASSERT(j < kc_nfields);

	kcp->bits[j] |= __BIT(i & KC_MASK);
}

void
kcpuset_clear(kcpuset_t *kcp, cpuid_t i)
{
	const size_t j = i >> KC_SHIFT;

	KASSERT(!kc_initialised || KC_GETCSTRUCT(kcp)->kc_next == NULL);
	KASSERT(j < kc_nfields);

	kcp->bits[j] &= ~(__BIT(i & KC_MASK));
}

bool
kcpuset_isset(const kcpuset_t *kcp, cpuid_t i)
{
	const size_t j = i >> KC_SHIFT;

	KASSERT(kcp != NULL);
	KASSERT(!kc_initialised || KC_GETCSTRUCT(kcp)->kc_refcnt > 0);
	KASSERT(!kc_initialised || KC_GETCSTRUCT(kcp)->kc_next == NULL);
	KASSERT(j < kc_nfields);

	return ((__BIT(i & KC_MASK)) & kcp->bits[j]) != 0;
}

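/*
 * kcpuset_isotherset: true if any CPU other than "i" is set.
 */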
bool
kcpuset_isotherset(const kcpuset_t *kcp, cpuid_t i)
{
	const size_t j2 = i >> KC_SHIFT;
	const uint32_t mask = ~(__BIT(i & KC_MASK));

	for (size_t j = 0; j < kc_nfields; j++) {
		const uint32_t bits = kcp->bits[j];
		if (bits && (j != j2 || (bits & mask) != 0)) {
			return true;
		}
	}
	return false;
}

bool
kcpuset_iszero(const kcpuset_t *kcp)
{

	for (size_t j = 0; j < kc_nfields; j++) {
		if (kcp->bits[j] != 0) {
			return false;
		}
	}
	return true;
}

bool
kcpuset_match(const kcpuset_t *kcp1, const kcpuset_t *kcp2)
{

	return memcmp(kcp1, kcp2, kc_bitsize) == 0;
}

bool
kcpuset_intersecting_p(const kcpuset_t *kcp1, const kcpuset_t *kcp2)
{

	for (size_t j = 0; j < kc_nfields; j++) {
		if (kcp1->bits[j] & kcp2->bits[j])
			return true;
	}
	return false;
}

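/*
 * kcpuset_ffs: find the first set bit; returns a CPU index plus one,
 * or zero if the set is empty.
 */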
cpuid_t
kcpuset_ffs(const kcpuset_t *kcp)
{

	for (size_t j = 0; j < kc_nfields; j++) {
		if (kcp->bits[j])
			return 32 * j + ffs(kcp->bits[j]);
	}
	return 0;
}

cpuid_t
kcpuset_ffs_intersecting(const kcpuset_t *kcp1, const kcpuset_t *kcp2)
{

	for (size_t j = 0; j < kc_nfields; j++) {
		uint32_t bits = kcp1->bits[j] & kcp2->bits[j];
		if (bits)
			return 32 * j + ffs(bits);
	}
	return 0;
}
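
/*
 * All set bits can thus be visited by consuming a scratch copy of a set
 * (an illustrative sketch; "tmp" is a clone the caller may modify):
 *
 *	cpuid_t i;
 *
 *	while ((i = kcpuset_ffs(tmp)) != 0) {
 *		kcpuset_clear(tmp, --i);
 *		... process CPU index i ...
 *	}
 */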

void
kcpuset_merge(kcpuset_t *kcp1, const kcpuset_t *kcp2)
{

	for (size_t j = 0; j < kc_nfields; j++) {
		kcp1->bits[j] |= kcp2->bits[j];
	}
}

void
kcpuset_intersect(kcpuset_t *kcp1, const kcpuset_t *kcp2)
{

	for (size_t j = 0; j < kc_nfields; j++) {
		kcp1->bits[j] &= kcp2->bits[j];
	}
}

void
kcpuset_remove(kcpuset_t *kcp1, const kcpuset_t *kcp2)
{

	for (size_t j = 0; j < kc_nfields; j++) {
		kcp1->bits[j] &= ~kcp2->bits[j];
	}
}

int
kcpuset_countset(const kcpuset_t *kcp)
{
	int count = 0;

	for (size_t j = 0; j < kc_nfields; j++) {
		count += popcount32(kcp->bits[j]);
	}
	return count;
}

/*
 * Routines to set/clear the bits atomically.
 */

void
kcpuset_atomic_set(kcpuset_t *kcp, cpuid_t i)
{
	const size_t j = i >> KC_SHIFT;

	KASSERT(j < kc_nfields);
	atomic_or_32(&kcp->bits[j], __BIT(i & KC_MASK));
}

void
kcpuset_atomic_clear(kcpuset_t *kcp, cpuid_t i)
{
	const size_t j = i >> KC_SHIFT;

	KASSERT(j < kc_nfields);
	atomic_and_32(&kcp->bits[j], ~(__BIT(i & KC_MASK)));
}

void
kcpuset_atomicly_intersect(kcpuset_t *kcp1, const kcpuset_t *kcp2)
{

	for (size_t j = 0; j < kc_nfields; j++) {
		if (kcp2->bits[j])
			atomic_and_32(&kcp1->bits[j], kcp2->bits[j]);
	}
}

void
kcpuset_atomicly_merge(kcpuset_t *kcp1, const kcpuset_t *kcp2)
{

	for (size_t j = 0; j < kc_nfields; j++) {
		if (kcp2->bits[j])
			atomic_or_32(&kcp1->bits[j], kcp2->bits[j]);
	}
}

void
kcpuset_atomicly_remove(kcpuset_t *kcp1, const kcpuset_t *kcp2)
{

	for (size_t j = 0; j < kc_nfields; j++) {
		if (kcp2->bits[j])
			atomic_and_32(&kcp1->bits[j], ~kcp2->bits[j]);
	}
}
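
/*
 * A typical use of the atomic routines is a CPU marking itself in a
 * shared set without further locking, e.g. (illustrative; "shared_set"
 * is a placeholder):
 *
 *	kcpuset_atomic_set(shared_set, cpu_index(curcpu()));
 */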