Home | History | Annotate | Line # | Download | only in kern
subr_kcpuset.c revision 1.15
      1  1.15  riastrad /*	$NetBSD: subr_kcpuset.c,v 1.15 2023/04/09 09:18:09 riastradh Exp $	*/
      2   1.1     rmind 
      3   1.1     rmind /*-
      4   1.1     rmind  * Copyright (c) 2011 The NetBSD Foundation, Inc.
      5   1.1     rmind  * All rights reserved.
      6   1.1     rmind  *
      7   1.1     rmind  * This code is derived from software contributed to The NetBSD Foundation
      8   1.1     rmind  * by Mindaugas Rasiukevicius.
      9   1.1     rmind  *
     10   1.1     rmind  * Redistribution and use in source and binary forms, with or without
     11   1.1     rmind  * modification, are permitted provided that the following conditions
     12   1.1     rmind  * are met:
     13   1.1     rmind  * 1. Redistributions of source code must retain the above copyright
     14   1.1     rmind  *    notice, this list of conditions and the following disclaimer.
     15   1.1     rmind  * 2. Redistributions in binary form must reproduce the above copyright
     16   1.1     rmind  *    notice, this list of conditions and the following disclaimer in the
     17   1.1     rmind  *    documentation and/or other materials provided with the distribution.
     18   1.1     rmind  *
     19   1.1     rmind  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20   1.1     rmind  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21   1.1     rmind  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22   1.1     rmind  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23   1.1     rmind  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24   1.1     rmind  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25   1.1     rmind  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26   1.1     rmind  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27   1.1     rmind  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28   1.1     rmind  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29   1.1     rmind  * POSSIBILITY OF SUCH DAMAGE.
     30   1.1     rmind  */
     31   1.1     rmind 
     32   1.1     rmind /*
     33   1.1     rmind  * Kernel CPU set implementation.
     34   1.1     rmind  *
     35   1.1     rmind  * Interface can be used by kernel subsystems as a unified dynamic CPU
     36   1.1     rmind  * bitset implementation handling many CPUs.  Facility also supports early
     37   1.1     rmind  * use by MD code on boot, as it fixups bitsets on further boot.
     38   1.1     rmind  *
     39   1.1     rmind  * TODO:
     40   1.1     rmind  * - Handle "reverse" bitset on fixup/grow.
     41   1.1     rmind  */
     42   1.1     rmind 
     43   1.1     rmind #include <sys/cdefs.h>
     44  1.15  riastrad __KERNEL_RCSID(0, "$NetBSD: subr_kcpuset.c,v 1.15 2023/04/09 09:18:09 riastradh Exp $");
     45   1.1     rmind 
     46   1.1     rmind #include <sys/param.h>
     47   1.1     rmind #include <sys/types.h>
     48   1.1     rmind 
     49   1.1     rmind #include <sys/atomic.h>
     50   1.1     rmind #include <sys/sched.h>
     51   1.1     rmind #include <sys/kcpuset.h>
     52   1.1     rmind #include <sys/pool.h>
     53   1.1     rmind 
     54   1.1     rmind /* Number of CPUs to support. */
     55   1.1     rmind #define	KC_MAXCPUS		roundup2(MAXCPUS, 32)
     56   1.1     rmind 
     57   1.1     rmind /*
     58   1.1     rmind  * Structure of dynamic CPU set in the kernel.
     59   1.1     rmind  */
struct kcpuset {
	/*
	 * Variable-sized bit field; the real length is kc_bitsize bytes
	 * (kc_nfields words), decided at kcpuset_sysinit() time.  A
	 * zero-length array is used instead of a C99 flexible array
	 * member because this struct is embedded as the last member of
	 * kcpuset_impl_t below, which a flexible array member would not
	 * permit.
	 */
	uint32_t		bits[0];
};

/*
 * Internal representation: the reference-counted header that precedes
 * the bits handed out to consumers.  Consumers only ever see a pointer
 * to kc_field; KC_GETSTRUCT() recovers this header from that pointer.
 */
typedef struct kcpuset_impl {
	/* Reference count. */
	u_int			kc_refcnt;
	/* Next to free, if non-NULL (used when multiple references). */
	struct kcpuset *	kc_next;
	/* Actual variable-sized field of bits. */
	struct kcpuset		kc_field;
} kcpuset_impl_t;
     72   1.1     rmind 
     73   1.1     rmind #define	KC_BITS_OFF		(offsetof(struct kcpuset_impl, kc_field))
     74   1.1     rmind #define	KC_GETSTRUCT(b)		((kcpuset_impl_t *)((char *)(b) - KC_BITS_OFF))
     75   1.9      matt #define	KC_GETCSTRUCT(b)	((const kcpuset_impl_t *)((const char *)(b) - KC_BITS_OFF))
     76   1.1     rmind 
     77   1.1     rmind /* Sizes of a single bitset. */
     78   1.1     rmind #define	KC_SHIFT		5
     79   1.1     rmind #define	KC_MASK			31
     80   1.1     rmind 
     81   1.1     rmind /* An array of noted early kcpuset creations and data. */
     82   1.1     rmind #define	KC_SAVE_NITEMS		8
     83   1.1     rmind 
     84   1.1     rmind /* Structures for early boot mechanism (must be statically initialised). */
     85   1.1     rmind static kcpuset_t **		kc_noted_early[KC_SAVE_NITEMS];
     86   1.1     rmind static uint32_t			kc_bits_early[KC_SAVE_NITEMS];
     87   1.1     rmind static int			kc_last_idx = 0;
     88   1.1     rmind static bool			kc_initialised = false;
     89   1.1     rmind 
     90   1.1     rmind #define	KC_BITSIZE_EARLY	sizeof(kc_bits_early[0])
     91   1.4     rmind #define	KC_NFIELDS_EARLY	1
     92   1.1     rmind 
     93   1.1     rmind /*
     94   1.1     rmind  * The size of whole bitset fields and amount of fields.
     95   1.1     rmind  * The whole size must statically initialise for early case.
     96   1.1     rmind  */
     97   1.1     rmind static size_t			kc_bitsize __read_mostly = KC_BITSIZE_EARLY;
     98   1.1     rmind static size_t			kc_nfields __read_mostly = KC_NFIELDS_EARLY;
     99   1.1     rmind 
    100   1.1     rmind static pool_cache_t		kc_cache __read_mostly;
    101   1.1     rmind 
    102   1.3     rmind static kcpuset_t *		kcpuset_create_raw(bool);
    103   1.1     rmind 
    104   1.1     rmind /*
    105   1.1     rmind  * kcpuset_sysinit: initialize the subsystem, transfer early boot cases
    106   1.1     rmind  * to dynamically allocated sets.
    107   1.1     rmind  */
void
kcpuset_sysinit(void)
{
	kcpuset_t *kc_dynamic[KC_SAVE_NITEMS], *kcp;
	int i, s;

	/* Set a kcpuset_t sizes. */
	kc_nfields = (KC_MAXCPUS >> KC_SHIFT);
	kc_bitsize = sizeof(uint32_t) * kc_nfields;
	KASSERT(kc_nfields != 0);
	KASSERT(kc_bitsize != 0);

	kc_cache = pool_cache_init(sizeof(kcpuset_impl_t) + kc_bitsize,
	    coherency_unit, 0, 0, "kcpuset", NULL, IPL_NONE, NULL, NULL, NULL);

	/*
	 * First, pre-allocate kcpuset entries.  Done before raising the
	 * IPL, since kcpuset_create_raw() allocates with PR_WAITOK and
	 * may therefore sleep.
	 */
	for (i = 0; i < kc_last_idx; i++) {
		kcp = kcpuset_create_raw(true);
		kc_dynamic[i] = kcp;
	}

	/*
	 * Prepare to convert all early noted kcpuset uses to dynamic sets.
	 * All processors, except the one we are currently running (primary),
	 * must not be spinned yet.  Since MD facilities can use kcpuset,
	 * raise the IPL to high.
	 */
	KASSERT(mp_online == false);

	s = splhigh();
	for (i = 0; i < kc_last_idx; i++) {
		/*
		 * Transfer the bits from early static storage to the kcpuset.
		 * Only KC_BITSIZE_EARLY bytes were live; the dynamic set was
		 * zeroed on allocation, so the remaining bits stay clear.
		 */
		KASSERT(kc_bitsize >= KC_BITSIZE_EARLY);
		memcpy(kc_dynamic[i], &kc_bits_early[i], KC_BITSIZE_EARLY);

		/*
		 * Store the new pointer, pointing to the allocated kcpuset.
		 * Note: we are not in an interrupt context and it is the only
		 * CPU running - thus store is safe (e.g. no need for pointer
		 * variable to be volatile).
		 */
		*kc_noted_early[i] = kc_dynamic[i];
	}
	kc_initialised = true;
	kc_last_idx = 0;
	splx(s);
}
    157   1.1     rmind 
    158   1.1     rmind /*
    159   1.1     rmind  * kcpuset_early_ptr: note an early boot use by saving the pointer and
    160   1.1     rmind  * returning a pointer to a static, temporary bit field.
    161   1.1     rmind  */
static kcpuset_t *
kcpuset_early_ptr(kcpuset_t **kcptr)
{
	kcpuset_t *kcp;
	int s;

	/* splhigh() protects the kc_last_idx / kc_noted_early[] state. */
	s = splhigh();
	if (kc_last_idx < KC_SAVE_NITEMS) {
		/*
		 * Save the pointer, return pointer to static early field.
		 * Need to zero it out.  kcpuset_sysinit() will later
		 * replace *kcptr with a dynamically allocated set.
		 */
		kc_noted_early[kc_last_idx] = kcptr;
		kcp = (kcpuset_t *)&kc_bits_early[kc_last_idx];
		kc_last_idx++;
		memset(kcp, 0, KC_BITSIZE_EARLY);
		KASSERT(kc_bitsize == KC_BITSIZE_EARLY);
	} else {
		panic("kcpuset(9): all early-use entries exhausted; "
		    "increase KC_SAVE_NITEMS\n");
	}
	splx(s);

	return kcp;
}
    187   1.1     rmind 
    188   1.1     rmind /*
    189   1.1     rmind  * Routines to create or destroy the CPU set.
    190   1.1     rmind  * Early boot case is handled.
    191   1.1     rmind  */
    192   1.1     rmind 
    193   1.1     rmind static kcpuset_t *
    194   1.3     rmind kcpuset_create_raw(bool zero)
    195   1.1     rmind {
    196   1.1     rmind 	kcpuset_impl_t *kc;
    197   1.1     rmind 
    198   1.1     rmind 	kc = pool_cache_get(kc_cache, PR_WAITOK);
    199   1.1     rmind 	kc->kc_refcnt = 1;
    200   1.1     rmind 	kc->kc_next = NULL;
    201   1.1     rmind 
    202   1.3     rmind 	if (zero) {
    203   1.3     rmind 		memset(&kc->kc_field, 0, kc_bitsize);
    204   1.3     rmind 	}
    205   1.3     rmind 
    206   1.1     rmind 	/* Note: return pointer to the actual field of bits. */
    207   1.1     rmind 	KASSERT((uint8_t *)kc + KC_BITS_OFF == (uint8_t *)&kc->kc_field);
    208   1.1     rmind 	return &kc->kc_field;
    209   1.1     rmind }
    210   1.1     rmind 
    211   1.1     rmind void
    212   1.3     rmind kcpuset_create(kcpuset_t **retkcp, bool zero)
    213   1.1     rmind {
    214   1.1     rmind 	if (__predict_false(!kc_initialised)) {
    215   1.1     rmind 		/* Early boot use - special case. */
    216   1.1     rmind 		*retkcp = kcpuset_early_ptr(retkcp);
    217   1.1     rmind 		return;
    218   1.1     rmind 	}
    219   1.3     rmind 	*retkcp = kcpuset_create_raw(zero);
    220   1.1     rmind }
    221   1.1     rmind 
/*
 * kcpuset_clone: allocate a new CPU set and copy the bits of an
 * existing one into it.  Note: the caller's retkcp (not a local) is
 * passed down so that the early-boot path in kcpuset_create() can
 * record the caller's pointer for the fixup in kcpuset_sysinit().
 */
void
kcpuset_clone(kcpuset_t **retkcp, const kcpuset_t *kcp)
{
	kcpuset_create(retkcp, false);
	memcpy(*retkcp, kcp, kc_bitsize);
}
    228   1.9      matt 
    229   1.9      matt void
    230   1.1     rmind kcpuset_destroy(kcpuset_t *kcp)
    231   1.1     rmind {
    232   1.2     rmind 	kcpuset_impl_t *kc;
    233   1.1     rmind 
    234   1.1     rmind 	KASSERT(kc_initialised);
    235   1.1     rmind 	KASSERT(kcp != NULL);
    236   1.1     rmind 
    237   1.1     rmind 	do {
    238   1.2     rmind 		kc = KC_GETSTRUCT(kcp);
    239   1.2     rmind 		kcp = kc->kc_next;
    240   1.1     rmind 		pool_cache_put(kc_cache, kc);
    241   1.2     rmind 	} while (kcp);
    242   1.1     rmind }
    243   1.1     rmind 
    244   1.1     rmind /*
    245   1.4     rmind  * Routines to reference/unreference the CPU set.
    246   1.1     rmind  * Note: early boot case is not supported by these routines.
    247   1.1     rmind  */
    248   1.1     rmind 
    249   1.1     rmind void
    250   1.1     rmind kcpuset_use(kcpuset_t *kcp)
    251   1.1     rmind {
    252   1.1     rmind 	kcpuset_impl_t *kc = KC_GETSTRUCT(kcp);
    253   1.1     rmind 
    254   1.1     rmind 	KASSERT(kc_initialised);
    255   1.1     rmind 	atomic_inc_uint(&kc->kc_refcnt);
    256   1.1     rmind }
    257   1.1     rmind 
/*
 * kcpuset_unuse: drop a reference on a CPU set.  When the last
 * reference goes away, either destroy the set immediately (lst ==
 * NULL) or chain it onto *lst for deferred destruction by the caller.
 */
void
kcpuset_unuse(kcpuset_t *kcp, kcpuset_t **lst)
{
	kcpuset_impl_t *kc = KC_GETSTRUCT(kcp);

	KASSERT(kc_initialised);
	KASSERT(kc->kc_refcnt > 0);

	/*
	 * Release/acquire pairing around the atomic decrement: the
	 * release makes this thread's prior accesses to the set visible
	 * before the count can reach zero; the acquire on the zero path
	 * orders the teardown after all other threads' accesses.
	 */
	membar_release();
	if (atomic_dec_uint_nv(&kc->kc_refcnt) != 0) {
		return;
	}
	membar_acquire();
	KASSERT(kc->kc_next == NULL);
	if (lst == NULL) {
		kcpuset_destroy(kcp);
		return;
	}
	/* Defer: push onto the caller-supplied to-free list. */
	kc->kc_next = *lst;
	*lst = kcp;
}
    279   1.1     rmind 
    280   1.1     rmind /*
    281   1.1     rmind  * Routines to transfer the CPU set from / to userspace.
    282   1.1     rmind  * Note: early boot case is not supported by these routines.
    283   1.1     rmind  */
    284   1.1     rmind 
/*
 * kcpuset_copyin: copy a CPU set from userspace.  Returns 0 on
 * success, EINVAL if len exceeds the kernel bitset size, or an error
 * from copyin().  Note (XXX): if len < kc_bitsize, only the first len
 * bytes are overwritten - the remaining bits are left untouched.
 */
int
kcpuset_copyin(const cpuset_t *ucp, kcpuset_t *kcp, size_t len)
{
	kcpuset_impl_t *kc __diagused = KC_GETSTRUCT(kcp);

	KASSERT(kc_initialised);
	KASSERT(kc->kc_refcnt > 0);
	KASSERT(kc->kc_next == NULL);

	if (len > kc_bitsize) { /* XXX */
		return EINVAL;
	}
	return copyin(ucp, kcp, len);
}
    299   1.1     rmind 
/*
 * kcpuset_copyout: copy a CPU set to userspace.  Returns 0 on success,
 * EINVAL if len exceeds the kernel bitset size, or an error from
 * copyout().  Note (XXX): only the first len bytes are copied out.
 */
int
kcpuset_copyout(kcpuset_t *kcp, cpuset_t *ucp, size_t len)
{
	kcpuset_impl_t *kc __diagused = KC_GETSTRUCT(kcp);

	KASSERT(kc_initialised);
	KASSERT(kc->kc_refcnt > 0);
	KASSERT(kc->kc_next == NULL);

	if (len > kc_bitsize) { /* XXX */
		return EINVAL;
	}
	return copyout(kcp, ucp, len);
}
    314   1.1     rmind 
    315   1.6     rmind void
    316   1.8     rmind kcpuset_export_u32(const kcpuset_t *kcp, uint32_t *bitfield, size_t len)
    317   1.6     rmind {
    318   1.6     rmind 	size_t rlen = MIN(kc_bitsize, len);
    319   1.6     rmind 
    320   1.6     rmind 	KASSERT(kcp != NULL);
    321   1.6     rmind 	memcpy(bitfield, kcp->bits, rlen);
    322   1.6     rmind }
    323   1.6     rmind 
    324   1.1     rmind /*
    325   1.4     rmind  * Routines to change bit field - zero, fill, copy, set, unset, etc.
    326   1.1     rmind  */
    327   1.4     rmind 
    328   1.1     rmind void
    329   1.1     rmind kcpuset_zero(kcpuset_t *kcp)
    330   1.1     rmind {
    331   1.1     rmind 
    332   1.1     rmind 	KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_refcnt > 0);
    333   1.1     rmind 	KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_next == NULL);
    334   1.1     rmind 	memset(kcp, 0, kc_bitsize);
    335   1.1     rmind }
    336   1.1     rmind 
    337   1.1     rmind void
    338   1.1     rmind kcpuset_fill(kcpuset_t *kcp)
    339   1.1     rmind {
    340   1.1     rmind 
    341   1.1     rmind 	KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_refcnt > 0);
    342   1.1     rmind 	KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_next == NULL);
    343   1.1     rmind 	memset(kcp, ~0, kc_bitsize);
    344   1.1     rmind }
    345   1.1     rmind 
    346   1.1     rmind void
    347   1.9      matt kcpuset_copy(kcpuset_t *dkcp, const kcpuset_t *skcp)
    348   1.4     rmind {
    349   1.4     rmind 
    350   1.4     rmind 	KASSERT(!kc_initialised || KC_GETSTRUCT(dkcp)->kc_refcnt > 0);
    351   1.4     rmind 	KASSERT(!kc_initialised || KC_GETSTRUCT(dkcp)->kc_next == NULL);
    352   1.4     rmind 	memcpy(dkcp, skcp, kc_bitsize);
    353   1.4     rmind }
    354   1.4     rmind 
    355   1.4     rmind void
    356   1.1     rmind kcpuset_set(kcpuset_t *kcp, cpuid_t i)
    357   1.1     rmind {
    358   1.1     rmind 	const size_t j = i >> KC_SHIFT;
    359   1.1     rmind 
    360   1.1     rmind 	KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_next == NULL);
    361   1.1     rmind 	KASSERT(j < kc_nfields);
    362   1.1     rmind 
    363  1.12   msaitoh 	kcp->bits[j] |= __BIT(i & KC_MASK);
    364   1.1     rmind }
    365   1.1     rmind 
    366   1.1     rmind void
    367   1.1     rmind kcpuset_clear(kcpuset_t *kcp, cpuid_t i)
    368   1.1     rmind {
    369   1.1     rmind 	const size_t j = i >> KC_SHIFT;
    370   1.1     rmind 
    371   1.9      matt 	KASSERT(!kc_initialised || KC_GETCSTRUCT(kcp)->kc_next == NULL);
    372   1.1     rmind 	KASSERT(j < kc_nfields);
    373   1.1     rmind 
    374  1.12   msaitoh 	kcp->bits[j] &= ~(__BIT(i & KC_MASK));
    375   1.1     rmind }
    376   1.1     rmind 
    377   1.4     rmind bool
    378   1.9      matt kcpuset_isset(const kcpuset_t *kcp, cpuid_t i)
    379   1.1     rmind {
    380   1.1     rmind 	const size_t j = i >> KC_SHIFT;
    381   1.1     rmind 
    382   1.1     rmind 	KASSERT(kcp != NULL);
    383   1.9      matt 	KASSERT(!kc_initialised || KC_GETCSTRUCT(kcp)->kc_refcnt > 0);
    384   1.9      matt 	KASSERT(!kc_initialised || KC_GETCSTRUCT(kcp)->kc_next == NULL);
    385   1.1     rmind 	KASSERT(j < kc_nfields);
    386   1.1     rmind 
    387  1.12   msaitoh 	return ((__BIT(i & KC_MASK)) & kcp->bits[j]) != 0;
    388   1.1     rmind }
    389   1.1     rmind 
    390   1.1     rmind bool
    391   1.9      matt kcpuset_isotherset(const kcpuset_t *kcp, cpuid_t i)
    392   1.4     rmind {
    393   1.4     rmind 	const size_t j2 = i >> KC_SHIFT;
    394  1.12   msaitoh 	const uint32_t mask = ~(__BIT(i & KC_MASK));
    395   1.4     rmind 
    396   1.4     rmind 	for (size_t j = 0; j < kc_nfields; j++) {
    397   1.4     rmind 		const uint32_t bits = kcp->bits[j];
    398   1.4     rmind 		if (bits && (j != j2 || (bits & mask) != 0)) {
    399   1.4     rmind 			return true;
    400   1.4     rmind 		}
    401   1.4     rmind 	}
    402   1.4     rmind 	return false;
    403   1.4     rmind }
    404   1.4     rmind 
    405   1.4     rmind bool
    406   1.9      matt kcpuset_iszero(const kcpuset_t *kcp)
    407   1.1     rmind {
    408   1.1     rmind 
    409   1.1     rmind 	for (size_t j = 0; j < kc_nfields; j++) {
    410   1.1     rmind 		if (kcp->bits[j] != 0) {
    411   1.1     rmind 			return false;
    412   1.1     rmind 		}
    413   1.1     rmind 	}
    414   1.1     rmind 	return true;
    415   1.1     rmind }
    416   1.1     rmind 
    417   1.1     rmind bool
    418   1.1     rmind kcpuset_match(const kcpuset_t *kcp1, const kcpuset_t *kcp2)
    419   1.1     rmind {
    420   1.1     rmind 
    421   1.1     rmind 	return memcmp(kcp1, kcp2, kc_bitsize) == 0;
    422   1.1     rmind }
    423   1.3     rmind 
    424   1.9      matt bool
    425   1.9      matt kcpuset_intersecting_p(const kcpuset_t *kcp1, const kcpuset_t *kcp2)
    426   1.9      matt {
    427   1.9      matt 
    428   1.9      matt 	for (size_t j = 0; j < kc_nfields; j++) {
    429   1.9      matt 		if (kcp1->bits[j] & kcp2->bits[j])
    430   1.9      matt 			return true;
    431   1.9      matt 	}
    432   1.9      matt 	return false;
    433   1.9      matt }
    434   1.9      matt 
    435   1.9      matt cpuid_t
    436   1.9      matt kcpuset_ffs(const kcpuset_t *kcp)
    437   1.9      matt {
    438   1.9      matt 
    439   1.9      matt 	for (size_t j = 0; j < kc_nfields; j++) {
    440   1.9      matt 		if (kcp->bits[j])
    441   1.9      matt 			return 32 * j + ffs(kcp->bits[j]);
    442   1.9      matt 	}
    443   1.9      matt 	return 0;
    444   1.9      matt }
    445   1.9      matt 
    446   1.9      matt cpuid_t
    447   1.9      matt kcpuset_ffs_intersecting(const kcpuset_t *kcp1, const kcpuset_t *kcp2)
    448   1.9      matt {
    449   1.9      matt 
    450   1.9      matt 	for (size_t j = 0; j < kc_nfields; j++) {
    451   1.9      matt 		uint32_t bits = kcp1->bits[j] & kcp2->bits[j];
    452   1.9      matt 		if (bits)
    453   1.9      matt 			return 32 * j + ffs(bits);
    454   1.9      matt 	}
    455   1.9      matt 	return 0;
    456   1.9      matt }
    457   1.9      matt 
    458   1.3     rmind void
    459   1.9      matt kcpuset_merge(kcpuset_t *kcp1, const kcpuset_t *kcp2)
    460   1.3     rmind {
    461   1.3     rmind 
    462   1.3     rmind 	for (size_t j = 0; j < kc_nfields; j++) {
    463   1.3     rmind 		kcp1->bits[j] |= kcp2->bits[j];
    464   1.3     rmind 	}
    465   1.3     rmind }
    466   1.3     rmind 
    467   1.5     rmind void
    468   1.9      matt kcpuset_intersect(kcpuset_t *kcp1, const kcpuset_t *kcp2)
    469   1.5     rmind {
    470   1.5     rmind 
    471   1.5     rmind 	for (size_t j = 0; j < kc_nfields; j++) {
    472   1.5     rmind 		kcp1->bits[j] &= kcp2->bits[j];
    473   1.5     rmind 	}
    474   1.5     rmind }
    475   1.5     rmind 
    476   1.9      matt void
    477   1.9      matt kcpuset_remove(kcpuset_t *kcp1, const kcpuset_t *kcp2)
    478   1.9      matt {
    479   1.9      matt 
    480   1.9      matt 	for (size_t j = 0; j < kc_nfields; j++) {
    481   1.9      matt 		kcp1->bits[j] &= ~kcp2->bits[j];
    482   1.9      matt 	}
    483   1.9      matt }
    484   1.9      matt 
    485   1.4     rmind int
    486  1.11     rmind kcpuset_countset(const kcpuset_t *kcp)
    487   1.4     rmind {
    488   1.4     rmind 	int count = 0;
    489   1.4     rmind 
    490   1.4     rmind 	for (size_t j = 0; j < kc_nfields; j++) {
    491   1.4     rmind 		count += popcount32(kcp->bits[j]);
    492   1.4     rmind 	}
    493   1.4     rmind 	return count;
    494   1.4     rmind }
    495   1.4     rmind 
    496   1.3     rmind /*
    497   1.3     rmind  * Routines to set/clear the flags atomically.
    498   1.3     rmind  */
    499   1.3     rmind 
    500   1.3     rmind void
    501   1.3     rmind kcpuset_atomic_set(kcpuset_t *kcp, cpuid_t i)
    502   1.3     rmind {
    503   1.3     rmind 	const size_t j = i >> KC_SHIFT;
    504   1.3     rmind 
    505   1.3     rmind 	KASSERT(j < kc_nfields);
    506  1.12   msaitoh 	atomic_or_32(&kcp->bits[j], __BIT(i & KC_MASK));
    507   1.3     rmind }
    508   1.3     rmind 
    509   1.3     rmind void
    510   1.3     rmind kcpuset_atomic_clear(kcpuset_t *kcp, cpuid_t i)
    511   1.3     rmind {
    512   1.3     rmind 	const size_t j = i >> KC_SHIFT;
    513   1.3     rmind 
    514   1.3     rmind 	KASSERT(j < kc_nfields);
    515  1.12   msaitoh 	atomic_and_32(&kcp->bits[j], ~(__BIT(i & KC_MASK)));
    516   1.3     rmind }
    517   1.9      matt 
    518   1.9      matt void
    519   1.9      matt kcpuset_atomicly_intersect(kcpuset_t *kcp1, const kcpuset_t *kcp2)
    520   1.9      matt {
    521   1.9      matt 
    522   1.9      matt 	for (size_t j = 0; j < kc_nfields; j++) {
    523   1.9      matt 		if (kcp2->bits[j])
    524   1.9      matt 			atomic_and_32(&kcp1->bits[j], kcp2->bits[j]);
    525   1.9      matt 	}
    526   1.9      matt }
    527   1.9      matt 
    528   1.9      matt void
    529   1.9      matt kcpuset_atomicly_merge(kcpuset_t *kcp1, const kcpuset_t *kcp2)
    530   1.9      matt {
    531   1.9      matt 
    532   1.9      matt 	for (size_t j = 0; j < kc_nfields; j++) {
    533   1.9      matt 		if (kcp2->bits[j])
    534   1.9      matt 			atomic_or_32(&kcp1->bits[j], kcp2->bits[j]);
    535   1.9      matt 	}
    536   1.9      matt }
    537   1.9      matt 
    538   1.9      matt void
    539   1.9      matt kcpuset_atomicly_remove(kcpuset_t *kcp1, const kcpuset_t *kcp2)
    540   1.9      matt {
    541   1.9      matt 
    542   1.9      matt 	for (size_t j = 0; j < kc_nfields; j++) {
    543   1.9      matt 		if (kcp2->bits[j])
    544   1.9      matt 			atomic_and_32(&kcp1->bits[j], ~kcp2->bits[j]);
    545   1.9      matt 	}
    546   1.9      matt }
    547