/*	$NetBSD: subr_kcpuset.c,v 1.9 2013/07/17 22:36:26 matt Exp $	*/

/*-
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Kernel CPU set implementation.
 *
 * The interface can be used by kernel subsystems as a unified dynamic CPU
 * bitset implementation handling many CPUs.  The facility also supports
 * early use by MD code on boot: such bitsets are fixed up (moved to
 * dynamically allocated storage) later in the boot process.
 *
 * TODO:
 * - Handle "reverse" bitset on fixup/grow.
 */
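/*
 * A minimal usage sketch of this interface (illustrative only; the caller
 * and the "ci" cpu_info pointer are hypothetical):
 *
 *	kcpuset_t *kcp;
 *
 *	kcpuset_create(&kcp, true);		   allocate a zeroed set
 *	kcpuset_set(kcp, cpu_index(ci));	   mark one CPU as a member
 *	if (kcpuset_isset(kcp, cpu_index(ci)))
 *		... the CPU is in the set ...
 *	kcpuset_destroy(kcp);			   release the set
 */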

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_kcpuset.c,v 1.9 2013/07/17 22:36:26 matt Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/sched.h>
#include <sys/kcpuset.h>
#include <sys/pool.h>

/* Number of CPUs to support. */
#define	KC_MAXCPUS		roundup2(MAXCPUS, 32)

/*
 * Structure of dynamic CPU set in the kernel.
 */
struct kcpuset {
	uint32_t		bits[0];
};

typedef struct kcpuset_impl {
	/* Reference count. */
	u_int			kc_refcnt;
	/* Next to free, if non-NULL (used when multiple references). */
	struct kcpuset *	kc_next;
	/* Actual variable-sized field of bits. */
	struct kcpuset		kc_field;
} kcpuset_impl_t;

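/*
 * KC_GETSTRUCT/KC_GETCSTRUCT convert a pointer to the externally visible
 * bit field (kc_field) back to the enclosing kcpuset_impl_t, i.e. the usual
 * "container of" pointer arithmetic using the field offset.
 */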
#define	KC_BITS_OFF		(offsetof(struct kcpuset_impl, kc_field))
#define	KC_GETSTRUCT(b)		((kcpuset_impl_t *)((char *)(b) - KC_BITS_OFF))
#define	KC_GETCSTRUCT(b)	((const kcpuset_impl_t *)((const char *)(b) - KC_BITS_OFF))

/* Shift and mask to index the 32-bit words of a bitset. */
#define	KC_SHIFT		5
#define	KC_MASK			31

/* An array of noted early kcpuset creations and data. */
#define	KC_SAVE_NITEMS		8

/* Structures for early boot mechanism (must be statically initialised). */
static kcpuset_t **		kc_noted_early[KC_SAVE_NITEMS];
static uint32_t			kc_bits_early[KC_SAVE_NITEMS];
static int			kc_last_idx = 0;
static bool			kc_initialised = false;

#define	KC_BITSIZE_EARLY	sizeof(kc_bits_early[0])
#define	KC_NFIELDS_EARLY	1
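/*
 * Note: until kcpuset_sysinit() runs, each early set is backed by a single
 * uint32_t (KC_NFIELDS_EARLY fields), i.e. at most 32 CPUs can be
 * represented during early boot.
 */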

/*
 * The size of the whole bitset (in bytes) and the number of its fields.
 * Both must be statically initialised for the early boot case.
 */
static size_t			kc_bitsize __read_mostly = KC_BITSIZE_EARLY;
static size_t			kc_nfields __read_mostly = KC_NFIELDS_EARLY;

static pool_cache_t		kc_cache __read_mostly;

static kcpuset_t *		kcpuset_create_raw(bool);

/*
 * kcpuset_sysinit: initialize the subsystem, transfer early boot cases
 * to dynamically allocated sets.
 */
void
kcpuset_sysinit(void)
{
	kcpuset_t *kc_dynamic[KC_SAVE_NITEMS], *kcp;
	int i, s;

	/* Set the kcpuset_t sizes. */
	kc_nfields = (KC_MAXCPUS >> KC_SHIFT);
	kc_bitsize = sizeof(uint32_t) * kc_nfields;
	KASSERT(kc_nfields != 0 && kc_bitsize != 0);

	kc_cache = pool_cache_init(sizeof(kcpuset_impl_t) + kc_bitsize,
	    coherency_unit, 0, 0, "kcpuset", NULL, IPL_NONE, NULL, NULL, NULL);

	/* First, pre-allocate kcpuset entries. */
	for (i = 0; i < kc_last_idx; i++) {
		kcp = kcpuset_create_raw(true);
		kc_dynamic[i] = kcp;
	}

	/*
	 * Convert all early-noted kcpuset uses to the dynamic sets.  No
	 * processor other than the one we are currently running on (the
	 * primary) may have been spun up yet.  Since MD facilities can use
	 * kcpuset, raise the IPL to high.
	 */
	KASSERT(mp_online == false);

	s = splhigh();
	for (i = 0; i < kc_last_idx; i++) {
		/*
		 * Transfer the bits from early static storage to the kcpuset.
		 */
		KASSERT(kc_bitsize >= KC_BITSIZE_EARLY);
		memcpy(kc_dynamic[i], &kc_bits_early[i], KC_BITSIZE_EARLY);

		/*
		 * Store the new pointer, pointing to the allocated kcpuset.
		 * Note: we are not in an interrupt context and this is the
		 * only CPU running, thus the store is safe (i.e. there is no
		 * need for the pointer variable to be volatile).
		 */
		*kc_noted_early[i] = kc_dynamic[i];
	}
	kc_initialised = true;
	kc_last_idx = 0;
	splx(s);
}

/*
 * kcpuset_early_ptr: note an early boot use by saving the pointer and
 * returning a pointer to a static, temporary bit field.
 */
static kcpuset_t *
kcpuset_early_ptr(kcpuset_t **kcptr)
{
	kcpuset_t *kcp;
	int s;

	s = splhigh();
	if (kc_last_idx < KC_SAVE_NITEMS) {
		/*
		 * Save the pointer, return pointer to static early field.
		 * Need to zero it out.
		 */
		kc_noted_early[kc_last_idx] = kcptr;
		kcp = (kcpuset_t *)&kc_bits_early[kc_last_idx];
		kc_last_idx++;
		memset(kcp, 0, KC_BITSIZE_EARLY);
		KASSERT(kc_bitsize == KC_BITSIZE_EARLY);
	} else {
		panic("kcpuset(9): all early-use entries exhausted; "
		    "increase KC_SAVE_NITEMS\n");
	}
	splx(s);

	return kcp;
}

/*
 * Routines to create or destroy the CPU set.
 * Early boot case is handled.
 */

static kcpuset_t *
kcpuset_create_raw(bool zero)
{
	kcpuset_impl_t *kc;

	kc = pool_cache_get(kc_cache, PR_WAITOK);
	kc->kc_refcnt = 1;
	kc->kc_next = NULL;

	if (zero) {
		memset(&kc->kc_field, 0, kc_bitsize);
	}

	/* Note: return pointer to the actual field of bits. */
	KASSERT((uint8_t *)kc + KC_BITS_OFF == (uint8_t *)&kc->kc_field);
	return &kc->kc_field;
}

void
kcpuset_create(kcpuset_t **retkcp, bool zero)
{
	if (__predict_false(!kc_initialised)) {
		/* Early boot use - special case. */
		*retkcp = kcpuset_early_ptr(retkcp);
		return;
	}
	*retkcp = kcpuset_create_raw(zero);
}

void
kcpuset_clone(kcpuset_t **retkcp, const kcpuset_t *kcp)
{
	kcpuset_create(retkcp, false);
	memcpy(*retkcp, kcp, kc_bitsize);
}

void
kcpuset_destroy(kcpuset_t *kcp)
{
	kcpuset_impl_t *kc;

	KASSERT(kc_initialised);
	KASSERT(kcp != NULL);

	do {
		kc = KC_GETSTRUCT(kcp);
		kcp = kc->kc_next;
		pool_cache_put(kc_cache, kc);
	} while (kcp);
}

/*
 * Routines to reference/unreference the CPU set.
 * Note: early boot case is not supported by these routines.
 */

void
kcpuset_use(kcpuset_t *kcp)
{
	kcpuset_impl_t *kc = KC_GETSTRUCT(kcp);

	KASSERT(kc_initialised);
	atomic_inc_uint(&kc->kc_refcnt);
}

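/*
 * kcpuset_unuse: drop a reference.  On the last reference, either destroy
 * the set immediately (lst == NULL) or chain it onto the caller-supplied
 * deferred-destruction list via kc_next for a later kcpuset_destroy().
 */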
void
kcpuset_unuse(kcpuset_t *kcp, kcpuset_t **lst)
{
	kcpuset_impl_t *kc = KC_GETSTRUCT(kcp);

	KASSERT(kc_initialised);
	KASSERT(kc->kc_refcnt > 0);

	if (atomic_dec_uint_nv(&kc->kc_refcnt) != 0) {
		return;
	}
	KASSERT(kc->kc_next == NULL);
	if (lst == NULL) {
		kcpuset_destroy(kcp);
		return;
	}
	kc->kc_next = *lst;
	*lst = kcp;
}

/*
 * Routines to transfer the CPU set from / to userspace.
 * Note: early boot case is not supported by these routines.
 */

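/*
 * Illustrative only: a typical consumer (e.g. a scheduler affinity system
 * call) would create a set and fill it from the user-supplied cpuset_t;
 * the "ucp" and "ulen" names below are hypothetical:
 *
 *	kcpuset_t *kcp;
 *	int error;
 *
 *	kcpuset_create(&kcp, true);
 *	error = kcpuset_copyin(ucp, kcp, ulen);
 *	if (error != 0)
 *		kcpuset_unuse(kcp, NULL);
 */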
int
kcpuset_copyin(const cpuset_t *ucp, kcpuset_t *kcp, size_t len)
{
	kcpuset_impl_t *kc __unused = KC_GETSTRUCT(kcp);

	KASSERT(kc_initialised);
	KASSERT(kc->kc_refcnt > 0);
	KASSERT(kc->kc_next == NULL);

	if (len > kc_bitsize) { /* XXX */
		return EINVAL;
	}
	return copyin(ucp, kcp, len);
}

int
kcpuset_copyout(kcpuset_t *kcp, cpuset_t *ucp, size_t len)
{
	kcpuset_impl_t *kc __unused = KC_GETSTRUCT(kcp);

	KASSERT(kc_initialised);
	KASSERT(kc->kc_refcnt > 0);
	KASSERT(kc->kc_next == NULL);

	if (len > kc_bitsize) { /* XXX */
		return EINVAL;
	}
	return copyout(kcp, ucp, len);
}

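/*
 * kcpuset_export_u32: copy up to "len" bytes of the raw 32-bit words of
 * the set into a caller-provided buffer.
 */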
void
kcpuset_export_u32(const kcpuset_t *kcp, uint32_t *bitfield, size_t len)
{
	size_t rlen = MIN(kc_bitsize, len);

	KASSERT(kcp != NULL);
	memcpy(bitfield, kcp->bits, rlen);
}

/*
 * Routines to change bit field - zero, fill, copy, set, unset, etc.
 */

void
kcpuset_zero(kcpuset_t *kcp)
{

	KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_refcnt > 0);
	KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_next == NULL);
	memset(kcp, 0, kc_bitsize);
}

void
kcpuset_fill(kcpuset_t *kcp)
{

	KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_refcnt > 0);
	KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_next == NULL);
	memset(kcp, ~0, kc_bitsize);
}

void
kcpuset_copy(kcpuset_t *dkcp, const kcpuset_t *skcp)
{

	KASSERT(!kc_initialised || KC_GETSTRUCT(dkcp)->kc_refcnt > 0);
	KASSERT(!kc_initialised || KC_GETSTRUCT(dkcp)->kc_next == NULL);
	memcpy(dkcp, skcp, kc_bitsize);
}

void
kcpuset_set(kcpuset_t *kcp, cpuid_t i)
{
	const size_t j = i >> KC_SHIFT;

	KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_next == NULL);
	KASSERT(j < kc_nfields);

	kcp->bits[j] |= 1 << (i & KC_MASK);
}

void
kcpuset_clear(kcpuset_t *kcp, cpuid_t i)
{
	const size_t j = i >> KC_SHIFT;

	KASSERT(!kc_initialised || KC_GETCSTRUCT(kcp)->kc_next == NULL);
	KASSERT(j < kc_nfields);

	kcp->bits[j] &= ~(1 << (i & KC_MASK));
}

bool
kcpuset_isset(const kcpuset_t *kcp, cpuid_t i)
{
	const size_t j = i >> KC_SHIFT;

	KASSERT(kcp != NULL);
	KASSERT(!kc_initialised || KC_GETCSTRUCT(kcp)->kc_refcnt > 0);
	KASSERT(!kc_initialised || KC_GETCSTRUCT(kcp)->kc_next == NULL);
	KASSERT(j < kc_nfields);

	return ((1 << (i & KC_MASK)) & kcp->bits[j]) != 0;
}

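/*
 * kcpuset_isotherset: return true if any CPU other than "i" is set.
 */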
bool
kcpuset_isotherset(const kcpuset_t *kcp, cpuid_t i)
{
	const size_t j2 = i >> KC_SHIFT;
	const uint32_t mask = ~(1 << (i & KC_MASK));

	for (size_t j = 0; j < kc_nfields; j++) {
		const uint32_t bits = kcp->bits[j];
		if (bits && (j != j2 || (bits & mask) != 0)) {
			return true;
		}
	}
	return false;
}

bool
kcpuset_iszero(const kcpuset_t *kcp)
{

	for (size_t j = 0; j < kc_nfields; j++) {
		if (kcp->bits[j] != 0) {
			return false;
		}
	}
	return true;
}

bool
kcpuset_match(const kcpuset_t *kcp1, const kcpuset_t *kcp2)
{

	return memcmp(kcp1, kcp2, kc_bitsize) == 0;
}

bool
kcpuset_intersecting_p(const kcpuset_t *kcp1, const kcpuset_t *kcp2)
{

	for (size_t j = 0; j < kc_nfields; j++) {
		if (kcp1->bits[j] & kcp2->bits[j])
			return true;
	}
	return false;
}

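/*
 * kcpuset_ffs: return the one-based index of the lowest CPU set, or zero
 * if the set is empty (same convention as ffs()).
 */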
cpuid_t
kcpuset_ffs(const kcpuset_t *kcp)
{

	for (size_t j = 0; j < kc_nfields; j++) {
		if (kcp->bits[j])
			return 32 * j + ffs(kcp->bits[j]);
	}
	return 0;
}

cpuid_t
kcpuset_ffs_intersecting(const kcpuset_t *kcp1, const kcpuset_t *kcp2)
{

	for (size_t j = 0; j < kc_nfields; j++) {
		uint32_t bits = kcp1->bits[j] & kcp2->bits[j];
		if (bits)
			return 32 * j + ffs(bits);
	}
	return 0;
}

void
kcpuset_merge(kcpuset_t *kcp1, const kcpuset_t *kcp2)
{

	for (size_t j = 0; j < kc_nfields; j++) {
		kcp1->bits[j] |= kcp2->bits[j];
	}
}

void
kcpuset_intersect(kcpuset_t *kcp1, const kcpuset_t *kcp2)
{

	for (size_t j = 0; j < kc_nfields; j++) {
		kcp1->bits[j] &= kcp2->bits[j];
	}
}

void
kcpuset_remove(kcpuset_t *kcp1, const kcpuset_t *kcp2)
{

	for (size_t j = 0; j < kc_nfields; j++) {
		kcp1->bits[j] &= ~kcp2->bits[j];
	}
}

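/*
 * kcpuset_countset: return the number of CPUs set in the bitset.
 */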
int
kcpuset_countset(kcpuset_t *kcp)
{
	int count = 0;

	for (size_t j = 0; j < kc_nfields; j++) {
		count += popcount32(kcp->bits[j]);
	}
	return count;
}

/*
 * Routines to set/clear the flags atomically.
 */

void
kcpuset_atomic_set(kcpuset_t *kcp, cpuid_t i)
{
	const size_t j = i >> KC_SHIFT;

	KASSERT(j < kc_nfields);
	atomic_or_32(&kcp->bits[j], 1 << (i & KC_MASK));
}

void
kcpuset_atomic_clear(kcpuset_t *kcp, cpuid_t i)
{
	const size_t j = i >> KC_SHIFT;

	KASSERT(j < kc_nfields);
	atomic_and_32(&kcp->bits[j], ~(1 << (i & KC_MASK)));
}

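/*
 * Note: the kcpuset_atomicly_* operations below are atomic per 32-bit word
 * only; the set as a whole is not updated atomically.
 */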
void
kcpuset_atomicly_intersect(kcpuset_t *kcp1, const kcpuset_t *kcp2)
{

	for (size_t j = 0; j < kc_nfields; j++) {
		if (kcp2->bits[j])
			atomic_and_32(&kcp1->bits[j], kcp2->bits[j]);
	}
}

void
kcpuset_atomicly_merge(kcpuset_t *kcp1, const kcpuset_t *kcp2)
{

	for (size_t j = 0; j < kc_nfields; j++) {
		if (kcp2->bits[j])
			atomic_or_32(&kcp1->bits[j], kcp2->bits[j]);
	}
}

void
kcpuset_atomicly_remove(kcpuset_t *kcp1, const kcpuset_t *kcp2)
{

	for (size_t j = 0; j < kc_nfields; j++) {
		if (kcp2->bits[j])
			atomic_and_32(&kcp1->bits[j], ~kcp2->bits[j]);
	}
}