/*	$NetBSD: subr_kcpuset.c,v 1.1 2011/08/07 13:33:01 rmind Exp $	*/

/*-
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Kernel CPU set implementation.
 *
 * The interface can be used by kernel subsystems as a unified dynamic
 * CPU bitset implementation handling many CPUs.  The facility also
 * supports early use by MD code on boot: bitsets created early are
 * fixed up later in the boot process.
 *
 * TODO:
 * - Handle "reverse" bitset on fixup/grow.
 */
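
/*
 * Typical use, as an illustrative sketch only (the cpu_index() call on a
 * struct cpu_info pointer 'ci' is an assumption about the caller):
 *
 *	kcpuset_t *kcp;
 *
 *	kcpuset_create(&kcp);
 *	kcpuset_zero(kcp);
 *	kcpuset_set(kcp, cpu_index(ci));
 *	if (kcpuset_isset(kcp, cpu_index(ci))) {
 *		... CPU is in the set ...
 *	}
 *	kcpuset_destroy(kcp);
 */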

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_kcpuset.c,v 1.1 2011/08/07 13:33:01 rmind Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/sched.h>
#include <sys/kcpuset.h>
#include <sys/pool.h>

/* Number of CPUs to support. */
#define	KC_MAXCPUS		roundup2(MAXCPUS, 32)

/*
 * Structure of dynamic CPU set in the kernel.
 */
struct kcpuset {
	uint32_t		bits[0];
};

typedef struct kcpuset_impl {
	/* Reference count. */
	u_int			kc_refcnt;
	/* Next to free, if non-NULL (used when multiple references). */
	struct kcpuset *	kc_next;
	/* Actual variable-sized field of bits. */
	struct kcpuset		kc_field;
} kcpuset_impl_t;

#define	KC_BITS_OFF		(offsetof(struct kcpuset_impl, kc_field))
#define	KC_GETSTRUCT(b)		((kcpuset_impl_t *)((char *)(b) - KC_BITS_OFF))
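
/*
 * The kcpuset_t pointer handed out to consumers points at the kc_field
 * member inside kcpuset_impl_t; KC_GETSTRUCT() subtracts the offset of
 * that member to recover the containing structure.  For example:
 *
 *	kcpuset_impl_t *kc = KC_GETSTRUCT(kcp);
 *	KASSERT(&kc->kc_field == kcp);
 */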

/* Shift and mask used to map a CPU id onto a 32-bit word of the bit field. */
#define	KC_SHIFT		5
#define	KC_MASK			31
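
/*
 * Worked example (illustrative): CPU id 37 lands in word 37 >> KC_SHIFT == 1
 * of kcpuset::bits, at bit 37 & KC_MASK == 5, i.e. mask 1 << 5.
 */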

/* Number of early kcpuset uses (and their bit fields) that can be noted. */
#define	KC_SAVE_NITEMS		8

/* Structures for early boot mechanism (must be statically initialised). */
static kcpuset_t **		kc_noted_early[KC_SAVE_NITEMS];
static uint32_t			kc_bits_early[KC_SAVE_NITEMS];
static int			kc_last_idx = 0;
static bool			kc_initialised = false;

#define	KC_BITSIZE_EARLY	sizeof(kc_bits_early[0])
#define	KC_NFIELDS_EARLY	(KC_BITSIZE_EARLY / sizeof(uint32_t))
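
/*
 * Note: each early slot is a single 32-bit field, so a set created before
 * kcpuset_sysinit() can only describe CPU ids 0..31.
 */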

/*
 * The size (in bytes) of the whole bit field and the number of 32-bit
 * fields in it.  Both must be statically initialised for the early
 * boot case.
 */
static size_t			kc_bitsize __read_mostly = KC_BITSIZE_EARLY;
static size_t			kc_nfields __read_mostly = KC_NFIELDS_EARLY;

static pool_cache_t		kc_cache __read_mostly;

static kcpuset_t *		kcpuset_create_raw(void);

/*
 * kcpuset_sysinit: initialize the subsystem and transfer the early boot
 * cases to dynamically allocated sets.
 */
void
kcpuset_sysinit(void)
{
	kcpuset_t *kc_dynamic[KC_SAVE_NITEMS], *kcp;
	int i, s;

	/* Set the kcpuset_t sizes. */
	kc_nfields = (KC_MAXCPUS >> KC_SHIFT);
	kc_bitsize = sizeof(uint32_t) * kc_nfields;

	kc_cache = pool_cache_init(sizeof(kcpuset_impl_t) + kc_bitsize,
	    coherency_unit, 0, 0, "kcpuset", NULL, IPL_NONE, NULL, NULL, NULL);

	/* First, pre-allocate kcpuset entries. */
	for (i = 0; i < kc_last_idx; i++) {
		kcp = kcpuset_create_raw();
		kcpuset_zero(kcp);
		kc_dynamic[i] = kcp;
	}

	/*
	 * Prepare to convert all early noted kcpuset uses to dynamic sets.
	 * No processor other than the primary CPU we are currently running
	 * on may have been spun up yet.  Since MD facilities can use
	 * kcpuset, raise the IPL to high.
	 */
	KASSERT(mp_online == false);

	s = splhigh();
	for (i = 0; i < kc_last_idx; i++) {
		/*
		 * Transfer the bits from early static storage to the kcpuset.
		 */
		KASSERT(kc_bitsize >= KC_BITSIZE_EARLY);
		memcpy(kc_dynamic[i], &kc_bits_early[i], KC_BITSIZE_EARLY);

		/*
		 * Store the new pointer, pointing to the allocated kcpuset.
		 * Note: we are not in an interrupt context and this is the
		 * only CPU running - thus the store is safe (e.g. there is
		 * no need for the pointer variable to be volatile).
		 */
		*kc_noted_early[i] = kc_dynamic[i];
	}
	kc_initialised = true;
	kc_last_idx = 0;
	splx(s);
}
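
/*
 * Early boot flow, as an illustrative sketch (the static pointer 'kcp' is
 * an assumed example; the saved pointer must stay valid until
 * kcpuset_sysinit() rewrites it):
 *
 *	static kcpuset_t *kcp;
 *
 *	kcpuset_create(&kcp);	before kcpuset_sysinit(), kcp points into
 *				the static kc_bits_early[] storage
 *	kcpuset_set(kcp, 0);	bits land in the static storage
 *	...
 *	kcpuset_sysinit();	kcp is rewritten to point to a dynamically
 *				allocated set with the early bits copied over
 */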

/*
 * kcpuset_early_ptr: note an early boot use by saving the pointer and
 * returning a pointer to a static, temporary bit field.
 */
static kcpuset_t *
kcpuset_early_ptr(kcpuset_t **kcptr)
{
	kcpuset_t *kcp;
	int s;

	s = splhigh();
	if (kc_last_idx < KC_SAVE_NITEMS) {
		/*
		 * Save the caller's pointer and return a pointer to the
		 * matching static early field, zeroed out.
		 */
		kc_noted_early[kc_last_idx] = kcptr;
		kcp = (kcpuset_t *)&kc_bits_early[kc_last_idx];
		kc_last_idx++;
		memset(kcp, 0, KC_BITSIZE_EARLY);
		KASSERT(kc_bitsize == KC_BITSIZE_EARLY);
	} else {
		panic("kcpuset(9): all early-use entries exhausted; "
		    "increase KC_SAVE_NITEMS\n");
	}
	splx(s);

	return kcp;
}

/*
 * Routines to create or destroy the CPU set.
 * Early boot case is handled.
 */

static kcpuset_t *
kcpuset_create_raw(void)
{
	kcpuset_impl_t *kc;

	kc = pool_cache_get(kc_cache, PR_WAITOK);
	kc->kc_refcnt = 1;
	kc->kc_next = NULL;

	/* Note: return pointer to the actual field of bits. */
	KASSERT((uint8_t *)kc + KC_BITS_OFF == (uint8_t *)&kc->kc_field);
	return &kc->kc_field;
}

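/*
 * kcpuset_create: allocate a CPU set and store the pointer via 'retkcp'.
 * Before kcpuset_sysinit() the pointer refers to static early-boot storage
 * (already zeroed); afterwards it refers to a pool allocation whose bits
 * are not cleared here - use kcpuset_zero() if a clean set is needed.
 */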
void
kcpuset_create(kcpuset_t **retkcp)
{

	if (__predict_false(!kc_initialised)) {
		/* Early boot use - special case. */
		*retkcp = kcpuset_early_ptr(retkcp);
		return;
	}
	*retkcp = kcpuset_create_raw();
}

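/*
 * kcpuset_destroy: free a CPU set, together with any sets that were
 * chained onto it via kc_next by kcpuset_unuse().
 */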
void
kcpuset_destroy(kcpuset_t *kcp)
{
	kcpuset_impl_t *kc;

	KASSERT(kc_initialised);
	KASSERT(kcp != NULL);

	do {
		kc = KC_GETSTRUCT(kcp);
		kcp = kc->kc_next;
		pool_cache_put(kc_cache, kc);
	} while (kcp);
}

/*
 * Routines to copy or reference/unreference the CPU set.
 * Note: early boot case is not supported by these routines.
 */

void
kcpuset_copy(kcpuset_t *dkcp, kcpuset_t *skcp)
{

	KASSERT(kc_initialised);
	KASSERT(KC_GETSTRUCT(dkcp)->kc_refcnt == 1);
	memcpy(dkcp, skcp, kc_bitsize);
}

void
kcpuset_use(kcpuset_t *kcp)
{
	kcpuset_impl_t *kc = KC_GETSTRUCT(kcp);

	KASSERT(kc_initialised);
	atomic_inc_uint(&kc->kc_refcnt);
}

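/*
 * kcpuset_unuse: drop a reference.  On the last release, either destroy
 * the set immediately or, if 'lst' is non-NULL, chain it onto *lst so that
 * the caller can free the whole chain later with kcpuset_destroy() (e.g.
 * once it is safe to call into the pool cache).
 */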
void
kcpuset_unuse(kcpuset_t *kcp, kcpuset_t **lst)
{
	kcpuset_impl_t *kc = KC_GETSTRUCT(kcp);

	KASSERT(kc_initialised);
	KASSERT(kc->kc_refcnt > 0);

	if (atomic_dec_uint_nv(&kc->kc_refcnt) != 0) {
		return;
	}
	KASSERT(kc->kc_next == NULL);
	if (lst == NULL) {
		kcpuset_destroy(kcp);
		return;
	}
	kc->kc_next = *lst;
	*lst = kcp;
}
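
/*
 * Illustrative sketch of deferred release (an assumed usage pattern, not
 * taken from this file):
 *
 *	kcpuset_t *deferred = NULL;
 *
 *	kcpuset_unuse(kcp, &deferred);		last reference is chained
 *						onto 'deferred', not freed
 *	...
 *	if (deferred != NULL)
 *		kcpuset_destroy(deferred);	frees the whole chain
 */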

/*
 * Routines to transfer the CPU set from / to userspace.
 * Note: early boot case is not supported by these routines.
 */

int
kcpuset_copyin(const cpuset_t *ucp, kcpuset_t *kcp, size_t len)
{
	kcpuset_impl_t *kc = KC_GETSTRUCT(kcp);

	KASSERT(kc_initialised);
	KASSERT(kc->kc_refcnt > 0);
	KASSERT(kc->kc_next == NULL);
	(void)kc;

	if (len != kc_bitsize) { /* XXX */
		return EINVAL;
	}
	return copyin(ucp, kcp, kc_bitsize);
}

int
kcpuset_copyout(kcpuset_t *kcp, cpuset_t *ucp, size_t len)
{
	kcpuset_impl_t *kc = KC_GETSTRUCT(kcp);

	KASSERT(kc_initialised);
	KASSERT(kc->kc_refcnt > 0);
	KASSERT(kc->kc_next == NULL);
	(void)kc;

	if (len != kc_bitsize) { /* XXX */
		return EINVAL;
	}
	return copyout(kcp, ucp, kc_bitsize);
}
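
/*
 * Illustrative sketch (an assumption about typical use): a system call
 * handler pulling an affinity mask in from userland, where 'ucp' and
 * 'size' are supplied by the caller:
 *
 *	kcpuset_t *kcp;
 *	int error;
 *
 *	kcpuset_create(&kcp);
 *	kcpuset_zero(kcp);
 *	error = kcpuset_copyin(ucp, kcp, size);
 *	if (error != 0) {
 *		kcpuset_unuse(kcp, NULL);
 *		return error;
 *	}
 */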

/*
 * Routines to change bit field - zero, fill, set, unset, etc.
 */

void
kcpuset_zero(kcpuset_t *kcp)
{

	KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_refcnt > 0);
	KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_next == NULL);
	memset(kcp, 0, kc_bitsize);
}

void
kcpuset_fill(kcpuset_t *kcp)
{

	KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_refcnt > 0);
	KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_next == NULL);
	memset(kcp, ~0, kc_bitsize);
}

void
kcpuset_set(kcpuset_t *kcp, cpuid_t i)
{
	const size_t j = i >> KC_SHIFT;

	KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_next == NULL);
	KASSERT(j < kc_nfields);

	kcp->bits[j] |= 1U << (i & KC_MASK);
}

void
kcpuset_clear(kcpuset_t *kcp, cpuid_t i)
{
	const size_t j = i >> KC_SHIFT;

	KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_next == NULL);
	KASSERT(j < kc_nfields);

	kcp->bits[j] &= ~(1U << (i & KC_MASK));
}

int
kcpuset_isset(kcpuset_t *kcp, cpuid_t i)
{
	const size_t j = i >> KC_SHIFT;

	KASSERT(kcp != NULL);
	KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_refcnt > 0);
	KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_next == NULL);
	KASSERT(j < kc_nfields);

	return ((1U << (i & KC_MASK)) & kcp->bits[j]) != 0;
}

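/* kcpuset_iszero: return true if no bits are set in the CPU set. */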
bool
kcpuset_iszero(kcpuset_t *kcp)
{

	for (size_t j = 0; j < kc_nfields; j++) {
		if (kcp->bits[j] != 0) {
			return false;
		}
	}
	return true;
}

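/* kcpuset_match: return true if the two CPU sets have identical contents. */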
bool
kcpuset_match(const kcpuset_t *kcp1, const kcpuset_t *kcp2)
{

	return memcmp(kcp1, kcp2, kc_bitsize) == 0;
}