/*	$NetBSD: subr_kcpuset.c,v 1.2 2011/08/07 21:13:05 rmind Exp $	*/

/*-
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Kernel CPU set implementation.
 *
 * The interface can be used by kernel subsystems as a unified dynamic
 * CPU bitset implementation handling many CPUs.  The facility also
 * supports early use by MD code during boot, since the bitsets are
 * fixed up later in the boot process.
 *
 * TODO:
 * - Handle "reverse" bitset on fixup/grow.
 */
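
/*
 * Usage sketch (illustrative only, for the post-kcpuset_sysinit() case;
 * note that a dynamically allocated set is not zeroed by kcpuset_create(),
 * so kcpuset_zero() is called explicitly here):
 *
 *	kcpuset_t *kcs;
 *
 *	kcpuset_create(&kcs);
 *	kcpuset_zero(kcs);
 *	kcpuset_set(kcs, cpu_index(curcpu()));
 *	if (!kcpuset_iszero(kcs)) {
 *		...
 *	}
 *	kcpuset_destroy(kcs);
 */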

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_kcpuset.c,v 1.2 2011/08/07 21:13:05 rmind Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/sched.h>
#include <sys/kcpuset.h>
#include <sys/pool.h>

/* Number of CPUs to support. */
#define	KC_MAXCPUS		roundup2(MAXCPUS, 32)

/*
 * Structure of dynamic CPU set in the kernel.
 */
struct kcpuset {
	uint32_t		bits[0];
};

typedef struct kcpuset_impl {
	/* Reference count. */
	u_int			kc_refcnt;
	/* Next to free, if non-NULL (used when multiple references). */
	struct kcpuset *	kc_next;
	/* Actual variable-sized field of bits. */
	struct kcpuset		kc_field;
} kcpuset_impl_t;

#define	KC_BITS_OFF		(offsetof(struct kcpuset_impl, kc_field))
#define	KC_GETSTRUCT(b)		((kcpuset_impl_t *)((char *)(b) - KC_BITS_OFF))
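
/*
 * Layout sketch: callers only ever hold a pointer to the embedded kc_field,
 * while the reference count and free-list link live in front of it.
 * KC_GETSTRUCT() steps back from the caller-visible pointer to recover the
 * containing kcpuset_impl_t:
 *
 *	+------------+------------+--------------------------+
 *	| kc_refcnt  | kc_next    | kc_field.bits[0 .. N-1]  |
 *	+------------+------------+--------------------------+
 *	^                         ^
 *	kcpuset_impl_t *          kcpuset_t * (handed to callers)
 *
 *	KC_GETSTRUCT(kcp) == (kcpuset_impl_t *)((char *)kcp - KC_BITS_OFF)
 */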

/* Sizes of a single bitset. */
#define	KC_SHIFT		5
#define	KC_MASK			31
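
/*
 * Indexing example: each bits[] word holds 32 CPUs, so a CPU index is
 * split into a word index (i >> KC_SHIFT) and a bit position within that
 * word (i & KC_MASK).  For instance, CPU 37 lives in bits[1] at bit 5,
 * since 37 >> 5 == 1 and 37 & 31 == 5.
 */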

/* An array of noted early kcpuset creations and data. */
#define	KC_SAVE_NITEMS		8

/* Structures for early boot mechanism (must be statically initialised). */
static kcpuset_t **		kc_noted_early[KC_SAVE_NITEMS];
static uint32_t			kc_bits_early[KC_SAVE_NITEMS];
static int			kc_last_idx = 0;
static bool			kc_initialised = false;

#define	KC_BITSIZE_EARLY	sizeof(kc_bits_early[0])
#define	KC_NFIELDS_EARLY	((KC_BITSIZE_EARLY * NBBY) >> KC_SHIFT)

/*
 * The size of the whole bitset, in bytes, and the number of fields.
 * Both must be statically initialised for the early boot case.
 */
static size_t			kc_bitsize __read_mostly = KC_BITSIZE_EARLY;
static size_t			kc_nfields __read_mostly = KC_NFIELDS_EARLY;
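
/*
 * Early boot flow (sketch): before kcpuset_sysinit() runs, there is no pool
 * cache, so kcpuset_create(&kcp) records the address of the caller's pointer
 * in kc_noted_early[] and hands back one of the static kc_bits_early[]
 * slots (32 CPUs at most).  Once the pool cache exists, kcpuset_sysinit()
 * allocates real sets, copies the early bits over and rewrites each
 * recorded pointer to refer to its dynamic set.
 */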

static pool_cache_t		kc_cache __read_mostly;

static kcpuset_t *		kcpuset_create_raw(void);

/*
 * kcpuset_sysinit: initialize the subsystem, transfer early boot cases
 * to dynamically allocated sets.
 */
void
kcpuset_sysinit(void)
{
	kcpuset_t *kc_dynamic[KC_SAVE_NITEMS], *kcp;
	int i, s;

	/* Set the kcpuset_t sizes. */
	kc_nfields = (KC_MAXCPUS >> KC_SHIFT);
	kc_bitsize = sizeof(uint32_t) * kc_nfields;

	kc_cache = pool_cache_init(sizeof(kcpuset_impl_t) + kc_bitsize,
	    coherency_unit, 0, 0, "kcpuset", NULL, IPL_NONE, NULL, NULL, NULL);

	/* First, pre-allocate kcpuset entries. */
	for (i = 0; i < kc_last_idx; i++) {
		kcp = kcpuset_create_raw();
		kcpuset_zero(kcp);
		kc_dynamic[i] = kcp;
	}

	/*
	 * Prepare to convert all early noted kcpuset uses to dynamic sets.
	 * Only the primary CPU (the one we are currently running on) may be
	 * up at this point; the secondary processors must not have been spun
	 * up yet.  Since MD facilities can use kcpuset, raise the IPL to
	 * high.
	 */
	KASSERT(mp_online == false);

	s = splhigh();
	for (i = 0; i < kc_last_idx; i++) {
		/*
		 * Transfer the bits from early static storage to the kcpuset.
		 */
		KASSERT(kc_bitsize >= KC_BITSIZE_EARLY);
		memcpy(kc_dynamic[i], &kc_bits_early[i], KC_BITSIZE_EARLY);

		/*
		 * Store the new pointer, pointing to the allocated kcpuset.
		 * Note: we are not in an interrupt context and this is the
		 * only CPU running - thus the store is safe (e.g. there is
		 * no need for the pointer variable to be volatile).
		 */
		*kc_noted_early[i] = kc_dynamic[i];
	}
	kc_initialised = true;
	kc_last_idx = 0;
	splx(s);
}

/*
 * kcpuset_early_ptr: note an early boot use by saving the pointer and
 * returning a pointer to a static, temporary bit field.
 */
static kcpuset_t *
kcpuset_early_ptr(kcpuset_t **kcptr)
{
	kcpuset_t *kcp;
	int s;

	s = splhigh();
	if (kc_last_idx < KC_SAVE_NITEMS) {
		/*
		 * Save the pointer, return pointer to static early field.
		 * Need to zero it out.
		 */
		kc_noted_early[kc_last_idx] = kcptr;
		kcp = (kcpuset_t *)&kc_bits_early[kc_last_idx];
		kc_last_idx++;
		memset(kcp, 0, KC_BITSIZE_EARLY);
		KASSERT(kc_bitsize == KC_BITSIZE_EARLY);
	} else {
		panic("kcpuset(9): all early-use entries exhausted; "
		    "increase KC_SAVE_NITEMS\n");
	}
	splx(s);

	return kcp;
}

/*
 * Routines to create or destroy the CPU set.
 * Early boot case is handled.
 */

static kcpuset_t *
kcpuset_create_raw(void)
{
	kcpuset_impl_t *kc;

	kc = pool_cache_get(kc_cache, PR_WAITOK);
	kc->kc_refcnt = 1;
	kc->kc_next = NULL;

	/* Note: return pointer to the actual field of bits. */
	KASSERT((uint8_t *)kc + KC_BITS_OFF == (uint8_t *)&kc->kc_field);
	return &kc->kc_field;
}

void
kcpuset_create(kcpuset_t **retkcp)
{

	if (__predict_false(!kc_initialised)) {
		/* Early boot use - special case. */
		*retkcp = kcpuset_early_ptr(retkcp);
		return;
	}
	*retkcp = kcpuset_create_raw();
}

void
kcpuset_destroy(kcpuset_t *kcp)
{
	kcpuset_impl_t *kc;

	KASSERT(kc_initialised);
	KASSERT(kcp != NULL);

	do {
		kc = KC_GETSTRUCT(kcp);
		kcp = kc->kc_next;
		pool_cache_put(kc_cache, kc);
	} while (kcp);
}

/*
 * Routines to copy or reference/unreference the CPU set.
 * Note: early boot case is not supported by these routines.
 */

void
kcpuset_copy(kcpuset_t *dkcp, kcpuset_t *skcp)
{

	KASSERT(kc_initialised);
	KASSERT(KC_GETSTRUCT(dkcp)->kc_refcnt == 1);
	memcpy(dkcp, skcp, kc_bitsize);
}

void
kcpuset_use(kcpuset_t *kcp)
{
	kcpuset_impl_t *kc = KC_GETSTRUCT(kcp);

	KASSERT(kc_initialised);
	atomic_inc_uint(&kc->kc_refcnt);
}

void
kcpuset_unuse(kcpuset_t *kcp, kcpuset_t **lst)
{
	kcpuset_impl_t *kc = KC_GETSTRUCT(kcp);

	KASSERT(kc_initialised);
	KASSERT(kc->kc_refcnt > 0);

	if (atomic_dec_uint_nv(&kc->kc_refcnt) != 0) {
		return;
	}
	KASSERT(kc->kc_next == NULL);
	if (lst == NULL) {
		kcpuset_destroy(kcp);
		return;
	}
	kc->kc_next = *lst;
	*lst = kcp;
}
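
/*
 * Deferred destruction sketch: if the final reference might be dropped in
 * a context where freeing to the pool cache is undesirable, callers can
 * collect dead sets on a local list and destroy them later ("deadlst" is
 * a hypothetical local variable):
 *
 *	kcpuset_t *deadlst = NULL;
 *
 *	kcpuset_unuse(kcp, &deadlst);
 *	...
 *	if (deadlst != NULL) {
 *		kcpuset_destroy(deadlst);
 *	}
 *
 * kcpuset_destroy() follows the kc_next chain, so a whole list of dead
 * sets is released with a single call.
 */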

/*
 * Routines to transfer the CPU set from / to userspace.
 * Note: early boot case is not supported by these routines.
 */

int
kcpuset_copyin(const cpuset_t *ucp, kcpuset_t *kcp, size_t len)
{
	kcpuset_impl_t *kc = KC_GETSTRUCT(kcp);

	KASSERT(kc_initialised);
	KASSERT(kc->kc_refcnt > 0);
	KASSERT(kc->kc_next == NULL);
	(void)kc;

	if (len != kc_bitsize) { /* XXX */
		return EINVAL;
	}
	return copyin(ucp, kcp, kc_bitsize);
}

int
kcpuset_copyout(kcpuset_t *kcp, cpuset_t *ucp, size_t len)
{
	kcpuset_impl_t *kc = KC_GETSTRUCT(kcp);

	KASSERT(kc_initialised);
	KASSERT(kc->kc_refcnt > 0);
	KASSERT(kc->kc_next == NULL);
	(void)kc;

	if (len != kc_bitsize) { /* XXX */
		return EINVAL;
	}
	return copyout(kcp, ucp, kc_bitsize);
}
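
/*
 * Userspace transfer sketch (illustrative only; the syscall argument names
 * used with SCARG() are hypothetical).  The supplied length must match the
 * kernel bitset size, otherwise EINVAL is returned:
 *
 *	kcpuset_t *kcp;
 *	int error;
 *
 *	kcpuset_create(&kcp);
 *	kcpuset_zero(kcp);
 *	error = kcpuset_copyin(SCARG(uap, cpuset), kcp, SCARG(uap, size));
 *	if (error) {
 *		kcpuset_destroy(kcp);
 *		return error;
 *	}
 */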

/*
 * Routines to change bit field - zero, fill, set, unset, etc.
 */

void
kcpuset_zero(kcpuset_t *kcp)
{

	KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_refcnt > 0);
	KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_next == NULL);
	memset(kcp, 0, kc_bitsize);
}

void
kcpuset_fill(kcpuset_t *kcp)
{

	KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_refcnt > 0);
	KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_next == NULL);
	memset(kcp, ~0, kc_bitsize);
}

void
kcpuset_set(kcpuset_t *kcp, cpuid_t i)
{
	const size_t j = i >> KC_SHIFT;

	KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_next == NULL);
	KASSERT(j < kc_nfields);

	kcp->bits[j] |= 1 << (i & KC_MASK);
}

void
kcpuset_clear(kcpuset_t *kcp, cpuid_t i)
{
	const size_t j = i >> KC_SHIFT;

	KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_next == NULL);
	KASSERT(j < kc_nfields);

	kcp->bits[j] &= ~(1 << (i & KC_MASK));
}

int
kcpuset_isset(kcpuset_t *kcp, cpuid_t i)
{
	const size_t j = i >> KC_SHIFT;

	KASSERT(kcp != NULL);
	KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_refcnt > 0);
	KASSERT(!kc_initialised || KC_GETSTRUCT(kcp)->kc_next == NULL);
	KASSERT(j < kc_nfields);

	return ((1 << (i & KC_MASK)) & kcp->bits[j]) != 0;
}

bool
kcpuset_iszero(kcpuset_t *kcp)
{

	for (size_t j = 0; j < kc_nfields; j++) {
		if (kcp->bits[j] != 0) {
			return false;
		}
	}
	return true;
}

bool
kcpuset_match(const kcpuset_t *kcp1, const kcpuset_t *kcp2)
{

	return memcmp(kcp1, kcp2, kc_bitsize) == 0;
}