/*	$NetBSD: subr_cpu.c,v 1.5.2.1 2020/01/17 21:47:35 ad Exp $	*/

/*-
 * Copyright (c) 2007, 2008, 2009, 2010, 2012, 2019, 2020
 *     The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c)2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * CPU related routines shared with rump.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_cpu.c,v 1.5.2.1 2020/01/17 21:47:35 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sched.h>
#include <sys/conf.h>
#include <sys/cpu.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/kmem.h>

static void	cpu_topology_fake1(struct cpu_info *);

kmutex_t	cpu_lock		__cacheline_aligned;
int		ncpu			__read_mostly;
int		ncpuonline		__read_mostly;
bool		mp_online		__read_mostly;
static bool	cpu_topology_present	__read_mostly;
static bool	cpu_topology_haveslow	__read_mostly;
int64_t		cpu_counts[CPU_COUNT_MAX];

/* An array of CPUs.  There are ncpu entries. */
struct cpu_info **cpu_infos		__read_mostly;

/* Note: set on mi_cpu_attach() and idle_loop(). */
kcpuset_t *	kcpuset_attached	__read_mostly	= NULL;
kcpuset_t *	kcpuset_running		__read_mostly	= NULL;

static char cpu_model[128];

/*
 * mi_cpu_init: early initialisation of MI CPU related structures.
 *
 * Note: may not block and memory allocator is not yet available.
 */
void
mi_cpu_init(void)
{
	struct cpu_info *ci;

	mutex_init(&cpu_lock, MUTEX_DEFAULT, IPL_NONE);

	kcpuset_create(&kcpuset_attached, true);
	kcpuset_create(&kcpuset_running, true);
	kcpuset_set(kcpuset_running, 0);

	ci = curcpu();
	cpu_topology_fake1(ci);
}

int
cpu_setmodel(const char *fmt, ...)
{
	int len;
	va_list ap;

	va_start(ap, fmt);
	len = vsnprintf(cpu_model, sizeof(cpu_model), fmt, ap);
	va_end(ap);
	return len;
}
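
/*
 * Usage sketch (hypothetical MD caller; "vendor" and "brand" are
 * illustrative names):
 *
 *	cpu_setmodel("%s %s", vendor, brand);
 *
 * The string is formatted printf-style into cpu_model[] and silently
 * truncated by vsnprintf() to sizeof(cpu_model) - 1 (127) characters;
 * the return value is the untruncated length.
 */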

const char *
cpu_getmodel(void)
{
	return cpu_model;
}

bool
cpu_softintr_p(void)
{

	return (curlwp->l_pflag & LP_INTR) != 0;
}

/*
 * Collect CPU topology information as each CPU is attached.  This can be
 * called early during boot, so we need to be careful what we do.
 */
void
cpu_topology_set(struct cpu_info *ci, u_int package_id, u_int core_id,
    u_int smt_id, u_int numa_id, bool slow)
{
	enum cpu_rel rel;

	cpu_topology_present = true;
	cpu_topology_haveslow |= slow;
	ci->ci_package_id = package_id;
	ci->ci_core_id = core_id;
	ci->ci_smt_id = smt_id;
	ci->ci_numa_id = numa_id;
	ci->ci_is_slow = slow;
	for (rel = 0; rel < __arraycount(ci->ci_sibling); rel++) {
		ci->ci_sibling[rel] = ci;
		ci->ci_nsibling[rel] = 1;
	}
}
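
/*
 * Example (hypothetical MD attach code): the second hardware thread of
 * the first core in package 0, on NUMA node 0, with no performance
 * asymmetry, would be registered as:
 *
 *	cpu_topology_set(ci, 0, 0, 1, 0, false);
 *
 * Each CPU starts out as a singleton on every sibling list; the lists
 * are cross linked later by cpu_topology_init().
 */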

/*
 * Link a CPU into the given circular list.
 */
static void
cpu_topology_link(struct cpu_info *ci, struct cpu_info *ci2, enum cpu_rel rel)
{
	struct cpu_info *ci3;

	/* Walk to the end of the existing circular list and append. */
	for (ci3 = ci2;; ci3 = ci3->ci_sibling[rel]) {
		ci3->ci_nsibling[rel]++;
		if (ci3->ci_sibling[rel] == ci2) {
			break;
		}
	}
	ci->ci_sibling[rel] = ci2;
	ci3->ci_sibling[rel] = ci;
	ci->ci_nsibling[rel] = ci3->ci_nsibling[rel];
}
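
/*
 * Worked example: appending cpuD to the two-entry ring cpuB -> cpuC -> cpuB
 * (illustrative names).  The walk visits cpuB and cpuC, bumping each count
 * from 2 to 3, and stops with ci3 == cpuC, the node that points back at
 * cpuB.  cpuD is then spliced in after cpuC, giving the ring
 * cpuB -> cpuC -> cpuD -> cpuB, and inherits the updated count of 3.
 */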

/*
 * Print out the topology lists.
 */
static void
cpu_topology_dump(void)
{
#ifdef DEBUG
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci, *ci2;
	const char *names[] = { "core", "pkg", "1st" };
	enum cpu_rel rel;
	int i;

	CTASSERT(__arraycount(names) >= __arraycount(ci->ci_sibling));

	for (CPU_INFO_FOREACH(cii, ci)) {
		if (cpu_topology_haveslow)
			printf("%s ", ci->ci_is_slow ? "slow" : "fast");
		for (rel = 0; rel < __arraycount(ci->ci_sibling); rel++) {
			printf("%s has %d %s siblings:", cpu_name(ci),
			    ci->ci_nsibling[rel], names[rel]);
			ci2 = ci->ci_sibling[rel];
			i = 0;
			do {
				printf(" %s", cpu_name(ci2));
				ci2 = ci2->ci_sibling[rel];
			} while (++i < 64 && ci2 != ci->ci_sibling[rel]);
			if (i == 64) {
				printf(" GAVE UP");
			}
			printf("\n");
		}
		printf("%s first in package: %s\n", cpu_name(ci),
		    cpu_name(ci->ci_package1st));
	}
#endif	/* DEBUG */
}
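
/*
 * With DEBUG enabled this prints one line per relationship, roughly as
 * follows (illustrative output for cpu0 of a two-core, single-package,
 * non-SMT system):
 *
 *	cpu0 has 1 core siblings: cpu0
 *	cpu0 has 2 pkg siblings: cpu1 cpu0
 *	cpu0 has 1 1st siblings: cpu0
 *	cpu0 first in package: cpu0
 */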

/*
 * Fake up topology info if we have none, or if what we got was bogus.
 * Used early in boot, and by cpu_topology_fake().
 */
static void
cpu_topology_fake1(struct cpu_info *ci)
{
	enum cpu_rel rel;

	for (rel = 0; rel < __arraycount(ci->ci_sibling); rel++) {
		ci->ci_sibling[rel] = ci;
		ci->ci_nsibling[rel] = 1;
	}
	if (!cpu_topology_present) {
		ci->ci_package_id = cpu_index(ci);
	}
	ci->ci_schedstate.spc_flags |=
	    (SPCF_CORE1ST | SPCF_PACKAGE1ST | SPCF_1STCLASS);
	ci->ci_package1st = ci;
	ci->ci_is_slow = false;
	cpu_topology_haveslow = false;
}

/*
 * Fake up topology info if we have none, or if what we got was bogus.
 * Don't override ci_package_id, etc, if cpu_topology_present is set.
 * MD code also uses these.
 */
static void
cpu_topology_fake(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		cpu_topology_fake1(ci);
		/* Undo (early boot) flag set so everything links OK. */
		ci->ci_schedstate.spc_flags &=
		    ~(SPCF_CORE1ST | SPCF_PACKAGE1ST | SPCF_1STCLASS);
	}
}

/*
 * Fix up basic CPU topology info.  Right now that means attach each CPU to
 * circular lists of its siblings in the same core, and in the same package.
 */
void
cpu_topology_init(void)
{
	CPU_INFO_ITERATOR cii, cii2;
	struct cpu_info *ci, *ci2, *ci3;
	u_int minsmt, mincore;

	if (!cpu_topology_present) {
		cpu_topology_fake();
		goto linkit;
	}

	/* Find siblings in same core and package. */
	for (CPU_INFO_FOREACH(cii, ci)) {
		ci->ci_schedstate.spc_flags &=
		    ~(SPCF_CORE1ST | SPCF_PACKAGE1ST | SPCF_1STCLASS);
		for (CPU_INFO_FOREACH(cii2, ci2)) {
			/* Duplicate IDs on distinct CPUs: MD info is bogus. */
			if (ci2->ci_package_id == ci->ci_package_id &&
			    ci2->ci_core_id == ci->ci_core_id &&
			    ci2->ci_smt_id == ci->ci_smt_id &&
			    ci2 != ci) {
#ifdef DEBUG
				printf("cpu%u %p pkg %u core %u smt %u same as "
				       "cpu%u %p pkg %u core %u smt %u\n",
				       cpu_index(ci), ci, ci->ci_package_id,
				       ci->ci_core_id, ci->ci_smt_id,
				       cpu_index(ci2), ci2, ci2->ci_package_id,
				       ci2->ci_core_id, ci2->ci_smt_id);
#endif
				printf("cpu_topology_init: info bogus, "
				    "faking it\n");
				cpu_topology_fake();
				goto linkit;
			}
			if (ci2 == ci ||
			    ci2->ci_package_id != ci->ci_package_id) {
				continue;
			}
			/* Find CPUs in the same core. */
			if (ci->ci_nsibling[CPUREL_CORE] == 1 &&
			    ci->ci_core_id == ci2->ci_core_id) {
				cpu_topology_link(ci, ci2, CPUREL_CORE);
			}
			/* Find CPUs in the same package. */
			if (ci->ci_nsibling[CPUREL_PACKAGE] == 1) {
				cpu_topology_link(ci, ci2, CPUREL_PACKAGE);
			}
			if (ci->ci_nsibling[CPUREL_CORE] > 1 &&
			    ci->ci_nsibling[CPUREL_PACKAGE] > 1) {
				break;
			}
		}
	}

 linkit:
	/* Identify lowest numbered SMT in each core. */
	for (CPU_INFO_FOREACH(cii, ci)) {
		ci2 = ci3 = ci;
		minsmt = ci->ci_smt_id;
		do {
			if (ci2->ci_smt_id < minsmt) {
				ci3 = ci2;
				minsmt = ci2->ci_smt_id;
			}
			ci2 = ci2->ci_sibling[CPUREL_CORE];
		} while (ci2 != ci);
		ci3->ci_schedstate.spc_flags |= SPCF_CORE1ST;
	}

	/* Identify lowest numbered SMT in each package. */
	ci3 = NULL;
	for (CPU_INFO_FOREACH(cii, ci)) {
		if ((ci->ci_schedstate.spc_flags & SPCF_CORE1ST) == 0) {
			continue;
		}
		ci2 = ci3 = ci;
		mincore = ci->ci_core_id;
		do {
			if ((ci2->ci_schedstate.spc_flags &
			    SPCF_CORE1ST) != 0 &&
			    ci2->ci_core_id < mincore) {
				ci3 = ci2;
				mincore = ci2->ci_core_id;
			}
			ci2 = ci2->ci_sibling[CPUREL_PACKAGE];
		} while (ci2 != ci);

		if ((ci3->ci_schedstate.spc_flags & SPCF_PACKAGE1ST) != 0) {
			/* Already identified - nothing more to do. */
			continue;
		}
		ci3->ci_schedstate.spc_flags |= SPCF_PACKAGE1ST;

		/* Walk through all CPUs in package and point to first. */
		ci2 = ci3;
		do {
			ci2->ci_package1st = ci3;
			ci2->ci_sibling[CPUREL_PACKAGE1ST] = ci3;
			ci2 = ci2->ci_sibling[CPUREL_PACKAGE];
		} while (ci2 != ci3);

		/* Now look for somebody else to link to. */
		for (CPU_INFO_FOREACH(cii2, ci2)) {
			if ((ci2->ci_schedstate.spc_flags & SPCF_PACKAGE1ST)
			    != 0 && ci2 != ci3) {
				cpu_topology_link(ci3, ci2, CPUREL_PACKAGE1ST);
				break;
			}
		}
	}

	/* Walk through all packages, starting with value of ci3 from above. */
	KASSERT(ci3 != NULL);
	ci = ci3;
	do {
		/* Walk through CPUs in the package and copy in PACKAGE1ST. */
		ci2 = ci;
		do {
			ci2->ci_sibling[CPUREL_PACKAGE1ST] =
			    ci->ci_sibling[CPUREL_PACKAGE1ST];
			ci2->ci_nsibling[CPUREL_PACKAGE1ST] =
			    ci->ci_nsibling[CPUREL_PACKAGE1ST];
			ci2 = ci2->ci_sibling[CPUREL_PACKAGE];
		} while (ci2 != ci);
		ci = ci->ci_sibling[CPUREL_PACKAGE1ST];
	} while (ci != ci3);

	if (cpu_topology_haveslow) {
		/*
		 * For asymmetric systems where some CPUs are slower than
		 * others, mark first class CPUs for the scheduler.  This
		 * conflicts with SMT right now so whinge if observed.
		 */
		if (curcpu()->ci_nsibling[CPUREL_CORE] > 1) {
			printf("cpu_topology_init: asymmetric & SMT??\n");
		}
		for (CPU_INFO_FOREACH(cii, ci)) {
			if (!ci->ci_is_slow) {
				ci->ci_schedstate.spc_flags |= SPCF_1STCLASS;
			}
		}
	} else {
		/*
		 * For any other configuration mark the 1st CPU in each
		 * core as a first class CPU.
		 */
		for (CPU_INFO_FOREACH(cii, ci)) {
			if ((ci->ci_schedstate.spc_flags & SPCF_CORE1ST) != 0) {
				ci->ci_schedstate.spc_flags |= SPCF_1STCLASS;
			}
		}
	}

	cpu_topology_dump();
}
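
/*
 * Worked example (illustrative): a 1-package, 2-core, 2-SMT system with
 * cpu0/cpu1 in core 0 and cpu2/cpu3 in core 1 ends up with:
 *
 *	CPUREL_CORE:	cpu0<->cpu1 and cpu2<->cpu3 rings
 *	CPUREL_PACKAGE:	all four CPUs on one ring
 *	CPUREL_PACKAGE1ST: a ring of cpu0 alone (first SMT of the first
 *			core), with every CPU's ci_package1st = cpu0
 *	SPCF_CORE1ST:	cpu0 and cpu2; both also get SPCF_1STCLASS
 *	SPCF_PACKAGE1ST: cpu0
 */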

/*
 * Adjust one count, for a counter that's NOT updated from interrupt
 * context.  Hardly worth making an inline due to preemption stuff.
 */
void
cpu_count(enum cpu_count idx, int64_t delta)
{
	lwp_t *l = curlwp;
	KPREEMPT_DISABLE(l);
	l->l_cpu->ci_counts[idx] += delta;
	KPREEMPT_ENABLE(l);
}
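
/*
 * Example (hypothetical caller; the counter name is illustrative):
 *
 *	cpu_count(CPU_COUNT_FORKS, 1);
 *
 * Preemption is disabled only so the read-modify-write stays on the
 * local CPU's counter; per-CPU values are reconciled into the global
 * totals later by cpu_count_sync*().
 */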

/*
 * Fetch fresh sum total for all counts.  Expensive - don't call often.
 */
void
cpu_count_sync_all(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	int64_t sum[CPU_COUNT_MAX], *ptr;
	enum cpu_count i;
	int s;

	KASSERT(sizeof(ci->ci_counts) == sizeof(cpu_counts));

	if (__predict_true(mp_online)) {
		memset(sum, 0, sizeof(sum));
		/*
		 * We want this to be reasonably quick, so that any value
		 * we get isn't totally out of whack; to that end don't
		 * let the current LWP get preempted.
		 */
		s = splvm();
		curcpu()->ci_counts[CPU_COUNT_SYNC_ALL]++;
		for (CPU_INFO_FOREACH(cii, ci)) {
			ptr = ci->ci_counts;
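			/*
			 * Unrolled x8: sums eight counters per iteration,
			 * so CPU_COUNT_MAX must be a multiple of 8 (the
			 * KASSERT below checks this).
			 */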
			for (i = 0; i < CPU_COUNT_MAX; i += 8) {
				sum[i+0] += ptr[i+0];
				sum[i+1] += ptr[i+1];
				sum[i+2] += ptr[i+2];
				sum[i+3] += ptr[i+3];
				sum[i+4] += ptr[i+4];
				sum[i+5] += ptr[i+5];
				sum[i+6] += ptr[i+6];
				sum[i+7] += ptr[i+7];
			}
			KASSERT(i == CPU_COUNT_MAX);
		}
		memcpy(cpu_counts, sum, sizeof(cpu_counts));
		splx(s);
	} else {
		memcpy(cpu_counts, curcpu()->ci_counts, sizeof(cpu_counts));
	}
}

/*
 * Fetch a fresh sum total for one single count.  Expensive - don't call often.
 */
int64_t
cpu_count_sync(enum cpu_count count)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	int64_t sum;
	int s;

	if (__predict_true(mp_online)) {
		s = splvm();
		curcpu()->ci_counts[CPU_COUNT_SYNC_ONE]++;
		sum = 0;
		for (CPU_INFO_FOREACH(cii, ci)) {
			sum += ci->ci_counts[count];
		}
		splx(s);
	} else {
		/* XXX Early boot, iterator might not be available. */
		sum = curcpu()->ci_counts[count];
	}
	return cpu_counts[count] = sum;
}
    505