/*	$NetBSD: subr_cpu.c,v 1.23 2025/06/23 22:50:23 ad Exp $	*/

/*-
 * Copyright (c) 2007, 2008, 2009, 2010, 2012, 2019, 2020
 *     The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c)2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * CPU related routines shared with rump.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_cpu.c,v 1.23 2025/06/23 22:50:23 ad Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/systm.h>
#include <sys/sched.h>
#include <sys/conf.h>
#include <sys/cpu.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/kmem.h>

static void	cpu_topology_fake1(struct cpu_info *);

kmutex_t	cpu_lock		__cacheline_aligned;
int		ncpu			__read_mostly;
int		ncpuonline		__read_mostly;
bool		mp_online		__read_mostly;
static bool	cpu_topology_present	__read_mostly;
static bool	cpu_topology_haveslow	__read_mostly;
int64_t		cpu_counts[CPU_COUNT_MAX];

/* An array of CPUs.  There are ncpu entries. */
struct cpu_info **cpu_infos		__read_mostly;

/* Note: set on mi_cpu_attach() and idle_loop(). */
kcpuset_t *	kcpuset_attached	__read_mostly	= NULL;
kcpuset_t *	kcpuset_running		__read_mostly	= NULL;

static char cpu_model[128];

/*
 * mi_cpu_init: early initialisation of MI CPU related structures.
 *
 * Note: may not block and memory allocator is not yet available.
 */
void
mi_cpu_init(void)
{
	struct cpu_info *ci;

	mutex_init(&cpu_lock, MUTEX_DEFAULT, IPL_NONE);

	kcpuset_create(&kcpuset_attached, true);
	kcpuset_create(&kcpuset_running, true);
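	/* Mark the boot CPU (index 0) as running. */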
	kcpuset_set(kcpuset_running, 0);

	ci = curcpu();
	cpu_topology_fake1(ci);
}

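/*
 * cpu_setmodel: set the CPU model string returned by cpu_getmodel(),
 * printf style.  The result is truncated to fit cpu_model[]; the
 * vsnprintf() return value is passed back to the caller.
 */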
int
cpu_setmodel(const char *fmt, ...)
{
	int len;
	va_list ap;

	va_start(ap, fmt);
	len = vsnprintf(cpu_model, sizeof(cpu_model), fmt, ap);
	va_end(ap);
	return len;
}

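/*
 * cpu_getmodel: return the model string set with cpu_setmodel().
 */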
const char *
cpu_getmodel(void)
{
	return cpu_model;
}

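/*
 * cpu_softintr_p: return true if the calling LWP is a soft interrupt
 * handler (LP_INTR is set).
 */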
bool
cpu_softintr_p(void)
{

	return (curlwp->l_pflag & LP_INTR) != 0;
}

bool
curcpu_stable(void)
{
	struct lwp *const l = curlwp;
	const int pflag = l->l_pflag;
	const int nopreempt = l->l_nopreempt;

	/*
	 * - Softints (LP_INTR) never migrate between CPUs.
	 * - Bound lwps (LP_BOUND), either kthreads created bound to
	 *   a CPU or any lwps bound with curlwp_bind, never migrate.
	 * - If kpreemption is disabled, the lwp can't migrate.
	 * - If we're in interrupt context, preemption is blocked.
	 *
	 * We combine the LP_INTR, LP_BOUND, and l_nopreempt test into
	 * a single predicted-true branch so this is cheap to assert in
	 * most contexts where it will be used, then fall back to
	 * calling the full kpreempt_disabled() and cpu_intr_p() as
	 * subroutines.
	 *
	 * XXX Is cpu_intr_p redundant with kpreempt_disabled?
	 */
	return __predict_true(((pflag & (LP_INTR|LP_BOUND)) | nopreempt)
		!= 0) ||
	    kpreempt_disabled() ||
	    cpu_intr_p();
}

/*
 * Collect CPU topology information as each CPU is attached.  This can be
 * called early during boot, so we need to be careful what we do.
 */
void
cpu_topology_set(struct cpu_info *ci, u_int package_id, u_int core_id,
    u_int smt_id, u_int numa_id)
{
	enum cpu_rel rel;

	cpu_topology_present = true;
	ci->ci_package_id = package_id;
	ci->ci_core_id = core_id;
	ci->ci_smt_id = smt_id;
	ci->ci_numa_id = numa_id;
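	/*
	 * Start out with each CPU as the sole member of its sibling
	 * lists; cpu_topology_init() links the CPUs together later.
	 */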
	for (rel = 0; rel < __arraycount(ci->ci_sibling); rel++) {
		ci->ci_sibling[rel] = ci;
		ci->ci_nsibling[rel] = 1;
	}
}

/*
 * Collect CPU relative speed: record whether this CPU is considered slow.
 */
void
cpu_topology_setspeed(struct cpu_info *ci, bool slow)
{

	cpu_topology_haveslow |= slow;
	ci->ci_is_slow = slow;
}

/*
 * Link a CPU into the given circular list.
 */
static void
cpu_topology_link(struct cpu_info *ci, struct cpu_info *ci2, enum cpu_rel rel)
{
	struct cpu_info *ci3;

	/* Walk to the end of the existing circular list and append. */
	for (ci3 = ci2;; ci3 = ci3->ci_sibling[rel]) {
		ci3->ci_nsibling[rel]++;
		if (ci3->ci_sibling[rel] == ci2) {
			break;
		}
	}
	ci->ci_sibling[rel] = ci2;
	ci3->ci_sibling[rel] = ci;
	ci->ci_nsibling[rel] = ci3->ci_nsibling[rel];
}

/*
 * Print out the topology lists.
 */
static void
cpu_topology_dump(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci, *ci2;
	const char *names[] = { "core", "pkg", "1st" };
	enum cpu_rel rel;
	int i;

	CTASSERT(__arraycount(names) >= __arraycount(ci->ci_sibling));
	if (ncpu == 1) {
		return;
	}

	for (CPU_INFO_FOREACH(cii, ci)) {
		if (cpu_topology_haveslow)
			aprint_debug("%s ", ci->ci_is_slow ? "slow" : "fast");
		for (rel = 0; rel < __arraycount(ci->ci_sibling); rel++) {
			aprint_debug("%s has %d %s siblings:", cpu_name(ci),
			    ci->ci_nsibling[rel], names[rel]);
			ci2 = ci->ci_sibling[rel];
			i = 0;
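			/* Cap the walk in case a list is damaged. */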
			do {
				aprint_debug(" %s", cpu_name(ci2));
				ci2 = ci2->ci_sibling[rel];
			} while (++i < 64 && ci2 != ci->ci_sibling[rel]);
			if (i == 64) {
				aprint_debug(" GAVE UP");
			}
			aprint_debug("\n");
		}
		aprint_debug("%s first in package: %s\n", cpu_name(ci),
		    cpu_name(ci->ci_package1st));
	}
}

/*
 * Fake up topology info if we have none, or if what we got was bogus.
 * Used early in boot, and by cpu_topology_fake().
 */
static void
cpu_topology_fake1(struct cpu_info *ci)
{
	enum cpu_rel rel;

	for (rel = 0; rel < __arraycount(ci->ci_sibling); rel++) {
		ci->ci_sibling[rel] = ci;
		ci->ci_nsibling[rel] = 1;
	}
	if (!cpu_topology_present) {
		ci->ci_package_id = cpu_index(ci);
	}
	ci->ci_schedstate.spc_flags |=
	    (SPCF_CORE1ST | SPCF_PACKAGE1ST | SPCF_1STCLASS);
	ci->ci_package1st = ci;
	if (!cpu_topology_haveslow) {
		ci->ci_is_slow = false;
	}
}

/*
 * Fake up topology info if we have none, or if what we got was bogus.
 * Don't override ci_package_id, etc, if cpu_topology_present is set.
 * MD code also uses these.
 */
static void
cpu_topology_fake(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		cpu_topology_fake1(ci);
		/* Undo (early boot) flag set so everything links OK. */
		ci->ci_schedstate.spc_flags &=
		    ~(SPCF_CORE1ST | SPCF_PACKAGE1ST | SPCF_1STCLASS);
	}
}

/*
 * Fix up basic CPU topology info.  Right now that means attaching each CPU
 * to circular lists of its siblings in the same core, and in the same
 * package.
 */
void
cpu_topology_init(void)
{
	CPU_INFO_ITERATOR cii, cii2;
	struct cpu_info *ci, *ci2, *ci3;
	u_int minsmt, mincore;

	if (!cpu_topology_present) {
		cpu_topology_fake();
		goto linkit;
	}

	/* Find siblings in same core and package. */
	for (CPU_INFO_FOREACH(cii, ci)) {
		ci->ci_schedstate.spc_flags &=
		    ~(SPCF_CORE1ST | SPCF_PACKAGE1ST | SPCF_1STCLASS);
		for (CPU_INFO_FOREACH(cii2, ci2)) {
			/* Catch bogus info: no two CPUs may share all IDs. */
			if (ci2->ci_package_id == ci->ci_package_id &&
			    ci2->ci_core_id == ci->ci_core_id &&
			    ci2->ci_smt_id == ci->ci_smt_id &&
			    ci2 != ci) {
#ifdef DEBUG
				printf("cpu%u %p pkg %u core %u smt %u same as "
				       "cpu%u %p pkg %u core %u smt %u\n",
				       cpu_index(ci), ci, ci->ci_package_id,
				       ci->ci_core_id, ci->ci_smt_id,
				       cpu_index(ci2), ci2, ci2->ci_package_id,
				       ci2->ci_core_id, ci2->ci_smt_id);
#endif
			    	printf("cpu_topology_init: info bogus, "
			    	    "faking it\n");
			    	cpu_topology_fake();
			    	goto linkit;
			}
			if (ci2 == ci ||
			    ci2->ci_package_id != ci->ci_package_id) {
				continue;
			}
			/* Find CPUs in the same core. */
			if (ci->ci_nsibling[CPUREL_CORE] == 1 &&
			    ci->ci_core_id == ci2->ci_core_id) {
			    	cpu_topology_link(ci, ci2, CPUREL_CORE);
			}
			/* Find CPUs in the same package. */
			if (ci->ci_nsibling[CPUREL_PACKAGE] == 1) {
			    	cpu_topology_link(ci, ci2, CPUREL_PACKAGE);
			}
			if (ci->ci_nsibling[CPUREL_CORE] > 1 &&
			    ci->ci_nsibling[CPUREL_PACKAGE] > 1) {
				break;
			}
		}
	}

 linkit:
	/* Identify lowest numbered SMT in each core. */
	for (CPU_INFO_FOREACH(cii, ci)) {
		ci2 = ci3 = ci;
		minsmt = ci->ci_smt_id;
		do {
			if (ci2->ci_smt_id < minsmt) {
				ci3 = ci2;
				minsmt = ci2->ci_smt_id;
			}
			ci2 = ci2->ci_sibling[CPUREL_CORE];
		} while (ci2 != ci);
		ci3->ci_schedstate.spc_flags |= SPCF_CORE1ST;
	}

	/* Identify the 1st CPU (lowest numbered core's 1st SMT) in each package. */
	ci3 = NULL;
	for (CPU_INFO_FOREACH(cii, ci)) {
		if ((ci->ci_schedstate.spc_flags & SPCF_CORE1ST) == 0) {
			continue;
		}
		ci2 = ci3 = ci;
		mincore = ci->ci_core_id;
		do {
			if ((ci2->ci_schedstate.spc_flags &
			    SPCF_CORE1ST) != 0 &&
			    ci2->ci_core_id < mincore) {
				ci3 = ci2;
				mincore = ci2->ci_core_id;
			}
			ci2 = ci2->ci_sibling[CPUREL_PACKAGE];
		} while (ci2 != ci);

		if ((ci3->ci_schedstate.spc_flags & SPCF_PACKAGE1ST) != 0) {
			/* Already identified - nothing more to do. */
			continue;
		}
		ci3->ci_schedstate.spc_flags |= SPCF_PACKAGE1ST;

		/* Walk through all CPUs in package and point to first. */
		ci2 = ci3;
		do {
			ci2->ci_package1st = ci3;
			ci2->ci_sibling[CPUREL_PACKAGE1ST] = ci3;
			ci2 = ci2->ci_sibling[CPUREL_PACKAGE];
		} while (ci2 != ci3);

		/* Now look for somebody else to link to. */
		for (CPU_INFO_FOREACH(cii2, ci2)) {
			if ((ci2->ci_schedstate.spc_flags & SPCF_PACKAGE1ST)
			    != 0 && ci2 != ci3) {
			    	cpu_topology_link(ci3, ci2, CPUREL_PACKAGE1ST);
			    	break;
			}
		}
	}

	/* Walk through all packages, starting with value of ci3 from above. */
	KASSERT(ci3 != NULL);
	ci = ci3;
	do {
		/* Walk through CPUs in the package and copy in PACKAGE1ST. */
		ci2 = ci;
		do {
			ci2->ci_sibling[CPUREL_PACKAGE1ST] =
			    ci->ci_sibling[CPUREL_PACKAGE1ST];
			ci2->ci_nsibling[CPUREL_PACKAGE1ST] =
			    ci->ci_nsibling[CPUREL_PACKAGE1ST];
			ci2 = ci2->ci_sibling[CPUREL_PACKAGE];
		} while (ci2 != ci);
		ci = ci->ci_sibling[CPUREL_PACKAGE1ST];
	} while (ci != ci3);

	if (cpu_topology_haveslow) {
		/*
		 * For asymmetric systems where some CPUs are slower than
		 * others, mark first class CPUs for the scheduler.  This
		 * conflicts with SMT right now so whinge if observed.
		 */
		if (curcpu()->ci_nsibling[CPUREL_CORE] > 1) {
			printf("cpu_topology_init: asymmetric & SMT??\n");
		}
		for (CPU_INFO_FOREACH(cii, ci)) {
			if (!ci->ci_is_slow) {
				ci->ci_schedstate.spc_flags |= SPCF_1STCLASS;
			}
		}
	} else {
		/*
		 * For any other configuration mark the 1st CPU in each
		 * core as a first class CPU.
		 */
		for (CPU_INFO_FOREACH(cii, ci)) {
			if ((ci->ci_schedstate.spc_flags & SPCF_CORE1ST) != 0) {
				ci->ci_schedstate.spc_flags |= SPCF_1STCLASS;
			}
		}
	}

	cpu_topology_dump();
}

/*
 * Adjust one count, for a counter that's NOT updated from interrupt
 * context.  Hardly worth making an inline due to preemption stuff.
 */
void
cpu_count(enum cpu_count idx, int64_t delta)
{
	lwp_t *l = curlwp;
	KPREEMPT_DISABLE(l);
	l->l_cpu->ci_counts[idx] += delta;
	KPREEMPT_ENABLE(l);
}

/*
 * Fetch a fresh sum total for all counts.  Expensive - don't call often.
 *
 * If poll is true, the caller is okay with less recent values (but
 * no more than 1/hz seconds old).  Where this is called very often,
 * that should be the case.
 *
 * This should be reasonably quick so that any value collected isn't totally
 * out of whack.  It can also be called from interrupt context, so go to
 * splvm() while summing the counters.  It's tempting to use a spin mutex
 * here but this routine is called from DDB.
 */
void
cpu_count_sync(bool poll)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	int64_t sum[CPU_COUNT_MAX], *ptr;
	static int lasttick;
	int curtick, s;
	enum cpu_count i;

	KASSERT(sizeof(ci->ci_counts) == sizeof(cpu_counts));

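	/* With only the boot CPU running, its counters are the totals. */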
	if (__predict_false(!mp_online)) {
		memcpy(cpu_counts, curcpu()->ci_counts, sizeof(cpu_counts));
		return;
	}

	s = splvm();
	curtick = getticks();
	if (poll && atomic_load_acquire(&lasttick) == curtick) {
		splx(s);
		return;
	}
	memset(sum, 0, sizeof(sum));
	curcpu()->ci_counts[CPU_COUNT_SYNC]++;
	for (CPU_INFO_FOREACH(cii, ci)) {
		ptr = ci->ci_counts;
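		/*
		 * Sum eight counters per iteration: CPU_COUNT_MAX is
		 * assumed to be a multiple of eight, which the KASSERT
		 * below verifies.
		 */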
		for (i = 0; i < CPU_COUNT_MAX; i += 8) {
			sum[i+0] += ptr[i+0];
			sum[i+1] += ptr[i+1];
			sum[i+2] += ptr[i+2];
			sum[i+3] += ptr[i+3];
			sum[i+4] += ptr[i+4];
			sum[i+5] += ptr[i+5];
			sum[i+6] += ptr[i+6];
			sum[i+7] += ptr[i+7];
		}
		KASSERT(i == CPU_COUNT_MAX);
	}
	memcpy(cpu_counts, sum, sizeof(cpu_counts));
	atomic_store_release(&lasttick, curtick);
	splx(s);
}