/*	$NetBSD: subr_cpu.c,v 1.19 2023/07/08 13:59:05 riastradh Exp $	*/

/*-
 * Copyright (c) 2007, 2008, 2009, 2010, 2012, 2019, 2020
 *     The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c)2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * CPU related routines shared with rump.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_cpu.c,v 1.19 2023/07/08 13:59:05 riastradh Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/systm.h>
#include <sys/sched.h>
#include <sys/conf.h>
#include <sys/cpu.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/kmem.h>

static void	cpu_topology_fake1(struct cpu_info *);

kmutex_t	cpu_lock		__cacheline_aligned;
int		ncpu			__read_mostly;
int		ncpuonline		__read_mostly;
bool		mp_online		__read_mostly;
static bool	cpu_topology_present	__read_mostly;
static bool	cpu_topology_haveslow	__read_mostly;
int64_t		cpu_counts[CPU_COUNT_MAX];

/* An array of CPUs.  There are ncpu entries. */
struct cpu_info **cpu_infos		__read_mostly;

/* Note: set on mi_cpu_attach() and idle_loop(). */
kcpuset_t *	kcpuset_attached	__read_mostly	= NULL;
kcpuset_t *	kcpuset_running		__read_mostly	= NULL;

static char cpu_model[128];

/*
 * mi_cpu_init: early initialisation of MI CPU related structures.
 *
 * Note: may not block and memory allocator is not yet available.
 */
void
mi_cpu_init(void)
{
	struct cpu_info *ci;

	mutex_init(&cpu_lock, MUTEX_DEFAULT, IPL_NONE);

	kcpuset_create(&kcpuset_attached, true);
	kcpuset_create(&kcpuset_running, true);
	kcpuset_set(kcpuset_running, 0);

	ci = curcpu();
	cpu_topology_fake1(ci);
}

int
cpu_setmodel(const char *fmt, ...)
{
	int len;
	va_list ap;

	va_start(ap, fmt);
	len = vsnprintf(cpu_model, sizeof(cpu_model), fmt, ap);
	va_end(ap);
	return len;
}
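
/*
 * Illustrative use (hypothetical, not taken from this file): MD attach
 * code typically formats the model string once during boot, e.g.
 *
 *	cpu_setmodel("%s %s", vendor, name);
 *
 * where vendor and name are placeholder strings.  The result is read
 * back later with cpu_getmodel(), for example by sysctl handlers.
 */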

const char *
cpu_getmodel(void)
{
	return cpu_model;
}

bool
cpu_softintr_p(void)
{

	return (curlwp->l_pflag & LP_INTR) != 0;
}

bool
curcpu_stable(void)
{
	struct lwp *const l = curlwp;
	const int pflag = l->l_pflag;
	const int nopreempt = l->l_nopreempt;

	/*
	 * - Softints (LP_INTR) never migrate between CPUs.
	 * - Bound lwps (LP_BOUND), either kthreads created bound to
	 *   a CPU or any lwps bound with curlwp_bind, never migrate.
	 * - If kpreemption is disabled, the lwp can't migrate.
	 * - If we're in interrupt context, preemption is blocked.
	 *
	 * We combine the LP_INTR, LP_BOUND, and l_nopreempt test into
	 * a single predicted-true branch so this is cheap to assert in
	 * most contexts where it will be used, then fall back to
	 * calling the full kpreempt_disabled() and cpu_intr_p() as
	 * subroutines.
	 *
	 * XXX Is cpu_intr_p redundant with kpreempt_disabled?
	 */
	return __predict_true(((pflag & (LP_INTR|LP_BOUND)) | nopreempt)
		!= 0) ||
	    kpreempt_disabled() ||
	    cpu_intr_p();
}
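
/*
 * Illustrative use (an assumption, not from this file): callers that
 * cache a curcpu() pointer can assert that it cannot change out from
 * under them, e.g.
 *
 *	KASSERT(curcpu_stable());
 *	ci = curcpu();
 */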

/*
 * Collect CPU topology information as each CPU is attached.  This can be
 * called early during boot, so we need to be careful what we do.
 */
void
cpu_topology_set(struct cpu_info *ci, u_int package_id, u_int core_id,
    u_int smt_id, u_int numa_id)
{
	enum cpu_rel rel;

	cpu_topology_present = true;
	ci->ci_package_id = package_id;
	ci->ci_core_id = core_id;
	ci->ci_smt_id = smt_id;
	ci->ci_numa_id = numa_id;
	for (rel = 0; rel < __arraycount(ci->ci_sibling); rel++) {
		ci->ci_sibling[rel] = ci;
		ci->ci_nsibling[rel] = 1;
	}
}
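
/*
 * Hypothetical example (not from this file): MD attach code reports
 * each CPU's place in the topology before cpu_topology_init() runs,
 * e.g.
 *
 *	cpu_topology_set(ci, package_id, core_id, smt_id, numa_id);
 *
 * Until cpu_topology_init() links the sibling lists, each CPU is left
 * as a singleton in every list.
 */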

/*
 * Collect CPU relative speed
 */
void
cpu_topology_setspeed(struct cpu_info *ci, bool slow)
{

	cpu_topology_haveslow |= slow;
	ci->ci_is_slow = slow;
}
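
/*
 * Illustrative note (assumption): on asymmetric systems, for example
 * ARM big.LITTLE, MD code would mark the slower cluster, e.g.
 *
 *	cpu_topology_setspeed(ci, is_little_core);
 *
 * where is_little_core is a hypothetical flag derived from firmware or
 * device tree data.  Marking any CPU slow makes cpu_topology_init()
 * prefer the fast CPUs as "first class" for the scheduler.
 */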

/*
 * Link a CPU into the given circular list.
 */
static void
cpu_topology_link(struct cpu_info *ci, struct cpu_info *ci2, enum cpu_rel rel)
{
	struct cpu_info *ci3;

	/* Walk to the end of the existing circular list and append. */
	for (ci3 = ci2;; ci3 = ci3->ci_sibling[rel]) {
		ci3->ci_nsibling[rel]++;
		if (ci3->ci_sibling[rel] == ci2) {
			break;
		}
	}
	ci->ci_sibling[rel] = ci2;
	ci3->ci_sibling[rel] = ci;
	ci->ci_nsibling[rel] = ci3->ci_nsibling[rel];
}
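
/*
 * Worked example (descriptive only): with an existing two-element ring
 * A -> B -> A, cpu_topology_link(C, A, rel) walks the ring bumping each
 * member's sibling count, then splices C in just before A, giving
 * C -> A -> B -> C with ci_nsibling[rel] == 3 on every member.
 */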

/*
 * Print out the topology lists.
 */
static void
cpu_topology_dump(void)
{
#ifdef DEBUG
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci, *ci2;
	const char *names[] = { "core", "pkg", "1st" };
	enum cpu_rel rel;
	int i;

	CTASSERT(__arraycount(names) >= __arraycount(ci->ci_sibling));
	if (ncpu == 1) {
		return;
	}

	for (CPU_INFO_FOREACH(cii, ci)) {
		if (cpu_topology_haveslow)
			printf("%s ", ci->ci_is_slow ? "slow" : "fast");
		for (rel = 0; rel < __arraycount(ci->ci_sibling); rel++) {
			printf("%s has %d %s siblings:", cpu_name(ci),
			    ci->ci_nsibling[rel], names[rel]);
			ci2 = ci->ci_sibling[rel];
			i = 0;
			do {
				printf(" %s", cpu_name(ci2));
				ci2 = ci2->ci_sibling[rel];
			} while (++i < 64 && ci2 != ci->ci_sibling[rel]);
			if (i == 64) {
				printf(" GAVE UP");
			}
			printf("\n");
		}
		printf("%s first in package: %s\n", cpu_name(ci),
		    cpu_name(ci->ci_package1st));
	}
#endif	/* DEBUG */
}
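
/*
 * For reference, on a DEBUG kernel the dump above produces lines
 * roughly of the form (illustrative output, not captured from a real
 * boot):
 *
 *	cpu0 has 2 core siblings: cpu1 cpu0
 *	cpu0 has 4 pkg siblings: cpu1 cpu2 cpu3 cpu0
 *	cpu0 first in package: cpu0
 */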

/*
 * Fake up topology info if we have none, or if what we got was bogus.
 * Used early in boot, and by cpu_topology_fake().
 */
static void
cpu_topology_fake1(struct cpu_info *ci)
{
	enum cpu_rel rel;

	for (rel = 0; rel < __arraycount(ci->ci_sibling); rel++) {
		ci->ci_sibling[rel] = ci;
		ci->ci_nsibling[rel] = 1;
	}
	if (!cpu_topology_present) {
		ci->ci_package_id = cpu_index(ci);
	}
	ci->ci_schedstate.spc_flags |=
	    (SPCF_CORE1ST | SPCF_PACKAGE1ST | SPCF_1STCLASS);
	ci->ci_package1st = ci;
	if (!cpu_topology_haveslow) {
		ci->ci_is_slow = false;
	}
}

/*
 * Fake up topology info if we have none, or if what we got was bogus.
 * Don't override ci_package_id, etc, if cpu_topology_present is set.
 * MD code also uses these.
 */
static void
cpu_topology_fake(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		cpu_topology_fake1(ci);
		/* Undo (early boot) flag set so everything links OK. */
		ci->ci_schedstate.spc_flags &=
		    ~(SPCF_CORE1ST | SPCF_PACKAGE1ST | SPCF_1STCLASS);
	}
}

/*
 * Fix up basic CPU topology info.  Right now that means attach each CPU to
 * circular lists of its siblings in the same core, and in the same package.
 */
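/*
 * In outline: (1) link each CPU into per-core and per-package sibling
 * rings, falling back to cpu_topology_fake() if the reported IDs are
 * inconsistent; (2) flag the lowest-numbered SMT in each core and the
 * lowest-numbered core in each package; (3) point every CPU at its
 * package's first CPU and chain those leaders together on the
 * PACKAGE1ST list; (4) mark "first class" CPUs, either the fast CPUs
 * on asymmetric systems or the first SMT in each core otherwise.
 */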
void
cpu_topology_init(void)
{
	CPU_INFO_ITERATOR cii, cii2;
	struct cpu_info *ci, *ci2, *ci3;
	u_int minsmt, mincore;

	if (!cpu_topology_present) {
		cpu_topology_fake();
		goto linkit;
	}

	/* Find siblings in same core and package. */
	for (CPU_INFO_FOREACH(cii, ci)) {
		ci->ci_schedstate.spc_flags &=
		    ~(SPCF_CORE1ST | SPCF_PACKAGE1ST | SPCF_1STCLASS);
		for (CPU_INFO_FOREACH(cii2, ci2)) {
			/* Avoid bad things happening. */
			if (ci2->ci_package_id == ci->ci_package_id &&
			    ci2->ci_core_id == ci->ci_core_id &&
			    ci2->ci_smt_id == ci->ci_smt_id &&
			    ci2 != ci) {
#ifdef DEBUG
				printf("cpu%u %p pkg %u core %u smt %u same as "
				    "cpu%u %p pkg %u core %u smt %u\n",
				    cpu_index(ci), ci, ci->ci_package_id,
				    ci->ci_core_id, ci->ci_smt_id,
				    cpu_index(ci2), ci2, ci2->ci_package_id,
				    ci2->ci_core_id, ci2->ci_smt_id);
#endif
				printf("cpu_topology_init: info bogus, "
				    "faking it\n");
				cpu_topology_fake();
				goto linkit;
			}
			if (ci2 == ci ||
			    ci2->ci_package_id != ci->ci_package_id) {
				continue;
			}
			/* Find CPUs in the same core. */
			if (ci->ci_nsibling[CPUREL_CORE] == 1 &&
			    ci->ci_core_id == ci2->ci_core_id) {
				cpu_topology_link(ci, ci2, CPUREL_CORE);
			}
			/* Find CPUs in the same package. */
			if (ci->ci_nsibling[CPUREL_PACKAGE] == 1) {
				cpu_topology_link(ci, ci2, CPUREL_PACKAGE);
			}
			if (ci->ci_nsibling[CPUREL_CORE] > 1 &&
			    ci->ci_nsibling[CPUREL_PACKAGE] > 1) {
				break;
			}
		}
	}

 linkit:
	/* Identify lowest numbered SMT in each core. */
	for (CPU_INFO_FOREACH(cii, ci)) {
		ci2 = ci3 = ci;
		minsmt = ci->ci_smt_id;
		do {
			if (ci2->ci_smt_id < minsmt) {
				ci3 = ci2;
				minsmt = ci2->ci_smt_id;
			}
			ci2 = ci2->ci_sibling[CPUREL_CORE];
		} while (ci2 != ci);
		ci3->ci_schedstate.spc_flags |= SPCF_CORE1ST;
	}

	/* Identify lowest numbered SMT in each package. */
	ci3 = NULL;
	for (CPU_INFO_FOREACH(cii, ci)) {
		if ((ci->ci_schedstate.spc_flags & SPCF_CORE1ST) == 0) {
			continue;
		}
		ci2 = ci3 = ci;
		mincore = ci->ci_core_id;
		do {
			if ((ci2->ci_schedstate.spc_flags &
			    SPCF_CORE1ST) != 0 &&
			    ci2->ci_core_id < mincore) {
				ci3 = ci2;
				mincore = ci2->ci_core_id;
			}
			ci2 = ci2->ci_sibling[CPUREL_PACKAGE];
		} while (ci2 != ci);

		if ((ci3->ci_schedstate.spc_flags & SPCF_PACKAGE1ST) != 0) {
			/* Already identified - nothing more to do. */
			continue;
		}
		ci3->ci_schedstate.spc_flags |= SPCF_PACKAGE1ST;

		/* Walk through all CPUs in package and point to first. */
		ci2 = ci3;
		do {
			ci2->ci_package1st = ci3;
			ci2->ci_sibling[CPUREL_PACKAGE1ST] = ci3;
			ci2 = ci2->ci_sibling[CPUREL_PACKAGE];
		} while (ci2 != ci3);

		/* Now look for somebody else to link to. */
		for (CPU_INFO_FOREACH(cii2, ci2)) {
			if ((ci2->ci_schedstate.spc_flags & SPCF_PACKAGE1ST)
			    != 0 && ci2 != ci3) {
				cpu_topology_link(ci3, ci2, CPUREL_PACKAGE1ST);
				break;
			}
		}
	}

	/* Walk through all packages, starting with value of ci3 from above. */
	KASSERT(ci3 != NULL);
	ci = ci3;
	do {
		/* Walk through CPUs in the package and copy in PACKAGE1ST. */
		ci2 = ci;
		do {
			ci2->ci_sibling[CPUREL_PACKAGE1ST] =
			    ci->ci_sibling[CPUREL_PACKAGE1ST];
			ci2->ci_nsibling[CPUREL_PACKAGE1ST] =
			    ci->ci_nsibling[CPUREL_PACKAGE1ST];
			ci2 = ci2->ci_sibling[CPUREL_PACKAGE];
		} while (ci2 != ci);
		ci = ci->ci_sibling[CPUREL_PACKAGE1ST];
	} while (ci != ci3);

	if (cpu_topology_haveslow) {
		/*
		 * For asymmetric systems where some CPUs are slower than
		 * others, mark first class CPUs for the scheduler.  This
		 * conflicts with SMT right now so whinge if observed.
		 */
		if (curcpu()->ci_nsibling[CPUREL_CORE] > 1) {
			printf("cpu_topology_init: asymmetric & SMT??\n");
		}
		for (CPU_INFO_FOREACH(cii, ci)) {
			if (!ci->ci_is_slow) {
				ci->ci_schedstate.spc_flags |= SPCF_1STCLASS;
			}
		}
	} else {
		/*
		 * For any other configuration mark the 1st CPU in each
		 * core as a first class CPU.
		 */
		for (CPU_INFO_FOREACH(cii, ci)) {
			if ((ci->ci_schedstate.spc_flags & SPCF_CORE1ST) != 0) {
				ci->ci_schedstate.spc_flags |= SPCF_1STCLASS;
			}
		}
	}

	cpu_topology_dump();
}

/*
 * Adjust one count, for a counter that's NOT updated from interrupt
 * context.  Hardly worth making an inline due to preemption stuff.
 */
void
cpu_count(enum cpu_count idx, int64_t delta)
{
	lwp_t *l = curlwp;
	KPREEMPT_DISABLE(l);
	l->l_cpu->ci_counts[idx] += delta;
	KPREEMPT_ENABLE(l);
}
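
/*
 * Illustrative call (hypothetical, not from this file): bump the
 * per-CPU fork counter by one from thread context, e.g.
 *
 *	cpu_count(CPU_COUNT_FORKS, 1);
 *
 * Code running at interrupt level can instead update the local CPU's
 * ci_counts[] directly, since it cannot be preempted or migrated.
 */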

/*
 * Fetch fresh sum total for all counts.  Expensive - don't call often.
 *
 * If poll is true, the caller is okay with less recent values (but
 * no more than 1/hz seconds old).  Where this is called very often that
 * should be the case.
 *
 * This should be reasonably quick so that any value collected isn't
 * totally out of whack, and it can also be called from interrupt context,
 * so go to splvm() while summing the counters.  It's tempting to use a spin
 * mutex here but this routine is called from DDB.
 */
void
cpu_count_sync(bool poll)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	int64_t sum[CPU_COUNT_MAX], *ptr;
	static int lasttick;
	int curtick, s;
	enum cpu_count i;

	KASSERT(sizeof(ci->ci_counts) == sizeof(cpu_counts));

	if (__predict_false(!mp_online)) {
		memcpy(cpu_counts, curcpu()->ci_counts, sizeof(cpu_counts));
		return;
	}

	s = splvm();
	curtick = getticks();
	if (poll && atomic_load_acquire(&lasttick) == curtick) {
		splx(s);
		return;
	}
	memset(sum, 0, sizeof(sum));
	curcpu()->ci_counts[CPU_COUNT_SYNC]++;
	for (CPU_INFO_FOREACH(cii, ci)) {
		ptr = ci->ci_counts;
		for (i = 0; i < CPU_COUNT_MAX; i += 8) {
			sum[i+0] += ptr[i+0];
			sum[i+1] += ptr[i+1];
			sum[i+2] += ptr[i+2];
			sum[i+3] += ptr[i+3];
			sum[i+4] += ptr[i+4];
			sum[i+5] += ptr[i+5];
			sum[i+6] += ptr[i+6];
			sum[i+7] += ptr[i+7];
		}
		KASSERT(i == CPU_COUNT_MAX);
	}
	memcpy(cpu_counts, sum, sizeof(cpu_counts));
	atomic_store_release(&lasttick, curtick);
	splx(s);
}
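
/*
 * Illustrative consumer (an assumption, not part of this file): a
 * sysctl or stats handler refreshes and reads the totals with
 *
 *	cpu_count_sync(true);
 *	nforks = cpu_count_get(CPU_COUNT_FORKS);
 *
 * where cpu_count_get() reads the summed cpu_counts[] array; passing
 * true tolerates totals up to one tick old, avoiding a full resum on
 * every call.
 */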