/*	$NetBSD: subr_cpu.c,v 1.14.2.2 2020/04/08 14:08:52 martin Exp $	*/

/*-
 * Copyright (c) 2007, 2008, 2009, 2010, 2012, 2019, 2020
 *     The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c)2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * CPU related routines shared with rump.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_cpu.c,v 1.14.2.2 2020/04/08 14:08:52 martin Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sched.h>
#include <sys/conf.h>
#include <sys/cpu.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/kmem.h>

static void	cpu_topology_fake1(struct cpu_info *);

kmutex_t	cpu_lock		__cacheline_aligned;
int		ncpu			__read_mostly;
int		ncpuonline		__read_mostly;
bool		mp_online		__read_mostly;
static bool	cpu_topology_present	__read_mostly;
static bool	cpu_topology_haveslow	__read_mostly;
int64_t		cpu_counts[CPU_COUNT_MAX];

/* An array of CPUs.  There are ncpu entries. */
struct cpu_info **cpu_infos		__read_mostly;

/* Note: set on mi_cpu_attach() and idle_loop(). */
kcpuset_t *	kcpuset_attached	__read_mostly	= NULL;
kcpuset_t *	kcpuset_running		__read_mostly	= NULL;

static char cpu_model[128];

/*
 * mi_cpu_init: early initialisation of MI CPU related structures.
 *
 * Note: may not block and memory allocator is not yet available.
 */
void
mi_cpu_init(void)
{
	struct cpu_info *ci;

	mutex_init(&cpu_lock, MUTEX_DEFAULT, IPL_NONE);

	kcpuset_create(&kcpuset_attached, true);
	kcpuset_create(&kcpuset_running, true);
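	/* The primary CPU (index 0) is already running. */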
	kcpuset_set(kcpuset_running, 0);

	ci = curcpu();
	cpu_topology_fake1(ci);
}

int
cpu_setmodel(const char *fmt, ...)
{
	int len;
	va_list ap;

	va_start(ap, fmt);
	len = vsnprintf(cpu_model, sizeof(cpu_model), fmt, ap);
	va_end(ap);
	return len;
}

const char *
cpu_getmodel(void)
{
	return cpu_model;
}

bool
cpu_softintr_p(void)
{

	return (curlwp->l_pflag & LP_INTR) != 0;
}

/*
 * Collect CPU topology information as each CPU is attached.  This can be
 * called early during boot, so we need to be careful what we do.
 */
void
cpu_topology_set(struct cpu_info *ci, u_int package_id, u_int core_id,
    u_int smt_id, u_int numa_id)
{
	enum cpu_rel rel;

	cpu_topology_present = true;
	ci->ci_package_id = package_id;
	ci->ci_core_id = core_id;
	ci->ci_smt_id = smt_id;
	ci->ci_numa_id = numa_id;
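	/*
	 * Start out with each CPU as the sole member of all of its
	 * sibling lists; cpu_topology_init() links the rings together
	 * once every CPU has been attached.
	 */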
	for (rel = 0; rel < __arraycount(ci->ci_sibling); rel++) {
		ci->ci_sibling[rel] = ci;
		ci->ci_nsibling[rel] = 1;
	}
}

/*
 * Collect CPU relative speed.
 */
void
cpu_topology_setspeed(struct cpu_info *ci, bool slow)
{

	cpu_topology_haveslow |= slow;
	ci->ci_is_slow = slow;
}

/*
 * Link a CPU into the given circular list.
 */
static void
cpu_topology_link(struct cpu_info *ci, struct cpu_info *ci2, enum cpu_rel rel)
{
	struct cpu_info *ci3;

	/* Walk to the end of the existing circular list and append. */
	for (ci3 = ci2;; ci3 = ci3->ci_sibling[rel]) {
		ci3->ci_nsibling[rel]++;
		if (ci3->ci_sibling[rel] == ci2) {
			break;
		}
	}
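	/*
	 * ci3 is now the last CPU on the ring (the one that points back
	 * at ci2).  Splice ci in between ci3 and ci2, and give it the
	 * ring's updated member count.
	 */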
	ci->ci_sibling[rel] = ci2;
	ci3->ci_sibling[rel] = ci;
	ci->ci_nsibling[rel] = ci3->ci_nsibling[rel];
}

/*
 * Print out the topology lists.
 */
static void
cpu_topology_dump(void)
{
#ifdef DEBUG
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci, *ci2;
	const char *names[] = { "core", "pkg", "1st" };
	enum cpu_rel rel;
	int i;

	CTASSERT(__arraycount(names) >= __arraycount(ci->ci_sibling));

	for (CPU_INFO_FOREACH(cii, ci)) {
		if (cpu_topology_haveslow)
			printf("%s ", ci->ci_is_slow ? "slow" : "fast");
		for (rel = 0; rel < __arraycount(ci->ci_sibling); rel++) {
			printf("%s has %d %s siblings:", cpu_name(ci),
			    ci->ci_nsibling[rel], names[rel]);
			ci2 = ci->ci_sibling[rel];
			i = 0;
			do {
				printf(" %s", cpu_name(ci2));
				ci2 = ci2->ci_sibling[rel];
			} while (++i < 64 && ci2 != ci->ci_sibling[rel]);
			if (i == 64) {
				printf(" GAVE UP");
			}
			printf("\n");
		}
		printf("%s first in package: %s\n", cpu_name(ci),
		    cpu_name(ci->ci_package1st));
	}
#endif	/* DEBUG */
}

/*
 * Fake up topology info if we have none, or if what we got was bogus.
 * Used early in boot, and by cpu_topology_fake().
 */
static void
cpu_topology_fake1(struct cpu_info *ci)
{
	enum cpu_rel rel;

	for (rel = 0; rel < __arraycount(ci->ci_sibling); rel++) {
		ci->ci_sibling[rel] = ci;
		ci->ci_nsibling[rel] = 1;
	}
	if (!cpu_topology_present) {
		ci->ci_package_id = cpu_index(ci);
	}
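	/*
	 * With only itself on every sibling list, this CPU is trivially
	 * the first CPU in its core and in its package, and first class.
	 */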
	ci->ci_schedstate.spc_flags |=
	    (SPCF_CORE1ST | SPCF_PACKAGE1ST | SPCF_1STCLASS);
	ci->ci_package1st = ci;
	if (!cpu_topology_haveslow) {
		ci->ci_is_slow = false;
	}
}

/*
 * Fake up topology info if we have none, or if what we got was bogus.
 * Don't override ci_package_id, etc, if cpu_topology_present is set.
 * MD code also uses these.
 */
static void
cpu_topology_fake(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		cpu_topology_fake1(ci);
		/* Undo (early boot) flag set so everything links OK. */
		ci->ci_schedstate.spc_flags &=
		    ~(SPCF_CORE1ST | SPCF_PACKAGE1ST | SPCF_1STCLASS);
	}
}

/*
 * Fix up basic CPU topology info.  Right now that means attach each CPU to
 * circular lists of its siblings in the same core, and in the same package.
 */
void
cpu_topology_init(void)
{
	CPU_INFO_ITERATOR cii, cii2;
	struct cpu_info *ci, *ci2, *ci3;
	u_int minsmt, mincore;

	if (!cpu_topology_present) {
		cpu_topology_fake();
		goto linkit;
	}

	/* Find siblings in same core and package. */
	for (CPU_INFO_FOREACH(cii, ci)) {
		ci->ci_schedstate.spc_flags &=
		    ~(SPCF_CORE1ST | SPCF_PACKAGE1ST | SPCF_1STCLASS);
		for (CPU_INFO_FOREACH(cii2, ci2)) {
			/* Avoid bad things happening. */
			if (ci2->ci_package_id == ci->ci_package_id &&
			    ci2->ci_core_id == ci->ci_core_id &&
			    ci2->ci_smt_id == ci->ci_smt_id &&
			    ci2 != ci) {
#ifdef DEBUG
				printf("cpu%u %p pkg %u core %u smt %u same as "
				    "cpu%u %p pkg %u core %u smt %u\n",
				    cpu_index(ci), ci, ci->ci_package_id,
				    ci->ci_core_id, ci->ci_smt_id,
				    cpu_index(ci2), ci2, ci2->ci_package_id,
				    ci2->ci_core_id, ci2->ci_smt_id);
#endif
				printf("cpu_topology_init: info bogus, "
				    "faking it\n");
				cpu_topology_fake();
				goto linkit;
			}
			if (ci2 == ci ||
			    ci2->ci_package_id != ci->ci_package_id) {
				continue;
			}
			/* Find CPUs in the same core. */
			if (ci->ci_nsibling[CPUREL_CORE] == 1 &&
			    ci->ci_core_id == ci2->ci_core_id) {
				cpu_topology_link(ci, ci2, CPUREL_CORE);
			}
			/* Find CPUs in the same package. */
			if (ci->ci_nsibling[CPUREL_PACKAGE] == 1) {
				cpu_topology_link(ci, ci2, CPUREL_PACKAGE);
			}
			if (ci->ci_nsibling[CPUREL_CORE] > 1 &&
			    ci->ci_nsibling[CPUREL_PACKAGE] > 1) {
				break;
			}
		}
	}

 linkit:
	/* Identify lowest numbered SMT in each core. */
	for (CPU_INFO_FOREACH(cii, ci)) {
		ci2 = ci3 = ci;
		minsmt = ci->ci_smt_id;
		do {
			if (ci2->ci_smt_id < minsmt) {
				ci3 = ci2;
				minsmt = ci2->ci_smt_id;
			}
			ci2 = ci2->ci_sibling[CPUREL_CORE];
		} while (ci2 != ci);
		ci3->ci_schedstate.spc_flags |= SPCF_CORE1ST;
	}

	/* Identify lowest numbered SMT in each package. */
	ci3 = NULL;
	for (CPU_INFO_FOREACH(cii, ci)) {
		if ((ci->ci_schedstate.spc_flags & SPCF_CORE1ST) == 0) {
			continue;
		}
		ci2 = ci3 = ci;
		mincore = ci->ci_core_id;
		do {
			if ((ci2->ci_schedstate.spc_flags &
			    SPCF_CORE1ST) != 0 &&
			    ci2->ci_core_id < mincore) {
				ci3 = ci2;
				mincore = ci2->ci_core_id;
			}
			ci2 = ci2->ci_sibling[CPUREL_PACKAGE];
		} while (ci2 != ci);

		if ((ci3->ci_schedstate.spc_flags & SPCF_PACKAGE1ST) != 0) {
			/* Already identified - nothing more to do. */
			continue;
		}
		ci3->ci_schedstate.spc_flags |= SPCF_PACKAGE1ST;

		/* Walk through all CPUs in package and point to first. */
		ci2 = ci3;
		do {
			ci2->ci_package1st = ci3;
			ci2->ci_sibling[CPUREL_PACKAGE1ST] = ci3;
			ci2 = ci2->ci_sibling[CPUREL_PACKAGE];
		} while (ci2 != ci3);

		/* Now look for somebody else to link to. */
		for (CPU_INFO_FOREACH(cii2, ci2)) {
			if ((ci2->ci_schedstate.spc_flags & SPCF_PACKAGE1ST)
			    != 0 && ci2 != ci3) {
				cpu_topology_link(ci3, ci2, CPUREL_PACKAGE1ST);
				break;
			}
		}
	}

	/* Walk through all packages, starting with value of ci3 from above. */
	KASSERT(ci3 != NULL);
	ci = ci3;
	do {
		/* Walk through CPUs in the package and copy in PACKAGE1ST. */
		ci2 = ci;
		do {
			ci2->ci_sibling[CPUREL_PACKAGE1ST] =
			    ci->ci_sibling[CPUREL_PACKAGE1ST];
			ci2->ci_nsibling[CPUREL_PACKAGE1ST] =
			    ci->ci_nsibling[CPUREL_PACKAGE1ST];
			ci2 = ci2->ci_sibling[CPUREL_PACKAGE];
		} while (ci2 != ci);
		ci = ci->ci_sibling[CPUREL_PACKAGE1ST];
	} while (ci != ci3);

	if (cpu_topology_haveslow) {
		/*
		 * For asymmetric systems where some CPUs are slower than
		 * others, mark first class CPUs for the scheduler.  This
		 * conflicts with SMT right now so whinge if observed.
		 */
		if (curcpu()->ci_nsibling[CPUREL_CORE] > 1) {
			printf("cpu_topology_init: asymmetric & SMT??\n");
		}
		for (CPU_INFO_FOREACH(cii, ci)) {
			if (!ci->ci_is_slow) {
				ci->ci_schedstate.spc_flags |= SPCF_1STCLASS;
			}
		}
	} else {
		/*
		 * For any other configuration mark the 1st CPU in each
		 * core as a first class CPU.
		 */
		for (CPU_INFO_FOREACH(cii, ci)) {
			if ((ci->ci_schedstate.spc_flags & SPCF_CORE1ST) != 0) {
				ci->ci_schedstate.spc_flags |= SPCF_1STCLASS;
			}
		}
	}

	cpu_topology_dump();
}

/*
 * Adjust one count, for a counter that's NOT updated from interrupt
 * context.  Hardly worth making an inline due to preemption stuff.
 */
void
cpu_count(enum cpu_count idx, int64_t delta)
{
	lwp_t *l = curlwp;
	KPREEMPT_DISABLE(l);
	l->l_cpu->ci_counts[idx] += delta;
	KPREEMPT_ENABLE(l);
}

/*
 * Fetch fresh sum total for all counts.  Expensive - don't call often.
 */
void
cpu_count_sync_all(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	int64_t sum[CPU_COUNT_MAX], *ptr;
	enum cpu_count i;
	int s;

	KASSERT(sizeof(ci->ci_counts) == sizeof(cpu_counts));

	if (__predict_true(mp_online)) {
		memset(sum, 0, sizeof(sum));
		/*
		 * We want this to be reasonably quick, so that any value
		 * we get isn't totally out of whack; to that end, don't
		 * let the current LWP get preempted.
		 */
		s = splvm();
		curcpu()->ci_counts[CPU_COUNT_SYNC_ALL]++;
		for (CPU_INFO_FOREACH(cii, ci)) {
			ptr = ci->ci_counts;
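			/*
			 * Sum eight counters per iteration; CPU_COUNT_MAX
			 * is expected to be a multiple of 8, which the
			 * KASSERT below verifies by checking that the loop
			 * lands exactly on the end of the array.
			 */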
			for (i = 0; i < CPU_COUNT_MAX; i += 8) {
				sum[i+0] += ptr[i+0];
				sum[i+1] += ptr[i+1];
				sum[i+2] += ptr[i+2];
				sum[i+3] += ptr[i+3];
				sum[i+4] += ptr[i+4];
				sum[i+5] += ptr[i+5];
				sum[i+6] += ptr[i+6];
				sum[i+7] += ptr[i+7];
			}
			KASSERT(i == CPU_COUNT_MAX);
		}
		memcpy(cpu_counts, sum, sizeof(cpu_counts));
		splx(s);
	} else {
		memcpy(cpu_counts, curcpu()->ci_counts, sizeof(cpu_counts));
	}
}

/*
 * Fetch a fresh sum total for one single count.  Expensive - don't call often.
 */
int64_t
cpu_count_sync(enum cpu_count count)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	int64_t sum;
	int s;

	if (__predict_true(mp_online)) {
		s = splvm();
		curcpu()->ci_counts[CPU_COUNT_SYNC_ONE]++;
		sum = 0;
		for (CPU_INFO_FOREACH(cii, ci)) {
			sum += ci->ci_counts[count];
		}
		splx(s);
	} else {
		/* XXX Early boot, iterator might not be available. */
		sum = curcpu()->ci_counts[count];
	}
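	/* Cache the freshly computed total and hand it back. */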
	return cpu_counts[count] = sum;
}