/*	$NetBSD: subr_cpu.c,v 1.2 2019/12/21 11:35:25 ad Exp $	*/

/*-
 * Copyright (c) 2007, 2008, 2009, 2010, 2012, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c)2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * CPU related routines shared with rump.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_cpu.c,v 1.2 2019/12/21 11:35:25 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sched.h>
#include <sys/conf.h>
#include <sys/cpu.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/kmem.h>

kmutex_t cpu_lock __cacheline_aligned;
int ncpu __read_mostly;
int ncpuonline __read_mostly;
bool mp_online __read_mostly;
static bool cpu_topology_present __read_mostly;
int64_t cpu_counts[CPU_COUNT_MAX];

/* An array of CPUs.  There are ncpu entries. */
struct cpu_info **cpu_infos __read_mostly;

/* Note: set on mi_cpu_attach() and idle_loop(). */
kcpuset_t * kcpuset_attached __read_mostly = NULL;
kcpuset_t * kcpuset_running __read_mostly = NULL;

static char cpu_model[128];

/*
 * mi_cpu_init: early initialisation of MI CPU related structures.
 *
 * Note: may not block and memory allocator is not yet available.
 */
void
mi_cpu_init(void)
{

	mutex_init(&cpu_lock, MUTEX_DEFAULT, IPL_NONE);

	kcpuset_create(&kcpuset_attached, true);
	kcpuset_create(&kcpuset_running, true);
	kcpuset_set(kcpuset_running, 0);
}
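
/*
 * A behavioural note on the above: kcpuset_create() is passed true so
 * that both sets start zeroed, and the boot CPU (cpu_index() 0) is
 * already executing, which is why it is marked running here before any
 * cpu_info structure has been attached.
 */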

int
cpu_setmodel(const char *fmt, ...)
{
	int len;
	va_list ap;

	va_start(ap, fmt);
	len = vsnprintf(cpu_model, sizeof(cpu_model), fmt, ap);
	va_end(ap);
	return len;
}
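
/*
 * Illustrative sketch, not a requirement of the interface: MD boot code
 * would typically format the model string once at attach time, e.g.
 *
 *	cpu_setmodel("%s %s", vendor, brand);
 *
 * where vendor and brand are hypothetical MD strings.  The return value
 * is that of vsnprintf(), so a value >= sizeof(cpu_model) (128) means
 * the formatted string was truncated.
 */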

const char *
cpu_getmodel(void)
{
	return cpu_model;
}

bool
cpu_softintr_p(void)
{

	return (curlwp->l_pflag & LP_INTR) != 0;
}

/*
 * Collect CPU topology information as each CPU is attached.  This can be
 * called early during boot, so we need to be careful what we do.
 */
void
cpu_topology_set(struct cpu_info *ci, u_int package_id, u_int core_id,
    u_int smt_id, u_int numa_id)
{
	enum cpu_rel rel;

	cpu_topology_present = true;
	ci->ci_package_id = package_id;
	ci->ci_core_id = core_id;
	ci->ci_smt_id = smt_id;
	ci->ci_numa_id = numa_id;
	for (rel = 0; rel < __arraycount(ci->ci_sibling); rel++) {
		ci->ci_sibling[rel] = ci;
		ci->ci_nsibling[rel] = 1;
	}
}
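
/*
 * Illustrative sketch: a port that has decoded its topology from
 * firmware tables or CPUID-style leaves might call, per CPU,
 *
 *	cpu_topology_set(ci, package_id, core_id, smt_id, numa_id);
 *
 * with all four IDs taken from hypothetical MD sources.  Until
 * cpu_topology_init() runs, each CPU is the sole member of each of its
 * own circular sibling lists.
 */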

/*
 * Link a CPU into the given circular list.
 */
static void
cpu_topology_link(struct cpu_info *ci, struct cpu_info *ci2, enum cpu_rel rel)
{
	struct cpu_info *ci3;

	/* Walk to the end of the existing circular list and append. */
	for (ci3 = ci2;; ci3 = ci3->ci_sibling[rel]) {
		ci3->ci_nsibling[rel]++;
		if (ci3->ci_sibling[rel] == ci2) {
			break;
		}
	}
	ci->ci_sibling[rel] = ci2;
	ci3->ci_sibling[rel] = ci;
	ci->ci_nsibling[rel] = ci3->ci_nsibling[rel];
}
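
/*
 * Worked example for the above: starting from c0 alone (c0 -> c0,
 * nsibling 1), linking c1 onto c0 finds c0 as the tail, bumps its count
 * to 2 and yields c0 -> c1 -> c0 with both counts 2.  Linking c2 onto
 * c0 then walks c0, c1 (bumping each count to 3), appends after the old
 * tail c1, and yields c0 -> c1 -> c2 -> c0; c2 copies its count of 3
 * from that old tail.
 */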

/*
 * Print out the topology lists.
 */
static void
cpu_topology_dump(void)
{
#if DEBUG
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci, *ci2;
	const char *names[] = { "core", "package", "peer", "smt" };
	enum cpu_rel rel;
	int i;

	for (CPU_INFO_FOREACH(cii, ci)) {
		for (rel = 0; rel < __arraycount(ci->ci_sibling); rel++) {
			printf("%s has %d %s siblings:", cpu_name(ci),
			    ci->ci_nsibling[rel], names[rel]);
			ci2 = ci->ci_sibling[rel];
			i = 0;
			do {
				printf(" %s", cpu_name(ci2));
				ci2 = ci2->ci_sibling[rel];
			} while (++i < 64 && ci2 != ci->ci_sibling[rel]);
			if (i == 64) {
				printf(" GAVE UP");
			}
			printf("\n");
		}
	}
#endif	/* DEBUG */
}

/*
 * Fake up topology info if we have none, or if what we got was bogus.
 * Don't override ci_package_id, etc, if cpu_topology_present is set.
 * MD code also uses these.
 */
static void
cpu_topology_fake(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	enum cpu_rel rel;

	for (CPU_INFO_FOREACH(cii, ci)) {
		for (rel = 0; rel < __arraycount(ci->ci_sibling); rel++) {
			ci->ci_sibling[rel] = ci;
			ci->ci_nsibling[rel] = 1;
		}
		if (!cpu_topology_present) {
			ci->ci_package_id = cpu_index(ci);
		}
		ci->ci_smt_primary = ci;
		ci->ci_schedstate.spc_flags |= SPCF_SMTPRIMARY;
	}
	cpu_topology_dump();
}

/*
 * Fix up basic CPU topology info.  Right now that means attach each CPU to
 * circular lists of its siblings in the same core, and in the same package.
 */
void
cpu_topology_init(void)
{
	CPU_INFO_ITERATOR cii, cii2;
	struct cpu_info *ci, *ci2, *ci3;
	u_int ncore, npackage, npeer, minsmt;
	bool symmetric;

	if (!cpu_topology_present) {
		cpu_topology_fake();
		return;
	}

	/* Find siblings in same core and package. */
	for (CPU_INFO_FOREACH(cii, ci)) {
		for (CPU_INFO_FOREACH(cii2, ci2)) {
			/* Avoid bad things happening. */
			if (ci2->ci_package_id == ci->ci_package_id &&
			    ci2->ci_core_id == ci->ci_core_id &&
			    ci2->ci_smt_id == ci->ci_smt_id &&
			    ci2 != ci) {
				printf("cpu_topology_init: info bogus, "
				    "faking it\n");
				cpu_topology_fake();
				return;
			}
			if (ci2 == ci ||
			    ci2->ci_package_id != ci->ci_package_id) {
				continue;
			}
			/* Find CPUs in the same core. */
			if (ci->ci_nsibling[CPUREL_CORE] == 1 &&
			    ci->ci_core_id == ci2->ci_core_id) {
				cpu_topology_link(ci, ci2, CPUREL_CORE);
			}
			/* Find CPUs in the same package. */
			if (ci->ci_nsibling[CPUREL_PACKAGE] == 1) {
				cpu_topology_link(ci, ci2, CPUREL_PACKAGE);
			}
			if (ci->ci_nsibling[CPUREL_CORE] > 1 &&
			    ci->ci_nsibling[CPUREL_PACKAGE] > 1) {
				break;
			}
		}
	}

	/* Find peers in other packages, and peer SMTs in same package. */
	for (CPU_INFO_FOREACH(cii, ci)) {
		if (ci->ci_nsibling[CPUREL_PEER] <= 1) {
			for (CPU_INFO_FOREACH(cii2, ci2)) {
				if (ci != ci2 &&
				    ci->ci_package_id != ci2->ci_package_id &&
				    ci->ci_core_id == ci2->ci_core_id &&
				    ci->ci_smt_id == ci2->ci_smt_id) {
					cpu_topology_link(ci, ci2,
					    CPUREL_PEER);
					break;
				}
			}
		}
		if (ci->ci_nsibling[CPUREL_SMT] <= 1) {
			for (CPU_INFO_FOREACH(cii2, ci2)) {
				if (ci != ci2 &&
				    ci->ci_package_id == ci2->ci_package_id &&
				    ci->ci_core_id != ci2->ci_core_id &&
				    ci->ci_smt_id == ci2->ci_smt_id) {
					cpu_topology_link(ci, ci2,
					    CPUREL_SMT);
					break;
				}
			}
		}
	}

	/* Determine whether the topology is bogus/symmetric. */
	npackage = curcpu()->ci_nsibling[CPUREL_PACKAGE];
	ncore = curcpu()->ci_nsibling[CPUREL_CORE];
	npeer = curcpu()->ci_nsibling[CPUREL_PEER];
	symmetric = true;
	for (CPU_INFO_FOREACH(cii, ci)) {
		if (npackage != ci->ci_nsibling[CPUREL_PACKAGE] ||
		    ncore != ci->ci_nsibling[CPUREL_CORE] ||
		    npeer != ci->ci_nsibling[CPUREL_PEER]) {
			symmetric = false;
		}
	}
	cpu_topology_dump();
	if (symmetric == false) {
		printf("cpu_topology_init: not symmetric, faking it\n");
		cpu_topology_fake();
		return;
	}

	/* Identify SMT primary in each core. */
	for (CPU_INFO_FOREACH(cii, ci)) {
		ci2 = ci3 = ci;
		minsmt = ci->ci_smt_id;
		do {
			if (ci2->ci_smt_id < minsmt) {
				ci3 = ci2;
				minsmt = ci2->ci_smt_id;
			}
			ci2 = ci2->ci_sibling[CPUREL_CORE];
		} while (ci2 != ci);

		/*
		 * Mark the SMT primary, and walk back over the list
		 * pointing secondaries to the primary.
		 */
		ci3->ci_schedstate.spc_flags |= SPCF_SMTPRIMARY;
		ci2 = ci;
		do {
			ci2->ci_smt_primary = ci3;
			ci2 = ci2->ci_sibling[CPUREL_CORE];
		} while (ci2 != ci);
	}
}
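
/*
 * Worked example for the above: on a hypothetical machine with 2
 * packages, each holding 2 cores of 2 SMT threads (8 CPUs), every CORE
 * list ends up with the 2 threads of one core, every PACKAGE list with
 * the 4 CPUs of one package, and every PEER list with the 2 CPUs that
 * occupy the same core/SMT slot in the other package.  Within each core
 * the CPU with the lowest ci_smt_id becomes the SMT primary: it gets
 * SPCF_SMTPRIMARY, and the other thread's ci_smt_primary points at it.
 */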

/*
 * Print basic topology info.
 */
void
cpu_topology_print(struct cpu_info *ci)
{

	aprint_normal_dev(ci->ci_dev, "numa %u, package %u, core %u, smt %u\n",
	    ci->ci_numa_id, ci->ci_package_id, ci->ci_core_id, ci->ci_smt_id);
}

/*
 * Adjust one count, for a counter that's NOT updated from interrupt
 * context.  Hardly worth making an inline due to preemption stuff.
 */
void
cpu_count(enum cpu_count idx, int64_t delta)
{
	lwp_t *l = curlwp;
	KPREEMPT_DISABLE(l);
	l->l_cpu->ci_counts[idx] += delta;
	KPREEMPT_ENABLE(l);
}
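
/*
 * Illustrative sketch: a caller on the fork path might do
 *
 *	cpu_count(CPU_COUNT_FORKS, 1);
 *
 * (CPU_COUNT_FORKS standing in for any enum cpu_count index).
 * Disabling preemption pins the LWP so the read-modify-write lands
 * entirely in one CPU's ci_counts slot; as the comment above says,
 * counters also updated from interrupt context need stronger
 * protection than this.
 */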

/*
 * Fetch fresh sum total for all counts.  Expensive - don't call often.
 */
void
cpu_count_sync_all(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	int64_t sum[CPU_COUNT_MAX], *ptr;
	enum cpu_count i;
	int s;

	KASSERT(sizeof(ci->ci_counts) == sizeof(cpu_counts));

	if (__predict_true(mp_online)) {
		memset(sum, 0, sizeof(sum));
		/*
		 * We want this to be reasonably quick, so that any value
		 * we get isn't totally out of whack; don't let the current
		 * LWP get preempted.
		 */
		s = splvm();
		curcpu()->ci_counts[CPU_COUNT_SYNC_ALL]++;
		for (CPU_INFO_FOREACH(cii, ci)) {
			ptr = ci->ci_counts;
			for (i = 0; i < CPU_COUNT_MAX; i += 8) {
				sum[i+0] += ptr[i+0];
				sum[i+1] += ptr[i+1];
				sum[i+2] += ptr[i+2];
				sum[i+3] += ptr[i+3];
				sum[i+4] += ptr[i+4];
				sum[i+5] += ptr[i+5];
				sum[i+6] += ptr[i+6];
				sum[i+7] += ptr[i+7];
			}
			KASSERT(i == CPU_COUNT_MAX);
		}
		memcpy(cpu_counts, sum, sizeof(cpu_counts));
		splx(s);
	} else {
		memcpy(cpu_counts, curcpu()->ci_counts, sizeof(cpu_counts));
	}
}
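
/*
 * A note on the above: the 8-way unrolled loop assumes CPU_COUNT_MAX is
 * a multiple of 8, which the KASSERT on the loop index checks.  A
 * consumer wanting one coherent snapshot would refresh and then read
 * the global array, e.g.
 *
 *	cpu_count_sync_all();
 *	total = cpu_counts[CPU_COUNT_NSWTCH];
 *
 * with CPU_COUNT_NSWTCH standing in for any index of interest.
 */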

/*
 * Fetch a fresh sum total for one single count.  Expensive - don't call often.
 */
int64_t
cpu_count_sync(enum cpu_count count)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	int64_t sum;
	int s;

	if (__predict_true(mp_online)) {
		s = splvm();
		curcpu()->ci_counts[CPU_COUNT_SYNC_ONE]++;
		sum = 0;
		for (CPU_INFO_FOREACH(cii, ci)) {
			sum += ci->ci_counts[count];
		}
		splx(s);
	} else {
		/* XXX Early boot, iterator might not be available. */
		sum = curcpu()->ci_counts[count];
	}
	return cpu_counts[count] = sum;
}
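
/*
 * Illustrative sketch: a caller that needs only one fresh value, for
 * example a sysctl handler, might do
 *
 *	int64_t nintr = cpu_count_sync(CPU_COUNT_NINTR);
 *
 * with CPU_COUNT_NINTR standing in for any index.  Note that the fresh
 * sum is also written back into cpu_counts[] as a side effect.
 */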