/*	$NetBSD: kern_cpu.c,v 1.85 2019/12/17 00:59:14 ad Exp $	*/

/*-
 * Copyright (c) 2007, 2008, 2009, 2010, 2012, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c)2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_cpu.c,v 1.85 2019/12/17 00:59:14 ad Exp $");

#ifdef _KERNEL_OPT
#include "opt_cpu_ucode.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/idle.h>
#include <sys/sched.h>
#include <sys/intr.h>
#include <sys/conf.h>
#include <sys/cpu.h>
#include <sys/cpuio.h>
#include <sys/proc.h>
#include <sys/percpu.h>
#include <sys/kernel.h>
#include <sys/kauth.h>
#include <sys/xcall.h>
#include <sys/pool.h>
#include <sys/kmem.h>
#include <sys/select.h>
#include <sys/namei.h>
#include <sys/callout.h>
#include <sys/pcu.h>

#include <uvm/uvm_extern.h>

#include "ioconf.h"

/*
 * If the port has stated that cpu_data is the first thing in cpu_info,
 * verify that the claim is true. This will prevent them from getting out
 * of sync.
 */
#ifdef __HAVE_CPU_DATA_FIRST
CTASSERT(offsetof(struct cpu_info, ci_data) == 0);
#else
CTASSERT(offsetof(struct cpu_info, ci_data) != 0);
#endif

#ifndef _RUMPKERNEL /* XXX temporary */
static void cpu_xc_online(struct cpu_info *, void *);
static void cpu_xc_offline(struct cpu_info *, void *);

dev_type_ioctl(cpuctl_ioctl);

const struct cdevsw cpuctl_cdevsw = {
	.d_open = nullopen,
	.d_close = nullclose,
	.d_read = nullread,
	.d_write = nullwrite,
	.d_ioctl = cpuctl_ioctl,
	.d_stop = nullstop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};
#endif /* ifndef _RUMPKERNEL XXX */

kmutex_t cpu_lock __cacheline_aligned;
int ncpu __read_mostly;
int ncpuonline __read_mostly;
bool mp_online __read_mostly;
static bool cpu_topology_present __read_mostly;
int64_t cpu_counts[CPU_COUNT_MAX];

/* An array of CPUs. There are ncpu entries. */
struct cpu_info **cpu_infos __read_mostly;

/* Note: set on mi_cpu_attach() and idle_loop(). */
kcpuset_t * kcpuset_attached __read_mostly = NULL;
kcpuset_t * kcpuset_running __read_mostly = NULL;

int (*compat_cpuctl_ioctl)(struct lwp *, u_long, void *) = (void *)enosys;

static char cpu_model[128];

/*
 * mi_cpu_init: early initialisation of MI CPU related structures.
 *
 * Note: may not block and memory allocator is not yet available.
 */
void
mi_cpu_init(void)
{

	mutex_init(&cpu_lock, MUTEX_DEFAULT, IPL_NONE);

	kcpuset_create(&kcpuset_attached, true);
	kcpuset_create(&kcpuset_running, true);
	kcpuset_set(kcpuset_running, 0);
}

#ifndef _RUMPKERNEL /* XXX temporary */
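/*
 * mi_cpu_attach: MI part of attaching a CPU. Assigns the CPU its index,
 * creates the idle LWP and per-CPU data structures, and accounts the CPU
 * as attached and online.
 */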
int
mi_cpu_attach(struct cpu_info *ci)
{
	int error;

	KASSERT(maxcpus > 0);

	ci->ci_index = ncpu;
	kcpuset_set(kcpuset_attached, cpu_index(ci));

	/*
	 * Create a convenience cpuset of just ourselves.
	 */
	kcpuset_create(&ci->ci_data.cpu_kcpuset, true);
	kcpuset_set(ci->ci_data.cpu_kcpuset, cpu_index(ci));

	TAILQ_INIT(&ci->ci_data.cpu_ld_locks);
	__cpu_simple_lock_init(&ci->ci_data.cpu_ld_lock);

	/* This is useful for e.g. per-cpu evcnt */
	snprintf(ci->ci_data.cpu_name, sizeof(ci->ci_data.cpu_name), "cpu%d",
	    cpu_index(ci));

	if (__predict_false(cpu_infos == NULL)) {
		size_t ci_bufsize = (maxcpus + 1) * sizeof(struct cpu_info *);
		cpu_infos = kmem_zalloc(ci_bufsize, KM_SLEEP);
	}
	cpu_infos[cpu_index(ci)] = ci;

	sched_cpuattach(ci);

	error = create_idle_lwp(ci);
	if (error != 0) {
		/* XXX revert sched_cpuattach */
		return error;
	}

	if (ci == curcpu())
		ci->ci_onproc = curlwp;
	else
		ci->ci_onproc = ci->ci_data.cpu_idlelwp;

	percpu_init_cpu(ci);
	softint_init(ci);
	callout_init_cpu(ci);
	xc_init_cpu(ci);
	pool_cache_cpu_init(ci);
	selsysinit(ci);
	cache_cpu_init(ci);
	TAILQ_INIT(&ci->ci_data.cpu_biodone);
	ncpu++;
	ncpuonline++;

	return 0;
}

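/*
 * cpuctlattach: pseudo-device attach hook for cpuctl(4). All of the real
 * set-up happens in mi_cpu_attach(); by the time this runs the boot CPU
 * must already have been attached.
 */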
void
cpuctlattach(int dummy __unused)
{

	KASSERT(cpu_infos != NULL);
}

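/*
 * cpuctl_ioctl: ioctl handler for the cpuctl(4) device. All state changes
 * are serialized under cpu_lock. A minimal userland sketch of taking a
 * CPU offline through this interface might look as follows (illustrative
 * only; cpuctl(8) is the supported front end; needs <sys/cpuio.h>,
 * <fcntl.h> and <err.h>):
 *
 *	cpustate_t cs = { .cs_id = 1, .cs_online = false, .cs_intr = true };
 *	int fd = open("/dev/cpuctl", O_RDWR);
 *	if (fd == -1 || ioctl(fd, IOC_CPU_SETSTATE, &cs) == -1)
 *		err(EXIT_FAILURE, "IOC_CPU_SETSTATE");
 */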
int
cpuctl_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	CPU_INFO_ITERATOR cii;
	cpustate_t *cs;
	struct cpu_info *ci;
	int error, i;
	u_int id;

	error = 0;

	mutex_enter(&cpu_lock);
	switch (cmd) {
	case IOC_CPU_SETSTATE:
		cs = data;
		error = kauth_authorize_system(l->l_cred,
		    KAUTH_SYSTEM_CPU, KAUTH_REQ_SYSTEM_CPU_SETSTATE, cs, NULL,
		    NULL);
		if (error != 0)
			break;
		if (cs->cs_id >= maxcpus ||
		    (ci = cpu_lookup(cs->cs_id)) == NULL) {
			error = ESRCH;
			break;
		}
		cpu_setintr(ci, cs->cs_intr);
		error = cpu_setstate(ci, cs->cs_online);
		break;

	case IOC_CPU_GETSTATE:
		cs = data;
		id = cs->cs_id;
		memset(cs, 0, sizeof(*cs));
		cs->cs_id = id;
		if (cs->cs_id >= maxcpus ||
		    (ci = cpu_lookup(id)) == NULL) {
			error = ESRCH;
			break;
		}
		if ((ci->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
			cs->cs_online = false;
		else
			cs->cs_online = true;
		if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0)
			cs->cs_intr = false;
		else
			cs->cs_intr = true;
		cs->cs_lastmod = (int32_t)ci->ci_schedstate.spc_lastmod;
		cs->cs_lastmodhi = (int32_t)
		    (ci->ci_schedstate.spc_lastmod >> 32);
		cs->cs_intrcnt = cpu_intr_count(ci) + 1;
		cs->cs_hwid = ci->ci_cpuid;
		break;

	case IOC_CPU_MAPID:
		i = 0;
		for (CPU_INFO_FOREACH(cii, ci)) {
			if (i++ == *(int *)data)
				break;
		}
		if (ci == NULL)
			error = ESRCH;
		else
			*(int *)data = cpu_index(ci);
		break;

	case IOC_CPU_GETCOUNT:
		*(int *)data = ncpu;
		break;

#ifdef CPU_UCODE
	case IOC_CPU_UCODE_GET_VERSION:
		error = cpu_ucode_get_version((struct cpu_ucode_version *)data);
		break;

	case IOC_CPU_UCODE_APPLY:
		error = kauth_authorize_machdep(l->l_cred,
		    KAUTH_MACHDEP_CPU_UCODE_APPLY,
		    NULL, NULL, NULL, NULL);
		if (error != 0)
			break;
		error = cpu_ucode_apply((const struct cpu_ucode *)data);
		break;
#endif

	default:
		error = (*compat_cpuctl_ioctl)(l, cmd, data);
		break;
	}
	mutex_exit(&cpu_lock);

	return error;
}

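/*
 * cpu_lookup: map a CPU index to its cpu_info structure, or NULL if no
 * CPU with that index has been attached.
 */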
struct cpu_info *
cpu_lookup(u_int idx)
{
	struct cpu_info *ci;

	/*
	 * cpu_infos is a NULL terminated array of MAXCPUS + 1 entries,
	 * so an index of MAXCPUS here is ok. See mi_cpu_attach.
	 */
	KASSERT(idx <= maxcpus);

	if (__predict_false(cpu_infos == NULL)) {
		KASSERT(idx == 0);
		return curcpu();
	}

	ci = cpu_infos[idx];
	KASSERT(ci == NULL || cpu_index(ci) == idx);
	KASSERTMSG(idx < maxcpus || ci == NULL, "idx %d ci %p", idx, ci);

	return ci;
}

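/*
 * cpu_xc_offline: cross-call handler run on the CPU that is being taken
 * offline. Marks the CPU offline and migrates unbound LWPs to another
 * online CPU; on failure it clears the offline flag again so that the
 * caller can detect the condition.
 */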
static void
cpu_xc_offline(struct cpu_info *ci, void *unused)
{
	struct schedstate_percpu *spc, *mspc = NULL;
	struct cpu_info *target_ci;
	struct lwp *l;
	CPU_INFO_ITERATOR cii;
	int s;

	/*
	 * Thread that made the cross call (separate context) holds
	 * cpu_lock on our behalf.
	 */
	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags |= SPCF_OFFLINE;
	splx(s);

	/* Take the first available CPU for the migration. */
	for (CPU_INFO_FOREACH(cii, target_ci)) {
		mspc = &target_ci->ci_schedstate;
		if ((mspc->spc_flags & SPCF_OFFLINE) == 0)
			break;
	}
	KASSERT(target_ci != NULL);

	/*
	 * Migrate all non-bound threads to the other CPU. Note that this
	 * runs from the xcall thread, thus handling of LSONPROC is not needed.
	 */
	mutex_enter(proc_lock);
	LIST_FOREACH(l, &alllwp, l_list) {
		struct cpu_info *mci;

		lwp_lock(l);
		if (l->l_cpu != ci || (l->l_pflag & (LP_BOUND | LP_INTR))) {
			lwp_unlock(l);
			continue;
		}
		/* Regular case - no affinity. */
		if (l->l_affinity == NULL) {
			lwp_migrate(l, target_ci);
			continue;
		}
		/* Affinity is set, find an online CPU in the set. */
		for (CPU_INFO_FOREACH(cii, mci)) {
			mspc = &mci->ci_schedstate;
			if ((mspc->spc_flags & SPCF_OFFLINE) == 0 &&
			    kcpuset_isset(l->l_affinity, cpu_index(mci)))
				break;
		}
		if (mci == NULL) {
			lwp_unlock(l);
			mutex_exit(proc_lock);
			goto fail;
		}
		lwp_migrate(l, mci);
	}
	mutex_exit(proc_lock);

#if PCU_UNIT_COUNT > 0
	pcu_save_all_on_cpu();
#endif

#ifdef __HAVE_MD_CPU_OFFLINE
	cpu_offline_md();
#endif
	return;
fail:
	/* Just unset the SPCF_OFFLINE flag, caller will check */
	s = splsched();
	spc->spc_flags &= ~SPCF_OFFLINE;
	splx(s);
}

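/*
 * cpu_xc_online: cross-call handler that clears SPCF_OFFLINE on the
 * target CPU, making it eligible to run LWPs again.
 */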
static void
cpu_xc_online(struct cpu_info *ci, void *unused)
{
	struct schedstate_percpu *spc;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags &= ~SPCF_OFFLINE;
	splx(s);
}

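/*
 * cpu_setstate: set the given CPU online or offline. Must be called with
 * cpu_lock held; the actual state change is performed on the target CPU
 * via a cross call. Refuses to offline the last online CPU in a
 * processor set.
 */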
int
cpu_setstate(struct cpu_info *ci, bool online)
{
	struct schedstate_percpu *spc;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci2;
	uint64_t where;
	xcfunc_t func;
	int nonline;

	spc = &ci->ci_schedstate;

	KASSERT(mutex_owned(&cpu_lock));

	if (online) {
		if ((spc->spc_flags & SPCF_OFFLINE) == 0)
			return 0;
		func = (xcfunc_t)cpu_xc_online;
	} else {
		if ((spc->spc_flags & SPCF_OFFLINE) != 0)
			return 0;
		nonline = 0;
		/*
		 * Ensure that at least one CPU within the processor set
		 * stays online. Revisit this later.
		 */
		for (CPU_INFO_FOREACH(cii, ci2)) {
			if ((ci2->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
				continue;
			if (ci2->ci_schedstate.spc_psid != spc->spc_psid)
				continue;
			nonline++;
		}
		if (nonline == 1)
			return EBUSY;
		func = (xcfunc_t)cpu_xc_offline;
	}

	where = xc_unicast(0, func, ci, NULL, ci);
	xc_wait(where);
	if (online) {
		KASSERT((spc->spc_flags & SPCF_OFFLINE) == 0);
		ncpuonline++;
	} else {
		if ((spc->spc_flags & SPCF_OFFLINE) == 0) {
			/* If it was not marked offline, then it is busy. */
			return EBUSY;
		}
		ncpuonline--;
	}

	spc->spc_lastmod = time_second;
	return 0;
}
#endif /* ifndef _RUMPKERNEL XXX */

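/*
 * cpu_setmodel: set the printable model name of the CPU, printf-style.
 * Returns the length of the formatted string.
 */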
int
cpu_setmodel(const char *fmt, ...)
{
	int len;
	va_list ap;

	va_start(ap, fmt);
	len = vsnprintf(cpu_model, sizeof(cpu_model), fmt, ap);
	va_end(ap);
	return len;
}

const char *
cpu_getmodel(void)
{
	return cpu_model;
}

#if defined(__HAVE_INTR_CONTROL) && !defined(_RUMPKERNEL) /* XXX */
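/*
 * cpu_xc_intr, cpu_xc_nointr: cross-call handlers that clear or set
 * SPCF_NOINTR on the target CPU, marking it as handling (or not
 * handling) device interrupts.
 */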
static void
cpu_xc_intr(struct cpu_info *ci, void *unused)
{
	struct schedstate_percpu *spc;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags &= ~SPCF_NOINTR;
	splx(s);
}

static void
cpu_xc_nointr(struct cpu_info *ci, void *unused)
{
	struct schedstate_percpu *spc;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags |= SPCF_NOINTR;
	splx(s);
}

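/*
 * cpu_setintr: enable or disable delivery of device interrupts to the
 * given CPU. Must be called with cpu_lock held; refuses to shield the
 * last CPU that is still handling interrupts.
 */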
int
cpu_setintr(struct cpu_info *ci, bool intr)
{
	struct schedstate_percpu *spc;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci2;
	uint64_t where;
	xcfunc_t func;
	int nintr;

	spc = &ci->ci_schedstate;

	KASSERT(mutex_owned(&cpu_lock));

	if (intr) {
		if ((spc->spc_flags & SPCF_NOINTR) == 0)
			return 0;
		func = (xcfunc_t)cpu_xc_intr;
	} else {
		if ((spc->spc_flags & SPCF_NOINTR) != 0)
			return 0;
		/*
		 * Ensure that at least one CPU within the system
		 * is handling device interrupts.
		 */
		nintr = 0;
		for (CPU_INFO_FOREACH(cii, ci2)) {
			if ((ci2->ci_schedstate.spc_flags & SPCF_NOINTR) != 0)
				continue;
			if (ci2 == ci)
				continue;
			nintr++;
		}
		if (nintr == 0)
			return EBUSY;
		func = (xcfunc_t)cpu_xc_nointr;
	}

	where = xc_unicast(0, func, ci, NULL, ci);
	xc_wait(where);
	if (intr) {
		KASSERT((spc->spc_flags & SPCF_NOINTR) == 0);
	} else if ((spc->spc_flags & SPCF_NOINTR) == 0) {
		/* If SPCF_NOINTR did not get set, then the CPU is busy. */
		return EBUSY;
	}

	/* Direct interrupts away from the CPU and record the change. */
	cpu_intr_redistribute();
	spc->spc_lastmod = time_second;
	return 0;
}
#else /* __HAVE_INTR_CONTROL */
int
cpu_setintr(struct cpu_info *ci, bool intr)
{

	return EOPNOTSUPP;
}

u_int
cpu_intr_count(struct cpu_info *ci)
{

	return 0;	/* 0 == "don't know" */
}
#endif /* __HAVE_INTR_CONTROL */

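/*
 * cpu_softintr_p: return true when called from a software interrupt LWP.
 */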
bool
cpu_softintr_p(void)
{

	return (curlwp->l_pflag & LP_INTR) != 0;
}

/*
 * Collect CPU topology information as each CPU is attached. This can be
 * called early during boot, so we need to be careful what we do.
 */
void
cpu_topology_set(struct cpu_info *ci, int package_id, int core_id, int smt_id)
{
	enum cpu_rel rel;

	cpu_topology_present = true;
	ci->ci_package_id = package_id;
	ci->ci_core_id = core_id;
	ci->ci_smt_id = smt_id;
	for (rel = 0; rel < __arraycount(ci->ci_sibling); rel++) {
		ci->ci_sibling[rel] = ci;
		ci->ci_nsibling[rel] = 1;
	}
}

/*
 * Link a CPU into the given circular list.
 */
static void
cpu_topology_link(struct cpu_info *ci, struct cpu_info *ci2, enum cpu_rel rel)
{
	struct cpu_info *ci3;

	/* Walk to the end of the existing circular list and append. */
	for (ci3 = ci2;; ci3 = ci3->ci_sibling[rel]) {
		ci3->ci_nsibling[rel]++;
		if (ci3->ci_sibling[rel] == ci2) {
			break;
		}
	}
	ci->ci_sibling[rel] = ci2;
	ci3->ci_sibling[rel] = ci;
	ci->ci_nsibling[rel] = ci3->ci_nsibling[rel];
}

/*
 * Find peer CPUs in other packages.
 */
static void
cpu_topology_peers(void)
{
	CPU_INFO_ITERATOR cii, cii2;
	struct cpu_info *ci, *ci2;

	for (CPU_INFO_FOREACH(cii, ci)) {
		if (ci->ci_nsibling[CPUREL_PEER] > 1) {
			/* Already linked. */
			continue;
		}
		for (CPU_INFO_FOREACH(cii2, ci2)) {
			if (ci != ci2 &&
			    ci->ci_package_id != ci2->ci_package_id &&
			    ci->ci_core_id == ci2->ci_core_id &&
			    ci->ci_smt_id == ci2->ci_smt_id) {
				cpu_topology_link(ci, ci2, CPUREL_PEER);
				break;
			}
		}
	}
}

/*
 * Print out the topology lists.
 */
static void
cpu_topology_print(void)
{
#ifdef DEBUG
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci, *ci2;
	const char *names[] = { "core", "package", "peer" };
	enum cpu_rel rel;
	int i;

	for (CPU_INFO_FOREACH(cii, ci)) {
		for (rel = 0; rel < __arraycount(ci->ci_sibling); rel++) {
			printf("%s has %dx %s siblings: ", cpu_name(ci),
			    ci->ci_nsibling[rel], names[rel]);
			ci2 = ci->ci_sibling[rel];
			i = 0;
			do {
				printf(" %s", cpu_name(ci2));
				ci2 = ci2->ci_sibling[rel];
			} while (++i < 64 && ci2 != ci->ci_sibling[rel]);
			if (i == 64) {
				printf(" GAVE UP");
			}
			printf("\n");
		}
	}
#endif /* DEBUG */
}

/*
 * Fake up topology info if we have none, or if what we got was bogus.
 * Don't override ci_package_id, etc., if cpu_topology_present is set.
 * MD code also uses these.
 */
static void
cpu_topology_fake(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	enum cpu_rel rel;

	for (CPU_INFO_FOREACH(cii, ci)) {
		for (rel = 0; rel < __arraycount(ci->ci_sibling); rel++) {
			ci->ci_sibling[rel] = ci;
			ci->ci_nsibling[rel] = 1;
		}
		if (!cpu_topology_present) {
			ci->ci_package_id = cpu_index(ci);
		}
	}
	cpu_topology_print();
}

/*
 * Fix up basic CPU topology info. Right now that means attach each CPU to
 * circular lists of its siblings in the same core, and in the same package.
 */
void
cpu_topology_init(void)
{
	CPU_INFO_ITERATOR cii, cii2;
	struct cpu_info *ci, *ci2;
	int ncore, npackage, npeer;
	bool symmetric;

	if (!cpu_topology_present) {
		cpu_topology_fake();
		return;
	}

	/* Find siblings in same core and package. */
	for (CPU_INFO_FOREACH(cii, ci)) {
		for (CPU_INFO_FOREACH(cii2, ci2)) {
			/* Avoid bad things happening. */
			if (ci2->ci_package_id == ci->ci_package_id &&
			    ci2->ci_core_id == ci->ci_core_id &&
			    ci2->ci_smt_id == ci->ci_smt_id &&
			    ci2 != ci) {
				printf("cpu_topology_init: info bogus, "
				    "faking it\n");
				cpu_topology_fake();
				return;
			}
			if (ci2 == ci ||
			    ci2->ci_package_id != ci->ci_package_id) {
				continue;
			}
			/* Find CPUs in the same core. */
			if (ci->ci_nsibling[CPUREL_CORE] == 1 &&
			    ci->ci_core_id == ci2->ci_core_id) {
				cpu_topology_link(ci, ci2, CPUREL_CORE);
			}
			/* Find CPUs in the same package. */
			if (ci->ci_nsibling[CPUREL_PACKAGE] == 1) {
				cpu_topology_link(ci, ci2, CPUREL_PACKAGE);
			}
			if (ci->ci_nsibling[CPUREL_CORE] > 1 &&
			    ci->ci_nsibling[CPUREL_PACKAGE] > 1) {
				break;
			}
		}
	}

	/* Find peers in other packages. */
	cpu_topology_peers();

	/* Determine whether the topology is bogus/symmetric. */
	npackage = curcpu()->ci_nsibling[CPUREL_PACKAGE];
	ncore = curcpu()->ci_nsibling[CPUREL_CORE];
	npeer = curcpu()->ci_nsibling[CPUREL_PEER];
	symmetric = true;
	for (CPU_INFO_FOREACH(cii, ci)) {
		if (npackage != ci->ci_nsibling[CPUREL_PACKAGE] ||
		    ncore != ci->ci_nsibling[CPUREL_CORE] ||
		    npeer != ci->ci_nsibling[CPUREL_PEER]) {
			symmetric = false;
		}
	}
	cpu_topology_print();
	if (symmetric == false) {
		printf("cpu_topology_init: not symmetric, faking it\n");
		cpu_topology_fake();
	}
}

#ifdef CPU_UCODE
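/*
 * cpu_ucode_load: fetch a microcode image via firmware(9) into the
 * softc, replacing any previously loaded blob.
 */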
int
cpu_ucode_load(struct cpu_ucode_softc *sc, const char *fwname)
{
	firmware_handle_t fwh;
	int error;

	if (sc->sc_blob != NULL) {
		firmware_free(sc->sc_blob, sc->sc_blobsize);
		sc->sc_blob = NULL;
		sc->sc_blobsize = 0;
	}

	error = cpu_ucode_md_open(&fwh, sc->loader_version, fwname);
	if (error != 0) {
#ifdef DEBUG
		printf("ucode: firmware_open(%s) failed: %i\n", fwname, error);
#endif
		goto err0;
	}

	sc->sc_blobsize = firmware_get_size(fwh);
	if (sc->sc_blobsize == 0) {
		error = EFTYPE;
		firmware_close(fwh);
		goto err0;
	}
	sc->sc_blob = firmware_malloc(sc->sc_blobsize);
	if (sc->sc_blob == NULL) {
		error = ENOMEM;
		firmware_close(fwh);
		goto err0;
	}

	error = firmware_read(fwh, 0, sc->sc_blob, sc->sc_blobsize);
	firmware_close(fwh);
	if (error != 0)
		goto err1;

	return 0;

err1:
	firmware_free(sc->sc_blob, sc->sc_blobsize);
	sc->sc_blob = NULL;
	sc->sc_blobsize = 0;
err0:
	return error;
}
#endif

/*
 * Adjust one count, for a counter that's NOT updated from interrupt
 * context. Hardly worth making an inline due to preemption stuff.
 */
void
cpu_count(enum cpu_count idx, int64_t delta)
{
	lwp_t *l = curlwp;
	KPREEMPT_DISABLE(l);
	l->l_cpu->ci_counts[idx] += delta;
	KPREEMPT_ENABLE(l);
}

/*
 * Fetch fresh sum total for all counts. Expensive - don't call often.
 */
void
cpu_count_sync_all(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	int64_t sum[CPU_COUNT_MAX], *ptr;
	enum cpu_count i;
	int s;

	KASSERT(sizeof(ci->ci_counts) == sizeof(cpu_counts));

	if (__predict_true(mp_online)) {
		memset(sum, 0, sizeof(sum));
		/*
		 * We want this to be reasonably quick, so that any value
		 * we get is not totally out of whack; to that end, don't
		 * let the current LWP get preempted.
		 */
		s = splvm();
		curcpu()->ci_counts[CPU_COUNT_SYNC_ALL]++;
		for (CPU_INFO_FOREACH(cii, ci)) {
			ptr = ci->ci_counts;
			for (i = 0; i < CPU_COUNT_MAX; i += 8) {
				sum[i+0] += ptr[i+0];
				sum[i+1] += ptr[i+1];
				sum[i+2] += ptr[i+2];
				sum[i+3] += ptr[i+3];
				sum[i+4] += ptr[i+4];
				sum[i+5] += ptr[i+5];
				sum[i+6] += ptr[i+6];
				sum[i+7] += ptr[i+7];
			}
			KASSERT(i == CPU_COUNT_MAX);
		}
		memcpy(cpu_counts, sum, sizeof(cpu_counts));
		splx(s);
	} else {
		memcpy(cpu_counts, curcpu()->ci_counts, sizeof(cpu_counts));
	}
}

/*
 * Fetch a fresh sum total for one single count. Expensive - don't call often.
 */
int64_t
cpu_count_sync(enum cpu_count count)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	int64_t sum;
	int s;

	if (__predict_true(mp_online)) {
		s = splvm();
		curcpu()->ci_counts[CPU_COUNT_SYNC_ONE]++;
		sum = 0;
		for (CPU_INFO_FOREACH(cii, ci)) {
			sum += ci->ci_counts[count];
		}
		splx(s);
	} else {
		/* XXX Early boot, iterator might not be available. */
		sum = curcpu()->ci_counts[count];
	}
	return cpu_counts[count] = sum;
}