/*	$NetBSD: kern_cpu.c,v 1.79 2019/12/02 23:22:43 ad Exp $	*/

/*-
 * Copyright (c) 2007, 2008, 2009, 2010, 2012, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c)2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_cpu.c,v 1.79 2019/12/02 23:22:43 ad Exp $");

#include "opt_cpu_ucode.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/idle.h>
#include <sys/sched.h>
#include <sys/intr.h>
#include <sys/conf.h>
#include <sys/cpu.h>
#include <sys/cpuio.h>
#include <sys/proc.h>
#include <sys/percpu.h>
#include <sys/kernel.h>
#include <sys/kauth.h>
#include <sys/xcall.h>
#include <sys/pool.h>
#include <sys/kmem.h>
#include <sys/select.h>
#include <sys/namei.h>
#include <sys/callout.h>
#include <sys/pcu.h>

#include <uvm/uvm_extern.h>

#include "ioconf.h"

/*
 * If the port has stated that cpu_data is the first thing in cpu_info,
 * verify that the claim is true.  This will prevent them from getting out
 * of sync.
 */
#ifdef __HAVE_CPU_DATA_FIRST
CTASSERT(offsetof(struct cpu_info, ci_data) == 0);
#else
CTASSERT(offsetof(struct cpu_info, ci_data) != 0);
#endif

static void	cpu_xc_online(struct cpu_info *, void *);
static void	cpu_xc_offline(struct cpu_info *, void *);

dev_type_ioctl(cpuctl_ioctl);

const struct cdevsw cpuctl_cdevsw = {
	.d_open = nullopen,
	.d_close = nullclose,
	.d_read = nullread,
	.d_write = nullwrite,
	.d_ioctl = cpuctl_ioctl,
	.d_stop = nullstop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};

kmutex_t	cpu_lock		__cacheline_aligned;
int		ncpu			__read_mostly;
int		ncpuonline		__read_mostly;
bool		mp_online		__read_mostly;
static bool	cpu_topology_present	__read_mostly;

/* An array of CPUs.  There are ncpu entries. */
struct cpu_info **cpu_infos		__read_mostly;

/* Note: set on mi_cpu_attach() and idle_loop(). */
kcpuset_t *	kcpuset_attached	__read_mostly	= NULL;
kcpuset_t *	kcpuset_running		__read_mostly	= NULL;

int (*compat_cpuctl_ioctl)(struct lwp *, u_long, void *) = (void *)enosys;

static char cpu_model[128];

/*
 * mi_cpu_init: early initialisation of MI CPU related structures.
 *
 * Note: may not block and memory allocator is not yet available.
 */
void
mi_cpu_init(void)
{

	mutex_init(&cpu_lock, MUTEX_DEFAULT, IPL_NONE);

	kcpuset_create(&kcpuset_attached, true);
	kcpuset_create(&kcpuset_running, true);
	kcpuset_set(kcpuset_running, 0);
}

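/*
 * mi_cpu_attach: attach the machine-independent state for a CPU: record
 * it in cpu_infos, create its idle LWP and initialise the per-CPU
 * facilities (percpu, soft interrupts, callouts, cross calls, pool
 * caches, ...).  Called by machine-dependent code once per CPU.
 */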
int
mi_cpu_attach(struct cpu_info *ci)
{
	int error;

	KASSERT(maxcpus > 0);

	ci->ci_index = ncpu;
	kcpuset_set(kcpuset_attached, cpu_index(ci));

	/*
	 * Create a convenience cpuset of just ourselves.
	 */
	kcpuset_create(&ci->ci_data.cpu_kcpuset, true);
	kcpuset_set(ci->ci_data.cpu_kcpuset, cpu_index(ci));

	TAILQ_INIT(&ci->ci_data.cpu_ld_locks);
	__cpu_simple_lock_init(&ci->ci_data.cpu_ld_lock);

	/* This is useful for, e.g., per-CPU evcnt counters. */
	snprintf(ci->ci_data.cpu_name, sizeof(ci->ci_data.cpu_name), "cpu%d",
	    cpu_index(ci));

	if (__predict_false(cpu_infos == NULL)) {
		size_t ci_bufsize = (maxcpus + 1) * sizeof(struct cpu_info *);
		cpu_infos = kmem_zalloc(ci_bufsize, KM_SLEEP);
	}
	cpu_infos[cpu_index(ci)] = ci;

	sched_cpuattach(ci);

	error = create_idle_lwp(ci);
	if (error != 0) {
		/* XXX revert sched_cpuattach */
		return error;
	}

	if (ci == curcpu())
		ci->ci_onproc = curlwp;
	else
		ci->ci_onproc = ci->ci_data.cpu_idlelwp;

	percpu_init_cpu(ci);
	softint_init(ci);
	callout_init_cpu(ci);
	xc_init_cpu(ci);
	pool_cache_cpu_init(ci);
	selsysinit(ci);
	cache_cpu_init(ci);
	TAILQ_INIT(&ci->ci_data.cpu_biodone);
	ncpu++;
	ncpuonline++;

	return 0;
}

void
cpuctlattach(int dummy __unused)
{

	KASSERT(cpu_infos != NULL);
}

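/*
 * cpuctl_ioctl: ioctl handler for the cpuctl device, used by cpuctl(8)
 * to query per-CPU state, set CPUs online/offline or interrupt-disabled,
 * and (with the CPU_UCODE option) manage CPU microcode.
 */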
int
cpuctl_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	CPU_INFO_ITERATOR cii;
	cpustate_t *cs;
	struct cpu_info *ci;
	int error, i;
	u_int id;

	error = 0;

	mutex_enter(&cpu_lock);
	switch (cmd) {
	case IOC_CPU_SETSTATE:
		cs = data;
		error = kauth_authorize_system(l->l_cred,
		    KAUTH_SYSTEM_CPU, KAUTH_REQ_SYSTEM_CPU_SETSTATE, cs, NULL,
		    NULL);
		if (error != 0)
			break;
		if (cs->cs_id >= maxcpus ||
		    (ci = cpu_lookup(cs->cs_id)) == NULL) {
			error = ESRCH;
			break;
		}
		cpu_setintr(ci, cs->cs_intr);
		error = cpu_setstate(ci, cs->cs_online);
		break;

	case IOC_CPU_GETSTATE:
		cs = data;
		id = cs->cs_id;
		memset(cs, 0, sizeof(*cs));
		cs->cs_id = id;
		if (cs->cs_id >= maxcpus ||
		    (ci = cpu_lookup(id)) == NULL) {
			error = ESRCH;
			break;
		}
		if ((ci->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
			cs->cs_online = false;
		else
			cs->cs_online = true;
		if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0)
			cs->cs_intr = false;
		else
			cs->cs_intr = true;
		cs->cs_lastmod = (int32_t)ci->ci_schedstate.spc_lastmod;
		cs->cs_lastmodhi = (int32_t)
		    (ci->ci_schedstate.spc_lastmod >> 32);
		cs->cs_intrcnt = cpu_intr_count(ci) + 1;
		cs->cs_hwid = ci->ci_cpuid;
		break;

	case IOC_CPU_MAPID:
		i = 0;
		for (CPU_INFO_FOREACH(cii, ci)) {
			if (i++ == *(int *)data)
				break;
		}
		if (ci == NULL)
			error = ESRCH;
		else
			*(int *)data = cpu_index(ci);
		break;

	case IOC_CPU_GETCOUNT:
		*(int *)data = ncpu;
		break;

#ifdef CPU_UCODE
	case IOC_CPU_UCODE_GET_VERSION:
		error = cpu_ucode_get_version((struct cpu_ucode_version *)data);
		break;

	case IOC_CPU_UCODE_APPLY:
		error = kauth_authorize_machdep(l->l_cred,
		    KAUTH_MACHDEP_CPU_UCODE_APPLY,
		    NULL, NULL, NULL, NULL);
		if (error != 0)
			break;
		error = cpu_ucode_apply((const struct cpu_ucode *)data);
		break;
#endif

	default:
		error = (*compat_cpuctl_ioctl)(l, cmd, data);
		break;
	}
	mutex_exit(&cpu_lock);

	return error;
}
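
/*
 * Example (illustrative sketch only; the device path and headers shown
 * are assumptions, not defined in this file): a userland program might
 * query the state of CPU 0 along these lines, much as cpuctl(8) does:
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/cpuio.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	cpustate_t cs;
 *	int fd = open("/dev/cpuctl", O_RDONLY);
 *	memset(&cs, 0, sizeof(cs));
 *	cs.cs_id = 0;
 *	if (fd != -1 && ioctl(fd, IOC_CPU_GETSTATE, &cs) == 0)
 *		printf("cpu0 online=%d intr=%d\n", cs.cs_online, cs.cs_intr);
 *
 * IOC_CPU_SETSTATE additionally requires kauth approval, as implemented
 * above; error handling is omitted for brevity.
 */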

struct cpu_info *
cpu_lookup(u_int idx)
{
	struct cpu_info *ci;

	/*
	 * cpu_infos is a NULL terminated array of MAXCPUS + 1 entries,
	 * so an index of MAXCPUS here is ok.  See mi_cpu_attach.
	 */
	KASSERT(idx <= maxcpus);

	if (__predict_false(cpu_infos == NULL)) {
		KASSERT(idx == 0);
		return curcpu();
	}

	ci = cpu_infos[idx];
	KASSERT(ci == NULL || cpu_index(ci) == idx);
	KASSERTMSG(idx < maxcpus || ci == NULL, "idx %d ci %p", idx, ci);

	return ci;
}

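/*
 * cpu_xc_offline: take a CPU offline.  Runs on the target CPU in the
 * context of a cross call issued by cpu_setstate().  Marks the CPU
 * offline, then migrates all non-bound, non-interrupt LWPs to another
 * online CPU; if an LWP's affinity set contains no online CPU, the
 * offline flag is cleared again so the caller sees the failure.
 */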
static void
cpu_xc_offline(struct cpu_info *ci, void *unused)
{
	struct schedstate_percpu *spc, *mspc = NULL;
	struct cpu_info *target_ci;
	struct lwp *l;
	CPU_INFO_ITERATOR cii;
	int s;

	/*
	 * Thread that made the cross call (separate context) holds
	 * cpu_lock on our behalf.
	 */
	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags |= SPCF_OFFLINE;
	splx(s);

	/* Take the first available CPU for the migration. */
	for (CPU_INFO_FOREACH(cii, target_ci)) {
		mspc = &target_ci->ci_schedstate;
		if ((mspc->spc_flags & SPCF_OFFLINE) == 0)
			break;
	}
	KASSERT(target_ci != NULL);

	/*
	 * Migrate all non-bound threads to the other CPU.  Note that this
	 * runs from the xcall thread, thus handling of LSONPROC is not needed.
	 */
	mutex_enter(proc_lock);
	LIST_FOREACH(l, &alllwp, l_list) {
		struct cpu_info *mci;

		lwp_lock(l);
		if (l->l_cpu != ci || (l->l_pflag & (LP_BOUND | LP_INTR))) {
			lwp_unlock(l);
			continue;
		}
		/* Regular case - no affinity. */
		if (l->l_affinity == NULL) {
			lwp_migrate(l, target_ci);
			continue;
		}
		/* Affinity is set, find an online CPU in the set. */
		for (CPU_INFO_FOREACH(cii, mci)) {
			mspc = &mci->ci_schedstate;
			if ((mspc->spc_flags & SPCF_OFFLINE) == 0 &&
			    kcpuset_isset(l->l_affinity, cpu_index(mci)))
				break;
		}
		if (mci == NULL) {
			lwp_unlock(l);
			mutex_exit(proc_lock);
			goto fail;
		}
		lwp_migrate(l, mci);
	}
	mutex_exit(proc_lock);

#if PCU_UNIT_COUNT > 0
	pcu_save_all_on_cpu();
#endif

#ifdef __HAVE_MD_CPU_OFFLINE
	cpu_offline_md();
#endif
	return;
fail:
	/* Just unset the SPCF_OFFLINE flag, caller will check */
	s = splsched();
	spc->spc_flags &= ~SPCF_OFFLINE;
	splx(s);
}

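/*
 * cpu_xc_online: bring a CPU back online by clearing SPCF_OFFLINE.
 * Runs on the target CPU in the context of a cross call issued by
 * cpu_setstate().
 */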
static void
cpu_xc_online(struct cpu_info *ci, void *unused)
{
	struct schedstate_percpu *spc;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags &= ~SPCF_OFFLINE;
	splx(s);
}

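/*
 * cpu_setstate: set the given CPU online (true) or offline (false) by
 * cross-calling cpu_xc_online()/cpu_xc_offline() on it.  The caller must
 * hold cpu_lock.  Refuses (EBUSY) to offline the last online CPU in a
 * processor set.
 *
 * Minimal usage sketch (taking a CPU offline), assuming "ci" was obtained
 * via cpu_lookup():
 *
 *	mutex_enter(&cpu_lock);
 *	error = cpu_setstate(ci, false);
 *	mutex_exit(&cpu_lock);
 */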
int
cpu_setstate(struct cpu_info *ci, bool online)
{
	struct schedstate_percpu *spc;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci2;
	uint64_t where;
	xcfunc_t func;
	int nonline;

	spc = &ci->ci_schedstate;

	KASSERT(mutex_owned(&cpu_lock));

	if (online) {
		if ((spc->spc_flags & SPCF_OFFLINE) == 0)
			return 0;
		func = (xcfunc_t)cpu_xc_online;
	} else {
		if ((spc->spc_flags & SPCF_OFFLINE) != 0)
			return 0;
		nonline = 0;
		/*
		 * Ensure that at least one CPU within the processor set
		 * stays online.  Revisit this later.
		 */
		for (CPU_INFO_FOREACH(cii, ci2)) {
			if ((ci2->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
				continue;
			if (ci2->ci_schedstate.spc_psid != spc->spc_psid)
				continue;
			nonline++;
		}
		if (nonline == 1)
			return EBUSY;
		func = (xcfunc_t)cpu_xc_offline;
	}

	where = xc_unicast(0, func, ci, NULL, ci);
	xc_wait(where);
	if (online) {
		KASSERT((spc->spc_flags & SPCF_OFFLINE) == 0);
		ncpuonline++;
	} else {
		if ((spc->spc_flags & SPCF_OFFLINE) == 0) {
			/* If it was not set offline, then it is busy. */
			return EBUSY;
		}
		ncpuonline--;
	}

	spc->spc_lastmod = time_second;
	return 0;
}

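/*
 * cpu_setmodel: record the CPU model string, printf-style, into a static
 * buffer; cpu_getmodel() returns that buffer.  Machine-dependent code
 * might, for illustration only (the variables shown are hypothetical), do:
 *
 *	cpu_setmodel("%s %s", vendor_string, brand_string);
 */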
int
cpu_setmodel(const char *fmt, ...)
{
	int len;
	va_list ap;

	va_start(ap, fmt);
	len = vsnprintf(cpu_model, sizeof(cpu_model), fmt, ap);
	va_end(ap);
	return len;
}

const char *
cpu_getmodel(void)
{
	return cpu_model;
}

#ifdef __HAVE_INTR_CONTROL
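/*
 * cpu_xc_intr, cpu_xc_nointr: cross-call handlers run on the target CPU
 * to clear or set SPCF_NOINTR, marking whether that CPU should receive
 * device interrupts.
 */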
static void
cpu_xc_intr(struct cpu_info *ci, void *unused)
{
	struct schedstate_percpu *spc;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags &= ~SPCF_NOINTR;
	splx(s);
}

static void
cpu_xc_nointr(struct cpu_info *ci, void *unused)
{
	struct schedstate_percpu *spc;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags |= SPCF_NOINTR;
	splx(s);
}

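/*
 * cpu_setintr: allow (true) or disallow (false) device interrupt delivery
 * to the given CPU.  The caller must hold cpu_lock.  Returns EBUSY rather
 * than shield the last CPU still handling device interrupts.  Only ports
 * defining __HAVE_INTR_CONTROL support this; others return EOPNOTSUPP
 * from the stub below.
 */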
int
cpu_setintr(struct cpu_info *ci, bool intr)
{
	struct schedstate_percpu *spc;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci2;
	uint64_t where;
	xcfunc_t func;
	int nintr;

	spc = &ci->ci_schedstate;

	KASSERT(mutex_owned(&cpu_lock));

	if (intr) {
		if ((spc->spc_flags & SPCF_NOINTR) == 0)
			return 0;
		func = (xcfunc_t)cpu_xc_intr;
	} else {
		if ((spc->spc_flags & SPCF_NOINTR) != 0)
			return 0;
		/*
		 * Ensure that at least one CPU within the system
		 * is handling device interrupts.
		 */
		nintr = 0;
		for (CPU_INFO_FOREACH(cii, ci2)) {
			if ((ci2->ci_schedstate.spc_flags & SPCF_NOINTR) != 0)
				continue;
			if (ci2 == ci)
				continue;
			nintr++;
		}
		if (nintr == 0)
			return EBUSY;
		func = (xcfunc_t)cpu_xc_nointr;
	}

	where = xc_unicast(0, func, ci, NULL, ci);
	xc_wait(where);
	if (intr) {
		KASSERT((spc->spc_flags & SPCF_NOINTR) == 0);
	} else if ((spc->spc_flags & SPCF_NOINTR) == 0) {
		/* If SPCF_NOINTR was not set, then the CPU is busy. */
		return EBUSY;
	}

	/* Direct interrupts away from the CPU and record the change. */
	cpu_intr_redistribute();
	spc->spc_lastmod = time_second;
	return 0;
}
#else	/* __HAVE_INTR_CONTROL */
int
cpu_setintr(struct cpu_info *ci, bool intr)
{

	return EOPNOTSUPP;
}

u_int
cpu_intr_count(struct cpu_info *ci)
{

	return 0;	/* 0 == "don't know" */
}
#endif	/* __HAVE_INTR_CONTROL */

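/*
 * cpu_softintr_p: return true if the calling LWP is a (soft) interrupt LWP.
 */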
bool
cpu_softintr_p(void)
{

	return (curlwp->l_pflag & LP_INTR) != 0;
}

/*
 * Collect CPU topology information as each CPU is attached.  This can be
 * called early during boot, so we need to be careful what we do.
 */
void
cpu_topology_set(struct cpu_info *ci, int package_id, int core_id, int smt_id)
{

	cpu_topology_present = true;
	ci->ci_package_id = package_id;
	ci->ci_core_id = core_id;
	ci->ci_smt_id = smt_id;
	ci->ci_package_cpus = ci;
	ci->ci_npackage_cpus = 1;
	ci->ci_core_cpus = ci;
	ci->ci_ncore_cpus = 1;
}
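
/*
 * For illustration only: machine-dependent attach code is expected to call
 * cpu_topology_set() once per CPU with the identifiers it has probed
 * (the variables shown are hypothetical MD values):
 *
 *	cpu_topology_set(ci, package_id, core_id, smt_id);
 *
 * cpu_topology_init() later links the siblings into circular lists, or
 * falls back to cpu_topology_fake() if no topology information arrived.
 */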

/*
 * Fake up topology info if we have none, or if what we got was bogus.
 */
static void
cpu_topology_fake(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		ci->ci_package_id = cpu_index(ci);
		ci->ci_core_id = 0;
		ci->ci_smt_id = 0;
		ci->ci_ncore_cpus = 1;
		ci->ci_core_cpus = ci;
		ci->ci_package_cpus = ci;
		ci->ci_npackage_cpus = 1;
	}
}

/*
 * Fix up basic CPU topology info.  Right now that means attach each CPU to
 * circular lists of its siblings in the same core, and in the same package.
 */
void
cpu_topology_init(void)
{
	CPU_INFO_ITERATOR cii, cii2;
	struct cpu_info *ci, *ci2, *ci3;

	if (!cpu_topology_present) {
		cpu_topology_fake();
		return;
	}

	for (CPU_INFO_FOREACH(cii, ci)) {
		ci->ci_ncore_cpus = 1;
		ci->ci_core_cpus = ci;
		ci->ci_package_cpus = ci;
		ci->ci_npackage_cpus = 1;
	}

	for (CPU_INFO_FOREACH(cii, ci)) {
		for (CPU_INFO_FOREACH(cii2, ci2)) {
			/* Avoid bad things happening. */
			if (ci2->ci_package_id == ci->ci_package_id &&
			    ci2->ci_core_id == ci->ci_core_id &&
			    ci2->ci_smt_id == ci->ci_smt_id &&
			    ci2 != ci) {
				printf("cpu_topology_init: info bogus, "
				    "faking it\n");
				cpu_topology_fake();
				return;
			}
			if (ci2 == ci ||
			    ci2->ci_package_id != ci->ci_package_id) {
				continue;
			}
			/*
			 * Find CPUs in the same core.  Walk to the end of
			 * the existing circular list and append.
			 */
			if (ci->ci_ncore_cpus == 1 &&
			    ci->ci_core_id == ci2->ci_core_id) {
				for (ci3 = ci2;; ci3 = ci3->ci_core_cpus) {
					ci3->ci_ncore_cpus++;
					if (ci3->ci_core_cpus == ci2) {
						break;
					}
				}
				ci->ci_core_cpus = ci2;
				ci3->ci_core_cpus = ci;
				ci->ci_ncore_cpus = ci3->ci_ncore_cpus;
			}
			/* Same, but for package. */
			if (ci->ci_npackage_cpus == 1) {
				for (ci3 = ci2;; ci3 = ci3->ci_package_cpus) {
					ci3->ci_npackage_cpus++;
					if (ci3->ci_package_cpus == ci2) {
						break;
					}
				}
				ci->ci_package_cpus = ci2;
				ci3->ci_package_cpus = ci;
				ci->ci_npackage_cpus = ci3->ci_npackage_cpus;
			}
			if (ci->ci_ncore_cpus > 1 && ci->ci_npackage_cpus > 1) {
				break;
			}
		}
	}
}

#ifdef CPU_UCODE
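/*
 * cpu_ucode_load: fetch a microcode image through the firmload(9)
 * interface and attach it to the given softc, freeing any previously
 * loaded blob first.
 */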
int
cpu_ucode_load(struct cpu_ucode_softc *sc, const char *fwname)
{
	firmware_handle_t fwh;
	int error;

	if (sc->sc_blob != NULL) {
		firmware_free(sc->sc_blob, sc->sc_blobsize);
		sc->sc_blob = NULL;
		sc->sc_blobsize = 0;
	}

	error = cpu_ucode_md_open(&fwh, sc->loader_version, fwname);
	if (error != 0) {
#ifdef DEBUG
		printf("ucode: firmware_open(%s) failed: %i\n", fwname, error);
#endif
		goto err0;
	}

	sc->sc_blobsize = firmware_get_size(fwh);
	if (sc->sc_blobsize == 0) {
		error = EFTYPE;
		firmware_close(fwh);
		goto err0;
	}
	sc->sc_blob = firmware_malloc(sc->sc_blobsize);
	if (sc->sc_blob == NULL) {
		error = ENOMEM;
		firmware_close(fwh);
		goto err0;
	}

	error = firmware_read(fwh, 0, sc->sc_blob, sc->sc_blobsize);
	firmware_close(fwh);
	if (error != 0)
		goto err1;

	return 0;

err1:
	firmware_free(sc->sc_blob, sc->sc_blobsize);
	sc->sc_blob = NULL;
	sc->sc_blobsize = 0;
err0:
	return error;
}
#endif
