/*	$NetBSD: kern_cpu.c,v 1.58 2012/09/01 00:24:43 matt Exp $	*/

/*-
 * Copyright (c) 2007, 2008, 2009, 2010, 2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c)2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_cpu.c,v 1.58 2012/09/01 00:24:43 matt Exp $");

#include "opt_cpu_ucode.h"
#include "opt_compat_netbsd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/idle.h>
#include <sys/sched.h>
#include <sys/intr.h>
#include <sys/conf.h>
#include <sys/cpu.h>
#include <sys/cpuio.h>
#include <sys/proc.h>
#include <sys/percpu.h>
#include <sys/kernel.h>
#include <sys/kauth.h>
#include <sys/xcall.h>
#include <sys/pool.h>
#include <sys/kmem.h>
#include <sys/select.h>
#include <sys/namei.h>
#include <sys/callout.h>

#include <uvm/uvm_extern.h>

/*
 * If the port has stated that cpu_data is the first thing in cpu_info,
 * verify that the claim is true.  This will prevent them from getting out
 * of sync.
 */
#ifdef __HAVE_CPU_DATA_FIRST
CTASSERT(offsetof(struct cpu_info, ci_data) == 0);
#else
CTASSERT(offsetof(struct cpu_info, ci_data) != 0);
#endif

void	cpuctlattach(int);

static void	cpu_xc_online(struct cpu_info *);
static void	cpu_xc_offline(struct cpu_info *);

dev_type_ioctl(cpuctl_ioctl);

const struct cdevsw cpuctl_cdevsw = {
	nullopen, nullclose, nullread, nullwrite, cpuctl_ioctl,
	nullstop, notty, nopoll, nommap, nokqfilter,
	D_OTHER | D_MPSAFE
};

kmutex_t	cpu_lock		__cacheline_aligned;
int		ncpu			__read_mostly;
int		ncpuonline		__read_mostly;
bool		mp_online		__read_mostly;

/* Note: set in mi_cpu_attach() and idle_loop(). */
kcpuset_t *	kcpuset_attached	__read_mostly = NULL;
kcpuset_t *	kcpuset_running		__read_mostly = NULL;

struct cpuqueue	cpu_queue		__cacheline_aligned
    = CIRCLEQ_HEAD_INITIALIZER(cpu_queue);

static struct cpu_info **cpu_infos	__read_mostly;

/*
 * mi_cpu_init: early initialisation of MI CPU related structures.
 *
 * Note: may not block and memory allocator is not yet available.
 */
void
mi_cpu_init(void)
{

	mutex_init(&cpu_lock, MUTEX_DEFAULT, IPL_NONE);

	kcpuset_create(&kcpuset_attached, true);
	kcpuset_create(&kcpuset_running, true);
	kcpuset_set(kcpuset_running, 0);
}

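/*
 * mi_cpu_attach: perform MI attachment of a CPU.  Registers the CPU
 * with the scheduler and the per-CPU subsystems, creates its idle LWP
 * and records it in the global CPU list and the cpu_infos[] array.
 */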
int
mi_cpu_attach(struct cpu_info *ci)
{
	int error;

	KASSERT(maxcpus > 0);

	ci->ci_index = ncpu;
	kcpuset_set(kcpuset_attached, cpu_index(ci));

	/*
	 * Create a convenience cpuset of just ourselves.
	 */
	kcpuset_create(&ci->ci_data.cpu_kcpuset, true);
	kcpuset_set(ci->ci_data.cpu_kcpuset, cpu_index(ci));

	CIRCLEQ_INSERT_TAIL(&cpu_queue, ci, ci_data.cpu_qchain);
	TAILQ_INIT(&ci->ci_data.cpu_ld_locks);
	__cpu_simple_lock_init(&ci->ci_data.cpu_ld_lock);

	/* This is useful for, e.g., per-CPU evcnt. */
	snprintf(ci->ci_data.cpu_name, sizeof(ci->ci_data.cpu_name), "cpu%d",
	    cpu_index(ci));

	if (__predict_false(cpu_infos == NULL)) {
		cpu_infos =
		    kmem_zalloc(sizeof(cpu_infos[0]) * maxcpus, KM_SLEEP);
	}
	cpu_infos[cpu_index(ci)] = ci;

	sched_cpuattach(ci);

	error = create_idle_lwp(ci);
	if (error != 0) {
		/* XXX revert sched_cpuattach */
		return error;
	}

	if (ci == curcpu())
		ci->ci_data.cpu_onproc = curlwp;
	else
		ci->ci_data.cpu_onproc = ci->ci_data.cpu_idlelwp;

	percpu_init_cpu(ci);
	softint_init(ci);
	callout_init_cpu(ci);
	xc_init_cpu(ci);
	pool_cache_cpu_init(ci);
	selsysinit(ci);
	cache_cpu_init(ci);
	TAILQ_INIT(&ci->ci_data.cpu_biodone);
	ncpu++;
	ncpuonline++;

	return 0;
}

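/*
 * cpuctlattach: attach routine for the cpuctl device.  By the time this
 * runs, mi_cpu_attach() must already have allocated cpu_infos.
 */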
void
cpuctlattach(int dummy)
{

	KASSERT(cpu_infos != NULL);
}

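/*
 * cpuctl_ioctl: ioctl handler for the cpuctl device.  All operations
 * are serialised by cpu_lock; state changes are authorised via kauth(9)
 * before being applied.
 */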
int
cpuctl_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	CPU_INFO_ITERATOR cii;
	cpustate_t *cs;
	struct cpu_info *ci;
	int error, i;
	u_int id;

	error = 0;

	mutex_enter(&cpu_lock);
	switch (cmd) {
	case IOC_CPU_SETSTATE:
		cs = data;
		error = kauth_authorize_system(l->l_cred,
		    KAUTH_SYSTEM_CPU, KAUTH_REQ_SYSTEM_CPU_SETSTATE, cs, NULL,
		    NULL);
		if (error != 0)
			break;
		if (cs->cs_id >= maxcpus ||
		    (ci = cpu_lookup(cs->cs_id)) == NULL) {
			error = ESRCH;
			break;
		}
		cpu_setintr(ci, cs->cs_intr);
		error = cpu_setstate(ci, cs->cs_online);
		break;

	case IOC_CPU_GETSTATE:
		cs = data;
		id = cs->cs_id;
		memset(cs, 0, sizeof(*cs));
		cs->cs_id = id;
		if (cs->cs_id >= maxcpus ||
		    (ci = cpu_lookup(id)) == NULL) {
			error = ESRCH;
			break;
		}
		if ((ci->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
			cs->cs_online = false;
		else
			cs->cs_online = true;
		if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0)
			cs->cs_intr = false;
		else
			cs->cs_intr = true;
		cs->cs_lastmod = (int32_t)ci->ci_schedstate.spc_lastmod;
		cs->cs_lastmodhi = (int32_t)
		    (ci->ci_schedstate.spc_lastmod >> 32);
		cs->cs_intrcnt = cpu_intr_count(ci) + 1;
		cs->cs_hwid = ci->ci_cpuid;
		break;

	case IOC_CPU_MAPID:
		i = 0;
		for (CPU_INFO_FOREACH(cii, ci)) {
			if (i++ == *(int *)data)
				break;
		}
		if (ci == NULL)
			error = ESRCH;
		else
			*(int *)data = cpu_index(ci);
		break;

	case IOC_CPU_GETCOUNT:
		*(int *)data = ncpu;
		break;

#ifdef CPU_UCODE
	case IOC_CPU_UCODE_GET_VERSION:
		error = cpu_ucode_get_version((struct cpu_ucode_version *)data);
		break;

	/* XXX ifdef COMPAT */
	case OIOC_CPU_UCODE_GET_VERSION:
		error = compat6_cpu_ucode_get_version(
		    (struct compat6_cpu_ucode *)data);
		break;

	case IOC_CPU_UCODE_APPLY:
		error = kauth_authorize_machdep(l->l_cred,
		    KAUTH_MACHDEP_CPU_UCODE_APPLY,
		    NULL, NULL, NULL, NULL);
		if (error != 0)
			break;
		error = cpu_ucode_apply((const struct cpu_ucode *)data);
		break;

	/* XXX ifdef COMPAT */
	case OIOC_CPU_UCODE_APPLY:
		error = kauth_authorize_machdep(l->l_cred,
		    KAUTH_MACHDEP_CPU_UCODE_APPLY,
		    NULL, NULL, NULL, NULL);
		if (error != 0)
			break;
		error = compat6_cpu_ucode_apply(
		    (const struct compat6_cpu_ucode *)data);
		break;
#endif

	default:
		error = ENOTTY;
		break;
	}
	mutex_exit(&cpu_lock);

	return error;
}

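/*
 * cpu_lookup: look up a cpu_info structure by CPU index.  Returns NULL
 * if no CPU with the given index has attached.  Before cpu_infos[] is
 * allocated, only the boot CPU (index 0) can be looked up.
 */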
struct cpu_info *
cpu_lookup(u_int idx)
{
	struct cpu_info *ci;

	KASSERT(idx < maxcpus);

	if (__predict_false(cpu_infos == NULL)) {
		KASSERT(idx == 0);
		return curcpu();
	}

	ci = cpu_infos[idx];
	KASSERT(ci == NULL || cpu_index(ci) == idx);

	return ci;
}

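/*
 * cpu_xc_offline: take a CPU offline.  Runs on the target CPU via the
 * cross-call (xcall) thread: marks the CPU offline and migrates all
 * migratable LWPs off it.  On failure, clears SPCF_OFFLINE again so
 * that the caller can detect the unfinished transition.
 */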
static void
cpu_xc_offline(struct cpu_info *ci)
{
	struct schedstate_percpu *spc, *mspc = NULL;
	struct cpu_info *target_ci;
	struct lwp *l;
	CPU_INFO_ITERATOR cii;
	int s;

	/*
	 * Thread that made the cross call (separate context) holds
	 * cpu_lock on our behalf.
	 */
	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags |= SPCF_OFFLINE;
	splx(s);

	/* Take the first available CPU for the migration. */
	for (CPU_INFO_FOREACH(cii, target_ci)) {
		mspc = &target_ci->ci_schedstate;
		if ((mspc->spc_flags & SPCF_OFFLINE) == 0)
			break;
	}
	KASSERT(target_ci != NULL);

	/*
	 * Migrate all non-bound threads to the other CPU.  Note that this
	 * runs from the xcall thread, thus handling of LSONPROC is not needed.
	 */
	mutex_enter(proc_lock);
	LIST_FOREACH(l, &alllwp, l_list) {
		struct cpu_info *mci;

		lwp_lock(l);
		if (l->l_cpu != ci || (l->l_pflag & (LP_BOUND | LP_INTR))) {
			lwp_unlock(l);
			continue;
		}
		/* Regular case - no affinity. */
		if (l->l_affinity == NULL) {
			lwp_migrate(l, target_ci);
			continue;
		}
		/* Affinity is set, find an online CPU in the set. */
		for (CPU_INFO_FOREACH(cii, mci)) {
			mspc = &mci->ci_schedstate;
			if ((mspc->spc_flags & SPCF_OFFLINE) == 0 &&
			    kcpuset_isset(l->l_affinity, cpu_index(mci)))
				break;
		}
		if (mci == NULL) {
			lwp_unlock(l);
			mutex_exit(proc_lock);
			goto fail;
		}
		lwp_migrate(l, mci);
	}
	mutex_exit(proc_lock);

#ifdef __HAVE_MD_CPU_OFFLINE
	cpu_offline_md();
#endif
	return;
fail:
	/* Just unset the SPCF_OFFLINE flag, caller will check. */
	s = splsched();
	spc->spc_flags &= ~SPCF_OFFLINE;
	splx(s);
}

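/*
 * cpu_xc_online: bring a CPU back online.  Runs on the target CPU via
 * the cross-call thread and simply clears the SPCF_OFFLINE flag.
 */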
static void
cpu_xc_online(struct cpu_info *ci)
{
	struct schedstate_percpu *spc;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags &= ~SPCF_OFFLINE;
	splx(s);
}

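/*
 * cpu_setstate: set a CPU online or offline.  Must be called with
 * cpu_lock held.  Refuses to offline the last online CPU within a
 * processor set, and returns EBUSY if the transition did not complete.
 */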
int
cpu_setstate(struct cpu_info *ci, bool online)
{
	struct schedstate_percpu *spc;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci2;
	uint64_t where;
	xcfunc_t func;
	int nonline;

	spc = &ci->ci_schedstate;

	KASSERT(mutex_owned(&cpu_lock));

	if (online) {
		if ((spc->spc_flags & SPCF_OFFLINE) == 0)
			return 0;
		func = (xcfunc_t)cpu_xc_online;
		ncpuonline++;
	} else {
		if ((spc->spc_flags & SPCF_OFFLINE) != 0)
			return 0;
		nonline = 0;
		/*
		 * Ensure that at least one CPU within the processor set
		 * stays online.  Revisit this later.
		 */
		for (CPU_INFO_FOREACH(cii, ci2)) {
			if ((ci2->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
				continue;
			if (ci2->ci_schedstate.spc_psid != spc->spc_psid)
				continue;
			nonline++;
		}
		if (nonline == 1)
			return EBUSY;
		func = (xcfunc_t)cpu_xc_offline;
		ncpuonline--;
	}

	where = xc_unicast(0, func, ci, NULL, ci);
	xc_wait(where);
	if (online) {
		KASSERT((spc->spc_flags & SPCF_OFFLINE) == 0);
	} else if ((spc->spc_flags & SPCF_OFFLINE) == 0) {
		/* If the CPU was not set offline, then it is busy. */
		return EBUSY;
	}

	spc->spc_lastmod = time_second;
	return 0;
}

#ifdef __HAVE_INTR_CONTROL
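/*
 * cpu_xc_intr: re-enable device interrupt handling on a CPU.  Runs on
 * the target CPU via the cross-call thread and clears SPCF_NOINTR.
 */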
static void
cpu_xc_intr(struct cpu_info *ci)
{
	struct schedstate_percpu *spc;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags &= ~SPCF_NOINTR;
	splx(s);
}

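/*
 * cpu_xc_nointr: shield a CPU from device interrupts.  Runs on the
 * target CPU via the cross-call thread and sets SPCF_NOINTR.
 */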
static void
cpu_xc_nointr(struct cpu_info *ci)
{
	struct schedstate_percpu *spc;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags |= SPCF_NOINTR;
	splx(s);
}

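/*
 * cpu_setintr: enable or disable device interrupt handling on a CPU.
 * Must be called with cpu_lock held.  Refuses to shield the last CPU
 * still handling device interrupts.
 */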
int
cpu_setintr(struct cpu_info *ci, bool intr)
{
	struct schedstate_percpu *spc;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci2;
	uint64_t where;
	xcfunc_t func;
	int nintr;

	spc = &ci->ci_schedstate;

	KASSERT(mutex_owned(&cpu_lock));

	if (intr) {
		if ((spc->spc_flags & SPCF_NOINTR) == 0)
			return 0;
		func = (xcfunc_t)cpu_xc_intr;
	} else {
		if ((spc->spc_flags & SPCF_NOINTR) != 0)
			return 0;
		/*
		 * Ensure that at least one CPU within the system
		 * is handling device interrupts.
		 */
		nintr = 0;
		for (CPU_INFO_FOREACH(cii, ci2)) {
			if ((ci2->ci_schedstate.spc_flags & SPCF_NOINTR) != 0)
				continue;
			if (ci2 == ci)
				continue;
			nintr++;
		}
		if (nintr == 0)
			return EBUSY;
		func = (xcfunc_t)cpu_xc_nointr;
	}

	where = xc_unicast(0, func, ci, NULL, ci);
	xc_wait(where);
	if (intr) {
		KASSERT((spc->spc_flags & SPCF_NOINTR) == 0);
	} else if ((spc->spc_flags & SPCF_NOINTR) == 0) {
		/* If interrupts were not disabled, then the CPU is busy. */
		return EBUSY;
	}

	/* Direct interrupts away from the CPU and record the change. */
	cpu_intr_redistribute();
	spc->spc_lastmod = time_second;
	return 0;
}
#else	/* __HAVE_INTR_CONTROL */
int
cpu_setintr(struct cpu_info *ci, bool intr)
{

	return EOPNOTSUPP;
}

u_int
cpu_intr_count(struct cpu_info *ci)
{

	return 0;		/* 0 == "don't know" */
}
#endif	/* __HAVE_INTR_CONTROL */

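/*
 * cpu_softintr_p: return true if the current LWP is a soft interrupt
 * LWP (has LP_INTR set).
 */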
bool
cpu_softintr_p(void)
{

	return (curlwp->l_pflag & LP_INTR) != 0;
}

#ifdef CPU_UCODE
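/*
 * cpu_ucode_load: load a microcode image via firmware(9), replacing any
 * previously loaded blob in the softc.
 */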
int
cpu_ucode_load(struct cpu_ucode_softc *sc, const char *fwname)
{
	firmware_handle_t fwh;
	int error;

	if (sc->sc_blob != NULL) {
		firmware_free(sc->sc_blob, 0);
		sc->sc_blob = NULL;
		sc->sc_blobsize = 0;
	}

	error = cpu_ucode_md_open(&fwh, sc->loader_version, fwname);
	if (error != 0) {
		aprint_error("ucode: firmware_open failed: %i\n", error);
		goto err0;
	}

	sc->sc_blobsize = firmware_get_size(fwh);
	sc->sc_blob = firmware_malloc(sc->sc_blobsize);
	if (sc->sc_blob == NULL) {
		error = ENOMEM;
		firmware_close(fwh);
		goto err0;
	}

	error = firmware_read(fwh, 0, sc->sc_blob, sc->sc_blobsize);
	firmware_close(fwh);
	if (error != 0)
		goto err1;

	return 0;

err1:
	firmware_free(sc->sc_blob, 0);
	sc->sc_blob = NULL;
	sc->sc_blobsize = 0;
err0:
	return error;
}
#endif