/*	$NetBSD: kern_cpu.c,v 1.98 2025/01/17 04:11:33 mrg Exp $	*/

/*-
 * Copyright (c) 2007, 2008, 2009, 2010, 2012, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c)2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * CPU related routines not shared with rump.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_cpu.c,v 1.98 2025/01/17 04:11:33 mrg Exp $");

#ifdef _KERNEL_OPT
#include "opt_cpu_ucode.h"
#include "opt_heartbeat.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/idle.h>
#include <sys/sched.h>
#include <sys/intr.h>
#include <sys/conf.h>
#include <sys/cpu.h>
#include <sys/cpuio.h>
#include <sys/proc.h>
#include <sys/percpu.h>
#include <sys/kernel.h>
#include <sys/kauth.h>
#include <sys/xcall.h>
#include <sys/pool.h>
#include <sys/kmem.h>
#include <sys/select.h>
#include <sys/namei.h>
#include <sys/callout.h>
#include <sys/pcu.h>
#include <sys/heartbeat.h>

#include <uvm/uvm_extern.h>

#include "ioconf.h"

/*
 * If the port has stated that cpu_data is the first thing in cpu_info,
 * verify that the claim is true.  This will prevent them from getting out
 * of sync.
 */
#ifdef __HAVE_CPU_DATA_FIRST
CTASSERT(offsetof(struct cpu_info, ci_data) == 0);
#else
CTASSERT(offsetof(struct cpu_info, ci_data) != 0);
#endif

int (*compat_cpuctl_ioctl)(struct lwp *, u_long, void *) = (void *)enosys;

static void	cpu_xc_online(struct cpu_info *, void *);
static void	cpu_xc_offline(struct cpu_info *, void *);

dev_type_ioctl(cpuctl_ioctl);

const struct cdevsw cpuctl_cdevsw = {
	.d_open = nullopen,
	.d_close = nullclose,
	.d_read = nullread,
	.d_write = nullwrite,
	.d_ioctl = cpuctl_ioctl,
	.d_stop = nullstop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};

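/*
 * mi_cpu_attach: machine-independent CPU attachment.  Record the CPU in
 * cpu_infos[], initialize its per-CPU data (softints, callouts, xcalls,
 * pool caches, ...) and create its idle LWP.  Called by machine-dependent
 * attachment code for each CPU, including the boot CPU.
 */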
int
mi_cpu_attach(struct cpu_info *ci)
{
	int error;

	KASSERT(maxcpus > 0);

	if ((ci->ci_index = ncpu) >= maxcpus)
		panic("Too many CPUs.  Increase MAXCPUS?");
	kcpuset_set(kcpuset_attached, cpu_index(ci));

	/*
	 * Create a convenience cpuset of just ourselves.
	 */
	kcpuset_create(&ci->ci_kcpuset, true);
	kcpuset_set(ci->ci_kcpuset, cpu_index(ci));

	TAILQ_INIT(&ci->ci_data.cpu_ld_locks);
	__cpu_simple_lock_init(&ci->ci_data.cpu_ld_lock);

	/* This is useful for e.g. per-cpu evcnt */
	snprintf(ci->ci_data.cpu_name, sizeof(ci->ci_data.cpu_name), "cpu%d",
	    cpu_index(ci));

	if (__predict_false(cpu_infos == NULL)) {
		size_t ci_bufsize = (maxcpus + 1) * sizeof(struct cpu_info *);
		cpu_infos = kmem_zalloc(ci_bufsize, KM_SLEEP);
	}
	cpu_infos[cpu_index(ci)] = ci;

	sched_cpuattach(ci);

	error = create_idle_lwp(ci);
	if (error != 0) {
		/* XXX revert sched_cpuattach */
		return error;
	}

	if (ci == curcpu())
		ci->ci_onproc = curlwp;
	else
		ci->ci_onproc = ci->ci_data.cpu_idlelwp;

	percpu_init_cpu(ci);
	softint_init(ci);
	callout_init_cpu(ci);
	xc_init_cpu(ci);
	pool_cache_cpu_init(ci);
	selsysinit(ci);
	cache_cpu_init(ci);
	TAILQ_INIT(&ci->ci_data.cpu_biodone);
	ncpu++;
	ncpuonline++;

	return 0;
}

void
cpuctlattach(int dummy __unused)
{

	KASSERT(cpu_infos != NULL);
}

int
cpuctl_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	CPU_INFO_ITERATOR cii;
	cpustate_t *cs;
	struct cpu_info *ci;
	int error, i;
	u_int id;

	error = 0;

	mutex_enter(&cpu_lock);
	switch (cmd) {
	case IOC_CPU_SETSTATE:
		cs = data;
		error = kauth_authorize_system(l->l_cred,
		    KAUTH_SYSTEM_CPU, KAUTH_REQ_SYSTEM_CPU_SETSTATE, cs, NULL,
		    NULL);
		if (error != 0)
			break;
		if (cs->cs_id >= maxcpus ||
		    (ci = cpu_lookup(cs->cs_id)) == NULL) {
			error = ESRCH;
			break;
		}
		cpu_setintr(ci, cs->cs_intr);	/* XXX neglect errors */
		error = cpu_setstate(ci, cs->cs_online);
		break;

	case IOC_CPU_GETSTATE:
		cs = data;
		id = cs->cs_id;
		memset(cs, 0, sizeof(*cs));
		cs->cs_id = id;
		if (cs->cs_id >= maxcpus ||
		    (ci = cpu_lookup(id)) == NULL) {
			error = ESRCH;
			break;
		}
		if ((ci->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
			cs->cs_online = false;
		else
			cs->cs_online = true;
		if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0)
			cs->cs_intr = false;
		else
			cs->cs_intr = true;
		cs->cs_lastmod = (int32_t)ci->ci_schedstate.spc_lastmod;
		cs->cs_lastmodhi = (int32_t)
		    (ci->ci_schedstate.spc_lastmod >> 32);
		cs->cs_intrcnt = cpu_intr_count(ci) + 1;
		cs->cs_hwid = ci->ci_cpuid;
		break;

	case IOC_CPU_MAPID:
		i = 0;
		for (CPU_INFO_FOREACH(cii, ci)) {
			if (i++ == *(int *)data)
				break;
		}
		if (ci == NULL)
			error = ESRCH;
		else
			*(int *)data = cpu_index(ci);
		break;

	case IOC_CPU_GETCOUNT:
		*(int *)data = ncpu;
		break;

#ifdef CPU_UCODE
	case IOC_CPU_UCODE_GET_VERSION:
		error = cpu_ucode_get_version((struct cpu_ucode_version *)data);
		break;

	case IOC_CPU_UCODE_APPLY:
		error = kauth_authorize_machdep(l->l_cred,
		    KAUTH_MACHDEP_CPU_UCODE_APPLY,
		    NULL, NULL, NULL, NULL);
		if (error != 0)
			break;
		error = cpu_ucode_apply((const struct cpu_ucode *)data);
		break;
#endif

	default:
		error = (*compat_cpuctl_ioctl)(l, cmd, data);
		break;
	}
	mutex_exit(&cpu_lock);

	return error;
}

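/*
 * cpu_lookup: map a CPU index to its cpu_info, or return NULL if no CPU
 * with that index has attached.  Before any CPU has attached (cpu_infos
 * is still NULL), only index 0 is valid and refers to the current CPU.
 */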
struct cpu_info *
cpu_lookup(u_int idx)
{
	struct cpu_info *ci;

	/*
	 * cpu_infos is a NULL terminated array of MAXCPUS + 1 entries,
	 * so an index of MAXCPUS here is ok.  See mi_cpu_attach.
	 */
	KASSERT(idx <= maxcpus);

	if (__predict_false(cpu_infos == NULL)) {
		KASSERT(idx == 0);
		return curcpu();
	}

	ci = cpu_infos[idx];
	KASSERT(ci == NULL || cpu_index(ci) == idx);
	KASSERTMSG(idx < maxcpus || ci == NULL, "idx %d ci %p", idx, ci);

	return ci;
}

static void
cpu_xc_offline(struct cpu_info *ci, void *unused)
{
	struct schedstate_percpu *spc, *mspc = NULL;
	struct cpu_info *target_ci;
	struct lwp *l;
	CPU_INFO_ITERATOR cii;
	int s;

	/*
	 * Thread that made the cross call (separate context) holds
	 * cpu_lock on our behalf.
	 */
	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags |= SPCF_OFFLINE;
	splx(s);

	/* Take the first available CPU for the migration. */
	for (CPU_INFO_FOREACH(cii, target_ci)) {
		mspc = &target_ci->ci_schedstate;
		if ((mspc->spc_flags & SPCF_OFFLINE) == 0)
			break;
	}
	KASSERT(target_ci != NULL);

	/*
	 * Migrate all non-bound threads to the other CPU.  Note that this
	 * runs from the xcall thread, thus handling of LSONPROC is not needed.
	 */
	mutex_enter(&proc_lock);
	LIST_FOREACH(l, &alllwp, l_list) {
		struct cpu_info *mci;

		lwp_lock(l);
		if (l->l_cpu != ci || (l->l_pflag & (LP_BOUND | LP_INTR))) {
			lwp_unlock(l);
			continue;
		}
		/* Regular case - no affinity. */
		if (l->l_affinity == NULL) {
			lwp_migrate(l, target_ci);
			continue;
		}
		/* Affinity is set, find an online CPU in the set. */
		for (CPU_INFO_FOREACH(cii, mci)) {
			mspc = &mci->ci_schedstate;
			if ((mspc->spc_flags & SPCF_OFFLINE) == 0 &&
			    kcpuset_isset(l->l_affinity, cpu_index(mci)))
				break;
		}
		if (mci == NULL) {
			lwp_unlock(l);
			mutex_exit(&proc_lock);
			goto fail;
		}
		lwp_migrate(l, mci);
	}
	mutex_exit(&proc_lock);

#if PCU_UNIT_COUNT > 0
	pcu_save_all_on_cpu();
#endif

	heartbeat_suspend();

#ifdef __HAVE_MD_CPU_OFFLINE
	cpu_offline_md();
#endif
	return;
fail:
	/* Just unset the SPCF_OFFLINE flag, caller will check */
	s = splsched();
	spc->spc_flags &= ~SPCF_OFFLINE;
	splx(s);
}

static void
cpu_xc_online(struct cpu_info *ci, void *unused)
{
	struct schedstate_percpu *spc;
	int s;

	heartbeat_resume();

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags &= ~SPCF_OFFLINE;
	splx(s);
}

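/*
 * cpu_setstate: set the given CPU online or offline.  Called with
 * cpu_lock held; the state change itself is performed by a cross-call
 * to the target CPU.  Refuses to take the last online CPU in a
 * processor set offline.
 */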
int
cpu_setstate(struct cpu_info *ci, bool online)
{
	struct schedstate_percpu *spc;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci2;
	uint64_t where;
	xcfunc_t func;
	int nonline;

	spc = &ci->ci_schedstate;

	KASSERT(mutex_owned(&cpu_lock));

	if (online) {
		if ((spc->spc_flags & SPCF_OFFLINE) == 0)
			return 0;
		func = (xcfunc_t)cpu_xc_online;
	} else {
		if ((spc->spc_flags & SPCF_OFFLINE) != 0)
			return 0;
		nonline = 0;
		/*
		 * Ensure that at least one CPU within the processor set
		 * stays online.  Revisit this later.
		 */
		for (CPU_INFO_FOREACH(cii, ci2)) {
			if ((ci2->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
				continue;
			if (ci2->ci_schedstate.spc_psid != spc->spc_psid)
				continue;
			nonline++;
		}
		if (nonline == 1)
			return EBUSY;
		func = (xcfunc_t)cpu_xc_offline;
	}

	where = xc_unicast(0, func, ci, NULL, ci);
	xc_wait(where);
	if (online) {
		KASSERT((spc->spc_flags & SPCF_OFFLINE) == 0);
		ncpuonline++;
	} else {
		if ((spc->spc_flags & SPCF_OFFLINE) == 0) {
			/* If it was not set offline, then it is busy */
			return EBUSY;
		}
		ncpuonline--;
	}

	spc->spc_lastmod = time_second;
	return 0;
}

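/*
 * Convenience predicates on scheduler state flags: cpu_is_type() checks
 * that all of the wanted SPCF_* flags are set, and the wrappers below
 * test for idle and/or "first class" (SPCF_1STCLASS) CPUs.
 * cpu_is_better() is intended to prefer a CPU with SPCF_1STCLASS set
 * over one without.
 */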
bool
cpu_is_type(struct cpu_info *ci, int wanted)
{

	return (ci->ci_schedstate.spc_flags & wanted) == wanted;
}

bool
cpu_is_idle_1stclass(struct cpu_info *ci)
{
	const int wanted = SPCF_IDLE | SPCF_1STCLASS;

	return cpu_is_type(ci, wanted);
}

bool
cpu_is_1stclass(struct cpu_info *ci)
{
	const int wanted = SPCF_1STCLASS;

	return cpu_is_type(ci, wanted);
}

bool
cpu_is_better(struct cpu_info *ci1, struct cpu_info *ci2)
{
	const int ci1_flags = ci1->ci_schedstate.spc_flags;
	const int ci2_flags = ci2->ci_schedstate.spc_flags;

	if ((ci1_flags & SPCF_1STCLASS) != 0 &&
	    (ci2_flags & SPCF_1STCLASS) == 0)
		return ci1;

	return ci2;
}

#if defined(__HAVE_INTR_CONTROL)
static void
cpu_xc_intr(struct cpu_info *ci, void *unused)
{
	struct schedstate_percpu *spc;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags &= ~SPCF_NOINTR;
	splx(s);
}

static void
cpu_xc_nointr(struct cpu_info *ci, void *unused)
{
	struct schedstate_percpu *spc;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags |= SPCF_NOINTR;
	splx(s);
}

int
cpu_setintr(struct cpu_info *ci, bool intr)
{
	struct schedstate_percpu *spc;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci2;
	uint64_t where;
	xcfunc_t func;
	int nintr;

	spc = &ci->ci_schedstate;

	KASSERT(mutex_owned(&cpu_lock));

	if (intr) {
		if ((spc->spc_flags & SPCF_NOINTR) == 0)
			return 0;
		func = (xcfunc_t)cpu_xc_intr;
	} else {
		if (CPU_IS_PRIMARY(ci))	/* XXX kern/45117 */
			return EINVAL;
		if ((spc->spc_flags & SPCF_NOINTR) != 0)
			return 0;
		/*
		 * Ensure that at least one CPU within the system
		 * is handling device interrupts.
		 */
		nintr = 0;
		for (CPU_INFO_FOREACH(cii, ci2)) {
			if ((ci2->ci_schedstate.spc_flags & SPCF_NOINTR) != 0)
				continue;
			if (ci2 == ci)
				continue;
			nintr++;
		}
		if (nintr == 0)
			return EBUSY;
		func = (xcfunc_t)cpu_xc_nointr;
	}

	where = xc_unicast(0, func, ci, NULL, ci);
	xc_wait(where);
	if (intr) {
		KASSERT((spc->spc_flags & SPCF_NOINTR) == 0);
	} else if ((spc->spc_flags & SPCF_NOINTR) == 0) {
		/* If it was not set offline, then it is busy */
		return EBUSY;
	}

	/* Direct interrupts away from the CPU and record the change. */
	cpu_intr_redistribute();
	spc->spc_lastmod = time_second;
	return 0;
}
#else	/* __HAVE_INTR_CONTROL */
int
cpu_setintr(struct cpu_info *ci, bool intr)
{

	return EOPNOTSUPP;
}

u_int
cpu_intr_count(struct cpu_info *ci)
{

	return 0;		/* 0 == "don't know" */
}
#endif	/* __HAVE_INTR_CONTROL */

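/*
 * CPU microcode loading (options CPU_UCODE): cpu_ucode_load() fetches a
 * microcode image via the firmware(9) interface into sc->sc_blob, for
 * later application by the machine-dependent code.
 */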
#ifdef CPU_UCODE
int
cpu_ucode_load(struct cpu_ucode_softc *sc, const char *fwname)
{
	firmware_handle_t fwh;
	int error;

	if (sc->sc_blob != NULL) {
		firmware_free(sc->sc_blob, sc->sc_blobsize);
		sc->sc_blob = NULL;
		sc->sc_blobsize = 0;
	}

	error = cpu_ucode_md_open(&fwh, sc->loader_version, fwname);
	if (error != 0) {
#ifdef DEBUG
		printf("ucode: firmware_open(%s) failed: %i\n", fwname, error);
#endif
		goto err0;
	}

	sc->sc_blobsize = firmware_get_size(fwh);
	if (sc->sc_blobsize == 0) {
		error = EFTYPE;
		firmware_close(fwh);
		goto err0;
	}
	sc->sc_blob = firmware_malloc(sc->sc_blobsize);
	if (sc->sc_blob == NULL) {
		error = ENOMEM;
		firmware_close(fwh);
		goto err0;
	}

	error = firmware_read(fwh, 0, sc->sc_blob, sc->sc_blobsize);
	firmware_close(fwh);
	if (error != 0)
		goto err1;

	return 0;

err1:
	firmware_free(sc->sc_blob, sc->sc_blobsize);
	sc->sc_blob = NULL;
	sc->sc_blobsize = 0;
err0:
	return error;
}
#endif