/* $NetBSD: cpu_acpi.c,v 1.18 2025/01/30 00:43:56 jmcneill Exp $ */

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jared McNeill <jmcneill (at) invisible.ca>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "tprof.h"
#include "opt_multiprocessor.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpu_acpi.c,v 1.18 2025/01/30 00:43:56 jmcneill Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/interrupt.h>
#include <sys/kcpuset.h>
#include <sys/kmem.h>
#include <sys/reboot.h>

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_srat.h>
#include <external/bsd/acpica/dist/include/amlresrc.h>

#include <arm/armreg.h>
#include <arm/cpu.h>
#include <arm/cpufunc.h>
#include <arm/cpuvar.h>
#include <arm/locore.h>

#include <arm/arm/psci.h>

#define LPI_IDLE_FACTOR	3

#if NTPROF > 0
#include <dev/tprof/tprof_armv8.h>
#endif

static int	cpu_acpi_match(device_t, cfdata_t, void *);
static void	cpu_acpi_attach(device_t, device_t, void *);

static void	cpu_acpi_probe_lpi(device_t, struct cpu_info *ci);
void		cpu_acpi_lpi_idle(void);

#if NTPROF > 0
static void	cpu_acpi_tprof_init(device_t);
#endif

CFATTACH_DECL2_NEW(cpu_acpi, 0,
    cpu_acpi_match, cpu_acpi_attach, NULL, NULL,
    cpu_rescan, cpu_childdetached);

#ifdef MULTIPROCESSOR
static register_t
cpu_acpi_mpstart_pa(void)
{

	return (register_t)KERN_VTOPHYS((vaddr_t)cpu_mpstart);
}
#endif /* MULTIPROCESSOR */

static int
cpu_acpi_match(device_t parent, cfdata_t cf, void *aux)
{
	ACPI_SUBTABLE_HEADER *hdrp = aux;
	ACPI_MADT_GENERIC_INTERRUPT *gicc;

	if (hdrp->Type != ACPI_MADT_TYPE_GENERIC_INTERRUPT)
		return 0;

	gicc = (ACPI_MADT_GENERIC_INTERRUPT *)hdrp;

	return (gicc->Flags & ACPI_MADT_ENABLED) != 0;
}

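/*
 * Attach a processor described by a MADT GICC subtable.  Secondary
 * processors are started via PSCI CPU_ON (spin tables are not yet
 * supported), with a bounded spin while waiting for them to hatch.
 * For every processor, the ACPI Processor UID is recorded, the NUMA
 * node is looked up in the SRAT, and _LPI idle states are probed.
 */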
static void
cpu_acpi_attach(device_t parent, device_t self, void *aux)
{
	prop_dictionary_t dict = device_properties(self);
	ACPI_MADT_GENERIC_INTERRUPT *gicc = aux;
	const uint64_t mpidr = gicc->ArmMpidr;
	const int unit = device_unit(self);
	struct cpu_info *ci = &cpu_info_store[unit];
	struct acpisrat_node *node;

#ifdef MULTIPROCESSOR
	if (cpu_mpidr_aff_read() != mpidr && (boothowto & RB_MD1) == 0) {
		const u_int cpuindex = device_unit(self);
		int error;

		cpu_mpidr[cpuindex] = mpidr;
		cpu_dcache_wb_range((vaddr_t)&cpu_mpidr[cpuindex],
		    sizeof(cpu_mpidr[cpuindex]));

		/* XXX support spin table */
		error = psci_cpu_on(mpidr, cpu_acpi_mpstart_pa(), 0);
		if (error != PSCI_SUCCESS) {
			aprint_error_dev(self, "failed to start CPU\n");
			return;
		}

		sev();

		for (u_int i = 0x10000000; i > 0; i--) {
			if (cpu_hatched_p(cpuindex))
				break;
		}
	}
#endif /* MULTIPROCESSOR */

	/* Assume that less efficient processors are faster. */
	prop_dictionary_set_uint32(dict, "capacity_dmips_mhz",
	    gicc->EfficiencyClass);

	/* Store the ACPI Processor UID in cpu_info */
	ci->ci_acpiid = gicc->Uid;

	/* Scan SRAT for NUMA info. */
	if (cpu_mpidr_aff_read() == mpidr) {
		acpisrat_init();
	}
	node = acpisrat_get_node(gicc->Uid);
	if (node != NULL) {
		ci->ci_numa_id = node->nodeid;
	}

	/* Attach the CPU */
	cpu_attach(self, mpidr);

	if (ci->ci_dev == NULL) {
		/* Not configured */
		return;
	}

	/* Probe for low-power idle states. */
	cpu_acpi_probe_lpi(self, ci);

#if NTPROF > 0
	if (cpu_mpidr_aff_read() == mpidr && armv8_pmu_detect())
		config_interrupts(self, cpu_acpi_tprof_init);
#endif
}

static void
cpu_acpi_probe_lpi(device_t dev, struct cpu_info *ci)
{
	ACPI_HANDLE hdl;
	ACPI_BUFFER buf;
	ACPI_OBJECT *obj, *lpi;
	ACPI_STATUS rv;
	uint32_t levelid;
	uint32_t numlpi;
	uint32_t n;
	int enable_lpi;

	if (get_bootconf_option(boot_args, "nolpi",
	    BOOTOPT_TYPE_BOOLEAN, &enable_lpi) &&
	    !enable_lpi) {
		return;
	}

	hdl = acpi_match_cpu_info(ci);
	if (hdl == NULL) {
		return;
	}
	rv = AcpiGetHandle(hdl, "_LPI", &hdl);
	if (ACPI_FAILURE(rv)) {
		return;
	}
	rv = acpi_eval_struct(hdl, NULL, &buf);
	if (ACPI_FAILURE(rv)) {
		return;
	}

	obj = buf.Pointer;
	if (obj->Type != ACPI_TYPE_PACKAGE ||
	    obj->Package.Count < 3 ||
	    obj->Package.Elements[1].Type != ACPI_TYPE_INTEGER ||
	    obj->Package.Elements[2].Type != ACPI_TYPE_INTEGER) {
		goto out;
	}
	levelid = obj->Package.Elements[1].Integer.Value;
	if (levelid != 0) {
		/* We depend on platform coordination for now. */
		goto out;
	}
	numlpi = obj->Package.Elements[2].Integer.Value;
	if (obj->Package.Count < 3 + numlpi || numlpi == 0) {
		goto out;
	}

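	/*
	 * Each state is a sub-package.  Per the ACPI _LPI definition, the
	 * elements used below are: [0] minimum residency (us), [1] worst
	 * case wakeup latency (us), [2] flags (bit 0: enabled), [3] arch
	 * context lost flags, [6] entry method (an integer, or a buffer
	 * containing a generic register descriptor), [9] state name.
	 */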
	ci->ci_lpi = kmem_zalloc(sizeof(*ci->ci_lpi) * numlpi, KM_SLEEP);
	for (n = 0; n < numlpi; n++) {
		lpi = &obj->Package.Elements[3 + n];
		if (lpi->Type != ACPI_TYPE_PACKAGE ||
		    lpi->Package.Count < 10 ||
		    lpi->Package.Elements[0].Type != ACPI_TYPE_INTEGER ||
		    lpi->Package.Elements[1].Type != ACPI_TYPE_INTEGER ||
		    lpi->Package.Elements[2].Type != ACPI_TYPE_INTEGER ||
		    lpi->Package.Elements[3].Type != ACPI_TYPE_INTEGER ||
		    !(lpi->Package.Elements[6].Type == ACPI_TYPE_BUFFER ||
		      lpi->Package.Elements[6].Type == ACPI_TYPE_INTEGER)) {
			continue;
		}

		if ((lpi->Package.Elements[2].Integer.Value & 1) == 0) {
			/* LPI state is not enabled */
			continue;
		}

		ci->ci_lpi[ci->ci_nlpi].min_res =
		    lpi->Package.Elements[0].Integer.Value;
		ci->ci_lpi[ci->ci_nlpi].wakeup_latency =
		    lpi->Package.Elements[1].Integer.Value;
		ci->ci_lpi[ci->ci_nlpi].save_restore_flags =
		    lpi->Package.Elements[3].Integer.Value;
		if (ci->ci_lpi[ci->ci_nlpi].save_restore_flags != 0) {
			/* Not implemented yet */
			continue;
		}
		if (lpi->Package.Elements[6].Type == ACPI_TYPE_INTEGER) {
			ci->ci_lpi[ci->ci_nlpi].reg_addr =
			    lpi->Package.Elements[6].Integer.Value;
		} else {
			ACPI_GENERIC_ADDRESS addr;

			KASSERT(lpi->Package.Elements[6].Type ==
			    ACPI_TYPE_BUFFER);

			if (lpi->Package.Elements[6].Buffer.Length <
			    sizeof(AML_RESOURCE_GENERIC_REGISTER)) {
				continue;
			}
			memcpy(&addr, lpi->Package.Elements[6].Buffer.Pointer +
			    sizeof(AML_RESOURCE_LARGE_HEADER), sizeof(addr));
			ci->ci_lpi[ci->ci_nlpi].reg_addr = addr.Address;
		}

		if (lpi->Package.Elements[9].Type == ACPI_TYPE_STRING) {
			ci->ci_lpi[ci->ci_nlpi].name =
			    kmem_asprintf("LPI state %s",
			    lpi->Package.Elements[9].String.Pointer);
		} else {
			ci->ci_lpi[ci->ci_nlpi].name =
			    kmem_asprintf("LPI state %u", n + 1);
		}

		aprint_verbose_dev(ci->ci_dev,
		    "%s: min res %u, wakeup latency %u, flags %#x, "
		    "register %#x\n",
		    ci->ci_lpi[ci->ci_nlpi].name,
		    ci->ci_lpi[ci->ci_nlpi].min_res,
		    ci->ci_lpi[ci->ci_nlpi].wakeup_latency,
		    ci->ci_lpi[ci->ci_nlpi].save_restore_flags,
		    ci->ci_lpi[ci->ci_nlpi].reg_addr);

		evcnt_attach_dynamic(&ci->ci_lpi[ci->ci_nlpi].events,
		    EVCNT_TYPE_MISC, NULL, ci->ci_cpuname,
		    ci->ci_lpi[ci->ci_nlpi].name);

		ci->ci_nlpi++;
	}

	if (ci->ci_nlpi > 0) {
		extern void (*arm_cpu_idle)(void);
		arm_cpu_idle = cpu_acpi_lpi_idle;
	}

out:
	ACPI_FREE(buf.Pointer);
}

static inline void
cpu_acpi_idle(uint32_t addr)
{
	if (addr == LPI_REG_ADDR_WFI) {
		asm volatile("dsb sy; wfi");
	} else {
		psci_cpu_suspend(addr);
	}
}

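/*
 * Idle handler installed once _LPI states have been probed.  Pick the
 * deepest state whose minimum residency looks affordable, i.e. the last
 * observed idle period exceeded LPI_IDLE_FACTOR * min_res; fall back to
 * a plain WFI otherwise.  The duration of this idle period is measured
 * (in microseconds) to drive the next selection.
 */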
void
cpu_acpi_lpi_idle(void)
{
	struct cpu_info *ci = curcpu();
	struct timeval start, end;
	int n;

	DISABLE_INTERRUPT();

	microuptime(&start);
	for (n = ci->ci_nlpi - 1; n >= 0; n--) {
		if (ci->ci_last_idle >
		    LPI_IDLE_FACTOR * ci->ci_lpi[n].min_res) {
			cpu_acpi_idle(ci->ci_lpi[n].reg_addr);
			ci->ci_lpi[n].events.ev_count++;
			break;
		}
	}
	if (n == -1) {
		/* Nothing in _LPI, let's just WFI. */
		cpu_acpi_idle(LPI_REG_ADDR_WFI);
	}
	microuptime(&end);
	timersub(&end, &start, &end);

	ci->ci_last_idle = end.tv_sec * 1000000 + end.tv_usec;

	ENABLE_INTERRUPT();
}

#if NTPROF > 0
static struct cpu_info *
cpu_acpi_find_processor(UINT32 uid)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		if (ci->ci_acpiid == uid)
			return ci;
	}

	return NULL;
}

static ACPI_STATUS
cpu_acpi_tprof_intr_establish(ACPI_SUBTABLE_HEADER *hdrp, void *aux)
{
	device_t dev = aux;
	ACPI_MADT_GENERIC_INTERRUPT *gicc;
	struct cpu_info *ci;
	char xname[16];
	kcpuset_t *set;
	int error;
	void *ih;

	if (hdrp->Type != ACPI_MADT_TYPE_GENERIC_INTERRUPT)
		return AE_OK;

	gicc = (ACPI_MADT_GENERIC_INTERRUPT *)hdrp;
	if ((gicc->Flags & ACPI_MADT_ENABLED) == 0)
		return AE_OK;

	const bool cpu_primary_p = cpu_info_store[0].ci_cpuid == gicc->ArmMpidr;
	const bool intr_ppi_p = gicc->PerformanceInterrupt < 32;
	const int type = (gicc->Flags & ACPI_MADT_PERFORMANCE_IRQ_MODE) ?
	    IST_EDGE : IST_LEVEL;

	if (intr_ppi_p && !cpu_primary_p)
		return AE_OK;

	ci = cpu_acpi_find_processor(gicc->Uid);
	if (ci == NULL) {
		aprint_error_dev(dev, "couldn't find processor %#x\n",
		    gicc->Uid);
		return AE_OK;
	}

	if (intr_ppi_p) {
		strlcpy(xname, "pmu", sizeof(xname));
	} else {
		snprintf(xname, sizeof(xname), "pmu %s", cpu_name(ci));
	}

	ih = intr_establish_xname(gicc->PerformanceInterrupt, IPL_HIGH,
	    type | IST_MPSAFE, armv8_pmu_intr, NULL, xname);
	if (ih == NULL) {
		aprint_error_dev(dev, "couldn't establish %s interrupt\n",
		    xname);
		return AE_OK;
	}

	if (!intr_ppi_p) {
		kcpuset_create(&set, true);
		kcpuset_set(set, cpu_index(ci));
		error = interrupt_distribute(ih, set, NULL);
		kcpuset_destroy(set);

		if (error) {
			aprint_error_dev(dev,
			    "failed to distribute %s interrupt: %d\n",
			    xname, error);
			return AE_OK;
		}
	}

	aprint_normal("%s: PMU interrupting on irq %d\n", cpu_name(ci),
	    gicc->PerformanceInterrupt);

	return AE_OK;
}

static void
cpu_acpi_tprof_init(device_t self)
{
	int err = armv8_pmu_init();
	if (err) {
		aprint_error_dev(self,
		    "failed to initialize PMU event counter\n");
		return;
	}

	if (acpi_madt_map() != AE_OK) {
		aprint_error_dev(self,
		    "failed to map MADT, performance counters not available\n");
		return;
	}
	acpi_madt_walk(cpu_acpi_tprof_intr_establish, self);
	acpi_madt_unmap();
}
#endif