/* $NetBSD: cpufreq_dt.c,v 1.20 2025/09/06 21:24:05 thorpej Exp $ */

/*-
 * Copyright (c) 2015-2017 Jared McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpufreq_dt.c,v 1.20 2025/09/06 21:24:05 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/bus.h>
#include <sys/atomic.h>
#include <sys/xcall.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/once.h>
#include <sys/cpu.h>

#include <dev/fdt/fdtvar.h>
#include <dev/fdt/fdt_opp.h>

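/*
 * List of OPP tables that have already been set up, used so that a
 * table shared between CPUs ("opp-shared") is only instantiated once.
 */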
struct cpufreq_dt_table {
	int			phandle;
	TAILQ_ENTRY(cpufreq_dt_table) next;
};

static TAILQ_HEAD(, cpufreq_dt_table) cpufreq_dt_tables =
    TAILQ_HEAD_INITIALIZER(cpufreq_dt_tables);
static kmutex_t cpufreq_dt_tables_lock;

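/*
 * One operating performance point: frequency in kHz, supply voltage in
 * microvolts, and transition latency in nanoseconds.
 */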
struct cpufreq_dt_opp {
	u_int		freq_khz;
	u_int		voltage_uv;
	u_int		latency_ns;
};

struct cpufreq_dt_softc {
	device_t	sc_dev;
	int		sc_phandle;
	struct clk	*sc_clk;
	struct fdtbus_regulator *sc_supply;

	struct cpufreq_dt_opp *sc_opp;
	ssize_t		sc_nopp;

	u_int		sc_freq_target;
	bool		sc_freq_throttle;

	u_int		sc_busy;

	char		*sc_freq_available;
	int		sc_node_target;
	int		sc_node_current;
	int		sc_node_available;

	struct cpufreq_dt_table sc_table;
};

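/*
 * Cross-call handler, broadcast after a rate change: update the calling
 * CPU's cached clock frequency (cpu_cc_freq).
 */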
static void
cpufreq_dt_change_cb(void *arg1, void *arg2)
{
	struct cpufreq_dt_softc * const sc = arg1;
	struct cpu_info *ci = curcpu();

	ci->ci_data.cpu_cc_freq = clk_get_rate(sc->sc_clk);
}

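/*
 * Program the CPU clock for the OPP matching freq_khz.  The supply
 * voltage is raised before the clock is sped up, and lowered after the
 * clock is slowed down; "clock-latency-ns" is honoured in between.
 */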
static int
cpufreq_dt_set_rate(struct cpufreq_dt_softc *sc, u_int freq_khz)
{
	struct cpufreq_dt_opp *opp = NULL;
	u_int old_rate, new_rate, old_uv, new_uv;
	uint64_t xc;
	int error;
	ssize_t n;

	for (n = 0; n < sc->sc_nopp; n++)
		if (sc->sc_opp[n].freq_khz == freq_khz) {
			opp = &sc->sc_opp[n];
			break;
		}
	if (opp == NULL)
		return EINVAL;

	old_rate = clk_get_rate(sc->sc_clk);
	new_rate = freq_khz * 1000;
	new_uv = opp->voltage_uv;

	if (old_rate == new_rate)
		return 0;

	if (sc->sc_supply != NULL) {
		error = fdtbus_regulator_get_voltage(sc->sc_supply, &old_uv);
		if (error != 0)
			return error;

		if (new_uv > old_uv) {
			error = fdtbus_regulator_set_voltage(sc->sc_supply,
			    new_uv, new_uv);
			if (error != 0)
				return error;
		}
	}

	error = clk_set_rate(sc->sc_clk, new_rate);
	if (error != 0)
		return error;

	const u_int latency_us = howmany(opp->latency_ns, 1000);
	if (latency_us > 0)
		delay(latency_us);

	if (sc->sc_supply != NULL) {
		if (new_uv < old_uv) {
			error = fdtbus_regulator_set_voltage(sc->sc_supply,
			    new_uv, new_uv);
			if (error != 0)
				return error;
		}
	}

	if (error == 0) {
		xc = xc_broadcast(0, cpufreq_dt_change_cb, sc, NULL);
		xc_wait(xc);

		pmf_event_inject(NULL, PMFE_SPEED_CHANGED);
	}

	return 0;
}

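/*
 * PMFE_THROTTLE_ENABLE handler: drop to the last entry in the OPP table
 * (the slowest OPP), remembering the previous target so that it can be
 * restored when throttling is disabled.
 */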
static void
cpufreq_dt_throttle_enable(device_t dev)
{
	struct cpufreq_dt_softc * const sc = device_private(dev);

	if (sc->sc_freq_throttle)
		return;

	const u_int freq_khz = sc->sc_opp[sc->sc_nopp - 1].freq_khz;

	while (atomic_cas_uint(&sc->sc_busy, 0, 1) != 0)
		kpause("throttle", false, 1, NULL);

	if (cpufreq_dt_set_rate(sc, freq_khz) == 0) {
		aprint_debug_dev(sc->sc_dev, "throttle enabled (%u.%03u MHz)\n",
		    freq_khz / 1000, freq_khz % 1000);
		sc->sc_freq_throttle = true;
		if (sc->sc_freq_target == 0)
			sc->sc_freq_target = clk_get_rate(sc->sc_clk) / 1000000;
	}

	atomic_dec_uint(&sc->sc_busy);
}

static void
cpufreq_dt_throttle_disable(device_t dev)
{
	struct cpufreq_dt_softc * const sc = device_private(dev);

	if (!sc->sc_freq_throttle)
		return;

	while (atomic_cas_uint(&sc->sc_busy, 0, 1) != 0)
		kpause("throttle", false, 1, NULL);

	const u_int freq_khz = sc->sc_freq_target * 1000;

	if (cpufreq_dt_set_rate(sc, freq_khz) == 0) {
		aprint_debug_dev(sc->sc_dev, "throttle disabled (%u.%03u MHz)\n",
		    freq_khz / 1000, freq_khz % 1000);
		sc->sc_freq_throttle = false;
	}

	atomic_dec_uint(&sc->sc_busy);
}

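/*
 * Back end for the machdep.cpufreq.<cpu>.target and .current sysctl
 * nodes.  Frequencies are expressed in MHz; writing "target" selects the
 * matching OPP (deferred while throttling is active).
 */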
static int
cpufreq_dt_sysctl_helper(SYSCTLFN_ARGS)
{
	struct cpufreq_dt_softc * const sc = rnode->sysctl_data;
	struct sysctlnode node;
	u_int fq, oldfq = 0;
	int error, n;

	node = *rnode;
	node.sysctl_data = &fq;

	if (rnode->sysctl_num == sc->sc_node_target) {
		if (sc->sc_freq_target == 0)
			sc->sc_freq_target = clk_get_rate(sc->sc_clk) / 1000000;
		fq = sc->sc_freq_target;
	} else
		fq = clk_get_rate(sc->sc_clk) / 1000000;

	if (rnode->sysctl_num == sc->sc_node_target)
		oldfq = fq;

	if (sc->sc_freq_target == 0)
		sc->sc_freq_target = fq;

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (fq == oldfq || rnode->sysctl_num != sc->sc_node_target)
		return 0;

	for (n = 0; n < sc->sc_nopp; n++)
		if (sc->sc_opp[n].freq_khz / 1000 == fq)
			break;
	if (n == sc->sc_nopp)
		return EINVAL;

	if (atomic_cas_uint(&sc->sc_busy, 0, 1) != 0)
		return EBUSY;

	sc->sc_freq_target = fq;

	if (sc->sc_freq_throttle)
		error = 0;
	else
		error = cpufreq_dt_set_rate(sc, fq * 1000);

	atomic_dec_uint(&sc->sc_busy);

	return error;
}

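/* Find the cpu_info whose MPIDR (ci_cpuid) matches the CPU node's "reg". */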
static struct cpu_info *
cpufreq_dt_cpu_lookup(cpuid_t mpidr)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		if (ci->ci_cpuid == mpidr)
			return ci;
	}

	return NULL;
}

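/*
 * Create the machdep.cpufreq.<cpu> sysctl tree: read/write "target" and
 * "current" frequencies (in MHz) plus a read-only "available" list.
 */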
static void
cpufreq_dt_init_sysctl(struct cpufreq_dt_softc *sc)
{
	const struct sysctlnode *node, *cpunode;
	struct sysctllog *cpufreq_log = NULL;
	struct cpu_info *ci;
	bus_addr_t mpidr;
	int error, i;

	if (fdtbus_get_reg(sc->sc_phandle, 0, &mpidr, NULL) != 0)
		return;

	ci = cpufreq_dt_cpu_lookup(mpidr);
	if (ci == NULL)
		return;

	sc->sc_freq_available = kmem_zalloc(strlen("XXXX ") * sc->sc_nopp,
	    KM_SLEEP);
	for (i = 0; i < sc->sc_nopp; i++) {
		char buf[6];
		snprintf(buf, sizeof(buf), i ? " %u" : "%u",
		    sc->sc_opp[i].freq_khz / 1000);
		strcat(sc->sc_freq_available, buf);
	}

	error = sysctl_createv(&cpufreq_log, 0, NULL, &node,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "machdep", NULL,
	    NULL, 0, NULL, 0, CTL_MACHDEP, CTL_EOL);
	if (error)
		goto sysctl_failed;
	error = sysctl_createv(&cpufreq_log, 0, &node, &node,
	    0, CTLTYPE_NODE, "cpufreq", NULL,
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto sysctl_failed;
	error = sysctl_createv(&cpufreq_log, 0, &node, &cpunode,
	    0, CTLTYPE_NODE, cpu_name(ci), NULL,
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto sysctl_failed;

	error = sysctl_createv(&cpufreq_log, 0, &cpunode, &node,
	    CTLFLAG_READWRITE, CTLTYPE_INT, "target", NULL,
	    cpufreq_dt_sysctl_helper, 0, (void *)sc, 0,
	    CTL_CREATE, CTL_EOL);
	if (error)
		goto sysctl_failed;
	sc->sc_node_target = node->sysctl_num;

	error = sysctl_createv(&cpufreq_log, 0, &cpunode, &node,
	    CTLFLAG_READWRITE, CTLTYPE_INT, "current", NULL,
	    cpufreq_dt_sysctl_helper, 0, (void *)sc, 0,
	    CTL_CREATE, CTL_EOL);
	if (error)
		goto sysctl_failed;
	sc->sc_node_current = node->sysctl_num;

	error = sysctl_createv(&cpufreq_log, 0, &cpunode, &node,
	    0, CTLTYPE_STRING, "available", NULL,
	    NULL, 0, sc->sc_freq_available, 0,
	    CTL_CREATE, CTL_EOL);
	if (error)
		goto sysctl_failed;
	sc->sc_node_available = node->sysctl_num;

	return;

sysctl_failed:
	aprint_error_dev(sc->sc_dev, "couldn't create sysctl nodes: %d\n",
	    error);
	sysctl_teardown(&cpufreq_log);
}

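/*
 * Parse the legacy "operating-points" binding: a flat array of
 * <frequency-kHz voltage-uV> pairs.
 */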
static int
cpufreq_dt_parse_opp(struct cpufreq_dt_softc *sc)
{
	const int phandle = sc->sc_phandle;
	const u_int *opp;
	int len, i;

	opp = fdtbus_get_prop(phandle, "operating-points", &len);
	if (opp == NULL || len < 8)
		return ENXIO;

	sc->sc_nopp = len / 8;
	sc->sc_opp = kmem_zalloc(sizeof(*sc->sc_opp) * sc->sc_nopp, KM_SLEEP);
	for (i = 0; i < sc->sc_nopp; i++, opp += 2) {
		sc->sc_opp[i].freq_khz = be32toh(opp[0]);
		sc->sc_opp[i].voltage_uv = be32toh(opp[1]);
	}

	return 0;
}

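/*
 * Pick the best matching fdt_opp_info from the "fdt_opps" link set by
 * comparing each entry's compatible string against the OPP table node.
 */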
static const struct fdt_opp_info *
cpufreq_dt_lookup_opp_info(const int opp_table)
{
	__link_set_decl(fdt_opps, struct fdt_opp_info);
	struct fdt_opp_info * const *opp;
	const struct fdt_opp_info *best_opp = NULL;
	int match, best_match = 0;

	__link_set_foreach(opp, fdt_opps) {
		const struct device_compatible_entry compat_data[] = {
			{ .compat = (*opp)->opp_compat },
			DEVICE_COMPAT_EOL
		};

		match = of_compatible_match(opp_table, compat_data);
		if (match > best_match) {
			best_match = match;
			best_opp = *opp;
		}
	}

	return best_opp;
}

static bool
cpufreq_dt_opp_v2_supported(const int opp_table, const int opp_node)
{
	return true;
}

FDT_OPP(opp_v2, "operating-points-v2", cpufreq_dt_opp_v2_supported);

static bool
cpufreq_dt_node_supported(const struct fdt_opp_info *opp_info,
    const int opp_table, const int opp_node)
{
	if (!fdtbus_status_okay(opp_node))
		return false;
	if (of_hasprop(opp_node, "opp-suspend"))
		return false;

	if (opp_info != NULL)
		return opp_info->opp_supported(opp_table, opp_node);

	return false;
}

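/*
 * Parse the "operating-points-v2" binding.  A shared table (opp-shared)
 * is only set up for the first CPU that references it.  Entries are
 * stored in reverse of their devicetree order, so with the conventional
 * ascending table the fastest OPP ends up at index 0.
 */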
static int
cpufreq_dt_parse_opp_v2(struct cpufreq_dt_softc *sc)
{
	const int phandle = sc->sc_phandle;
	struct cpufreq_dt_table *table;
	const struct fdt_opp_info *opp_info;
	const u_int *opp_uv;
	uint64_t opp_hz;
	int opp_node, len, i, index;

	const int opp_table = fdtbus_get_phandle(phandle,
	    "operating-points-v2");
	if (opp_table < 0)
		return ENOENT;

	/* If the table is shared, only set up a single instance */
	if (of_hasprop(opp_table, "opp-shared")) {
		TAILQ_FOREACH(table, &cpufreq_dt_tables, next)
			if (table->phandle == opp_table)
				return EEXIST;
		sc->sc_table.phandle = opp_table;
		TAILQ_INSERT_TAIL(&cpufreq_dt_tables, &sc->sc_table, next);
	}

	opp_info = cpufreq_dt_lookup_opp_info(opp_table);

	for (opp_node = OF_child(opp_table); opp_node;
	     opp_node = OF_peer(opp_node)) {
		if (!cpufreq_dt_node_supported(opp_info, opp_table, opp_node))
			continue;
		sc->sc_nopp++;
	}

	if (sc->sc_nopp == 0)
		return EINVAL;

	sc->sc_opp = kmem_zalloc(sizeof(*sc->sc_opp) * sc->sc_nopp, KM_SLEEP);
	index = sc->sc_nopp - 1;
	for (opp_node = OF_child(opp_table), i = 0; opp_node;
	     opp_node = OF_peer(opp_node), i++) {
		if (!cpufreq_dt_node_supported(opp_info, opp_table, opp_node))
			continue;
		if (of_getprop_uint64(opp_node, "opp-hz", &opp_hz) != 0)
			return EINVAL;
		opp_uv = fdtbus_get_prop(opp_node, "opp-microvolt", &len);
		if (opp_uv == NULL || len < (int)sizeof(*opp_uv))
			return EINVAL;
		/* Table is in reverse order */
		sc->sc_opp[index].freq_khz = (u_int)(opp_hz / 1000);
		sc->sc_opp[index].voltage_uv = be32toh(opp_uv[0]);
		of_getprop_uint32(opp_node, "clock-latency-ns",
		    &sc->sc_opp[index].latency_ns);
		--index;
	}

	return 0;
}

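/*
 * Acquire the optional cpu-supply regulator and the CPU clock, then
 * parse whichever operating-points binding the CPU node carries.
 */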
static int
cpufreq_dt_parse(struct cpufreq_dt_softc *sc)
{
	const int phandle = sc->sc_phandle;
	int error, i;

	if (of_hasprop(phandle, "cpu-supply")) {
		sc->sc_supply = fdtbus_regulator_acquire(phandle,
		    "cpu-supply");
		if (sc->sc_supply == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "couldn't acquire cpu-supply\n");
			return ENXIO;
		}
	}
	sc->sc_clk = fdtbus_clock_get_index(phandle, 0);
	if (sc->sc_clk == NULL) {
		aprint_error_dev(sc->sc_dev, "couldn't acquire clock\n");
		return ENXIO;
	}

	mutex_enter(&cpufreq_dt_tables_lock);
	if (of_hasprop(phandle, "operating-points"))
		error = cpufreq_dt_parse_opp(sc);
	else if (of_hasprop(phandle, "operating-points-v2"))
		error = cpufreq_dt_parse_opp_v2(sc);
	else
		error = EINVAL;
	mutex_exit(&cpufreq_dt_tables_lock);

	if (error) {
		if (error != EEXIST)
			aprint_error_dev(sc->sc_dev,
			    "couldn't parse operating points: %d\n", error);
		return error;
	}

	for (i = 0; i < sc->sc_nopp; i++) {
		aprint_debug_dev(sc->sc_dev,
		    "supported rate: %u.%03u MHz, %u uV\n",
		    sc->sc_opp[i].freq_khz / 1000,
		    sc->sc_opp[i].freq_khz % 1000,
		    sc->sc_opp[i].voltage_uv);
	}

	return 0;
}

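/* Match any CPU node that has a clock and an operating-points table. */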
static int
cpufreq_dt_match(device_t parent, cfdata_t cf, void *aux)
{
	struct fdt_attach_args * const faa = aux;
	const int phandle = faa->faa_phandle;
	bus_addr_t addr;

	if (fdtbus_get_reg(phandle, 0, &addr, NULL) != 0)
		return 0;

	if (!of_hasprop(phandle, "clocks"))
		return 0;

	if (!of_hasprop(phandle, "operating-points") &&
	    !of_hasprop(phandle, "operating-points-v2"))
		return 0;

	return 1;
}

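/*
 * Deferred initialization, run from config_interrupts(9): parse the OPP
 * table, hook up the throttle events and sysctl nodes, and switch the
 * CPU to the first entry in the table.
 */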
static void
cpufreq_dt_init(device_t self)
{
	struct cpufreq_dt_softc * const sc = device_private(self);
	int error;

	if ((error = cpufreq_dt_parse(sc)) != 0)
		return;

	pmf_event_register(sc->sc_dev, PMFE_THROTTLE_ENABLE,
	    cpufreq_dt_throttle_enable, true);
	pmf_event_register(sc->sc_dev, PMFE_THROTTLE_DISABLE,
	    cpufreq_dt_throttle_disable, true);

	cpufreq_dt_init_sysctl(sc);

	if (sc->sc_nopp > 0) {
		struct cpufreq_dt_opp * const opp = &sc->sc_opp[0];

		aprint_normal_dev(sc->sc_dev, "rate: %u.%03u MHz, %u uV\n",
		    opp->freq_khz / 1000, opp->freq_khz % 1000,
		    opp->voltage_uv);
		cpufreq_dt_set_rate(sc, opp->freq_khz);
	}
}

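/* Once-only initialization of the OPP table list lock. */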
static int
cpufreq_dt_lock_init(void)
{
	mutex_init(&cpufreq_dt_tables_lock, MUTEX_DEFAULT, IPL_NONE);
	return 0;
}

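/*
 * Attach: record the CPU node and defer the rest of initialization to
 * config_interrupts(9), by which point clocks and regulators are up.
 */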
static void
cpufreq_dt_attach(device_t parent, device_t self, void *aux)
{
	static ONCE_DECL(locks);
	struct cpufreq_dt_softc * const sc = device_private(self);
	struct fdt_attach_args * const faa = aux;

	RUN_ONCE(&locks, cpufreq_dt_lock_init);

	sc->sc_dev = self;
	sc->sc_phandle = faa->faa_phandle;

	aprint_naive("\n");
	aprint_normal("\n");

	config_interrupts(self, cpufreq_dt_init);
}

CFATTACH_DECL_NEW(cpufreq_dt, sizeof(struct cpufreq_dt_softc),
    cpufreq_dt_match, cpufreq_dt_attach, NULL, NULL);