cpufreq_dt.c revision 1.12 1 /* $NetBSD: cpufreq_dt.c,v 1.12 2019/10/28 21:14:58 jmcneill Exp $ */
2
3 /*-
4 * Copyright (c) 2015-2017 Jared McNeill <jmcneill (at) invisible.ca>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 #include <sys/cdefs.h>
30 __KERNEL_RCSID(0, "$NetBSD: cpufreq_dt.c,v 1.12 2019/10/28 21:14:58 jmcneill Exp $");
31
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/device.h>
35 #include <sys/kmem.h>
36 #include <sys/bus.h>
37 #include <sys/atomic.h>
38 #include <sys/xcall.h>
39 #include <sys/sysctl.h>
40 #include <sys/queue.h>
41 #include <sys/once.h>
42 #include <sys/cpu.h>
43
44 #include <dev/fdt/fdtvar.h>
45
/*
 * Entry in the global list of OPP tables that have already been set up.
 * Used so that a shared "operating-points-v2" table (one with the
 * "opp-shared" property) is only instantiated by a single CPU.
 */
struct cpufreq_dt_table {
	int phandle;			/* phandle of the OPP table node */
	TAILQ_ENTRY(cpufreq_dt_table) next;
};

/* All shared OPP tables seen so far; protected by cpufreq_dt_tables_lock. */
static TAILQ_HEAD(, cpufreq_dt_table) cpufreq_dt_tables =
    TAILQ_HEAD_INITIALIZER(cpufreq_dt_tables);
static kmutex_t cpufreq_dt_tables_lock;
54
/* A single operating performance point (frequency/voltage pair). */
struct cpufreq_dt_opp {
	u_int freq_khz;		/* CPU clock frequency, in kHz */
	u_int voltage_uv;	/* required supply voltage, in microvolts */
	u_int latency_ns;	/* transition latency, in ns (v2 tables only) */
};
60
/* Per-instance driver state, attached to a CPU node in the device tree. */
struct cpufreq_dt_softc {
	device_t sc_dev;
	int sc_phandle;			/* CPU node phandle */
	struct clk *sc_clk;		/* CPU clock */
	struct fdtbus_regulator *sc_supply; /* optional "cpu-supply" regulator */

	struct cpufreq_dt_opp *sc_opp;	/* OPP table */
	ssize_t sc_nopp;		/* number of entries in sc_opp */

	u_int sc_freq_target;		/* requested frequency in MHz; 0 = unset */
	bool sc_freq_throttle;		/* true while throttled to lowest OPP */

	u_int sc_busy;			/* 0/1 flag serializing rate changes */

	char *sc_freq_available;	/* sysctl string of available MHz values */
	int sc_node_target;		/* sysctl node id: machdep.cpufreq.*.target */
	int sc_node_current;		/* sysctl node id: ...current */
	int sc_node_available;		/* sysctl node id: ...available */

	struct cpufreq_dt_table sc_table; /* entry in cpufreq_dt_tables list */
};
82
/*
 * Cross-call handler broadcast to every CPU after a frequency change.
 * Intended to refresh the per-CPU cycle-counter frequency, but the body
 * is currently compiled out ("#if notyet"), so this is a no-op.
 */
static void
cpufreq_dt_change_cb(void *arg1, void *arg2)
{
#if notyet
	struct cpu_info *ci = curcpu();
	ci->ci_data.cpu_cc_freq = cpufreq_get_rate() * 1000000;
#endif
}
91
/*
 * Program the CPU clock (and its supply regulator, if present) for the
 * OPP entry matching freq_khz.
 *
 * DVFS ordering: when raising the frequency the voltage is increased
 * before the clock is sped up; when lowering it the voltage is reduced
 * only after the clock has been slowed down.  Returns 0 on success or
 * an errno value (EINVAL if freq_khz matches no OPP entry).
 */
static int
cpufreq_dt_set_rate(struct cpufreq_dt_softc *sc, u_int freq_khz)
{
	struct cpufreq_dt_opp *opp = NULL;
	u_int old_rate, new_rate, old_uv, new_uv;
	uint64_t xc;
	int error;
	ssize_t n;

	/* Find the OPP entry for the requested frequency. */
	for (n = 0; n < sc->sc_nopp; n++)
		if (sc->sc_opp[n].freq_khz == freq_khz) {
			opp = &sc->sc_opp[n];
			break;
		}
	if (opp == NULL)
		return EINVAL;

	old_rate = clk_get_rate(sc->sc_clk);
	new_rate = freq_khz * 1000;	/* kHz -> Hz */
	new_uv = opp->voltage_uv;

	/* Already running at the target rate: nothing to do. */
	if (old_rate == new_rate)
		return 0;

	if (sc->sc_supply != NULL) {
		error = fdtbus_regulator_get_voltage(sc->sc_supply, &old_uv);
		if (error != 0)
			return error;

		/* Raise the voltage before increasing the clock rate. */
		if (new_uv > old_uv) {
			error = fdtbus_regulator_set_voltage(sc->sc_supply,
			    new_uv, new_uv);
			if (error != 0)
				return error;
		}
	}

	error = clk_set_rate(sc->sc_clk, new_rate);
	if (error != 0)
		return error;

	/* Wait out the OPP's transition latency (ns rounded up to us). */
	const u_int latency_us = howmany(opp->latency_ns, 1000);
	if (latency_us > 0)
		delay(latency_us);

	if (sc->sc_supply != NULL) {
		/* Lower the voltage only after the clock has been slowed. */
		if (new_uv < old_uv) {
			error = fdtbus_regulator_set_voltage(sc->sc_supply,
			    new_uv, new_uv);
			if (error != 0)
				return error;
		}
	}

	/* error is necessarily 0 here; notify all CPUs and PMF listeners. */
	if (error == 0) {
		xc = xc_broadcast(0, cpufreq_dt_change_cb, sc, NULL);
		xc_wait(xc);

		pmf_event_inject(NULL, PMFE_SPEED_CHANGED);
	}

	return 0;
}
155
156 static void
157 cpufreq_dt_throttle_enable(device_t dev)
158 {
159 struct cpufreq_dt_softc * const sc = device_private(dev);
160
161 if (sc->sc_freq_throttle)
162 return;
163
164 const u_int freq_khz = sc->sc_opp[sc->sc_nopp - 1].freq_khz;
165
166 while (atomic_cas_uint(&sc->sc_busy, 0, 1) != 0)
167 kpause("throttle", false, 1, NULL);
168
169 if (cpufreq_dt_set_rate(sc, freq_khz) == 0) {
170 aprint_debug_dev(sc->sc_dev, "throttle enabled (%u.%03u MHz)\n",
171 freq_khz / 1000, freq_khz % 1000);
172 sc->sc_freq_throttle = true;
173 if (sc->sc_freq_target == 0)
174 sc->sc_freq_target = clk_get_rate(sc->sc_clk) / 1000000;
175 }
176
177 atomic_dec_uint(&sc->sc_busy);
178 }
179
180 static void
181 cpufreq_dt_throttle_disable(device_t dev)
182 {
183 struct cpufreq_dt_softc * const sc = device_private(dev);
184
185 if (!sc->sc_freq_throttle)
186 return;
187
188 while (atomic_cas_uint(&sc->sc_busy, 0, 1) != 0)
189 kpause("throttle", false, 1, NULL);
190
191 const u_int freq_khz = sc->sc_freq_target * 1000;
192
193 if (cpufreq_dt_set_rate(sc, freq_khz) == 0) {
194 aprint_debug_dev(sc->sc_dev, "throttle disabled (%u.%03u MHz)\n",
195 freq_khz / 1000, freq_khz % 1000);
196 sc->sc_freq_throttle = false;
197 }
198
199 atomic_dec_uint(&sc->sc_busy);
200 }
201
/*
 * Sysctl handler shared by the "target" and "current" nodes.
 *
 * Reads report the target frequency (for "target") or the current
 * clock rate (for "current"), both in MHz.  Writes are only honoured
 * on the "target" node: the value must exactly match one of the OPP
 * frequencies, and the change is applied immediately unless the CPU is
 * currently throttled, in which case only the target is recorded (it
 * is applied when the throttle is lifted).
 */
static int
cpufreq_dt_sysctl_helper(SYSCTLFN_ARGS)
{
	struct cpufreq_dt_softc * const sc = rnode->sysctl_data;
	struct sysctlnode node;
	u_int fq, oldfq = 0;
	int error, n;

	node = *rnode;
	node.sysctl_data = &fq;

	if (rnode->sysctl_num == sc->sc_node_target) {
		/* Lazily initialize the target from the current rate. */
		if (sc->sc_freq_target == 0)
			sc->sc_freq_target = clk_get_rate(sc->sc_clk) / 1000000;
		fq = sc->sc_freq_target;
	} else
		fq = clk_get_rate(sc->sc_clk) / 1000000;

	/* Remember the pre-write value so no-op writes can be detected. */
	if (rnode->sysctl_num == sc->sc_node_target)
		oldfq = fq;

	if (sc->sc_freq_target == 0)
		sc->sc_freq_target = fq;

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	/* Only a changed write to the "target" node requires action. */
	if (fq == oldfq || rnode->sysctl_num != sc->sc_node_target)
		return 0;

	/* The requested frequency must match an OPP entry exactly. */
	for (n = 0; n < sc->sc_nopp; n++)
		if (sc->sc_opp[n].freq_khz / 1000 == fq)
			break;
	if (n == sc->sc_nopp)
		return EINVAL;

	/* Fail rather than sleep if another change is in flight. */
	if (atomic_cas_uint(&sc->sc_busy, 0, 1) != 0)
		return EBUSY;

	sc->sc_freq_target = fq;

	/* While throttled, just record the target for later. */
	if (sc->sc_freq_throttle)
		error = 0;
	else
		error = cpufreq_dt_set_rate(sc, fq * 1000);

	atomic_dec_uint(&sc->sc_busy);

	return error;
}
253
254 static struct cpu_info *
255 cpufreq_dt_cpu_lookup(cpuid_t mpidr)
256 {
257 CPU_INFO_ITERATOR cii;
258 struct cpu_info *ci;
259
260 for (CPU_INFO_FOREACH(cii, ci)) {
261 if (ci->ci_cpuid == mpidr)
262 return ci;
263 }
264
265 return NULL;
266 }
267
268 static void
269 cpufreq_dt_init_sysctl(struct cpufreq_dt_softc *sc)
270 {
271 const struct sysctlnode *node, *cpunode;
272 struct sysctllog *cpufreq_log = NULL;
273 struct cpu_info *ci;
274 bus_addr_t mpidr;
275 int error, i;
276
277 if (fdtbus_get_reg(sc->sc_phandle, 0, &mpidr, 0) != 0)
278 return;
279
280 ci = cpufreq_dt_cpu_lookup(mpidr);
281 if (ci == NULL)
282 return;
283
284 sc->sc_freq_available = kmem_zalloc(strlen("XXXX ") * sc->sc_nopp, KM_SLEEP);
285 for (i = 0; i < sc->sc_nopp; i++) {
286 char buf[6];
287 snprintf(buf, sizeof(buf), i ? " %u" : "%u", sc->sc_opp[i].freq_khz / 1000);
288 strcat(sc->sc_freq_available, buf);
289 }
290
291 error = sysctl_createv(&cpufreq_log, 0, NULL, &node,
292 CTLFLAG_PERMANENT, CTLTYPE_NODE, "machdep", NULL,
293 NULL, 0, NULL, 0, CTL_MACHDEP, CTL_EOL);
294 if (error)
295 goto sysctl_failed;
296 error = sysctl_createv(&cpufreq_log, 0, &node, &node,
297 0, CTLTYPE_NODE, "cpufreq", NULL,
298 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
299 if (error)
300 goto sysctl_failed;
301 error = sysctl_createv(&cpufreq_log, 0, &node, &cpunode,
302 0, CTLTYPE_NODE, cpu_name(ci), NULL,
303 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
304 if (error)
305 goto sysctl_failed;
306
307 error = sysctl_createv(&cpufreq_log, 0, &cpunode, &node,
308 CTLFLAG_READWRITE, CTLTYPE_INT, "target", NULL,
309 cpufreq_dt_sysctl_helper, 0, (void *)sc, 0,
310 CTL_CREATE, CTL_EOL);
311 if (error)
312 goto sysctl_failed;
313 sc->sc_node_target = node->sysctl_num;
314
315 error = sysctl_createv(&cpufreq_log, 0, &cpunode, &node,
316 CTLFLAG_READWRITE, CTLTYPE_INT, "current", NULL,
317 cpufreq_dt_sysctl_helper, 0, (void *)sc, 0,
318 CTL_CREATE, CTL_EOL);
319 if (error)
320 goto sysctl_failed;
321 sc->sc_node_current = node->sysctl_num;
322
323 error = sysctl_createv(&cpufreq_log, 0, &cpunode, &node,
324 0, CTLTYPE_STRING, "available", NULL,
325 NULL, 0, sc->sc_freq_available, 0,
326 CTL_CREATE, CTL_EOL);
327 if (error)
328 goto sysctl_failed;
329 sc->sc_node_available = node->sysctl_num;
330
331 return;
332
333 sysctl_failed:
334 aprint_error_dev(sc->sc_dev, "couldn't create sysctl nodes: %d\n", error);
335 sysctl_teardown(&cpufreq_log);
336 }
337
338 static int
339 cpufreq_dt_parse_opp(struct cpufreq_dt_softc *sc)
340 {
341 const int phandle = sc->sc_phandle;
342 const u_int *opp;
343 int len, i;
344
345 opp = fdtbus_get_prop(phandle, "operating-points", &len);
346 if (len < 8)
347 return ENXIO;
348
349 sc->sc_nopp = len / 8;
350 sc->sc_opp = kmem_zalloc(sizeof(*sc->sc_opp) * sc->sc_nopp, KM_SLEEP);
351 for (i = 0; i < sc->sc_nopp; i++, opp += 2) {
352 sc->sc_opp[i].freq_khz = be32toh(opp[0]);
353 sc->sc_opp[i].voltage_uv = be32toh(opp[1]);
354 }
355
356 return 0;
357 }
358
359 static const struct fdt_opp_info *
360 cpufreq_dt_lookup_opp_info(const int opp_table)
361 {
362 __link_set_decl(fdt_opps, struct fdt_opp_info);
363 struct fdt_opp_info * const *opp;
364 const struct fdt_opp_info *best_opp = NULL;
365 int match, best_match = 0;
366
367 __link_set_foreach(opp, fdt_opps) {
368 const char * const compat[] = { (*opp)->opp_compat, NULL };
369 match = of_match_compatible(opp_table, compat);
370 if (match > best_match) {
371 best_match = match;
372 best_opp = *opp;
373 }
374 }
375
376 return best_opp;
377 }
378
379 static bool
380 cpufreq_dt_node_supported(const struct fdt_opp_info *opp_info, const int opp_table, const int opp_node)
381 {
382 if (!fdtbus_status_okay(opp_node))
383 return false;
384 if (of_hasprop(opp_node, "opp-suspend"))
385 return false;
386
387 if (opp_info != NULL)
388 return opp_info->opp_supported(opp_table, opp_node);
389
390 return true;
391 }
392
393 static int
394 cpufreq_dt_parse_opp_v2(struct cpufreq_dt_softc *sc)
395 {
396 const int phandle = sc->sc_phandle;
397 struct cpufreq_dt_table *table;
398 const struct fdt_opp_info *opp_info;
399 const u_int *opp_uv;
400 uint64_t opp_hz;
401 int opp_node, len, i, index;
402
403 const int opp_table = fdtbus_get_phandle(phandle, "operating-points-v2");
404 if (opp_table < 0)
405 return ENOENT;
406
407 /* If the table is shared, only setup a single instance */
408 if (of_hasprop(opp_table, "opp-shared")) {
409 TAILQ_FOREACH(table, &cpufreq_dt_tables, next)
410 if (table->phandle == opp_table)
411 return EEXIST;
412 sc->sc_table.phandle = opp_table;
413 TAILQ_INSERT_TAIL(&cpufreq_dt_tables, &sc->sc_table, next);
414 }
415
416 opp_info = cpufreq_dt_lookup_opp_info(opp_table);
417
418 for (opp_node = OF_child(opp_table); opp_node; opp_node = OF_peer(opp_node)) {
419 if (!cpufreq_dt_node_supported(opp_info, opp_table, opp_node))
420 continue;
421 sc->sc_nopp++;
422 }
423
424 if (sc->sc_nopp == 0)
425 return EINVAL;
426
427 sc->sc_opp = kmem_zalloc(sizeof(*sc->sc_opp) * sc->sc_nopp, KM_SLEEP);
428 index = sc->sc_nopp - 1;
429 for (opp_node = OF_child(opp_table), i = 0; opp_node; opp_node = OF_peer(opp_node), i++) {
430 if (!cpufreq_dt_node_supported(opp_info, opp_table, opp_node))
431 continue;
432 if (of_getprop_uint64(opp_node, "opp-hz", &opp_hz) != 0)
433 return EINVAL;
434 opp_uv = fdtbus_get_prop(opp_node, "opp-microvolt", &len);
435 if (opp_uv == NULL || len < 1)
436 return EINVAL;
437 /* Table is in reverse order */
438 sc->sc_opp[index].freq_khz = (u_int)(opp_hz / 1000);
439 sc->sc_opp[index].voltage_uv = be32toh(opp_uv[0]);
440 of_getprop_uint32(opp_node, "clock-latency-ns", &sc->sc_opp[index].latency_ns);
441 --index;
442 }
443
444 return 0;
445 }
446
/*
 * Acquire the CPU clock and (optional) "cpu-supply" regulator, then
 * parse the OPP table from either the legacy "operating-points"
 * property or an "operating-points-v2" table.  Returns 0 on success or
 * an errno; EEXIST means another CPU already set up a shared v2 table.
 */
static int
cpufreq_dt_parse(struct cpufreq_dt_softc *sc)
{
	const int phandle = sc->sc_phandle;
	int error, i;

	if (of_hasprop(phandle, "cpu-supply")) {
		sc->sc_supply = fdtbus_regulator_acquire(phandle, "cpu-supply");
		if (sc->sc_supply == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "couldn't acquire cpu-supply\n");
			return ENXIO;
		}
	}
	/*
	 * NOTE(review): on the failure paths below the regulator acquired
	 * above is never released; harmless for an attach-once driver,
	 * but worth confirming against fdtbus_regulator_release().
	 */
	sc->sc_clk = fdtbus_clock_get_index(phandle, 0);
	if (sc->sc_clk == NULL) {
		aprint_error_dev(sc->sc_dev, "couldn't acquire clock\n");
		return ENXIO;
	}

	/* The lock protects the global shared-OPP-table list. */
	mutex_enter(&cpufreq_dt_tables_lock);
	if (of_hasprop(phandle, "operating-points"))
		error = cpufreq_dt_parse_opp(sc);
	else if (of_hasprop(phandle, "operating-points-v2"))
		error = cpufreq_dt_parse_opp_v2(sc);
	else
		error = EINVAL;
	mutex_exit(&cpufreq_dt_tables_lock);

	if (error) {
		/* EEXIST (shared table already set up) is not an error. */
		if (error != EEXIST)
			aprint_error_dev(sc->sc_dev,
			    "couldn't parse operating points: %d\n", error);
		return error;
	}

	for (i = 0; i < sc->sc_nopp; i++) {
		aprint_verbose_dev(sc->sc_dev, "%u.%03u MHz, %u uV\n",
		    sc->sc_opp[i].freq_khz / 1000,
		    sc->sc_opp[i].freq_khz % 1000,
		    sc->sc_opp[i].voltage_uv);
	}

	return 0;
}
492
493 static int
494 cpufreq_dt_match(device_t parent, cfdata_t cf, void *aux)
495 {
496 struct fdt_attach_args * const faa = aux;
497 const int phandle = faa->faa_phandle;
498 bus_addr_t addr;
499
500 if (fdtbus_get_reg(phandle, 0, &addr, NULL) != 0)
501 return 0;
502
503 if (!of_hasprop(phandle, "clocks"))
504 return 0;
505
506 if (!of_hasprop(phandle, "operating-points") &&
507 !of_hasprop(phandle, "operating-points-v2"))
508 return 0;
509
510 return 1;
511 }
512
513 static void
514 cpufreq_dt_init(device_t self)
515 {
516 struct cpufreq_dt_softc * const sc = device_private(self);
517 int error;
518
519 if ((error = cpufreq_dt_parse(sc)) != 0)
520 return;
521
522 pmf_event_register(sc->sc_dev, PMFE_THROTTLE_ENABLE, cpufreq_dt_throttle_enable, true);
523 pmf_event_register(sc->sc_dev, PMFE_THROTTLE_DISABLE, cpufreq_dt_throttle_disable, true);
524
525 cpufreq_dt_init_sysctl(sc);
526 }
527
/*
 * RUN_ONCE callback: initialize the mutex protecting the global list
 * of shared OPP tables.  Always returns 0 (success).
 */
static int
cpufreq_dt_lock_init(void)
{
	mutex_init(&cpufreq_dt_tables_lock, MUTEX_DEFAULT, IPL_NONE);
	return 0;
}
534
535 static void
536 cpufreq_dt_attach(device_t parent, device_t self, void *aux)
537 {
538 static ONCE_DECL(locks);
539 struct cpufreq_dt_softc * const sc = device_private(self);
540 struct fdt_attach_args * const faa = aux;
541
542 RUN_ONCE(&locks, cpufreq_dt_lock_init);
543
544 sc->sc_dev = self;
545 sc->sc_phandle = faa->faa_phandle;
546
547 aprint_naive("\n");
548 aprint_normal("\n");
549
550 config_interrupts(self, cpufreq_dt_init);
551 }
552
/* Glue the driver into autoconf(9); no detach or activate hooks. */
CFATTACH_DECL_NEW(cpufreq_dt, sizeof(struct cpufreq_dt_softc),
    cpufreq_dt_match, cpufreq_dt_attach, NULL, NULL);
555