/* $NetBSD: cpufreq_dt.c,v 1.3.4.1 2019/06/10 22:07:07 christos Exp $ */

/*-
 * Copyright (c) 2015-2017 Jared McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpufreq_dt.c,v 1.3.4.1 2019/06/10 22:07:07 christos Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/bus.h>
#include <sys/atomic.h>
#include <sys/xcall.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/once.h>

#include <dev/fdt/fdtvar.h>

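/*
 * Bookkeeping for shared OPP tables: each initialized
 * "operating-points-v2" table is recorded on a global list so that a
 * table shared between CPUs is only set up once.
 */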
struct cpufreq_dt_table {
	int			phandle;
	TAILQ_ENTRY(cpufreq_dt_table) next;
};

static TAILQ_HEAD(, cpufreq_dt_table) cpufreq_dt_tables =
    TAILQ_HEAD_INITIALIZER(cpufreq_dt_tables);
static kmutex_t cpufreq_dt_tables_lock;

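/*
 * One operating performance point (OPP): a CPU frequency in kHz, the
 * supply voltage needed to sustain it, and the clock transition latency
 * in nanoseconds.
 */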
struct cpufreq_dt_opp {
	u_int		freq_khz;
	u_int		voltage_uv;
	u_int		latency_ns;
};

struct cpufreq_dt_softc {
	device_t	sc_dev;
	int		sc_phandle;
	struct clk	*sc_clk;
	struct fdtbus_regulator *sc_supply;

	struct cpufreq_dt_opp *sc_opp;
	ssize_t		sc_nopp;

	u_int		sc_freq_target;
	bool		sc_freq_throttle;

	u_int		sc_busy;

	char		*sc_freq_available;
	int		sc_node_target;
	int		sc_node_current;
	int		sc_node_available;

	struct cpufreq_dt_table sc_table;
};

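/*
 * Broadcast handler run on each CPU after a frequency change. The update
 * of the per-CPU cycle counter frequency is compiled out ("notyet")
 * pending supporting infrastructure.
 */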
static void
cpufreq_dt_change_cb(void *arg1, void *arg2)
{
#if notyet
	struct cpu_info *ci = curcpu();
	ci->ci_data.cpu_cc_freq = cpufreq_get_rate() * 1000000;
#endif
}

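/*
 * Switch to the OPP whose frequency matches freq_khz. If a CPU supply
 * regulator is present, the voltage is raised before increasing the
 * clock rate and lowered only after decreasing it, so the CPU is never
 * clocked faster than its current voltage supports.
 */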
static int
cpufreq_dt_set_rate(struct cpufreq_dt_softc *sc, u_int freq_khz)
{
	struct cpufreq_dt_opp *opp = NULL;
	u_int old_rate, new_rate, old_uv, new_uv;
	uint64_t xc;
	int error;
	ssize_t n;

	for (n = 0; n < sc->sc_nopp; n++)
		if (sc->sc_opp[n].freq_khz == freq_khz) {
			opp = &sc->sc_opp[n];
			break;
		}
	if (opp == NULL)
		return EINVAL;

	old_rate = clk_get_rate(sc->sc_clk);
	new_rate = freq_khz * 1000;
	new_uv = opp->voltage_uv;

	if (old_rate == new_rate)
		return 0;

	if (sc->sc_supply != NULL) {
		error = fdtbus_regulator_get_voltage(sc->sc_supply, &old_uv);
		if (error != 0)
			return error;

		/* Raise the voltage first when moving to a faster OPP. */
		if (new_uv > old_uv) {
			error = fdtbus_regulator_set_voltage(sc->sc_supply,
			    new_uv, new_uv);
			if (error != 0)
				return error;
		}
	}

	error = clk_set_rate(sc->sc_clk, new_rate);
	if (error != 0)
		return error;

	/* Let the clock settle before touching the voltage again. */
	const u_int latency_us = howmany(opp->latency_ns, 1000);
	if (latency_us > 0)
		delay(latency_us);

	if (sc->sc_supply != NULL) {
		/* Lower the voltage last when moving to a slower OPP. */
		if (new_uv < old_uv) {
			error = fdtbus_regulator_set_voltage(sc->sc_supply,
			    new_uv, new_uv);
			if (error != 0)
				return error;
		}
	}

	if (error == 0) {
		xc = xc_broadcast(0, cpufreq_dt_change_cb, sc, NULL);
		xc_wait(xc);

		pmf_event_inject(NULL, PMFE_SPEED_CHANGED);
	}

	return 0;
}

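/*
 * PMFE_THROTTLE_ENABLE handler: switch to the last (slowest) entry of the
 * OPP table, spinning on sc_busy to serialize against concurrent sysctl
 * writes. A previously chosen target frequency is left in sc_freq_target
 * so the throttle can be undone later.
 */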
static void
cpufreq_dt_throttle_enable(device_t dev)
{
	struct cpufreq_dt_softc * const sc = device_private(dev);

	if (sc->sc_freq_throttle)
		return;

	const u_int freq_khz = sc->sc_opp[sc->sc_nopp - 1].freq_khz;

	while (atomic_cas_uint(&sc->sc_busy, 0, 1) != 0)
		kpause("throttle", false, 1, NULL);

	if (cpufreq_dt_set_rate(sc, freq_khz) == 0) {
		aprint_debug_dev(sc->sc_dev, "throttle enabled (%u.%03u MHz)\n",
		    freq_khz / 1000, freq_khz % 1000);
		sc->sc_freq_throttle = true;
		if (sc->sc_freq_target == 0)
			sc->sc_freq_target = clk_get_rate(sc->sc_clk) / 1000000;
	}

	atomic_dec_uint(&sc->sc_busy);
}

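/*
 * PMFE_THROTTLE_DISABLE handler: restore the frequency recorded in
 * sc_freq_target.
 */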
static void
cpufreq_dt_throttle_disable(device_t dev)
{
	struct cpufreq_dt_softc * const sc = device_private(dev);

	if (!sc->sc_freq_throttle)
		return;

	while (atomic_cas_uint(&sc->sc_busy, 0, 1) != 0)
		kpause("throttle", false, 1, NULL);

	const u_int freq_khz = sc->sc_freq_target * 1000;

	if (cpufreq_dt_set_rate(sc, freq_khz) == 0) {
		aprint_debug_dev(sc->sc_dev, "throttle disabled (%u.%03u MHz)\n",
		    freq_khz / 1000, freq_khz % 1000);
		sc->sc_freq_throttle = false;
	}

	atomic_dec_uint(&sc->sc_busy);
}

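/*
 * Handler for the machdep.<cpu>.frequency.{target,current} sysctl nodes,
 * in MHz. Writes are only honoured on "target": the value must match an
 * OPP table entry, and if a throttle is active the new target is
 * recorded but not applied until the throttle is lifted.
 */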
static int
cpufreq_dt_sysctl_helper(SYSCTLFN_ARGS)
{
	struct cpufreq_dt_softc * const sc = rnode->sysctl_data;
	struct sysctlnode node;
	u_int fq, oldfq = 0;
	int error, n;

	node = *rnode;
	node.sysctl_data = &fq;

	if (rnode->sysctl_num == sc->sc_node_target) {
		if (sc->sc_freq_target == 0)
			sc->sc_freq_target = clk_get_rate(sc->sc_clk) / 1000000;
		fq = sc->sc_freq_target;
	} else
		fq = clk_get_rate(sc->sc_clk) / 1000000;

	if (rnode->sysctl_num == sc->sc_node_target)
		oldfq = fq;

	if (sc->sc_freq_target == 0)
		sc->sc_freq_target = fq;

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (fq == oldfq || rnode->sysctl_num != sc->sc_node_target)
		return 0;

	for (n = 0; n < sc->sc_nopp; n++)
		if (sc->sc_opp[n].freq_khz / 1000 == fq)
			break;
	if (n == sc->sc_nopp)
		return EINVAL;

	if (atomic_cas_uint(&sc->sc_busy, 0, 1) != 0)
		return EBUSY;

	sc->sc_freq_target = fq;

	if (sc->sc_freq_throttle)
		error = 0;
	else
		error = cpufreq_dt_set_rate(sc, fq * 1000);

	atomic_dec_uint(&sc->sc_busy);

	return error;
}

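/*
 * Create the machdep.<cpu>.frequency sysctl subtree: read/write "target"
 * and "current" nodes backed by cpufreq_dt_sysctl_helper(), and a
 * read-only "available" string listing the supported frequencies in MHz.
 */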
static void
cpufreq_dt_init_sysctl(struct cpufreq_dt_softc *sc)
{
	const struct sysctlnode *node, *cpunode, *freqnode;
	struct sysctllog *cpufreq_log = NULL;
	const char *cpunodename;
	int error, i;

	/* Five bytes per OPP: up to four MHz digits plus a space or NUL. */
	sc->sc_freq_available = kmem_zalloc(strlen("XXXX ") * sc->sc_nopp, KM_SLEEP);
	for (i = 0; i < sc->sc_nopp; i++) {
		char buf[6];
		snprintf(buf, sizeof(buf), i ? " %u" : "%u", sc->sc_opp[i].freq_khz / 1000);
		strcat(sc->sc_freq_available, buf);
	}

	if (device_unit(sc->sc_dev) == 0)
		cpunodename = "cpu";
	else
		cpunodename = device_xname(sc->sc_dev);

	error = sysctl_createv(&cpufreq_log, 0, NULL, &node,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "machdep", NULL,
	    NULL, 0, NULL, 0, CTL_MACHDEP, CTL_EOL);
	if (error)
		goto sysctl_failed;
	error = sysctl_createv(&cpufreq_log, 0, &node, &cpunode,
	    0, CTLTYPE_NODE, cpunodename, NULL,
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto sysctl_failed;
	error = sysctl_createv(&cpufreq_log, 0, &cpunode, &freqnode,
	    0, CTLTYPE_NODE, "frequency", NULL,
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto sysctl_failed;

	error = sysctl_createv(&cpufreq_log, 0, &freqnode, &node,
	    CTLFLAG_READWRITE, CTLTYPE_INT, "target", NULL,
	    cpufreq_dt_sysctl_helper, 0, (void *)sc, 0,
	    CTL_CREATE, CTL_EOL);
	if (error)
		goto sysctl_failed;
	sc->sc_node_target = node->sysctl_num;

	error = sysctl_createv(&cpufreq_log, 0, &freqnode, &node,
	    CTLFLAG_READWRITE, CTLTYPE_INT, "current", NULL,
	    cpufreq_dt_sysctl_helper, 0, (void *)sc, 0,
	    CTL_CREATE, CTL_EOL);
	if (error)
		goto sysctl_failed;
	sc->sc_node_current = node->sysctl_num;

	error = sysctl_createv(&cpufreq_log, 0, &freqnode, &node,
	    0, CTLTYPE_STRING, "available", NULL,
	    NULL, 0, sc->sc_freq_available, 0,
	    CTL_CREATE, CTL_EOL);
	if (error)
		goto sysctl_failed;
	sc->sc_node_available = node->sysctl_num;

	return;

sysctl_failed:
	aprint_error_dev(sc->sc_dev, "couldn't create sysctl nodes: %d\n", error);
	sysctl_teardown(&cpufreq_log);
}

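/*
 * Parse the legacy "operating-points" binding: a flat array of
 * <frequency-kHz voltage-uV> u32 pairs, 8 bytes per entry. An
 * illustrative fragment (values are made up, not from any real board):
 *
 *	cpu@0 {
 *		operating-points = <
 *			1008000 1300000		// 1008 MHz at 1.30 V
 *			 816000 1100000		//  816 MHz at 1.10 V
 *		>;
 *	};
 */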
static int
cpufreq_dt_parse_opp(struct cpufreq_dt_softc *sc)
{
	const int phandle = sc->sc_phandle;
	const u_int *opp;
	int len, i;

	opp = fdtbus_get_prop(phandle, "operating-points", &len);
	if (len < 8)
		return ENXIO;

	sc->sc_nopp = len / 8;
	sc->sc_opp = kmem_zalloc(sizeof(*sc->sc_opp) * sc->sc_nopp, KM_SLEEP);
	for (i = 0; i < sc->sc_nopp; i++, opp += 2) {
		sc->sc_opp[i].freq_khz = be32toh(opp[0]);
		sc->sc_opp[i].voltage_uv = be32toh(opp[1]);
	}

	return 0;
}

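/*
 * Parse the "operating-points-v2" binding, where the cpu node references
 * a separate OPP table node and each child node describes one OPP. With
 * "opp-shared" the table applies to all CPUs, so it is registered on the
 * global list and initialized only once. The binding lists OPPs in
 * ascending frequency order; the driver stores them descending. An
 * illustrative fragment (values are made up):
 *
 *	opp_table: opp-table {
 *		compatible = "operating-points-v2";
 *		opp-shared;
 *
 *		opp-816000000 {
 *			opp-hz = /bits/ 64 <816000000>;
 *			opp-microvolt = <1100000>;
 *			clock-latency-ns = <40000>;
 *		};
 *	};
 */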
static int
cpufreq_dt_parse_opp_v2(struct cpufreq_dt_softc *sc)
{
	const int phandle = sc->sc_phandle;
	struct cpufreq_dt_table *table;
	const u_int *opp_uv;
	uint64_t opp_hz;
	int opp_node, len, i;

	const int opp_table = fdtbus_get_phandle(phandle, "operating-points-v2");
	if (opp_table < 0)
		return ENOENT;

	/* If the table is shared, only set up a single instance. */
	if (of_hasprop(opp_table, "opp-shared")) {
		TAILQ_FOREACH(table, &cpufreq_dt_tables, next)
			if (table->phandle == opp_table)
				return EEXIST;
		sc->sc_table.phandle = opp_table;
		TAILQ_INSERT_TAIL(&cpufreq_dt_tables, &sc->sc_table, next);
	}

	for (opp_node = OF_child(opp_table); opp_node; opp_node = OF_peer(opp_node)) {
		if (fdtbus_status_okay(opp_node))
			sc->sc_nopp++;
	}

	if (sc->sc_nopp == 0)
		return EINVAL;

	sc->sc_opp = kmem_zalloc(sizeof(*sc->sc_opp) * sc->sc_nopp, KM_SLEEP);
	for (opp_node = OF_child(opp_table), i = 0; opp_node; opp_node = OF_peer(opp_node), i++) {
		if (!fdtbus_status_okay(opp_node))
			continue;
		if (of_getprop_uint64(opp_node, "opp-hz", &opp_hz) != 0)
			return EINVAL;
		opp_uv = fdtbus_get_prop(opp_node, "opp-microvolt", &len);
		if (opp_uv == NULL || len < 1)
			return EINVAL;
		/* The binding lists OPPs ascending; store them descending. */
		const int index = sc->sc_nopp - i - 1;
		sc->sc_opp[index].freq_khz = (u_int)(opp_hz / 1000);
		sc->sc_opp[index].voltage_uv = be32toh(opp_uv[0]);
		of_getprop_uint32(opp_node, "clock-latency-ns", &sc->sc_opp[index].latency_ns);
	}

	return 0;
}

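/*
 * Acquire the optional "cpu-supply" regulator and the CPU clock, then
 * parse whichever OPP binding the node provides; the legacy
 * "operating-points" property takes precedence when both are present.
 * EEXIST from an already-initialized shared table is returned silently.
 */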
static int
cpufreq_dt_parse(struct cpufreq_dt_softc *sc)
{
	const int phandle = sc->sc_phandle;
	int error, i;

	if (of_hasprop(phandle, "cpu-supply")) {
		sc->sc_supply = fdtbus_regulator_acquire(phandle, "cpu-supply");
		if (sc->sc_supply == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "couldn't acquire cpu-supply\n");
			return ENXIO;
		}
	}
	sc->sc_clk = fdtbus_clock_get_index(phandle, 0);
	if (sc->sc_clk == NULL) {
		aprint_error_dev(sc->sc_dev, "couldn't acquire clock\n");
		return ENXIO;
	}

	mutex_enter(&cpufreq_dt_tables_lock);
	if (of_hasprop(phandle, "operating-points"))
		error = cpufreq_dt_parse_opp(sc);
	else if (of_hasprop(phandle, "operating-points-v2"))
		error = cpufreq_dt_parse_opp_v2(sc);
	else
		error = EINVAL;
	mutex_exit(&cpufreq_dt_tables_lock);

	if (error) {
		if (error != EEXIST)
			aprint_error_dev(sc->sc_dev,
			    "couldn't parse operating points: %d\n", error);
		return error;
	}

	for (i = 0; i < sc->sc_nopp; i++) {
		aprint_verbose_dev(sc->sc_dev, "%u.%03u MHz, %u uV\n",
		    sc->sc_opp[i].freq_khz / 1000,
		    sc->sc_opp[i].freq_khz % 1000,
		    sc->sc_opp[i].voltage_uv);
	}

	return 0;
}

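/*
 * Match any node that has a "reg" property, a clock, and one of the two
 * supported OPP bindings.
 */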
static int
cpufreq_dt_match(device_t parent, cfdata_t cf, void *aux)
{
	struct fdt_attach_args * const faa = aux;
	const int phandle = faa->faa_phandle;
	bus_addr_t addr;

	if (fdtbus_get_reg(phandle, 0, &addr, NULL) != 0)
		return 0;

	if (!of_hasprop(phandle, "clocks"))
		return 0;

	if (!of_hasprop(phandle, "operating-points") &&
	    !of_hasprop(phandle, "operating-points-v2"))
		return 0;

	return 1;
}

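/*
 * Deferred initialization, run from config_interrupts(9): parse the OPP
 * table, register the PMF throttle event handlers, and publish the
 * sysctl nodes.
 */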
static void
cpufreq_dt_init(device_t self)
{
	struct cpufreq_dt_softc * const sc = device_private(self);
	int error;

	if ((error = cpufreq_dt_parse(sc)) != 0)
		return;

	pmf_event_register(sc->sc_dev, PMFE_THROTTLE_ENABLE, cpufreq_dt_throttle_enable, true);
	pmf_event_register(sc->sc_dev, PMFE_THROTTLE_DISABLE, cpufreq_dt_throttle_disable, true);

	cpufreq_dt_init_sysctl(sc);
}

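/* One-time initialization of the global OPP table list lock. */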
static int
cpufreq_dt_lock_init(void)
{
	mutex_init(&cpufreq_dt_tables_lock, MUTEX_DEFAULT, IPL_NONE);
	return 0;
}

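/*
 * Attach: record the device and phandle, then defer the real setup to
 * config_interrupts(9), by which point the clock and regulator providers
 * this driver depends on should have attached.
 */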
static void
cpufreq_dt_attach(device_t parent, device_t self, void *aux)
{
	static ONCE_DECL(locks);
	struct cpufreq_dt_softc * const sc = device_private(self);
	struct fdt_attach_args * const faa = aux;

	RUN_ONCE(&locks, cpufreq_dt_lock_init);

	sc->sc_dev = self;
	sc->sc_phandle = faa->faa_phandle;

	aprint_naive("\n");
	aprint_normal("\n");

	config_interrupts(self, cpufreq_dt_init);
}

CFATTACH_DECL_NEW(cpufreq_dt, sizeof(struct cpufreq_dt_softc),
    cpufreq_dt_match, cpufreq_dt_attach, NULL, NULL);