/* $NetBSD: cpufreq_dt.c,v 1.5 2018/09/01 23:41:16 jmcneill Exp $ */

/*-
 * Copyright (c) 2015-2017 Jared McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpufreq_dt.c,v 1.5 2018/09/01 23:41:16 jmcneill Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/bus.h>
#include <sys/atomic.h>
#include <sys/xcall.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/once.h>

#include <dev/fdt/fdtvar.h>

struct cpufreq_dt_table {
	int			phandle;
	TAILQ_ENTRY(cpufreq_dt_table) next;
};

static TAILQ_HEAD(, cpufreq_dt_table) cpufreq_dt_tables =
    TAILQ_HEAD_INITIALIZER(cpufreq_dt_tables);
static kmutex_t cpufreq_dt_tables_lock;

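/*
 * A single operating performance point (OPP): the CPU frequency in kHz,
 * the supply voltage in microvolts required to run at that frequency,
 * and the clock transition latency in nanoseconds (0 if the device tree
 * does not specify one).
 */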
struct cpufreq_dt_opp {
	u_int			freq_khz;
	u_int			voltage_uv;
	u_int			latency_ns;
};

struct cpufreq_dt_softc {
	device_t		sc_dev;
	int			sc_phandle;
	struct clk		*sc_clk;
	struct fdtbus_regulator	*sc_supply;

	struct cpufreq_dt_opp	*sc_opp;
	ssize_t			sc_nopp;

	u_int			sc_freq_target;
	bool			sc_freq_throttle;

	u_int			sc_busy;

	char			*sc_freq_available;
	int			sc_node_target;
	int			sc_node_current;
	int			sc_node_available;

	struct cpufreq_dt_table	sc_table;
};

static void
cpufreq_dt_change_cb(void *arg1, void *arg2)
{
#if notyet
	struct cpu_info *ci = curcpu();
	ci->ci_data.cpu_cc_freq = cpufreq_get_rate() * 1000000;
#endif
}

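/*
 * Switch the CPU clock to the OPP matching freq_khz, following the
 * usual DVFS ordering: when raising the frequency, raise the supply
 * voltage first; when lowering it, drop the voltage only after the
 * clock has been slowed down.
 */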
static int
cpufreq_dt_set_rate(struct cpufreq_dt_softc *sc, u_int freq_khz)
{
	struct cpufreq_dt_opp *opp = NULL;
	u_int old_rate, new_rate, old_uv, new_uv;
	uint64_t xc;
	int error;
	ssize_t n;

	for (n = 0; n < sc->sc_nopp; n++)
		if (sc->sc_opp[n].freq_khz == freq_khz) {
			opp = &sc->sc_opp[n];
			break;
		}
	if (opp == NULL)
		return EINVAL;

	old_rate = clk_get_rate(sc->sc_clk);
	new_rate = freq_khz * 1000;
	new_uv = opp->voltage_uv;

	if (old_rate == new_rate)
		return 0;

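	/*
	 * Read the current voltage and, if the new OPP needs more,
	 * raise it before touching the clock.
	 */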
	if (sc->sc_supply != NULL) {
		error = fdtbus_regulator_get_voltage(sc->sc_supply, &old_uv);
		if (error != 0)
			return error;

		if (new_uv > old_uv) {
			error = fdtbus_regulator_set_voltage(sc->sc_supply,
			    new_uv, new_uv);
			if (error != 0)
				return error;
		}
	}

	error = clk_set_rate(sc->sc_clk, new_rate);
	if (error != 0)
		return error;

	const u_int latency_us = howmany(opp->latency_ns, 1000);
	if (latency_us > 0)
		delay(latency_us);

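	/* The clock is now slower, so it is safe to lower the voltage. */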
	if (sc->sc_supply != NULL) {
		if (new_uv < old_uv) {
			error = fdtbus_regulator_set_voltage(sc->sc_supply,
			    new_uv, new_uv);
			if (error != 0)
				return error;
		}
	}

	if (error == 0) {
		xc = xc_broadcast(0, cpufreq_dt_change_cb, sc, NULL);
		xc_wait(xc);

		pmf_event_inject(NULL, PMFE_SPEED_CHANGED);
	}

	return 0;
}

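/*
 * PMFE_THROTTLE_ENABLE handler: drop to the last (slowest) OPP.  This
 * assumes the table lists operating points from fastest to slowest, as
 * the legacy "operating-points" tables conventionally do.  sc_busy acts
 * as a simple spin-with-kpause gate against concurrent rate changes.
 */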
static void
cpufreq_dt_throttle_enable(device_t dev)
{
	struct cpufreq_dt_softc * const sc = device_private(dev);

	if (sc->sc_freq_throttle)
		return;

	const u_int freq_khz = sc->sc_opp[sc->sc_nopp - 1].freq_khz;

	while (atomic_cas_uint(&sc->sc_busy, 0, 1) != 0)
		kpause("throttle", false, 1, NULL);

	if (cpufreq_dt_set_rate(sc, freq_khz) == 0) {
		aprint_debug_dev(sc->sc_dev, "throttle enabled (%u.%03u MHz)\n",
		    freq_khz / 1000, freq_khz % 1000);
		sc->sc_freq_throttle = true;
		if (sc->sc_freq_target == 0)
			sc->sc_freq_target = clk_get_rate(sc->sc_clk) / 1000000;
	}

	atomic_dec_uint(&sc->sc_busy);
}

static void
cpufreq_dt_throttle_disable(device_t dev)
{
	struct cpufreq_dt_softc * const sc = device_private(dev);

	if (!sc->sc_freq_throttle)
		return;

	while (atomic_cas_uint(&sc->sc_busy, 0, 1) != 0)
		kpause("throttle", false, 1, NULL);

	const u_int freq_khz = sc->sc_freq_target * 1000;

	if (cpufreq_dt_set_rate(sc, freq_khz) == 0) {
		aprint_debug_dev(sc->sc_dev, "throttle disabled (%u.%03u MHz)\n",
		    freq_khz / 1000, freq_khz % 1000);
		sc->sc_freq_throttle = false;
	}

	atomic_dec_uint(&sc->sc_busy);
}

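/*
 * Backing routine for the frequency "target" and "current" sysctl
 * nodes.  Writes to "target" select a new OPP by its MHz value; writes
 * to "current" are accepted by sysctl_lookup() but have no effect.
 */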
static int
cpufreq_dt_sysctl_helper(SYSCTLFN_ARGS)
{
	struct cpufreq_dt_softc * const sc = rnode->sysctl_data;
	struct sysctlnode node;
	u_int fq, oldfq = 0;
	int error, n;

	node = *rnode;
	node.sysctl_data = &fq;

	if (rnode->sysctl_num == sc->sc_node_target) {
		if (sc->sc_freq_target == 0)
			sc->sc_freq_target = clk_get_rate(sc->sc_clk) / 1000000;
		fq = sc->sc_freq_target;
	} else
		fq = clk_get_rate(sc->sc_clk) / 1000000;

	if (rnode->sysctl_num == sc->sc_node_target)
		oldfq = fq;

	if (sc->sc_freq_target == 0)
		sc->sc_freq_target = fq;

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (fq == oldfq || rnode->sysctl_num != sc->sc_node_target)
		return 0;

	for (n = 0; n < sc->sc_nopp; n++)
		if (sc->sc_opp[n].freq_khz / 1000 == fq)
			break;
	if (n == sc->sc_nopp)
		return EINVAL;

	if (atomic_cas_uint(&sc->sc_busy, 0, 1) != 0)
		return EBUSY;

	sc->sc_freq_target = fq;

	if (sc->sc_freq_throttle)
		error = 0;
	else
		error = cpufreq_dt_set_rate(sc, fq * 1000);

	atomic_dec_uint(&sc->sc_busy);

	return error;
}

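/*
 * Count the attached instances of this driver (device name
 * "cpufreqdt") so the sysctl subtree can be called "machdep.cpu" when
 * there is a single instance, and be named after the device otherwise.
 */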
static int
cpufreq_dt_instance_count(void)
{
	device_t dev;
	deviter_t di;
	int count = 0;

	deviter_init(&di, 0);
	while ((dev = deviter_next(&di)) != NULL) {
		/* Count only instances of this driver, not every device. */
		if (device_is_a(dev, "cpufreqdt"))
			count++;
	}
	deviter_release(&di);

	return count;
}

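/*
 * Create the machdep.<name>.frequency.{target,current,available}
 * sysctl nodes, where <name> is "cpu" or the device name (see above).
 * "available" is a space-separated list of the supported frequencies
 * in MHz.
 */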
static void
cpufreq_dt_init_sysctl(struct cpufreq_dt_softc *sc)
{
	const struct sysctlnode *node, *cpunode, *freqnode;
	struct sysctllog *cpufreq_log = NULL;
	const char *cpunodename;
	int error, i;

	sc->sc_freq_available = kmem_zalloc(strlen("XXXX ") * sc->sc_nopp, KM_SLEEP);
	for (i = 0; i < sc->sc_nopp; i++) {
		char buf[6];
		snprintf(buf, sizeof(buf), i ? " %u" : "%u", sc->sc_opp[i].freq_khz / 1000);
		strcat(sc->sc_freq_available, buf);
	}

	if (cpufreq_dt_instance_count() > 1)
		cpunodename = device_xname(sc->sc_dev);
	else
		cpunodename = "cpu";

	error = sysctl_createv(&cpufreq_log, 0, NULL, &node,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "machdep", NULL,
	    NULL, 0, NULL, 0, CTL_MACHDEP, CTL_EOL);
	if (error)
		goto sysctl_failed;
	error = sysctl_createv(&cpufreq_log, 0, &node, &cpunode,
	    0, CTLTYPE_NODE, cpunodename, NULL,
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto sysctl_failed;
	error = sysctl_createv(&cpufreq_log, 0, &cpunode, &freqnode,
	    0, CTLTYPE_NODE, "frequency", NULL,
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto sysctl_failed;

	error = sysctl_createv(&cpufreq_log, 0, &freqnode, &node,
	    CTLFLAG_READWRITE, CTLTYPE_INT, "target", NULL,
	    cpufreq_dt_sysctl_helper, 0, (void *)sc, 0,
	    CTL_CREATE, CTL_EOL);
	if (error)
		goto sysctl_failed;
	sc->sc_node_target = node->sysctl_num;

	error = sysctl_createv(&cpufreq_log, 0, &freqnode, &node,
	    CTLFLAG_READWRITE, CTLTYPE_INT, "current", NULL,
	    cpufreq_dt_sysctl_helper, 0, (void *)sc, 0,
	    CTL_CREATE, CTL_EOL);
	if (error)
		goto sysctl_failed;
	sc->sc_node_current = node->sysctl_num;

	error = sysctl_createv(&cpufreq_log, 0, &freqnode, &node,
	    0, CTLTYPE_STRING, "available", NULL,
	    NULL, 0, sc->sc_freq_available, 0,
	    CTL_CREATE, CTL_EOL);
	if (error)
		goto sysctl_failed;
	sc->sc_node_available = node->sysctl_num;

	return;

sysctl_failed:
	aprint_error_dev(sc->sc_dev, "couldn't create sysctl nodes: %d\n", error);
	sysctl_teardown(&cpufreq_log);
}

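/*
 * Parse the legacy "operating-points" property: a flat array of
 * <frequency-kHz voltage-uV> cell pairs, 8 bytes per entry.
 */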
static int
cpufreq_dt_parse_opp(struct cpufreq_dt_softc *sc)
{
	const int phandle = sc->sc_phandle;
	const u_int *opp;
	int len, i;

	opp = fdtbus_get_prop(phandle, "operating-points", &len);
	if (opp == NULL || len < 8)
		return ENXIO;

	sc->sc_nopp = len / 8;
	sc->sc_opp = kmem_zalloc(sizeof(*sc->sc_opp) * sc->sc_nopp, KM_SLEEP);
	for (i = 0; i < sc->sc_nopp; i++, opp += 2) {
		sc->sc_opp[i].freq_khz = be32toh(opp[0]);
		sc->sc_opp[i].voltage_uv = be32toh(opp[1]);
	}

	return 0;
}

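/*
 * Parse an "operating-points-v2" table.  Each enabled child node
 * carries "opp-hz" (64-bit), "opp-microvolt", and optionally
 * "clock-latency-ns".  A table marked "opp-shared" is common to all
 * CPUs in the cluster, so only the first caller instantiates it;
 * subsequent callers get EEXIST.
 */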
static int
cpufreq_dt_parse_opp_v2(struct cpufreq_dt_softc *sc)
{
	const int phandle = sc->sc_phandle;
	struct cpufreq_dt_table *table;
	uint64_t opp_hz;
	uint32_t opp_uv;
	int opp_node, i;

	const int opp_table = fdtbus_get_phandle(phandle, "operating-points-v2");
	if (opp_table < 0)
		return ENOENT;

	/* If the table is shared, only setup a single instance */
	if (of_hasprop(opp_table, "opp-shared")) {
		TAILQ_FOREACH(table, &cpufreq_dt_tables, next)
			if (table->phandle == opp_table)
				return EEXIST;
		sc->sc_table.phandle = opp_table;
		TAILQ_INSERT_TAIL(&cpufreq_dt_tables, &sc->sc_table, next);
	}

	for (opp_node = OF_child(opp_table); opp_node; opp_node = OF_peer(opp_node)) {
		if (fdtbus_status_okay(opp_node))
			sc->sc_nopp++;
	}

	if (sc->sc_nopp == 0)
		return EINVAL;

	sc->sc_opp = kmem_zalloc(sizeof(*sc->sc_opp) * sc->sc_nopp, KM_SLEEP);
	for (opp_node = OF_child(opp_table), i = 0; opp_node; opp_node = OF_peer(opp_node)) {
		if (!fdtbus_status_okay(opp_node))
			continue;
		if (of_getprop_uint64(opp_node, "opp-hz", &opp_hz) != 0)
			return EINVAL;
		if (of_getprop_uint32(opp_node, "opp-microvolt", &opp_uv) != 0)
			return EINVAL;
		sc->sc_opp[i].freq_khz = (u_int)(opp_hz / 1000);
		sc->sc_opp[i].voltage_uv = opp_uv;
		of_getprop_uint32(opp_node, "clock-latency-ns", &sc->sc_opp[i].latency_ns);
		/*
		 * Only advance the index for nodes actually recorded;
		 * sc_opp has entries for the enabled nodes alone.
		 */
		i++;
	}

	return 0;
}

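/*
 * Acquire the CPU clock and the optional "cpu-supply" regulator, then
 * load the OPP table from whichever binding the node provides.
 */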
static int
cpufreq_dt_parse(struct cpufreq_dt_softc *sc)
{
	const int phandle = sc->sc_phandle;
	int error, i;

	if (of_hasprop(phandle, "cpu-supply")) {
		sc->sc_supply = fdtbus_regulator_acquire(phandle, "cpu-supply");
		if (sc->sc_supply == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "couldn't acquire cpu-supply\n");
			return ENXIO;
		}
	}
	sc->sc_clk = fdtbus_clock_get_index(phandle, 0);
	if (sc->sc_clk == NULL) {
		aprint_error_dev(sc->sc_dev, "couldn't acquire clock\n");
		return ENXIO;
	}

	mutex_enter(&cpufreq_dt_tables_lock);
	if (of_hasprop(phandle, "operating-points"))
		error = cpufreq_dt_parse_opp(sc);
	else if (of_hasprop(phandle, "operating-points-v2"))
		error = cpufreq_dt_parse_opp_v2(sc);
	else
		error = EINVAL;
	mutex_exit(&cpufreq_dt_tables_lock);

	if (error) {
		if (error != EEXIST)
			aprint_error_dev(sc->sc_dev,
			    "couldn't parse operating points: %d\n", error);
		return error;
	}

	for (i = 0; i < sc->sc_nopp; i++) {
		aprint_verbose_dev(sc->sc_dev, "%u.%03u MHz, %u uV\n",
		    sc->sc_opp[i].freq_khz / 1000,
		    sc->sc_opp[i].freq_khz % 1000,
		    sc->sc_opp[i].voltage_uv);
	}

	return 0;
}

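/*
 * Match any node with a "reg" property, a clock, and one of the two
 * OPP bindings; in practice this attaches to CPU nodes.
 */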
static int
cpufreq_dt_match(device_t parent, cfdata_t cf, void *aux)
{
	struct fdt_attach_args * const faa = aux;
	const int phandle = faa->faa_phandle;
	bus_addr_t addr;

	if (fdtbus_get_reg(phandle, 0, &addr, NULL) != 0)
		return 0;

	if (!of_hasprop(phandle, "clocks"))
		return 0;

	if (!of_hasprop(phandle, "operating-points") &&
	    !of_hasprop(phandle, "operating-points-v2"))
		return 0;

	return 1;
}

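/*
 * Deferred initialization, run from config_interrupts(9): by this
 * point the clock and regulator providers the OPPs depend on have had
 * a chance to attach.
 */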
static void
cpufreq_dt_init(device_t self)
{
	struct cpufreq_dt_softc * const sc = device_private(self);
	int error;

	if ((error = cpufreq_dt_parse(sc)) != 0)
		return;

	pmf_event_register(sc->sc_dev, PMFE_THROTTLE_ENABLE,
	    cpufreq_dt_throttle_enable, true);
	pmf_event_register(sc->sc_dev, PMFE_THROTTLE_DISABLE,
	    cpufreq_dt_throttle_disable, true);

	cpufreq_dt_init_sysctl(sc);
}

static int
cpufreq_dt_lock_init(void)
{
	mutex_init(&cpufreq_dt_tables_lock, MUTEX_DEFAULT, IPL_NONE);
	return 0;
}

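/*
 * The shared table lock must exist before any instance parses an OPP
 * table; RUN_ONCE(9) initializes it exactly once however many
 * instances attach.
 */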
static void
cpufreq_dt_attach(device_t parent, device_t self, void *aux)
{
	static ONCE_DECL(locks);
	struct cpufreq_dt_softc * const sc = device_private(self);
	struct fdt_attach_args * const faa = aux;

	RUN_ONCE(&locks, cpufreq_dt_lock_init);

	sc->sc_dev = self;
	sc->sc_phandle = faa->faa_phandle;

	aprint_naive("\n");
	aprint_normal("\n");

	config_interrupts(self, cpufreq_dt_init);
}

CFATTACH_DECL_NEW(cpufreq_dt, sizeof(struct cpufreq_dt_softc),
    cpufreq_dt_match, cpufreq_dt_attach, NULL, NULL);