Home | History | Annotate | Line # | Download | only in fdt
cpufreq_dt.c revision 1.15
      1 /* $NetBSD: cpufreq_dt.c,v 1.15 2020/06/03 15:44:45 jmcneill Exp $ */
      2 
      3 /*-
      4  * Copyright (c) 2015-2017 Jared McNeill <jmcneill (at) invisible.ca>
      5  * All rights reserved.
      6  *
      7  * Redistribution and use in source and binary forms, with or without
      8  * modification, are permitted provided that the following conditions
      9  * are met:
     10  * 1. Redistributions of source code must retain the above copyright
     11  *    notice, this list of conditions and the following disclaimer.
     12  * 2. Redistributions in binary form must reproduce the above copyright
     13  *    notice, this list of conditions and the following disclaimer in the
     14  *    documentation and/or other materials provided with the distribution.
     15  *
     16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
     21  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
     22  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
     23  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
     24  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     26  * SUCH DAMAGE.
     27  */
     28 
     29 #include <sys/cdefs.h>
     30 __KERNEL_RCSID(0, "$NetBSD: cpufreq_dt.c,v 1.15 2020/06/03 15:44:45 jmcneill Exp $");
     31 
     32 #include <sys/param.h>
     33 #include <sys/systm.h>
     34 #include <sys/device.h>
     35 #include <sys/kmem.h>
     36 #include <sys/bus.h>
     37 #include <sys/atomic.h>
     38 #include <sys/xcall.h>
     39 #include <sys/sysctl.h>
     40 #include <sys/queue.h>
     41 #include <sys/once.h>
     42 #include <sys/cpu.h>
     43 
     44 #include <dev/fdt/fdtvar.h>
     45 
/*
 * Entry in the global list of OPP tables already claimed by an instance;
 * used so a shared ("opp-shared") v2 table is only set up once.
 */
struct cpufreq_dt_table {
	int			phandle;	/* phandle of the OPP table node */
	TAILQ_ENTRY(cpufreq_dt_table) next;	/* link on cpufreq_dt_tables */
};
     50 
/* All claimed OPP tables; guarded by cpufreq_dt_tables_lock. */
static TAILQ_HEAD(, cpufreq_dt_table) cpufreq_dt_tables =
    TAILQ_HEAD_INITIALIZER(cpufreq_dt_tables);
/* Initialized once from attach via RUN_ONCE(). */
static kmutex_t cpufreq_dt_tables_lock;
     54 
/* One operating performance point. */
struct cpufreq_dt_opp {
	u_int			freq_khz;	/* CPU frequency in kHz */
	u_int			voltage_uv;	/* required supply voltage in uV */
	u_int			latency_ns;	/* transition latency; only set for v2 tables */
};
     60 
/* Per-instance state, one per matched cpu node. */
struct cpufreq_dt_softc {
	device_t		sc_dev;		/* autoconf device handle */
	int			sc_phandle;	/* cpu node phandle */
	struct clk		*sc_clk;	/* CPU core clock (index 0) */
	struct fdtbus_regulator	*sc_supply;	/* "cpu-supply" regulator; NULL if absent */

	struct cpufreq_dt_opp	*sc_opp;	/* OPP table, kmem-allocated */
	ssize_t			sc_nopp;	/* number of entries in sc_opp */

	u_int			sc_freq_target;	/* requested frequency in MHz; 0 = not set yet */
	bool			sc_freq_throttle; /* true while thermal throttle is active */

	u_int			sc_busy;	/* 0/1 busy flag, taken with atomic_cas_uint */

	char			*sc_freq_available; /* space-separated MHz list for sysctl */
	int			sc_node_target;	/* sysctl number of "target" node */
	int			sc_node_current; /* sysctl number of "current" node */
	int			sc_node_available; /* sysctl number of "available" node */

	struct cpufreq_dt_table	sc_table;	/* our entry on cpufreq_dt_tables */
};
     82 
     83 static void
     84 cpufreq_dt_change_cb(void *arg1, void *arg2)
     85 {
     86 	struct cpufreq_dt_softc * const sc = arg1;
     87 	struct cpu_info *ci = curcpu();
     88 
     89 	ci->ci_data.cpu_cc_freq = sc->sc_freq_target * 1000000;
     90 }
     91 
     92 static int
     93 cpufreq_dt_set_rate(struct cpufreq_dt_softc *sc, u_int freq_khz)
     94 {
     95 	struct cpufreq_dt_opp *opp = NULL;
     96 	u_int old_rate, new_rate, old_uv, new_uv;
     97 	uint64_t xc;
     98 	int error;
     99 	ssize_t n;
    100 
    101 	for (n = 0; n < sc->sc_nopp; n++)
    102 		if (sc->sc_opp[n].freq_khz == freq_khz) {
    103 			opp = &sc->sc_opp[n];
    104 			break;
    105 		}
    106 	if (opp == NULL)
    107 		return EINVAL;
    108 
    109 	old_rate = clk_get_rate(sc->sc_clk);
    110 	new_rate = freq_khz * 1000;
    111 	new_uv = opp->voltage_uv;
    112 
    113 	if (old_rate == new_rate)
    114 		return 0;
    115 
    116 	if (sc->sc_supply != NULL) {
    117 		error = fdtbus_regulator_get_voltage(sc->sc_supply, &old_uv);
    118 		if (error != 0)
    119 			return error;
    120 
    121 		if (new_uv > old_uv) {
    122 			error = fdtbus_regulator_set_voltage(sc->sc_supply,
    123 			    new_uv, new_uv);
    124 			if (error != 0)
    125 				return error;
    126 		}
    127 	}
    128 
    129 	error = clk_set_rate(sc->sc_clk, new_rate);
    130 	if (error != 0)
    131 		return error;
    132 
    133 	const u_int latency_us = howmany(opp->latency_ns, 1000);
    134 	if (latency_us > 0)
    135 		delay(latency_us);
    136 
    137 	if (sc->sc_supply != NULL) {
    138 		if (new_uv < old_uv) {
    139 			error = fdtbus_regulator_set_voltage(sc->sc_supply,
    140 			    new_uv, new_uv);
    141 			if (error != 0)
    142 				return error;
    143 		}
    144 	}
    145 
    146 	if (error == 0) {
    147 		xc = xc_broadcast(0, cpufreq_dt_change_cb, sc, NULL);
    148 		xc_wait(xc);
    149 
    150 		pmf_event_inject(NULL, PMFE_SPEED_CHANGED);
    151 	}
    152 
    153 	return 0;
    154 }
    155 
    156 static void
    157 cpufreq_dt_throttle_enable(device_t dev)
    158 {
    159 	struct cpufreq_dt_softc * const sc = device_private(dev);
    160 
    161 	if (sc->sc_freq_throttle)
    162 		return;
    163 
    164 	const u_int freq_khz = sc->sc_opp[sc->sc_nopp - 1].freq_khz;
    165 
    166 	while (atomic_cas_uint(&sc->sc_busy, 0, 1) != 0)
    167 		kpause("throttle", false, 1, NULL);
    168 
    169 	if (cpufreq_dt_set_rate(sc, freq_khz) == 0) {
    170 		aprint_debug_dev(sc->sc_dev, "throttle enabled (%u.%03u MHz)\n",
    171 		    freq_khz / 1000, freq_khz % 1000);
    172 		sc->sc_freq_throttle = true;
    173 		if (sc->sc_freq_target == 0)
    174 			sc->sc_freq_target = clk_get_rate(sc->sc_clk) / 1000000;
    175 	}
    176 
    177 	atomic_dec_uint(&sc->sc_busy);
    178 }
    179 
    180 static void
    181 cpufreq_dt_throttle_disable(device_t dev)
    182 {
    183 	struct cpufreq_dt_softc * const sc = device_private(dev);
    184 
    185 	if (!sc->sc_freq_throttle)
    186 		return;
    187 
    188 	while (atomic_cas_uint(&sc->sc_busy, 0, 1) != 0)
    189 		kpause("throttle", false, 1, NULL);
    190 
    191 	const u_int freq_khz = sc->sc_freq_target * 1000;
    192 
    193 	if (cpufreq_dt_set_rate(sc, freq_khz) == 0) {
    194 		aprint_debug_dev(sc->sc_dev, "throttle disabled (%u.%03u MHz)\n",
    195 		    freq_khz / 1000, freq_khz % 1000);
    196 		sc->sc_freq_throttle = false;
    197 	}
    198 
    199 	atomic_dec_uint(&sc->sc_busy);
    200 }
    201 
/*
 * Backing handler for the machdep.cpufreq.<cpu>.{target,current} sysctl
 * nodes.  "target" reads/writes the requested frequency in MHz;
 * "current" reports the live clock rate.  A write to "target" must match
 * an OPP entry exactly and reprograms the clock unless throttling is
 * active, in which case it is applied when the throttle is released.
 */
static int
cpufreq_dt_sysctl_helper(SYSCTLFN_ARGS)
{
	struct cpufreq_dt_softc * const sc = rnode->sysctl_data;
	struct sysctlnode node;
	u_int fq, oldfq = 0;
	int error, n;

	node = *rnode;
	node.sysctl_data = &fq;

	if (rnode->sysctl_num == sc->sc_node_target) {
		/* Lazily seed the target from the current clock rate. */
		if (sc->sc_freq_target == 0)
			sc->sc_freq_target = clk_get_rate(sc->sc_clk) / 1000000;
		fq = sc->sc_freq_target;
	} else
		fq = clk_get_rate(sc->sc_clk) / 1000000;

	/* Remember the old value so an unchanged write is a no-op. */
	if (rnode->sysctl_num == sc->sc_node_target)
		oldfq = fq;

	if (sc->sc_freq_target == 0)
		sc->sc_freq_target = fq;

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	/* Only writes to "target" that actually change the value proceed. */
	if (fq == oldfq || rnode->sysctl_num != sc->sc_node_target)
		return 0;

	/* The new value must correspond to an OPP entry (in MHz). */
	for (n = 0; n < sc->sc_nopp; n++)
		if (sc->sc_opp[n].freq_khz / 1000 == fq)
			break;
	if (n == sc->sc_nopp)
		return EINVAL;

	/* Fail rather than sleep if another rate change is in flight. */
	if (atomic_cas_uint(&sc->sc_busy, 0, 1) != 0)
		return EBUSY;

	sc->sc_freq_target = fq;

	/* While throttled only record the target; it is applied on
	 * throttle disable. */
	if (sc->sc_freq_throttle)
		error = 0;
	else
		error = cpufreq_dt_set_rate(sc, fq * 1000);

	atomic_dec_uint(&sc->sc_busy);

	return error;
}
    253 
    254 static struct cpu_info *
    255 cpufreq_dt_cpu_lookup(cpuid_t mpidr)
    256 {
    257 	CPU_INFO_ITERATOR cii;
    258 	struct cpu_info *ci;
    259 
    260 	for (CPU_INFO_FOREACH(cii, ci)) {
    261 		if (ci->ci_cpuid == mpidr)
    262 			return ci;
    263 	}
    264 
    265 	return NULL;
    266 }
    267 
    268 static void
    269 cpufreq_dt_init_sysctl(struct cpufreq_dt_softc *sc)
    270 {
    271 	const struct sysctlnode *node, *cpunode;
    272 	struct sysctllog *cpufreq_log = NULL;
    273 	struct cpu_info *ci;
    274 	bus_addr_t mpidr;
    275 	int error, i;
    276 
    277 	if (fdtbus_get_reg(sc->sc_phandle, 0, &mpidr, 0) != 0)
    278 		return;
    279 
    280 	ci = cpufreq_dt_cpu_lookup(mpidr);
    281 	if (ci == NULL)
    282 		return;
    283 
    284 	sc->sc_freq_available = kmem_zalloc(strlen("XXXX ") * sc->sc_nopp, KM_SLEEP);
    285 	for (i = 0; i < sc->sc_nopp; i++) {
    286 		char buf[6];
    287 		snprintf(buf, sizeof(buf), i ? " %u" : "%u", sc->sc_opp[i].freq_khz / 1000);
    288 		strcat(sc->sc_freq_available, buf);
    289 	}
    290 
    291 	error = sysctl_createv(&cpufreq_log, 0, NULL, &node,
    292 	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "machdep", NULL,
    293 	    NULL, 0, NULL, 0, CTL_MACHDEP, CTL_EOL);
    294 	if (error)
    295 		goto sysctl_failed;
    296 	error = sysctl_createv(&cpufreq_log, 0, &node, &node,
    297 	    0, CTLTYPE_NODE, "cpufreq", NULL,
    298 	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
    299 	if (error)
    300 		goto sysctl_failed;
    301 	error = sysctl_createv(&cpufreq_log, 0, &node, &cpunode,
    302 	    0, CTLTYPE_NODE, cpu_name(ci), NULL,
    303 	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
    304 	if (error)
    305 		goto sysctl_failed;
    306 
    307 	error = sysctl_createv(&cpufreq_log, 0, &cpunode, &node,
    308 	    CTLFLAG_READWRITE, CTLTYPE_INT, "target", NULL,
    309 	    cpufreq_dt_sysctl_helper, 0, (void *)sc, 0,
    310 	    CTL_CREATE, CTL_EOL);
    311 	if (error)
    312 		goto sysctl_failed;
    313 	sc->sc_node_target = node->sysctl_num;
    314 
    315 	error = sysctl_createv(&cpufreq_log, 0, &cpunode, &node,
    316 	    CTLFLAG_READWRITE, CTLTYPE_INT, "current", NULL,
    317 	    cpufreq_dt_sysctl_helper, 0, (void *)sc, 0,
    318 	    CTL_CREATE, CTL_EOL);
    319 	if (error)
    320 		goto sysctl_failed;
    321 	sc->sc_node_current = node->sysctl_num;
    322 
    323 	error = sysctl_createv(&cpufreq_log, 0, &cpunode, &node,
    324 	    0, CTLTYPE_STRING, "available", NULL,
    325 	    NULL, 0, sc->sc_freq_available, 0,
    326 	    CTL_CREATE, CTL_EOL);
    327 	if (error)
    328 		goto sysctl_failed;
    329 	sc->sc_node_available = node->sysctl_num;
    330 
    331 	return;
    332 
    333 sysctl_failed:
    334 	aprint_error_dev(sc->sc_dev, "couldn't create sysctl nodes: %d\n", error);
    335 	sysctl_teardown(&cpufreq_log);
    336 }
    337 
    338 static int
    339 cpufreq_dt_parse_opp(struct cpufreq_dt_softc *sc)
    340 {
    341 	const int phandle = sc->sc_phandle;
    342 	const u_int *opp;
    343 	int len, i;
    344 
    345 	opp = fdtbus_get_prop(phandle, "operating-points", &len);
    346 	if (len < 8)
    347 		return ENXIO;
    348 
    349 	sc->sc_nopp = len / 8;
    350 	sc->sc_opp = kmem_zalloc(sizeof(*sc->sc_opp) * sc->sc_nopp, KM_SLEEP);
    351 	for (i = 0; i < sc->sc_nopp; i++, opp += 2) {
    352 		sc->sc_opp[i].freq_khz = be32toh(opp[0]);
    353 		sc->sc_opp[i].voltage_uv = be32toh(opp[1]);
    354 	}
    355 
    356 	return 0;
    357 }
    358 
/*
 * Find the best FDT_OPP() registration for the given OPP table node by
 * scoring each registration's compatible string with
 * of_match_compatible().  Returns NULL when nothing matches.
 */
static const struct fdt_opp_info *
cpufreq_dt_lookup_opp_info(const int opp_table)
{
	__link_set_decl(fdt_opps, struct fdt_opp_info);
	struct fdt_opp_info * const *opp;
	const struct fdt_opp_info *best_opp = NULL;
	int match, best_match = 0;

	/* Walk every FDT_OPP() registration linked into the kernel. */
	__link_set_foreach(opp, fdt_opps) {
		const char * const compat[] = { (*opp)->opp_compat, NULL };
		match = of_match_compatible(opp_table, compat);
		/* Keep the highest-scoring (most specific) match. */
		if (match > best_match) {
			best_match = match;
			best_opp = *opp;
		}
	}

	return best_opp;
}
    378 
/* Generic "operating-points-v2" handler: every OPP entry is usable. */
static bool
cpufreq_dt_opp_v2_supported(const int opp_table, const int opp_node)
{
	return true;
}

/* Register as the handler for plain "operating-points-v2" tables. */
FDT_OPP(opp_v2, "operating-points-v2", cpufreq_dt_opp_v2_supported);
    386 
    387 static bool
    388 cpufreq_dt_node_supported(const struct fdt_opp_info *opp_info, const int opp_table, const int opp_node)
    389 {
    390 	if (!fdtbus_status_okay(opp_node))
    391 		return false;
    392 	if (of_hasprop(opp_node, "opp-suspend"))
    393 		return false;
    394 
    395 	if (opp_info != NULL)
    396 		return opp_info->opp_supported(opp_table, opp_node);
    397 
    398 	return false;
    399 }
    400 
    401 static int
    402 cpufreq_dt_parse_opp_v2(struct cpufreq_dt_softc *sc)
    403 {
    404 	const int phandle = sc->sc_phandle;
    405 	struct cpufreq_dt_table *table;
    406 	const struct fdt_opp_info *opp_info;
    407 	const u_int *opp_uv;
    408 	uint64_t opp_hz;
    409 	int opp_node, len, i, index;
    410 
    411 	const int opp_table = fdtbus_get_phandle(phandle, "operating-points-v2");
    412 	if (opp_table < 0)
    413 		return ENOENT;
    414 
    415 	/* If the table is shared, only setup a single instance */
    416 	if (of_hasprop(opp_table, "opp-shared")) {
    417 		TAILQ_FOREACH(table, &cpufreq_dt_tables, next)
    418 			if (table->phandle == opp_table)
    419 				return EEXIST;
    420 		sc->sc_table.phandle = opp_table;
    421 		TAILQ_INSERT_TAIL(&cpufreq_dt_tables, &sc->sc_table, next);
    422 	}
    423 
    424 	opp_info = cpufreq_dt_lookup_opp_info(opp_table);
    425 
    426 	for (opp_node = OF_child(opp_table); opp_node; opp_node = OF_peer(opp_node)) {
    427 		if (!cpufreq_dt_node_supported(opp_info, opp_table, opp_node))
    428 			continue;
    429 		sc->sc_nopp++;
    430 	}
    431 
    432 	if (sc->sc_nopp == 0)
    433 		return EINVAL;
    434 
    435 	sc->sc_opp = kmem_zalloc(sizeof(*sc->sc_opp) * sc->sc_nopp, KM_SLEEP);
    436 	index = sc->sc_nopp - 1;
    437 	for (opp_node = OF_child(opp_table), i = 0; opp_node; opp_node = OF_peer(opp_node), i++) {
    438 		if (!cpufreq_dt_node_supported(opp_info, opp_table, opp_node))
    439 			continue;
    440 		if (of_getprop_uint64(opp_node, "opp-hz", &opp_hz) != 0)
    441 			return EINVAL;
    442 		opp_uv = fdtbus_get_prop(opp_node, "opp-microvolt", &len);
    443 		if (opp_uv == NULL || len < 1)
    444 			return EINVAL;
    445 		/* Table is in reverse order */
    446 		sc->sc_opp[index].freq_khz = (u_int)(opp_hz / 1000);
    447 		sc->sc_opp[index].voltage_uv = be32toh(opp_uv[0]);
    448 		of_getprop_uint32(opp_node, "clock-latency-ns", &sc->sc_opp[index].latency_ns);
    449 		--index;
    450 	}
    451 
    452 	return 0;
    453 }
    454 
    455 static int
    456 cpufreq_dt_parse(struct cpufreq_dt_softc *sc)
    457 {
    458 	const int phandle = sc->sc_phandle;
    459 	int error, i;
    460 
    461 	if (of_hasprop(phandle, "cpu-supply")) {
    462 		sc->sc_supply = fdtbus_regulator_acquire(phandle, "cpu-supply");
    463 		if (sc->sc_supply == NULL) {
    464 			aprint_error_dev(sc->sc_dev,
    465 			    "couldn't acquire cpu-supply\n");
    466 			return ENXIO;
    467 		}
    468 	}
    469 	sc->sc_clk = fdtbus_clock_get_index(phandle, 0);
    470 	if (sc->sc_clk == NULL) {
    471 		aprint_error_dev(sc->sc_dev, "couldn't acquire clock\n");
    472 		return ENXIO;
    473 	}
    474 
    475 	mutex_enter(&cpufreq_dt_tables_lock);
    476 	if (of_hasprop(phandle, "operating-points"))
    477 		error = cpufreq_dt_parse_opp(sc);
    478 	else if (of_hasprop(phandle, "operating-points-v2"))
    479 		error = cpufreq_dt_parse_opp_v2(sc);
    480 	else
    481 		error = EINVAL;
    482 	mutex_exit(&cpufreq_dt_tables_lock);
    483 
    484 	if (error) {
    485 		if (error != EEXIST)
    486 			aprint_error_dev(sc->sc_dev,
    487 			    "couldn't parse operating points: %d\n", error);
    488 		return error;
    489 	}
    490 
    491 	for (i = 0; i < sc->sc_nopp; i++) {
    492 		aprint_debug_dev(sc->sc_dev, "supported rate: %u.%03u MHz, %u uV\n",
    493 		    sc->sc_opp[i].freq_khz / 1000,
    494 		    sc->sc_opp[i].freq_khz % 1000,
    495 		    sc->sc_opp[i].voltage_uv);
    496 	}
    497 
    498 	return 0;
    499 }
    500 
    501 static int
    502 cpufreq_dt_match(device_t parent, cfdata_t cf, void *aux)
    503 {
    504 	struct fdt_attach_args * const faa = aux;
    505 	const int phandle = faa->faa_phandle;
    506 	bus_addr_t addr;
    507 
    508 	if (fdtbus_get_reg(phandle, 0, &addr, NULL) != 0)
    509 		return 0;
    510 
    511 	if (!of_hasprop(phandle, "clocks"))
    512 		return 0;
    513 
    514 	if (!of_hasprop(phandle, "operating-points") &&
    515 	    !of_hasprop(phandle, "operating-points-v2"))
    516 		return 0;
    517 
    518 	return 1;
    519 }
    520 
    521 static void
    522 cpufreq_dt_init(device_t self)
    523 {
    524 	struct cpufreq_dt_softc * const sc = device_private(self);
    525 	int error;
    526 
    527 	if ((error = cpufreq_dt_parse(sc)) != 0)
    528 		return;
    529 
    530 	pmf_event_register(sc->sc_dev, PMFE_THROTTLE_ENABLE, cpufreq_dt_throttle_enable, true);
    531 	pmf_event_register(sc->sc_dev, PMFE_THROTTLE_DISABLE, cpufreq_dt_throttle_disable, true);
    532 
    533 	cpufreq_dt_init_sysctl(sc);
    534 
    535 	if (sc->sc_nopp > 0) {
    536 		struct cpufreq_dt_opp * const opp = &sc->sc_opp[0];
    537 
    538 		aprint_normal_dev(sc->sc_dev, "rate: %u.%03u MHz, %u uV\n",
    539 		    opp->freq_khz / 1000, opp->freq_khz % 1000, opp->voltage_uv);
    540 		cpufreq_dt_set_rate(sc, opp->freq_khz);
    541 	}
    542 }
    543 
/* RUN_ONCE handler: create the mutex guarding the shared OPP table list. */
static int
cpufreq_dt_lock_init(void)
{
	mutex_init(&cpufreq_dt_tables_lock, MUTEX_DEFAULT, IPL_NONE);
	return 0;
}
    550 
/*
 * Attach: record the device and phandle, then defer the real setup to
 * cpufreq_dt_init() via config_interrupts(), by which time the clock
 * tree and regulators are available.
 */
static void
cpufreq_dt_attach(device_t parent, device_t self, void *aux)
{
	static ONCE_DECL(locks);
	struct cpufreq_dt_softc * const sc = device_private(self);
	struct fdt_attach_args * const faa = aux;

	/* The first instance initializes the global tables lock. */
	RUN_ONCE(&locks, cpufreq_dt_lock_init);

	sc->sc_dev = self;
	sc->sc_phandle = faa->faa_phandle;

	aprint_naive("\n");
	aprint_normal("\n");

	config_interrupts(self, cpufreq_dt_init);
}
    568 
/* Autoconf glue: bind this driver to matching fdt cpu nodes. */
CFATTACH_DECL_NEW(cpufreq_dt, sizeof(struct cpufreq_dt_softc),
    cpufreq_dt_match, cpufreq_dt_attach, NULL, NULL);
    571