/* $NetBSD: scmi.c,v 1.1 2025/01/08 22:55:35 jmcneill Exp $ */
/*	$OpenBSD: scmi.c,v 1.2 2024/11/25 22:12:18 tobhe Exp $	*/

/*
 * Copyright (c) 2023 Mark Kettenis <kettenis (at) openbsd.org>
 * Copyright (c) 2024 Tobias Heider <tobhe (at) openbsd.org>
 * Copyright (c) 2025 Jared McNeill <jmcneill (at) invisible.ca>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/kmem.h>
#include <sys/sysctl.h>
#include <sys/cpu.h>

#include <arm/arm/smccc.h>
#include <dev/ic/scmi.h>

#define SCMI_SUCCESS		0
#define SCMI_NOT_SUPPORTED	-1
#define SCMI_DENIED		-3
#define SCMI_BUSY		-6
#define SCMI_COMMS_ERROR	-7

/* Protocols */
#define SCMI_BASE		0x10
#define SCMI_PERF		0x13
#define SCMI_CLOCK		0x14

/* Common messages */
#define SCMI_PROTOCOL_VERSION			0x0
#define SCMI_PROTOCOL_ATTRIBUTES		0x1
#define SCMI_PROTOCOL_MESSAGE_ATTRIBUTES	0x2

/* Clock management messages */
#define SCMI_CLOCK_ATTRIBUTES			0x3
#define SCMI_CLOCK_DESCRIBE_RATES		0x4
#define SCMI_CLOCK_RATE_SET			0x5
#define SCMI_CLOCK_RATE_GET			0x6
#define SCMI_CLOCK_CONFIG_SET			0x7
#define  SCMI_CLOCK_CONFIG_SET_ENABLE		(1U << 0)

/* Performance management messages */
#define SCMI_PERF_DOMAIN_ATTRIBUTES		0x3
#define SCMI_PERF_DESCRIBE_LEVELS		0x4
#define SCMI_PERF_LIMITS_GET			0x6
#define SCMI_PERF_LEVEL_SET			0x7
#define SCMI_PERF_LEVEL_GET			0x8

struct scmi_resp_perf_domain_attributes_40 {
	uint32_t pa_attrs;
#define SCMI_PERF_ATTR_CAN_LEVEL_SET		(1U << 30)
#define SCMI_PERF_ATTR_LEVEL_INDEX_MODE		(1U << 25)
	uint32_t pa_ratelimit;
	uint32_t pa_sustifreq;
	uint32_t pa_sustperf;
	char	 pa_name[16];
};

struct scmi_resp_perf_describe_levels_40 {
	uint16_t pl_nret;
	uint16_t pl_nrem;
	struct {
		uint32_t	pe_perf;
		uint32_t	pe_cost;
		uint16_t	pe_latency;
		uint16_t	pe_reserved;
		uint32_t	pe_ifreq;
		uint32_t	pe_lindex;
	} pl_entry[];
};

static void scmi_cpufreq_init_sysctl(struct scmi_softc *, uint32_t);

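/*
 * Compose an SCMI message header in the shared memory area.  The
 * protocol ID goes in bits [17:10] and the message ID in bits [7:0];
 * the message type bits are left at zero (a command).
 */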
static inline void
scmi_message_header(volatile struct scmi_shmem *shmem,
    uint32_t protocol_id, uint32_t message_id)
{
	shmem->message_header = (protocol_id << 10) | (message_id << 0);
}

int32_t	scmi_smc_command(struct scmi_softc *);
int32_t	scmi_mbox_command(struct scmi_softc *);

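/*
 * Initialise the SMC transport: check that the shared memory channel
 * is free, query the base protocol version, and set up the shared
 * memory locks.
 */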
int
scmi_init_smc(struct scmi_softc *sc)
{
	volatile struct scmi_shmem *shmem;
	int32_t status;
	uint32_t vers;

	if (sc->sc_smc_id == 0) {
		aprint_error_dev(sc->sc_dev, "no SMC id\n");
		return -1;
	}

	shmem = sc->sc_shmem_tx;

	sc->sc_command = scmi_smc_command;

	if ((shmem->channel_status & SCMI_CHANNEL_FREE) == 0) {
		aprint_error_dev(sc->sc_dev, "channel busy\n");
		return -1;
	}

	scmi_message_header(shmem, SCMI_BASE, SCMI_PROTOCOL_VERSION);
	shmem->length = sizeof(uint32_t);
	status = sc->sc_command(sc);
	if (status != SCMI_SUCCESS) {
		aprint_error_dev(sc->sc_dev,
		    "protocol version command failed\n");
		return -1;
	}

	vers = shmem->message_payload[1];
	sc->sc_ver_major = vers >> 16;
	sc->sc_ver_minor = vers & 0xffff;
	aprint_normal_dev(sc->sc_dev, "SCMI %d.%d\n",
	    sc->sc_ver_major, sc->sc_ver_minor);

	mutex_init(&sc->sc_shmem_tx_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&sc->sc_shmem_rx_lock, MUTEX_DEFAULT, IPL_NONE);

	return 0;
}

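/*
 * Initialise the mailbox transport: both a tx and an rx mailbox are
 * required.  Query the base protocol version and set up the shared
 * memory locks.
 */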
int
scmi_init_mbox(struct scmi_softc *sc)
{
	int32_t status;
	uint32_t vers;

	if (sc->sc_mbox_tx == NULL) {
		aprint_error_dev(sc->sc_dev, "no tx mbox\n");
		return -1;
	}
	if (sc->sc_mbox_rx == NULL) {
		aprint_error_dev(sc->sc_dev, "no rx mbox\n");
		return -1;
	}

	sc->sc_command = scmi_mbox_command;

	scmi_message_header(sc->sc_shmem_tx, SCMI_BASE, SCMI_PROTOCOL_VERSION);
	sc->sc_shmem_tx->length = sizeof(uint32_t);
	status = sc->sc_command(sc);
	if (status != SCMI_SUCCESS) {
		aprint_error_dev(sc->sc_dev,
		    "protocol version command failed\n");
		return -1;
	}

	vers = sc->sc_shmem_tx->message_payload[1];
	sc->sc_ver_major = vers >> 16;
	sc->sc_ver_minor = vers & 0xffff;
	aprint_normal_dev(sc->sc_dev, "SCMI %d.%d\n",
	    sc->sc_ver_major, sc->sc_ver_minor);

	mutex_init(&sc->sc_shmem_tx_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&sc->sc_shmem_rx_lock, MUTEX_DEFAULT, IPL_NONE);

	return 0;
}

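/*
 * Issue the command currently staged in the tx shared memory area by
 * making the SCMI SMC call, then return the protocol status word from
 * the response payload.
 */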
int32_t
scmi_smc_command(struct scmi_softc *sc)
{
	volatile struct scmi_shmem *shmem = sc->sc_shmem_tx;
	int32_t status;

	shmem->channel_status = 0;
	status = smccc_call(sc->sc_smc_id, 0, 0, 0, 0,
			    NULL, NULL, NULL, NULL);
	if (status != SMCCC_SUCCESS)
		return SCMI_NOT_SUPPORTED;
	if ((shmem->channel_status & SCMI_CHANNEL_ERROR))
		return SCMI_COMMS_ERROR;
	if ((shmem->channel_status & SCMI_CHANNEL_FREE) == 0)
		return SCMI_BUSY;
	return shmem->message_payload[0];
}

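/*
 * Issue the command currently staged in the tx shared memory area by
 * ringing the tx mailbox, then poll briefly for the platform to mark
 * the channel free and return the protocol status word.
 */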
int32_t
scmi_mbox_command(struct scmi_softc *sc)
{
	volatile struct scmi_shmem *shmem = sc->sc_shmem_tx;
	int ret;
	int i;

	shmem->channel_status = 0;
	ret = sc->sc_mbox_tx_send(sc->sc_mbox_tx);
	if (ret != 0)
		return SCMI_NOT_SUPPORTED;

	/* XXX: poll for now */
	for (i = 0; i < 20; i++) {
		if (shmem->channel_status & SCMI_CHANNEL_FREE)
			break;
		delay(10);
	}
	if ((shmem->channel_status & SCMI_CHANNEL_ERROR))
		return SCMI_COMMS_ERROR;
	if ((shmem->channel_status & SCMI_CHANNEL_FREE) == 0)
		return SCMI_BUSY;

	return shmem->message_payload[0];
}

#if notyet
/* Clock management. */

void	scmi_clock_enable(void *, uint32_t *, int);
uint32_t scmi_clock_get_frequency(void *, uint32_t *);
int	scmi_clock_set_frequency(void *, uint32_t *, uint32_t);

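/*
 * Query the number of clocks exposed by the clock management protocol
 * and, if there are any, register this device as a clock provider.
 */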
void
scmi_attach_clock(struct scmi_softc *sc, int node)
{
	volatile struct scmi_shmem *shmem = sc->sc_shmem_tx;
	int32_t status;
	int nclocks;

	scmi_message_header(shmem, SCMI_CLOCK, SCMI_PROTOCOL_ATTRIBUTES);
	shmem->length = sizeof(uint32_t);
	status = sc->sc_command(sc);
	if (status != SCMI_SUCCESS)
		return;

	nclocks = shmem->message_payload[1] & 0xffff;
	if (nclocks == 0)
		return;

	sc->sc_cd.cd_node = node;
	sc->sc_cd.cd_cookie = sc;
	sc->sc_cd.cd_enable = scmi_clock_enable;
	sc->sc_cd.cd_get_frequency = scmi_clock_get_frequency;
	sc->sc_cd.cd_set_frequency = scmi_clock_set_frequency;
	clock_register(&sc->sc_cd);
}

void
scmi_clock_enable(void *cookie, uint32_t *cells, int on)
{
	struct scmi_softc *sc = cookie;
	volatile struct scmi_shmem *shmem = sc->sc_shmem_tx;
	uint32_t idx = cells[0];

	scmi_message_header(shmem, SCMI_CLOCK, SCMI_CLOCK_CONFIG_SET);
	shmem->length = 3 * sizeof(uint32_t);
	shmem->message_payload[0] = idx;
	shmem->message_payload[1] = on ? SCMI_CLOCK_CONFIG_SET_ENABLE : 0;
	sc->sc_command(sc);
}

uint32_t
scmi_clock_get_frequency(void *cookie, uint32_t *cells)
{
	struct scmi_softc *sc = cookie;
	volatile struct scmi_shmem *shmem = sc->sc_shmem_tx;
	uint32_t idx = cells[0];
	int32_t status;

	scmi_message_header(shmem, SCMI_CLOCK, SCMI_CLOCK_RATE_GET);
	shmem->length = 2 * sizeof(uint32_t);
	shmem->message_payload[0] = idx;
	status = sc->sc_command(sc);
	if (status != SCMI_SUCCESS)
		return 0;
	if (shmem->message_payload[2] != 0)
		return 0;

	return shmem->message_payload[1];
}

int
scmi_clock_set_frequency(void *cookie, uint32_t *cells, uint32_t freq)
{
	struct scmi_softc *sc = cookie;
	volatile struct scmi_shmem *shmem = sc->sc_shmem_tx;
	uint32_t idx = cells[0];
	int32_t status;

	scmi_message_header(shmem, SCMI_CLOCK, SCMI_CLOCK_RATE_SET);
	shmem->length = 5 * sizeof(uint32_t);
	shmem->message_payload[0] = 0;
	shmem->message_payload[1] = idx;
	shmem->message_payload[2] = freq;
	shmem->message_payload[3] = 0;
	status = sc->sc_command(sc);
	if (status != SCMI_SUCCESS)
		return -1;

	return 0;
}
#endif

/* Performance management */
void	scmi_perf_descr_levels(struct scmi_softc *, int);

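/*
 * Probe the performance management protocol: require protocol version
 * 4.0, enumerate the performance domains and their levels, and attach
 * cpufreq sysctl nodes for domains that allow setting levels.
 */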
void
scmi_attach_perf(struct scmi_softc *sc)
{
	volatile struct scmi_shmem *shmem = sc->sc_shmem_tx;
	int32_t status;
	uint32_t vers;
	int i;

	scmi_message_header(sc->sc_shmem_tx, SCMI_PERF, SCMI_PROTOCOL_VERSION);
	sc->sc_shmem_tx->length = sizeof(uint32_t);
	status = sc->sc_command(sc);
	if (status != SCMI_SUCCESS) {
		aprint_error_dev(sc->sc_dev,
		    "SCMI_PROTOCOL_VERSION failed\n");
		return;
	}

	vers = shmem->message_payload[1];
	if (vers != 0x40000) {
		aprint_error_dev(sc->sc_dev,
		    "invalid perf protocol version (0x%x != 0x40000)\n", vers);
		return;
	}

	scmi_message_header(shmem, SCMI_PERF, SCMI_PROTOCOL_ATTRIBUTES);
	shmem->length = sizeof(uint32_t);
	status = sc->sc_command(sc);
	if (status != SCMI_SUCCESS) {
		aprint_error_dev(sc->sc_dev,
		    "SCMI_PROTOCOL_ATTRIBUTES failed\n");
		return;
	}

	sc->sc_perf_ndomains = shmem->message_payload[1] & 0xffff;
	sc->sc_perf_domains = kmem_zalloc(sc->sc_perf_ndomains *
	    sizeof(struct scmi_perf_domain), KM_SLEEP);
	sc->sc_perf_power_unit = (shmem->message_payload[1] >> 16) & 0x3;

	/* Add one frequency sensor per perf domain */
	for (i = 0; i < sc->sc_perf_ndomains; i++) {
		volatile struct scmi_resp_perf_domain_attributes_40 *pa;

		scmi_message_header(shmem, SCMI_PERF,
		    SCMI_PERF_DOMAIN_ATTRIBUTES);
		shmem->length = 2 * sizeof(uint32_t);
		shmem->message_payload[0] = i;
		status = sc->sc_command(sc);
		if (status != SCMI_SUCCESS) {
			aprint_error_dev(sc->sc_dev,
			    "SCMI_PERF_DOMAIN_ATTRIBUTES failed\n");
			return;
		}

		pa = (volatile struct scmi_resp_perf_domain_attributes_40 *)
		    &shmem->message_payload[1];
		aprint_debug_dev(sc->sc_dev,
		    "dom %u attr %#x rate_limit %u sfreq %u sperf %u "
		    "name \"%s\"\n",
		    i, pa->pa_attrs, pa->pa_ratelimit, pa->pa_sustifreq,
		    pa->pa_sustperf, pa->pa_name);

		sc->sc_perf_domains[i].pd_domain_id = i;
		sc->sc_perf_domains[i].pd_sc = sc;
		for (int map = 0; map < sc->sc_perf_ndmap; map++) {
			if (sc->sc_perf_dmap[map].pm_domain == i) {
				sc->sc_perf_domains[i].pd_ci =
				    sc->sc_perf_dmap[map].pm_ci;
				break;
			}
		}
		snprintf(sc->sc_perf_domains[i].pd_name,
		    sizeof(sc->sc_perf_domains[i].pd_name), "%s", pa->pa_name);
		sc->sc_perf_domains[i].pd_can_level_set =
		    (pa->pa_attrs & SCMI_PERF_ATTR_CAN_LEVEL_SET) != 0;
		sc->sc_perf_domains[i].pd_level_index_mode =
		    (pa->pa_attrs & SCMI_PERF_ATTR_LEVEL_INDEX_MODE) != 0;
		sc->sc_perf_domains[i].pd_rate_limit = pa->pa_ratelimit;
		sc->sc_perf_domains[i].pd_sustained_perf = pa->pa_sustperf;

		scmi_perf_descr_levels(sc, i);

		if (sc->sc_perf_domains[i].pd_can_level_set &&
		    sc->sc_perf_domains[i].pd_nlevels > 0 &&
		    sc->sc_perf_domains[i].pd_levels[0].pl_ifreq != 0) {
			scmi_cpufreq_init_sysctl(sc, i);
		}
	}
	return;
}

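/*
 * Fetch the full table of performance levels for a domain, iterating
 * SCMI_PERF_DESCRIBE_LEVELS until the platform reports no levels
 * remaining.
 */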
void
scmi_perf_descr_levels(struct scmi_softc *sc, int domain)
{
	volatile struct scmi_shmem *shmem = sc->sc_shmem_tx;
	volatile struct scmi_resp_perf_describe_levels_40 *pl;
	struct scmi_perf_domain *pd = &sc->sc_perf_domains[domain];
	int status, i, idx;

	idx = 0;
	do {
		scmi_message_header(shmem, SCMI_PERF,
		    SCMI_PERF_DESCRIBE_LEVELS);
		shmem->length = sizeof(uint32_t) * 3;
		shmem->message_payload[0] = domain;
		shmem->message_payload[1] = idx;
		status = sc->sc_command(sc);
		if (status != SCMI_SUCCESS) {
			aprint_error_dev(sc->sc_dev,
			    "SCMI_PERF_DESCRIBE_LEVELS failed\n");
			return;
		}

		pl = (volatile struct scmi_resp_perf_describe_levels_40 *)
		    &shmem->message_payload[1];

		if (pd->pd_levels == NULL) {
			pd->pd_nlevels = pl->pl_nret + pl->pl_nrem;
			pd->pd_levels = kmem_zalloc(pd->pd_nlevels *
			    sizeof(struct scmi_perf_level),
			    KM_SLEEP);
		}

		for (i = 0; i < pl->pl_nret; i++) {
			pd->pd_levels[idx + i].pl_cost =
			    pl->pl_entry[i].pe_cost;
			pd->pd_levels[idx + i].pl_perf =
			    pl->pl_entry[i].pe_perf;
			pd->pd_levels[idx + i].pl_ifreq =
			    pl->pl_entry[i].pe_ifreq;
			aprint_debug_dev(sc->sc_dev,
			    "dom %u pl %u cost %u perf %i ifreq %u\n",
			    domain, idx + i,
			    pl->pl_entry[i].pe_cost,
			    pl->pl_entry[i].pe_perf,
			    pl->pl_entry[i].pe_ifreq);
		}
		idx += pl->pl_nret;
	} while (pl->pl_nrem);
}

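/*
 * Read the current maximum and minimum performance limits for a
 * domain.  The tx shared memory lock is held across the command.
 */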
static int32_t
scmi_perf_limits_get(struct scmi_perf_domain *pd, uint32_t *max_level,
    uint32_t *min_level)
{
	struct scmi_softc *sc = pd->pd_sc;
	volatile struct scmi_shmem *shmem = sc->sc_shmem_tx;
	int32_t status;

	if (pd->pd_levels == NULL) {
		return SCMI_NOT_SUPPORTED;
	}

	mutex_enter(&sc->sc_shmem_tx_lock);
	scmi_message_header(shmem, SCMI_PERF, SCMI_PERF_LIMITS_GET);
	shmem->length = sizeof(uint32_t) * 2;
	shmem->message_payload[0] = pd->pd_domain_id;
	status = sc->sc_command(sc);
	if (status == SCMI_SUCCESS) {
		*max_level = shmem->message_payload[1];
		*min_level = shmem->message_payload[2];
	}
	mutex_exit(&sc->sc_shmem_tx_lock);

	return status;
}

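/*
 * Read the domain's current performance level.
 */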
static int32_t
scmi_perf_level_get(struct scmi_perf_domain *pd, uint32_t *perf_level)
{
	struct scmi_softc *sc = pd->pd_sc;
	volatile struct scmi_shmem *shmem = sc->sc_shmem_tx;
	int32_t status;

	if (pd->pd_levels == NULL) {
		return SCMI_NOT_SUPPORTED;
	}

	mutex_enter(&sc->sc_shmem_tx_lock);
	scmi_message_header(shmem, SCMI_PERF, SCMI_PERF_LEVEL_GET);
	shmem->length = sizeof(uint32_t) * 2;
	shmem->message_payload[0] = pd->pd_domain_id;
	status = sc->sc_command(sc);
	if (status == SCMI_SUCCESS) {
		*perf_level = shmem->message_payload[1];
	}
	mutex_exit(&sc->sc_shmem_tx_lock);

	return status;
}

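/*
 * Request a new performance level for the domain.
 */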
static int32_t
scmi_perf_level_set(struct scmi_perf_domain *pd, uint32_t perf_level)
{
	struct scmi_softc *sc = pd->pd_sc;
	volatile struct scmi_shmem *shmem = sc->sc_shmem_tx;
	int32_t status;

	if (pd->pd_levels == NULL) {
		return SCMI_NOT_SUPPORTED;
	}

	mutex_enter(&sc->sc_shmem_tx_lock);
	scmi_message_header(shmem, SCMI_PERF, SCMI_PERF_LEVEL_SET);
	shmem->length = sizeof(uint32_t) * 3;
	shmem->message_payload[0] = pd->pd_domain_id;
	shmem->message_payload[1] = perf_level;
	status = sc->sc_command(sc);
	mutex_exit(&sc->sc_shmem_tx_lock);

	return status;
}

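/*
 * Translate a performance level (either a level index or a raw
 * performance value, depending on the domain's index mode) to a
 * frequency in MHz, or 0 if the level is unknown.
 */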
static u_int
scmi_cpufreq_level_to_mhz(struct scmi_perf_domain *pd, uint32_t level)
{
	ssize_t n;

	if (pd->pd_level_index_mode) {
		if (level < pd->pd_nlevels) {
			return pd->pd_levels[level].pl_ifreq / 1000;
		}
	} else {
		for (n = 0; n < pd->pd_nlevels; n++) {
			if (pd->pd_levels[n].pl_perf == level) {
				return pd->pd_levels[n].pl_ifreq / 1000;
			}
		}
	}

	return 0;
}

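/*
 * Map the requested frequency (in MHz) back to a performance level
 * and ask the platform to switch to it, honouring the domain's rate
 * limit between changes.
 */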
static int
scmi_cpufreq_set_rate(struct scmi_softc *sc, struct scmi_perf_domain *pd,
    u_int freq_mhz)
{
	uint32_t perf_level = -1;
	int32_t status;
	ssize_t n;

	for (n = 0; n < pd->pd_nlevels; n++) {
		if (pd->pd_levels[n].pl_ifreq / 1000 == freq_mhz) {
			perf_level = pd->pd_level_index_mode ?
			    n : pd->pd_levels[n].pl_perf;
			break;
		}
	}
	if (n == pd->pd_nlevels)
		return EINVAL;

	status = scmi_perf_level_set(pd, perf_level);
	if (status != SCMI_SUCCESS) {
		return EIO;
	}

	if (pd->pd_rate_limit > 0)
		delay(pd->pd_rate_limit);

	return 0;
}

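/*
 * Sysctl handler shared by the "target" and "current" nodes: reads
 * report the current (or last requested) frequency in MHz, and writes
 * to the "target" node trigger a frequency change.
 */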
static int
scmi_cpufreq_sysctl_helper(SYSCTLFN_ARGS)
{
	struct scmi_perf_domain * const pd = rnode->sysctl_data;
	struct scmi_softc * const sc = pd->pd_sc;
	struct sysctlnode node;
	u_int fq, oldfq = 0, old_target;
	uint32_t level;
	int32_t status;
	int error;

	node = *rnode;
	node.sysctl_data = &fq;

	if (rnode->sysctl_num == pd->pd_node_target) {
		if (pd->pd_freq_target == 0) {
			status = scmi_perf_level_get(pd, &level);
			if (status != SCMI_SUCCESS) {
				return EIO;
			}
			pd->pd_freq_target =
			    scmi_cpufreq_level_to_mhz(pd, level);
		}
		fq = pd->pd_freq_target;
	} else {
		status = scmi_perf_level_get(pd, &level);
		if (status != SCMI_SUCCESS) {
			return EIO;
		}
		fq = scmi_cpufreq_level_to_mhz(pd, level);
	}

	if (rnode->sysctl_num == pd->pd_node_target)
		oldfq = fq;

	if (pd->pd_freq_target == 0)
		pd->pd_freq_target = fq;

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (fq == oldfq || rnode->sysctl_num != pd->pd_node_target)
		return 0;

	if (atomic_cas_uint(&pd->pd_busy, 0, 1) != 0)
		return EBUSY;

	old_target = pd->pd_freq_target;
	pd->pd_freq_target = fq;

	error = scmi_cpufreq_set_rate(sc, pd, fq);
	if (error != 0) {
		pd->pd_freq_target = old_target;
	}

	atomic_dec_uint(&pd->pd_busy);

	return error;
}

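/*
 * Create the machdep.cpufreq.<cpu> sysctl subtree for a performance
 * domain: a writable "target" node, a "current" readout, and the
 * space separated list of "available" frequencies in MHz.
 */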
static void
scmi_cpufreq_init_sysctl(struct scmi_softc *sc, uint32_t domain_id)
{
	const struct sysctlnode *node, *cpunode;
	struct scmi_perf_domain *pd = &sc->sc_perf_domains[domain_id];
	struct cpu_info *ci = pd->pd_ci;
	struct sysctllog *cpufreq_log = NULL;
	uint32_t max_level, min_level;
	int32_t status;
	int error, i;

	if (ci == NULL)
		return;

	status = scmi_perf_limits_get(pd, &max_level, &min_level);
	if (status != SCMI_SUCCESS) {
		/*
		 * Not supposed to happen, but at least one implementation
		 * returns DENIED here. Assume that there are no limits.
		 */
		min_level = 0;
		max_level = UINT32_MAX;
	}
	aprint_debug_dev(sc->sc_dev, "dom %u limits max %u min %u\n",
	    domain_id, max_level, min_level);

	pd->pd_freq_available = kmem_zalloc(strlen("XXXX ") *
	    pd->pd_nlevels, KM_SLEEP);
	for (i = 0; i < pd->pd_nlevels; i++) {
		char buf[6];
		uint32_t level = pd->pd_level_index_mode ?
				 i : pd->pd_levels[i].pl_perf;

		if (level < min_level) {
			continue;
		} else if (level > max_level) {
			break;
		}

		snprintf(buf, sizeof(buf), i ? " %u" : "%u",
		    pd->pd_levels[i].pl_ifreq / 1000);
		strcat(pd->pd_freq_available, buf);
		if (level == pd->pd_sustained_perf) {
			break;
		}
	}

	error = sysctl_createv(&cpufreq_log, 0, NULL, &node,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "machdep", NULL,
	    NULL, 0, NULL, 0, CTL_MACHDEP, CTL_EOL);
	if (error)
		goto sysctl_failed;
	error = sysctl_createv(&cpufreq_log, 0, &node, &node,
	    0, CTLTYPE_NODE, "cpufreq", NULL,
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto sysctl_failed;
	error = sysctl_createv(&cpufreq_log, 0, &node, &cpunode,
	    0, CTLTYPE_NODE, cpu_name(ci), NULL,
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto sysctl_failed;

	error = sysctl_createv(&cpufreq_log, 0, &cpunode, &node,
	    CTLFLAG_READWRITE, CTLTYPE_INT, "target", NULL,
	    scmi_cpufreq_sysctl_helper, 0, (void *)pd, 0,
	    CTL_CREATE, CTL_EOL);
	if (error)
		goto sysctl_failed;
	pd->pd_node_target = node->sysctl_num;

	error = sysctl_createv(&cpufreq_log, 0, &cpunode, &node,
	    CTLFLAG_READWRITE, CTLTYPE_INT, "current", NULL,
	    scmi_cpufreq_sysctl_helper, 0, (void *)pd, 0,
	    CTL_CREATE, CTL_EOL);
	if (error)
		goto sysctl_failed;
	pd->pd_node_current = node->sysctl_num;

	error = sysctl_createv(&cpufreq_log, 0, &cpunode, &node,
	    0, CTLTYPE_STRING, "available", NULL,
	    NULL, 0, pd->pd_freq_available, 0,
	    CTL_CREATE, CTL_EOL);
	if (error)
		goto sysctl_failed;
	pd->pd_node_available = node->sysctl_num;

	return;

sysctl_failed:
	aprint_error_dev(sc->sc_dev, "couldn't create sysctl nodes: %d\n",
	    error);
	sysctl_teardown(&cpufreq_log);
}