/*	$NetBSD: subr_cpufreq.c,v 1.11 2026/01/04 03:15:28 riastradh Exp $ */

/*-
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jukka Ruohonen.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_cpufreq.c,v 1.11 2026/01/04 03:15:28 riastradh Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/cpufreq.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/sdt.h>
#include <sys/time.h>
#include <sys/xcall.h>

static int	 cpufreq_latency(void);
static uint32_t	 cpufreq_get_max(void);
static uint32_t	 cpufreq_get_min(void);
static uint32_t	 cpufreq_get_raw(struct cpu_info *);
static void	 cpufreq_get_state_raw(uint32_t, struct cpufreq_state *);
static void	 cpufreq_set_raw(struct cpu_info *, uint32_t);
static void	 cpufreq_set_all_raw(uint32_t);

static kmutex_t		cpufreq_lock __cacheline_aligned;
static struct cpufreq  *cf_backend __read_mostly = NULL;

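/*
 * Machine-independent CPU frequency scaling framework.  A single
 * machine-dependent backend (cf_backend) supplies the get/set
 * callbacks; all access to it is serialized by cpufreq_lock.
 */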
void
cpufreq_init(void)
{

	mutex_init(&cpufreq_lock, MUTEX_DEFAULT, IPL_NONE);
	cf_backend = kmem_zalloc(sizeof(*cf_backend), KM_SLEEP);
}

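/*
 * Example: a machine-dependent backend fills in a struct cpufreq,
 * with the frequencies given in MHz from the highest to the lowest,
 * and registers it.  An illustrative sketch only; the "foo_*" names
 * are hypothetical and not part of this interface:
 *
 *	static struct cpufreq foo_cf;
 *
 *	foo_cf.cf_mp = false;
 *	foo_cf.cf_cookie = sc;
 *	foo_cf.cf_get_freq = foo_get_freq;
 *	foo_cf.cf_set_freq = foo_set_freq;
 *	foo_cf.cf_state[0].cfs_freq = 2400;
 *	foo_cf.cf_state[1].cfs_freq = 1800;
 *	foo_cf.cf_state_count = 2;
 *	(void)strlcpy(foo_cf.cf_name, "foo", sizeof(foo_cf.cf_name));
 *
 *	error = cpufreq_register(&foo_cf);
 */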
int
cpufreq_register(struct cpufreq *cf)
{
	uint32_t c, i, j, k, m;
	int rv;

	if (cold != 0)
		return SET_ERROR(EBUSY);

	KASSERT(cf != NULL);
	KASSERT(cf_backend != NULL);
	KASSERT(cf->cf_get_freq != NULL);
	KASSERT(cf->cf_set_freq != NULL);
	KASSERT(cf->cf_state_count > 0);
	KASSERT(cf->cf_state_count < CPUFREQ_STATE_MAX);

	mutex_enter(&cpufreq_lock);

	if (cf_backend->cf_init != false) {
		mutex_exit(&cpufreq_lock);
		return SET_ERROR(EALREADY);
	}

	cf_backend->cf_init = true;
	cf_backend->cf_mp = cf->cf_mp;
	cf_backend->cf_cookie = cf->cf_cookie;
	cf_backend->cf_get_freq = cf->cf_get_freq;
	cf_backend->cf_set_freq = cf->cf_set_freq;

	(void)strlcpy(cf_backend->cf_name, cf->cf_name, sizeof(cf->cf_name));

	/*
	 * Sanity check the values and verify descending order:
	 * entries with a zero or out-of-range frequency, as well
	 * as entries that would break the strictly descending
	 * order of the table, are silently dropped.  For example,
	 * { 2400, 1800, 1800, 1200 } is reduced to { 2400, 1800,
	 * 1200 }.
	 */
	for (c = i = 0; i < cf->cf_state_count; i++) {

		CTASSERT(CPUFREQ_STATE_ENABLED != 0);
		CTASSERT(CPUFREQ_STATE_DISABLED != 0);

		if (cf->cf_state[i].cfs_freq == 0)
			continue;

		if (cf->cf_state[i].cfs_freq > 9999 &&
		    cf->cf_state[i].cfs_freq != CPUFREQ_STATE_ENABLED &&
		    cf->cf_state[i].cfs_freq != CPUFREQ_STATE_DISABLED)
			continue;

		for (j = k = 0; j < i; j++) {

			if (cf->cf_state[i].cfs_freq >=
			    cf->cf_state[j].cfs_freq) {
				k = 1;
				break;
			}
		}

		if (k != 0)
			continue;

		cf_backend->cf_state[c].cfs_index = c;
		cf_backend->cf_state[c].cfs_freq = cf->cf_state[i].cfs_freq;
		cf_backend->cf_state[c].cfs_power = cf->cf_state[i].cfs_power;

		c++;
	}

	cf_backend->cf_state_count = c;

	if (cf_backend->cf_state_count == 0) {
		mutex_exit(&cpufreq_lock);
		cpufreq_deregister();
		return SET_ERROR(EINVAL);
	}

	rv = cpufreq_latency();

	if (rv != 0) {
		mutex_exit(&cpufreq_lock);
		cpufreq_deregister();
		return rv;
	}

	m = cpufreq_get_max();
	cpufreq_set_all_raw(m);
	mutex_exit(&cpufreq_lock);

	return 0;
}

void
cpufreq_deregister(void)
{

	mutex_enter(&cpufreq_lock);
	memset(cf_backend, 0, sizeof(*cf_backend));
	mutex_exit(&cpufreq_lock);
}

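/*
 * Note on units in cpufreq_latency(): each sample is converted from
 * nanoseconds to microseconds before being accumulated, so with the
 * illustrative figures of n = 10 transitions of roughly 60000 ns
 * each, s accumulates 10 * 60 = 600 and the resulting cfs_latency
 * is 600 / 10 = 60 microseconds.
 */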
static int
cpufreq_latency(void)
{
	struct cpufreq *cf = cf_backend;
	struct timespec nta, ntb;
	const uint32_t n = 10;
	uint32_t i, j, l, m;
	uint64_t s;

	l = cpufreq_get_min();
	m = cpufreq_get_max();

	/*
	 * For each state, sample the average transition
	 * latency required to set the state for all CPUs.
	 */
	for (i = 0; i < cf->cf_state_count; i++) {

		for (s = 0, j = 0; j < n; j++) {

			/*
			 * Attempt to exclude possible
			 * caching done by the backend.
			 */
			if (i == 0)
				cpufreq_set_all_raw(l);
			else
				cpufreq_set_all_raw(m);

			nanotime(&nta);
			cpufreq_set_all_raw(cf->cf_state[i].cfs_freq);
			nanotime(&ntb);
			timespecsub(&ntb, &nta, &ntb);

			if (ntb.tv_sec != 0 ||
			    ntb.tv_nsec > CPUFREQ_LATENCY_MAX)
				continue;

			if (s >= UINT64_MAX - CPUFREQ_LATENCY_MAX)
				break;

			/* Convert to microseconds to prevent overflow */
			s += ntb.tv_nsec / 1000;
		}

		/*
		 * Consider the backend unsuitable if
		 * the transition latency was too high.
		 */
		if (s == 0)
			return SET_ERROR(EMSGSIZE);

		cf->cf_state[i].cfs_latency = s / n;
	}

	return 0;
}

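/*
 * Suspend/resume glue: cpufreq_suspend() records the current state of
 * the CPU and drops it to the lowest frequency, and cpufreq_resume()
 * restores the recorded state.  A usage sketch, assuming a
 * hypothetical machine-dependent power hook:
 *
 *	cpufreq_suspend(ci);
 *	... the machine sleeps ...
 *	cpufreq_resume(ci);
 */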
void
cpufreq_suspend(struct cpu_info *ci)
{
	struct cpufreq *cf = cf_backend;
	uint32_t l, s;

	mutex_enter(&cpufreq_lock);

	if (cf->cf_init != true) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	l = cpufreq_get_min();
	s = cpufreq_get_raw(ci);

	cpufreq_set_raw(ci, l);
	cf->cf_state_saved = s;

	mutex_exit(&cpufreq_lock);
}

void
cpufreq_resume(struct cpu_info *ci)
{
	struct cpufreq *cf = cf_backend;

	mutex_enter(&cpufreq_lock);

	if (cf->cf_init != true || cf->cf_state_saved == 0) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_set_raw(ci, cf->cf_state_saved);
	mutex_exit(&cpufreq_lock);
}

uint32_t
cpufreq_get(struct cpu_info *ci)
{
	struct cpufreq *cf = cf_backend;
	uint32_t freq;

	mutex_enter(&cpufreq_lock);

	if (cf->cf_init != true) {
		mutex_exit(&cpufreq_lock);
		return 0;
	}

	freq = cpufreq_get_raw(ci);
	mutex_exit(&cpufreq_lock);

	return freq;
}

static uint32_t
cpufreq_get_max(void)
{
	struct cpufreq *cf = cf_backend;

	KASSERT(cf->cf_init != false);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	return cf->cf_state[0].cfs_freq;
}

static uint32_t
cpufreq_get_min(void)
{
	struct cpufreq *cf = cf_backend;

	KASSERT(cf->cf_init != false);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	return cf->cf_state[cf->cf_state_count - 1].cfs_freq;
}

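/*
 * The backend callbacks run on the target CPU via a cross-call; see
 * xcall(9).  Each callback therefore has the xcfunc_t signature and
 * receives cf_cookie and a pointer to the frequency.  A minimal
 * get-callback sketch (hypothetical backend, not part of this file):
 *
 *	static void
 *	foo_get_freq(void *cookie, void *arg)
 *	{
 *		uint32_t *freq = arg;
 *
 *		*freq = foo_read_mhz(cookie);
 *	}
 */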
static uint32_t
cpufreq_get_raw(struct cpu_info *ci)
{
	struct cpufreq *cf = cf_backend;
	uint32_t freq = 0;
	uint64_t xc;

	KASSERT(cf->cf_init != false);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	xc = xc_unicast(0, (*cf->cf_get_freq), cf->cf_cookie, &freq, ci);
	xc_wait(xc);

	return freq;
}

int
cpufreq_get_backend(struct cpufreq *dst)
{
	struct cpufreq *cf = cf_backend;

	mutex_enter(&cpufreq_lock);

	if (cf->cf_init != true || dst == NULL) {
		mutex_exit(&cpufreq_lock);
		return SET_ERROR(ENODEV);
	}

	memcpy(dst, cf, sizeof(*cf));
	mutex_exit(&cpufreq_lock);

	return 0;
}

int
cpufreq_get_state(uint32_t freq, struct cpufreq_state *cfs)
{
	struct cpufreq *cf = cf_backend;

	mutex_enter(&cpufreq_lock);

	if (cf->cf_init != true || cfs == NULL) {
		mutex_exit(&cpufreq_lock);
		return SET_ERROR(ENODEV);
	}

	cpufreq_get_state_raw(freq, cfs);
	mutex_exit(&cpufreq_lock);

	return 0;
}

int
cpufreq_get_state_index(uint32_t index, struct cpufreq_state *cfs)
{
	struct cpufreq *cf = cf_backend;

	mutex_enter(&cpufreq_lock);

	if (cf->cf_init != true || cfs == NULL) {
		mutex_exit(&cpufreq_lock);
		return SET_ERROR(ENODEV);
	}

	if (index >= cf->cf_state_count) {
		mutex_exit(&cpufreq_lock);
		return SET_ERROR(EINVAL);
	}

	memcpy(cfs, &cf->cf_state[index], sizeof(*cfs));
	mutex_exit(&cpufreq_lock);

	return 0;
}

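/*
 * Map a frequency to the closest state with a binary search.  The
 * state table is sorted in descending order of frequency, so the
 * comparisons below are inverted with respect to a conventional
 * ascending-order search.  For instance, with the states { 2400,
 * 1800, 1200 } MHz, a request for 1500 terminates at the 1200 MHz
 * state.
 */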
static void
cpufreq_get_state_raw(uint32_t freq, struct cpufreq_state *cfs)
{
	struct cpufreq *cf = cf_backend;
	uint32_t f, hi, i = 0, lo = 0;

	KASSERT(mutex_owned(&cpufreq_lock) != 0);
	KASSERT(cf->cf_init != false);
	KASSERT(cfs != NULL);

	hi = cf->cf_state_count;

	while (lo < hi) {

		i = (lo + hi) >> 1;
		f = cf->cf_state[i].cfs_freq;

		if (freq == f)
			break;
		else if (freq > f)
			hi = i;
		else
			lo = i + 1;
	}

	memcpy(cfs, &cf->cf_state[i], sizeof(*cfs));
}

void
cpufreq_set(struct cpu_info *ci, uint32_t freq)
{
	struct cpufreq *cf = cf_backend;

	mutex_enter(&cpufreq_lock);

	if (__predict_false(cf->cf_init != true)) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_set_raw(ci, freq);
	mutex_exit(&cpufreq_lock);
}

static void
cpufreq_set_raw(struct cpu_info *ci, uint32_t freq)
{
	struct cpufreq *cf = cf_backend;
	uint64_t xc;

	KASSERT(cf->cf_init != false);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	xc = xc_unicast(0, (*cf->cf_set_freq), cf->cf_cookie, &freq, ci);
	xc_wait(xc);
}

void
cpufreq_set_all(uint32_t freq)
{
	struct cpufreq *cf = cf_backend;

	mutex_enter(&cpufreq_lock);

	if (__predict_false(cf->cf_init != true)) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_set_all_raw(freq);
	mutex_exit(&cpufreq_lock);
}

static void
cpufreq_set_all_raw(uint32_t freq)
{
	struct cpufreq *cf = cf_backend;
	uint64_t xc;

	KASSERT(cf->cf_init != false);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	xc = xc_broadcast(0, (*cf->cf_set_freq), cf->cf_cookie, &freq);
	xc_wait(xc);
}

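/*
 * Because the state table runs from the highest frequency at index
 * zero to the lowest at the end, stepping to a higher frequency means
 * moving to a lower index; hence the negative step below.
 */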
#ifdef notyet
static void	 cpufreq_set_step(struct cpu_info *, int32_t);

void
cpufreq_set_higher(struct cpu_info *ci)
{
	cpufreq_set_step(ci, -1);
}

void
cpufreq_set_lower(struct cpu_info *ci)
{
	cpufreq_set_step(ci, 1);
}

static void
cpufreq_set_step(struct cpu_info *ci, int32_t step)
{
	struct cpufreq *cf = cf_backend;
	struct cpufreq_state cfs;
	uint32_t freq;
	int32_t index;

	mutex_enter(&cpufreq_lock);

	if (__predict_false(cf->cf_init != true)) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	freq = cpufreq_get_raw(ci);

	if (__predict_false(freq == 0)) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_get_state_raw(freq, &cfs);
	index = cfs.cfs_index + step;

	if (index < 0 || index >= (int32_t)cf->cf_state_count) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_set_raw(ci, cf->cf_state[index].cfs_freq);
	mutex_exit(&cpufreq_lock);
}
#endif
    509