/*	$NetBSD: subr_cpufreq.c,v 1.10 2023/04/09 09:18:09 riastradh Exp $ */

/*-
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jukka Ruohonen.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_cpufreq.c,v 1.10 2023/04/09 09:18:09 riastradh Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/cpufreq.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/time.h>
#include <sys/xcall.h>

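/*
 * Machine-independent support for CPU frequency scaling, cpufreq(9).
 * A machine-dependent backend driver registers itself and its state
 * table with cpufreq_register(); all framework state lives in the
 * single cf_backend structure and is serialized by cpufreq_lock.
 */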
static int	 cpufreq_latency(void);
static uint32_t	 cpufreq_get_max(void);
static uint32_t	 cpufreq_get_min(void);
static uint32_t	 cpufreq_get_raw(struct cpu_info *);
static void	 cpufreq_get_state_raw(uint32_t, struct cpufreq_state *);
static void	 cpufreq_set_raw(struct cpu_info *, uint32_t);
static void	 cpufreq_set_all_raw(uint32_t);

static kmutex_t		cpufreq_lock __cacheline_aligned;
static struct cpufreq  *cf_backend __read_mostly = NULL;

void
cpufreq_init(void)
{

	mutex_init(&cpufreq_lock, MUTEX_DEFAULT, IPL_NONE);
	cf_backend = kmem_zalloc(sizeof(*cf_backend), KM_SLEEP);
}

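/*
 * Register the backend: the caller's struct cpufreq is copied into
 * cf_backend, the state table is sanitized (see below), the per-state
 * transition latencies are measured, and all CPUs are started at the
 * maximum available frequency.  Registration fails with EBUSY while
 * the system is cold and with EALREADY if a backend already exists.
 *
 * An illustrative sketch of a registration (the example_* names are
 * hypothetical, not part of any real driver); the callbacks use the
 * xcall(9) prototype, as the framework invokes them via xc_unicast()
 * and xc_broadcast() with the cookie as the first argument and a
 * pointer to the frequency, in MHz, as the second:
 *
 *	static void
 *	example_get_freq(void *cookie, void *freqp)
 *	{
 *		uint32_t *freq = freqp;
 *
 *		*freq = example_read_mhz(cookie);
 *	}
 *
 *	static void
 *	example_set_freq(void *cookie, void *freqp)
 *	{
 *		uint32_t *freq = freqp;
 *
 *		example_write_mhz(cookie, *freq);
 *	}
 *
 *	static int
 *	example_attach(void *cookie)
 *	{
 *		static struct cpufreq cf;
 *
 *		(void)strlcpy(cf.cf_name, "example", sizeof(cf.cf_name));
 *		cf.cf_cookie = cookie;
 *		cf.cf_get_freq = example_get_freq;
 *		cf.cf_set_freq = example_set_freq;
 *		cf.cf_state_count = 2;
 *		cf.cf_state[0].cfs_freq = 2400;	(fastest state first)
 *		cf.cf_state[1].cfs_freq = 1200;
 *
 *		return cpufreq_register(&cf);
 *	}
 */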
int
cpufreq_register(struct cpufreq *cf)
{
	uint32_t c, i, j, k, m;
	int rv;

	if (cold != 0)
		return EBUSY;

	KASSERT(cf != NULL);
	KASSERT(cf_backend != NULL);
	KASSERT(cf->cf_get_freq != NULL);
	KASSERT(cf->cf_set_freq != NULL);
	KASSERT(cf->cf_state_count > 0);
	KASSERT(cf->cf_state_count < CPUFREQ_STATE_MAX);

	mutex_enter(&cpufreq_lock);

	if (cf_backend->cf_init != false) {
		mutex_exit(&cpufreq_lock);
		return EALREADY;
	}

	cf_backend->cf_init = true;
	cf_backend->cf_mp = cf->cf_mp;
	cf_backend->cf_cookie = cf->cf_cookie;
	cf_backend->cf_get_freq = cf->cf_get_freq;
	cf_backend->cf_set_freq = cf->cf_set_freq;

	(void)strlcpy(cf_backend->cf_name, cf->cf_name, sizeof(cf->cf_name));

	/*
	 * Sanity check the values and verify descending order: entries
	 * with a zero frequency, or a frequency above 9999 MHz that is
	 * not one of the special enabled/disabled markers, are dropped,
	 * as is any entry whose frequency is not strictly below all of
	 * the entries before it.
	 */
	for (c = i = 0; i < cf->cf_state_count; i++) {

		CTASSERT(CPUFREQ_STATE_ENABLED != 0);
		CTASSERT(CPUFREQ_STATE_DISABLED != 0);

		if (cf->cf_state[i].cfs_freq == 0)
			continue;

		if (cf->cf_state[i].cfs_freq > 9999 &&
		    cf->cf_state[i].cfs_freq != CPUFREQ_STATE_ENABLED &&
		    cf->cf_state[i].cfs_freq != CPUFREQ_STATE_DISABLED)
			continue;

		for (j = k = 0; j < i; j++) {

			if (cf->cf_state[i].cfs_freq >=
			    cf->cf_state[j].cfs_freq) {
				k = 1;
				break;
			}
		}

		if (k != 0)
			continue;

		cf_backend->cf_state[c].cfs_index = c;
		cf_backend->cf_state[c].cfs_freq = cf->cf_state[i].cfs_freq;
		cf_backend->cf_state[c].cfs_power = cf->cf_state[i].cfs_power;

		c++;
	}

	cf_backend->cf_state_count = c;

	if (cf_backend->cf_state_count == 0) {
		mutex_exit(&cpufreq_lock);
		cpufreq_deregister();
		return EINVAL;
	}

	rv = cpufreq_latency();

	if (rv != 0) {
		mutex_exit(&cpufreq_lock);
		cpufreq_deregister();
		return rv;
	}

	m = cpufreq_get_max();
	cpufreq_set_all_raw(m);
	mutex_exit(&cpufreq_lock);

	return 0;
}

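/*
 * Deregister by simply zeroing cf_backend; the cf_init flag going
 * back to false makes all entry points return early.
 */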
void
cpufreq_deregister(void)
{

	mutex_enter(&cpufreq_lock);
	memset(cf_backend, 0, sizeof(*cf_backend));
	mutex_exit(&cpufreq_lock);
}

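/*
 * Estimate the transition latency of each state by timing a series of
 * transitions into it with nanotime(9) and storing the average of the
 * accepted samples, in microseconds, in cfs_latency.
 */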
static int
cpufreq_latency(void)
{
	struct cpufreq *cf = cf_backend;
	struct timespec nta, ntb;
	const uint32_t n = 10;
	uint32_t i, j, l, m;
	uint64_t s;

	l = cpufreq_get_min();
	m = cpufreq_get_max();

	/*
	 * For each state, sample the average transition
	 * latency required to set the state for all CPUs.
	 */
	for (i = 0; i < cf->cf_state_count; i++) {

		for (s = 0, j = 0; j < n; j++) {

			/*
			 * Before each timed transition, move all CPUs
			 * to a different state (the minimum for the
			 * first state, the maximum otherwise), so that
			 * possible caching done by the backend does not
			 * short-circuit the call being measured.
			 */
			if (i == 0)
				cpufreq_set_all_raw(l);
			else
				cpufreq_set_all_raw(m);

			nanotime(&nta);
			cpufreq_set_all_raw(cf->cf_state[i].cfs_freq);
			nanotime(&ntb);
			timespecsub(&ntb, &nta, &ntb);

			if (ntb.tv_sec != 0 ||
			    ntb.tv_nsec > CPUFREQ_LATENCY_MAX)
				continue;

			if (s >= UINT64_MAX - CPUFREQ_LATENCY_MAX)
				break;

			/* Convert to microseconds to prevent overflow */
			s += ntb.tv_nsec / 1000;
		}

		/*
		 * Consider the backend unsuitable if no latency could
		 * be measured, e.g. because every transition took too
		 * long.
		 */
		if (s == 0)
			return EMSGSIZE;

		cf->cf_state[i].cfs_latency = s / n;
	}

	return 0;
}

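/*
 * The suspend/resume pair: before suspend, save the current frequency
 * of the CPU and drop it to the minimum; on resume, restore the saved
 * frequency.
 */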
void
cpufreq_suspend(struct cpu_info *ci)
{
	struct cpufreq *cf = cf_backend;
	uint32_t l, s;

	mutex_enter(&cpufreq_lock);

	if (cf->cf_init != true) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	l = cpufreq_get_min();
	s = cpufreq_get_raw(ci);

	cpufreq_set_raw(ci, l);
	cf->cf_state_saved = s;

	mutex_exit(&cpufreq_lock);
}

void
cpufreq_resume(struct cpu_info *ci)
{
	struct cpufreq *cf = cf_backend;

	mutex_enter(&cpufreq_lock);

	if (cf->cf_init != true || cf->cf_state_saved == 0) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_set_raw(ci, cf->cf_state_saved);
	mutex_exit(&cpufreq_lock);
}

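/*
 * Return the current frequency of the given CPU, or zero if no
 * backend has been registered.
 */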
uint32_t
cpufreq_get(struct cpu_info *ci)
{
	struct cpufreq *cf = cf_backend;
	uint32_t freq;

	mutex_enter(&cpufreq_lock);

	if (cf->cf_init != true) {
		mutex_exit(&cpufreq_lock);
		return 0;
	}

	freq = cpufreq_get_raw(ci);
	mutex_exit(&cpufreq_lock);

	return freq;
}

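/*
 * As the state table is kept in descending order, the maximum
 * frequency is always the first entry and the minimum the last.
 */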
static uint32_t
cpufreq_get_max(void)
{
	struct cpufreq *cf = cf_backend;

	KASSERT(cf->cf_init != false);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	return cf->cf_state[0].cfs_freq;
}

static uint32_t
cpufreq_get_min(void)
{
	struct cpufreq *cf = cf_backend;

	KASSERT(cf->cf_init != false);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	return cf->cf_state[cf->cf_state_count - 1].cfs_freq;
}

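/*
 * Query the frequency with a cross call so that the backend callback
 * runs on the CPU being queried; xc_wait() blocks until it has
 * completed.
 */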
static uint32_t
cpufreq_get_raw(struct cpu_info *ci)
{
	struct cpufreq *cf = cf_backend;
	uint32_t freq = 0;
	uint64_t xc;

	KASSERT(cf->cf_init != false);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	xc = xc_unicast(0, (*cf->cf_get_freq), cf->cf_cookie, &freq, ci);
	xc_wait(xc);

	return freq;
}

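/*
 * Copy the full backend state, including the name and the state
 * table, to the caller-supplied buffer.
 */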
int
cpufreq_get_backend(struct cpufreq *dst)
{
	struct cpufreq *cf = cf_backend;

	mutex_enter(&cpufreq_lock);

	if (cf->cf_init != true || dst == NULL) {
		mutex_exit(&cpufreq_lock);
		return ENODEV;
	}

	memcpy(dst, cf, sizeof(*cf));
	mutex_exit(&cpufreq_lock);

	return 0;
}

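/*
 * Look up a state either by frequency or by table index, copying the
 * result to the caller-supplied buffer.
 */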
int
cpufreq_get_state(uint32_t freq, struct cpufreq_state *cfs)
{
	struct cpufreq *cf = cf_backend;

	mutex_enter(&cpufreq_lock);

	if (cf->cf_init != true || cfs == NULL) {
		mutex_exit(&cpufreq_lock);
		return ENODEV;
	}

	cpufreq_get_state_raw(freq, cfs);
	mutex_exit(&cpufreq_lock);

	return 0;
}

int
cpufreq_get_state_index(uint32_t index, struct cpufreq_state *cfs)
{
	struct cpufreq *cf = cf_backend;

	mutex_enter(&cpufreq_lock);

	if (cf->cf_init != true || cfs == NULL) {
		mutex_exit(&cpufreq_lock);
		return ENODEV;
	}

	if (index >= cf->cf_state_count) {
		mutex_exit(&cpufreq_lock);
		return EINVAL;
	}

	memcpy(cfs, &cf->cf_state[index], sizeof(*cfs));
	mutex_exit(&cpufreq_lock);

	return 0;
}

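/*
 * Binary search over the descending state table.  If no entry matches
 * the frequency exactly, the entry probed last is returned as an
 * approximation.
 */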
static void
cpufreq_get_state_raw(uint32_t freq, struct cpufreq_state *cfs)
{
	struct cpufreq *cf = cf_backend;
	uint32_t f, hi, i = 0, lo = 0;

	KASSERT(mutex_owned(&cpufreq_lock) != 0);
	KASSERT(cf->cf_init != false);
	KASSERT(cfs != NULL);

	hi = cf->cf_state_count;

	while (lo < hi) {

		i = (lo + hi) >> 1;
		f = cf->cf_state[i].cfs_freq;

		if (freq == f)
			break;
		else if (freq > f)
			hi = i;
		else
			lo = i + 1;
	}

	memcpy(cfs, &cf->cf_state[i], sizeof(*cfs));
}

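/*
 * Set the frequency of an individual CPU, again via a cross call to
 * the CPU in question.
 */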
void
cpufreq_set(struct cpu_info *ci, uint32_t freq)
{
	struct cpufreq *cf = cf_backend;

	mutex_enter(&cpufreq_lock);

	if (__predict_false(cf->cf_init != true)) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_set_raw(ci, freq);
	mutex_exit(&cpufreq_lock);
}

static void
cpufreq_set_raw(struct cpu_info *ci, uint32_t freq)
{
	struct cpufreq *cf = cf_backend;
	uint64_t xc;

	KASSERT(cf->cf_init != false);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	xc = xc_unicast(0, (*cf->cf_set_freq), cf->cf_cookie, &freq, ci);
	xc_wait(xc);
}

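/*
 * Set the frequency of every CPU at once by broadcasting the backend
 * callback to all CPUs.
 */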
void
cpufreq_set_all(uint32_t freq)
{
	struct cpufreq *cf = cf_backend;

	mutex_enter(&cpufreq_lock);

	if (__predict_false(cf->cf_init != true)) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_set_all_raw(freq);
	mutex_exit(&cpufreq_lock);
}

static void
cpufreq_set_all_raw(uint32_t freq)
{
	struct cpufreq *cf = cf_backend;
	uint64_t xc;

	KASSERT(cf->cf_init != false);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	xc = xc_broadcast(0, (*cf->cf_set_freq), cf->cf_cookie, &freq);
	xc_wait(xc);
}

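/*
 * Relative stepping, not yet enabled.  Since the state table is in
 * descending order, a negative step selects a higher frequency and a
 * positive step a lower one.
 */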
#ifdef notyet
static void	 cpufreq_set_step(struct cpu_info *, int32_t);

void
cpufreq_set_higher(struct cpu_info *ci)
{
	cpufreq_set_step(ci, -1);
}

void
cpufreq_set_lower(struct cpu_info *ci)
{
	cpufreq_set_step(ci, 1);
}

static void
cpufreq_set_step(struct cpu_info *ci, int32_t step)
{
	struct cpufreq *cf = cf_backend;
	struct cpufreq_state cfs;
	uint32_t freq;
	int32_t index;

	mutex_enter(&cpufreq_lock);

	if (__predict_false(cf->cf_init != true)) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	freq = cpufreq_get_raw(ci);

	if (__predict_false(freq == 0)) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_get_state_raw(freq, &cfs);
	index = cfs.cfs_index + step;

	if (index < 0 || index >= (int32_t)cf->cf_state_count) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_set_raw(ci, cf->cf_state[index].cfs_freq);
	mutex_exit(&cpufreq_lock);
}
#endif