/*	$NetBSD: subr_cpufreq.c,v 1.1 2011/09/28 10:55:48 jruoho Exp $ */

/*-
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jukka Ruohonen.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_cpufreq.c,v 1.1 2011/09/28 10:55:48 jruoho Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/cpufreq.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/once.h>
#include <sys/time.h>
#include <sys/xcall.h>

static int	 cpufreq_init(void);
static int	 cpufreq_latency(void);
static uint32_t	 cpufreq_get_max(void);
static uint32_t	 cpufreq_get_min(void);
static uint32_t	 cpufreq_get_raw(struct cpu_info *);
static void	 cpufreq_get_state_raw(uint32_t, struct cpufreq_state *);
static void	 cpufreq_set_raw(struct cpu_info *, uint32_t);
static void	 cpufreq_set_all_raw(uint32_t);
#ifdef notyet
static void	 cpufreq_set_step(struct cpu_info *, int32_t);
#endif

static kmutex_t cpufreq_lock __cacheline_aligned;
static struct cpufreq *cf_backend __read_mostly = NULL;

static int
cpufreq_init(void)
{

	mutex_init(&cpufreq_lock, MUTEX_DEFAULT, IPL_NONE);

	return 0;
}

int
cpufreq_register(struct cpufreq *cf)
{
	static ONCE_DECL(cpufreq_once);
	struct cpufreq *ncf;
	uint32_t count, i, j, k, m;
	int rv;

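	/* Initialize the subsystem lock once, upon the first registration. */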
	rv = RUN_ONCE(&cpufreq_once, cpufreq_init);

	KASSERT(rv == 0);
	KASSERT(cf != NULL);
	KASSERT(cf->cf_get_freq != NULL);
	KASSERT(cf->cf_set_freq != NULL);
	KASSERT(cf->cf_state_count > 0);
	KASSERT(cf->cf_state_count < CPUFREQ_STATE_MAX);

	/*
	 * Allocate before taking the lock: kmem_zalloc(9) with
	 * KM_SLEEP may sleep and never returns NULL, so the
	 * allocation can neither fail nor happen under the mutex.
	 */
	ncf = kmem_zalloc(sizeof(*cf), KM_SLEEP);

	mutex_enter(&cpufreq_lock);

	if (cf_backend != NULL) {
		mutex_exit(&cpufreq_lock);
		kmem_free(ncf, sizeof(*cf));
		return EALREADY;
	}

	cf_backend = ncf;

	cf_backend->cf_mp = cf->cf_mp;
	cf_backend->cf_cookie = cf->cf_cookie;
	cf_backend->cf_get_freq = cf->cf_get_freq;
	cf_backend->cf_set_freq = cf->cf_set_freq;

	(void)strlcpy(cf_backend->cf_name, cf->cf_name,
	    sizeof(cf_backend->cf_name));

	/*
	 * Sanity check the values and verify descending order of
	 * the frequencies.
	 */
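	/*
	 * For instance, a valid table could look like the following
	 * (the values are illustrative only):
	 *
	 *	cfs_freq:  2400, 1800, 1200, 800	(MHz)
	 *
	 * Entries with a zero frequency or out of order are skipped.
	 */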
	for (count = i = 0; i < cf->cf_state_count; i++) {

		CTASSERT(CPUFREQ_STATE_ENABLED != 0);
		CTASSERT(CPUFREQ_STATE_DISABLED != 0);

		if (cf->cf_state[i].cfs_freq == 0)
			continue;

		for (j = k = 0; j < i; j++) {

			if (cf->cf_state[i].cfs_freq >=
			    cf->cf_state[j].cfs_freq) {
				k = 1;
				break;
			}
		}

		if (k != 0)
			continue;

		/*
		 * Store the accepted states compactly so that skipped
		 * entries do not leave zero-filled holes in the table.
		 */
		cf_backend->cf_state[count].cfs_index = count;
		cf_backend->cf_state[count].cfs_freq = cf->cf_state[i].cfs_freq;
		cf_backend->cf_state[count].cfs_power = cf->cf_state[i].cfs_power;

		count++;
	}

	cf_backend->cf_state_count = count;

	if (cf_backend->cf_state_count == 0) {
		mutex_exit(&cpufreq_lock);
		cpufreq_deregister();
		return EINVAL;
	}

	rv = cpufreq_latency();

	if (rv != 0) {
		mutex_exit(&cpufreq_lock);
		cpufreq_deregister();
		return rv;
	}

	m = cpufreq_get_max();
	cpufreq_set_all_raw(m);
	mutex_exit(&cpufreq_lock);

	return 0;
}
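
/*
 * Example (a sketch only): a machine-dependent backend could register
 * itself roughly as follows; the "acme" names and the frequencies are
 * hypothetical and do not refer to any real driver.
 *
 *	static struct cpufreq acme_cf = {
 *		.cf_name	= "acme",
 *		.cf_state[0]	= { .cfs_freq = 2400 },
 *		.cf_state[1]	= { .cfs_freq = 1200 },
 *		.cf_state_count	= 2,
 *		.cf_get_freq	= acme_get_freq,
 *		.cf_set_freq	= acme_set_freq,
 *	};
 *
 *	error = cpufreq_register(&acme_cf);
 */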

void
cpufreq_deregister(void)
{
	struct cpufreq *cf;

	mutex_enter(&cpufreq_lock);

	if (cf_backend == NULL) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	/*
	 * Clear the backend pointer while the lock is still held so
	 * that no new callers can reach the backend, then release
	 * the memory outside the lock.
	 */
	cf = cf_backend;
	cf_backend = NULL;
	mutex_exit(&cpufreq_lock);

	kmem_free(cf, sizeof(*cf));
}

static int
cpufreq_latency(void)
{
	struct cpufreq *cf = cf_backend;
	struct timespec nta, ntb;
	const uint32_t n = 10;
	uint32_t i, j, l, m;
	uint64_t s;

	l = cpufreq_get_min();
	m = cpufreq_get_max();

	/*
	 * For each state, sample the average transition
	 * latency required to set the state for all CPUs.
	 */
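	/*
	 * A sketch of the scheme: take n samples per state, discard
	 * the ones above CPUFREQ_LATENCY_MAX as outliers, and record
	 * the sum of the accepted samples divided by n.  For example
	 * (illustrative numbers only), ten samples of roughly 50000
	 * ns each would yield a cfs_latency of about 50000 ns.
	 */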
	for (i = 0; i < cf->cf_state_count; i++) {

		for (s = 0, j = 0; j < n; j++) {

			/*
			 * Attempt to exclude possible
			 * caching done by the backend.
			 */
			if (i == 0)
				cpufreq_set_all_raw(l);
			else
				cpufreq_set_all_raw(m);

			nta.tv_sec = nta.tv_nsec = 0;
			ntb.tv_sec = ntb.tv_nsec = 0;

			nanotime(&nta);
			cpufreq_set_all_raw(cf->cf_state[i].cfs_freq);
			nanotime(&ntb);
			timespecsub(&ntb, &nta, &ntb);

			if (ntb.tv_sec != 0 ||
			    ntb.tv_nsec > CPUFREQ_LATENCY_MAX)
				continue;

			if (s >= UINT64_MAX - CPUFREQ_LATENCY_MAX)
				break;

			s += ntb.tv_nsec;
		}

		/*
		 * Consider the backend unsuitable if
		 * the transition latency was too high.
		 */
		if (s == 0)
			return EMSGSIZE;

		cf->cf_state[i].cfs_latency = s / n;
	}

	return 0;
}

void
cpufreq_suspend(struct cpu_info *ci)
{
	struct cpufreq *cf;
	uint32_t l, s;

	mutex_enter(&cpufreq_lock);
	cf = cf_backend;

	if (cf == NULL) {
		mutex_exit(&cpufreq_lock);
		return;
	}

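	/* Save the current state and drop to the minimum for suspend. */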
	l = cpufreq_get_min();
	s = cpufreq_get_raw(ci);

	cpufreq_set_raw(ci, l);
	cf->cf_state_saved = s;

	mutex_exit(&cpufreq_lock);
}

void
cpufreq_resume(struct cpu_info *ci)
{
	struct cpufreq *cf;

	mutex_enter(&cpufreq_lock);
	cf = cf_backend;

	if (cf == NULL || cf->cf_state_saved == 0) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_set_raw(ci, cf->cf_state_saved);
	mutex_exit(&cpufreq_lock);
}

uint32_t
cpufreq_get(struct cpu_info *ci)
{
	struct cpufreq *cf;
	uint32_t freq;

	mutex_enter(&cpufreq_lock);
	cf = cf_backend;

	if (cf == NULL) {
		mutex_exit(&cpufreq_lock);
		return 0;
	}

	freq = cpufreq_get_raw(ci);
	mutex_exit(&cpufreq_lock);

	return freq;
}

static uint32_t
cpufreq_get_max(void)
{
	struct cpufreq *cf = cf_backend;

	KASSERT(cf != NULL);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

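	/* The states are in descending order; the first is the fastest. */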
	return cf->cf_state[0].cfs_freq;
}

static uint32_t
cpufreq_get_min(void)
{
	struct cpufreq *cf = cf_backend;

	KASSERT(cf != NULL);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

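	/* Due to the descending order, the last state is the slowest. */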
	return cf->cf_state[cf->cf_state_count - 1].cfs_freq;
}

static uint32_t
cpufreq_get_raw(struct cpu_info *ci)
{
	struct cpufreq *cf = cf_backend;
	uint32_t freq = 0;
	uint64_t xc;

	KASSERT(cf != NULL);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

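	/* Run the backend's get-routine on the target CPU via a cross-call. */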
	xc = xc_unicast(0, (*cf->cf_get_freq), cf->cf_cookie, &freq, ci);
	xc_wait(xc);

	return freq;
}

int
cpufreq_get_backend(struct cpufreq *cf)
{

	mutex_enter(&cpufreq_lock);

	if (cf_backend == NULL || cf == NULL) {
		mutex_exit(&cpufreq_lock);
		return ENODEV;
	}

	(void)memcpy(cf, cf_backend, sizeof(*cf));
	mutex_exit(&cpufreq_lock);

	return 0;
}

int
cpufreq_get_state(uint32_t freq, struct cpufreq_state *cfs)
{
	struct cpufreq *cf;

	mutex_enter(&cpufreq_lock);
	cf = cf_backend;

	if (cf == NULL || cfs == NULL) {
		mutex_exit(&cpufreq_lock);
		return ENODEV;
	}

	cpufreq_get_state_raw(freq, cfs);
	mutex_exit(&cpufreq_lock);

	return 0;
}

int
cpufreq_get_state_index(uint32_t index, struct cpufreq_state *cfs)
{
	struct cpufreq *cf;

	mutex_enter(&cpufreq_lock);
	cf = cf_backend;

	if (cf == NULL || cfs == NULL) {
		mutex_exit(&cpufreq_lock);
		return ENODEV;
	}

	if (index >= cf->cf_state_count) {
		mutex_exit(&cpufreq_lock);
		return EINVAL;
	}

	(void)memcpy(cfs, &cf->cf_state[index], sizeof(*cfs));
	mutex_exit(&cpufreq_lock);

	return 0;
}

static void
cpufreq_get_state_raw(uint32_t freq, struct cpufreq_state *cfs)
{
	struct cpufreq *cf = cf_backend;
	uint32_t f, hi, i = 0, lo = 0;

	KASSERT(cf != NULL && cfs != NULL);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	hi = cf->cf_state_count;

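	/*
	 * Binary search in a table sorted by descending frequency; if
	 * no exact match is found, the state probed last is returned.
	 */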
	while (lo < hi) {

		i = (lo + hi) >> 1;
		f = cf->cf_state[i].cfs_freq;

		if (freq == f)
			break;
		else if (freq > f)
			hi = i;
		else
			lo = i + 1;
	}

	(void)memcpy(cfs, &cf->cf_state[i], sizeof(*cfs));
}

void
cpufreq_set(struct cpu_info *ci, uint32_t freq)
{
	struct cpufreq *cf;

	mutex_enter(&cpufreq_lock);
	cf = cf_backend;

	if (__predict_false(cf == NULL)) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_set_raw(ci, freq);
	mutex_exit(&cpufreq_lock);
}

static void
cpufreq_set_raw(struct cpu_info *ci, uint32_t freq)
{
	struct cpufreq *cf = cf_backend;
	uint64_t xc;

	KASSERT(cf != NULL);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	xc = xc_unicast(0, (*cf->cf_set_freq), cf->cf_cookie, &freq, ci);
	xc_wait(xc);
}

void
cpufreq_set_all(uint32_t freq)
{
	struct cpufreq *cf;

	mutex_enter(&cpufreq_lock);
	cf = cf_backend;

	if (__predict_false(cf == NULL)) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_set_all_raw(freq);
	mutex_exit(&cpufreq_lock);
}

static void
cpufreq_set_all_raw(uint32_t freq)
{
	struct cpufreq *cf = cf_backend;
	uint64_t xc;

	KASSERT(cf != NULL);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

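	/* Run the backend's set-routine on every CPU and wait for all. */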
	xc = xc_broadcast(0, (*cf->cf_set_freq), cf->cf_cookie, &freq);
	xc_wait(xc);
}

#ifdef notyet
void
cpufreq_set_higher(struct cpu_info *ci)
{

	cpufreq_set_step(ci, -1);
}

void
cpufreq_set_lower(struct cpu_info *ci)
{

	cpufreq_set_step(ci, 1);
}

static void
cpufreq_set_step(struct cpu_info *ci, int32_t step)
{
	struct cpufreq_state cfs;
	struct cpufreq *cf;
	uint32_t freq;
	int32_t index;

	mutex_enter(&cpufreq_lock);
	cf = cf_backend;

	if (__predict_false(cf == NULL)) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	freq = cpufreq_get_raw(ci);

	if (__predict_false(freq == 0)) {
		mutex_exit(&cpufreq_lock);
		return;
	}

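	/* A negative step moves toward index 0, i.e. a higher frequency. */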
	cpufreq_get_state_raw(freq, &cfs);
	index = cfs.cfs_index + step;

	if (index < 0 || index >= (int32_t)cf->cf_state_count) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_set_raw(ci, cf->cf_state[index].cfs_freq);
	mutex_exit(&cpufreq_lock);
}
#endif