/*	$NetBSD: subr_cpufreq.c,v 1.6 2011/10/25 11:35:49 jruoho Exp $ */

/*-
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jukka Ruohonen.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_cpufreq.c,v 1.6 2011/10/25 11:35:49 jruoho Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/cpufreq.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/time.h>
#include <sys/xcall.h>

static int	 cpufreq_latency(void);
static uint32_t	 cpufreq_get_max(void);
static uint32_t	 cpufreq_get_min(void);
static uint32_t	 cpufreq_get_raw(struct cpu_info *);
static void	 cpufreq_get_state_raw(uint32_t, struct cpufreq_state *);
static void	 cpufreq_set_raw(struct cpu_info *, uint32_t);
static void	 cpufreq_set_all_raw(uint32_t);

static kmutex_t		cpufreq_lock __cacheline_aligned;
static struct cpufreq  *cf_backend __read_mostly = NULL;

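/*
 * Initialize the framework.  This must run once, before any
 * backend can attach via cpufreq_register().
 */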
void
cpufreq_init(void)
{

	mutex_init(&cpufreq_lock, MUTEX_DEFAULT, IPL_NONE);
	cf_backend = kmem_zalloc(sizeof(*cf_backend), KM_SLEEP);
}

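/*
 * Register a driver as the active cpufreq backend.  The supplied
 * state table is sanity-checked and copied in strictly descending
 * order of frequency; zero, out-of-range, and out-of-order entries
 * are skipped.  Only one backend can be registered at a time.
 */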
int
cpufreq_register(struct cpufreq *cf)
{
	uint32_t c, i, j, k;
	int rv;

	KASSERT(cf != NULL);
	KASSERT(cf_backend != NULL);
	KASSERT(cf->cf_get_freq != NULL);
	KASSERT(cf->cf_set_freq != NULL);
	KASSERT(cf->cf_state_count > 0);
	KASSERT(cf->cf_state_count < CPUFREQ_STATE_MAX);

	mutex_enter(&cpufreq_lock);

	if (cf_backend->cf_init != false) {
		mutex_exit(&cpufreq_lock);
		return EALREADY;
	}

	cf_backend->cf_init = true;
	cf_backend->cf_mp = cf->cf_mp;
	cf_backend->cf_cookie = cf->cf_cookie;
	cf_backend->cf_get_freq = cf->cf_get_freq;
	cf_backend->cf_set_freq = cf->cf_set_freq;

	(void)strlcpy(cf_backend->cf_name, cf->cf_name,
	    sizeof(cf_backend->cf_name));

	/*
	 * Sanity check the values and verify descending order.
	 */
	for (c = i = 0; i < cf->cf_state_count; i++) {

		CTASSERT(CPUFREQ_STATE_ENABLED != 0);
		CTASSERT(CPUFREQ_STATE_DISABLED != 0);

		if (cf->cf_state[i].cfs_freq == 0)
			continue;

		if (cf->cf_state[i].cfs_freq > 9999 &&
		    cf->cf_state[i].cfs_freq != CPUFREQ_STATE_ENABLED &&
		    cf->cf_state[i].cfs_freq != CPUFREQ_STATE_DISABLED)
			continue;

		for (j = k = 0; j < i; j++) {

			if (cf->cf_state[i].cfs_freq >=
			    cf->cf_state[j].cfs_freq) {
				k = 1;
				break;
			}
		}

		if (k != 0)
			continue;

		cf_backend->cf_state[c].cfs_index = c;
		cf_backend->cf_state[c].cfs_freq = cf->cf_state[i].cfs_freq;
		cf_backend->cf_state[c].cfs_power = cf->cf_state[i].cfs_power;

		c++;
	}

	cf_backend->cf_state_count = c;

	if (cf_backend->cf_state_count == 0) {
		mutex_exit(&cpufreq_lock);
		cpufreq_deregister();
		return EINVAL;
	}

	rv = cpufreq_latency();

	if (rv != 0) {
		mutex_exit(&cpufreq_lock);
		cpufreq_deregister();
		return rv;
	}

	mutex_exit(&cpufreq_lock);

	return 0;
}

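/*
 * Deregister the current backend by resetting the framework state.
 */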
void
cpufreq_deregister(void)
{

	mutex_enter(&cpufreq_lock);
	memset(cf_backend, 0, sizeof(*cf_backend));
	mutex_exit(&cpufreq_lock);
}

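/*
 * Measure the approximate transition latency of each state by
 * timing repeated transitions on all CPUs.  The average is stored
 * in microseconds in cfs_latency.
 */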
static int
cpufreq_latency(void)
{
	struct cpufreq *cf = cf_backend;
	struct timeval nta, ntb;
	const uint32_t n = 10;
	uint32_t i, j, l, m;
	uint64_t s;

	l = cpufreq_get_min();
	m = cpufreq_get_max();

	/*
	 * For each state, sample the average transition
	 * latency required to set the state for all CPUs.
	 */
	for (i = 0; i < cf->cf_state_count; i++) {

		for (s = 0, j = 0; j < n; j++) {

			/*
			 * Attempt to exclude possible
			 * caching done by the backend.
			 */
			if (i == 0)
				cpufreq_set_all_raw(l);
			else
				cpufreq_set_all_raw(m);

			nta.tv_sec = nta.tv_usec = 0;
			ntb.tv_sec = ntb.tv_usec = 0;

			microtime(&nta);
			cpufreq_set_all_raw(cf->cf_state[i].cfs_freq);
			microtime(&ntb);
			timersub(&ntb, &nta, &ntb);

			if (ntb.tv_sec != 0 ||
			    ntb.tv_usec > CPUFREQ_LATENCY_MAX)
				continue;

			if (s >= UINT64_MAX - CPUFREQ_LATENCY_MAX)
				break;

			s += ntb.tv_usec;
		}

		/*
		 * Consider the backend unsuitable if
		 * the transition latency was too high.
		 */
		if (s == 0)
			return EMSGSIZE;

		cf->cf_state[i].cfs_latency = s / n;
	}

	return 0;
}

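/*
 * Switch to the lowest frequency before suspend, saving the
 * current frequency so that cpufreq_resume() can restore it.
 */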
void
cpufreq_suspend(struct cpu_info *ci)
{
	struct cpufreq *cf = cf_backend;
	uint32_t l, s;

	mutex_enter(&cpufreq_lock);

	if (cf->cf_init != true) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	l = cpufreq_get_min();
	s = cpufreq_get_raw(ci);

	cpufreq_set_raw(ci, l);
	cf->cf_state_saved = s;

	mutex_exit(&cpufreq_lock);
}

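/*
 * Restore the frequency saved by cpufreq_suspend().
 */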
void
cpufreq_resume(struct cpu_info *ci)
{
	struct cpufreq *cf = cf_backend;

	mutex_enter(&cpufreq_lock);

	if (cf->cf_init != true || cf->cf_state_saved == 0) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_set_raw(ci, cf->cf_state_saved);
	mutex_exit(&cpufreq_lock);
}

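/*
 * Return the current frequency of the given CPU, or zero if
 * no backend has been registered.
 */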
uint32_t
cpufreq_get(struct cpu_info *ci)
{
	struct cpufreq *cf = cf_backend;
	uint32_t freq;

	mutex_enter(&cpufreq_lock);

	if (cf->cf_init != true) {
		mutex_exit(&cpufreq_lock);
		return 0;
	}

	freq = cpufreq_get_raw(ci);
	mutex_exit(&cpufreq_lock);

	return freq;
}

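/*
 * The state table is sorted in descending order, so the maximum
 * frequency is at index zero and the minimum at the last index.
 */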
static uint32_t
cpufreq_get_max(void)
{
	struct cpufreq *cf = cf_backend;

	KASSERT(cf->cf_init != false);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	return cf->cf_state[0].cfs_freq;
}

static uint32_t
cpufreq_get_min(void)
{
	struct cpufreq *cf = cf_backend;

	KASSERT(cf->cf_init != false);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	return cf->cf_state[cf->cf_state_count - 1].cfs_freq;
}

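/*
 * Run the backend's cf_get_freq on the target CPU via a
 * cross-call and wait for it to complete.
 */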
static uint32_t
cpufreq_get_raw(struct cpu_info *ci)
{
	struct cpufreq *cf = cf_backend;
	uint32_t freq = 0;
	uint64_t xc;

	KASSERT(cf->cf_init != false);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	xc = xc_unicast(0, (*cf->cf_get_freq), cf->cf_cookie, &freq, ci);
	xc_wait(xc);

	return freq;
}

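/*
 * Copy the registered backend to the caller-supplied buffer.
 */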
int
cpufreq_get_backend(struct cpufreq *dst)
{
	struct cpufreq *cf = cf_backend;

	mutex_enter(&cpufreq_lock);

	if (cf->cf_init != true || dst == NULL) {
		mutex_exit(&cpufreq_lock);
		return ENODEV;
	}

	memcpy(dst, cf, sizeof(*cf));
	mutex_exit(&cpufreq_lock);

	return 0;
}

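/*
 * Fill in the state closest to the given frequency.
 */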
int
cpufreq_get_state(uint32_t freq, struct cpufreq_state *cfs)
{
	struct cpufreq *cf = cf_backend;

	mutex_enter(&cpufreq_lock);

	if (cf->cf_init != true || cfs == NULL) {
		mutex_exit(&cpufreq_lock);
		return ENODEV;
	}

	cpufreq_get_state_raw(freq, cfs);
	mutex_exit(&cpufreq_lock);

	return 0;
}

int
cpufreq_get_state_index(uint32_t index, struct cpufreq_state *cfs)
{
	struct cpufreq *cf = cf_backend;

	mutex_enter(&cpufreq_lock);

	if (cf->cf_init != true || cfs == NULL) {
		mutex_exit(&cpufreq_lock);
		return ENODEV;
	}

	if (index >= cf->cf_state_count) {
		mutex_exit(&cpufreq_lock);
		return EINVAL;
	}

	memcpy(cfs, &cf->cf_state[index], sizeof(*cfs));
	mutex_exit(&cpufreq_lock);

	return 0;
}

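/*
 * Binary search for the state with the given frequency.  The table
 * is sorted in descending order, so a larger frequency moves the
 * search towards lower indices.  If there is no exact match, the
 * state where the search ends is returned.
 */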
static void
cpufreq_get_state_raw(uint32_t freq, struct cpufreq_state *cfs)
{
	struct cpufreq *cf = cf_backend;
	uint32_t f, hi, i = 0, lo = 0;

	KASSERT(mutex_owned(&cpufreq_lock) != 0);
	KASSERT(cf->cf_init != false && cfs != NULL);

	hi = cf->cf_state_count;

	while (lo < hi) {

		i = (lo + hi) >> 1;
		f = cf->cf_state[i].cfs_freq;

		if (freq == f)
			break;
		else if (freq > f)
			hi = i;
		else
			lo = i + 1;
	}

	memcpy(cfs, &cf->cf_state[i], sizeof(*cfs));
}

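/*
 * Set the frequency of the given CPU.  This is a no-op if no
 * backend has been registered.
 */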
void
cpufreq_set(struct cpu_info *ci, uint32_t freq)
{
	struct cpufreq *cf = cf_backend;

	mutex_enter(&cpufreq_lock);

	if (__predict_false(cf->cf_init != true)) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_set_raw(ci, freq);
	mutex_exit(&cpufreq_lock);
}

static void
cpufreq_set_raw(struct cpu_info *ci, uint32_t freq)
{
	struct cpufreq *cf = cf_backend;
	uint64_t xc;

	KASSERT(cf->cf_init != false);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	xc = xc_unicast(0, (*cf->cf_set_freq), cf->cf_cookie, &freq, ci);
	xc_wait(xc);
}

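/*
 * Set the same frequency on all CPUs via a broadcast cross-call.
 */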
void
cpufreq_set_all(uint32_t freq)
{
	struct cpufreq *cf = cf_backend;

	mutex_enter(&cpufreq_lock);

	if (__predict_false(cf->cf_init != true)) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_set_all_raw(freq);
	mutex_exit(&cpufreq_lock);
}

static void
cpufreq_set_all_raw(uint32_t freq)
{
	struct cpufreq *cf = cf_backend;
	uint64_t xc;

	KASSERT(cf->cf_init != false);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	xc = xc_broadcast(0, (*cf->cf_set_freq), cf->cf_cookie, &freq);
	xc_wait(xc);
}

#ifdef notyet
static void	 cpufreq_set_step(struct cpu_info *, int32_t);

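/*
 * Step the frequency of a CPU one state up or down.  The state
 * table is in descending order, so a negative step moves to a
 * higher frequency and a positive step to a lower one.
 */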
void
cpufreq_set_higher(struct cpu_info *ci)
{
	cpufreq_set_step(ci, -1);
}

void
cpufreq_set_lower(struct cpu_info *ci)
{
	cpufreq_set_step(ci, 1);
}

static void
cpufreq_set_step(struct cpu_info *ci, int32_t step)
{
	struct cpufreq *cf = cf_backend;
	struct cpufreq_state cfs;
	uint32_t freq;
	int32_t index;

	mutex_enter(&cpufreq_lock);

	if (__predict_false(cf->cf_init != true)) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	freq = cpufreq_get_raw(ci);

	if (__predict_false(freq == 0)) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_get_state_raw(freq, &cfs);
	index = cfs.cfs_index + step;

	if (index < 0 || index >= (int32_t)cf->cf_state_count) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_set_raw(ci, cf->cf_state[index].cfs_freq);
	mutex_exit(&cpufreq_lock);
}
#endif