/*	$NetBSD: subr_cpufreq.c,v 1.3 2011/09/30 04:01:21 jruoho Exp $ */

/*-
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jukka Ruohonen.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
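
/*
 * Generic framework for CPU frequency scaling: a machine-dependent
 * backend registers a table of frequency states together with methods
 * to query and set the frequency, and the framework runs the methods
 * on the target CPU(s) via cross-calls (see xcall(9)).
 */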
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_cpufreq.c,v 1.3 2011/09/30 04:01:21 jruoho Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/cpufreq.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/time.h>
#include <sys/xcall.h>

static int	 cpufreq_latency(void);
static uint32_t	 cpufreq_get_max(void);
static uint32_t	 cpufreq_get_min(void);
static uint32_t	 cpufreq_get_raw(struct cpu_info *);
static void	 cpufreq_get_state_raw(uint32_t, struct cpufreq_state *);
static void	 cpufreq_set_raw(struct cpu_info *, uint32_t);
static void	 cpufreq_set_all_raw(uint32_t);

static kmutex_t		cpufreq_lock __cacheline_aligned;
static struct cpufreq  *cf_backend __read_mostly = NULL;

void
cpufreq_init(void)
{

	mutex_init(&cpufreq_lock, MUTEX_DEFAULT, IPL_NONE);
	cf_backend = kmem_zalloc(sizeof(*cf_backend), KM_SLEEP);
}

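/*
 * Register a backend.  The supplied state table should list the
 * available frequencies (in MHz) in descending order; entries that
 * are zero, out of range, or out of order are skipped.  Only one
 * backend can be registered at a time.
 *
 * A minimal sketch of how a hypothetical machine-dependent driver
 * might register itself; the "xyzfreq" names are purely illustrative:
 *
 *	static struct cpufreq xyzfreq_cf = {
 *		.cf_name	= "xyzfreq",
 *		.cf_state[0]	= { .cfs_freq = 2400 },
 *		.cf_state[1]	= { .cfs_freq = 1600 },
 *		.cf_state[2]	= { .cfs_freq =  800 },
 *		.cf_state_count	= 3,
 *		.cf_get_freq	= xyzfreq_get_freq,
 *		.cf_set_freq	= xyzfreq_set_freq,
 *		.cf_cookie	= &xyzfreq_softc,
 *	};
 *
 *	error = cpufreq_register(&xyzfreq_cf);
 */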
int
cpufreq_register(struct cpufreq *cf)
{
	uint32_t c, i, j, k, m;
	int rv;

	KASSERT(cf != NULL);
	KASSERT(cf_backend != NULL);
	KASSERT(cf->cf_get_freq != NULL);
	KASSERT(cf->cf_set_freq != NULL);
	KASSERT(cf->cf_state_count > 0);
	KASSERT(cf->cf_state_count < CPUFREQ_STATE_MAX);

	mutex_enter(&cpufreq_lock);

	if (cf_backend->cf_init != false) {
		mutex_exit(&cpufreq_lock);
		return EALREADY;
	}

	cf_backend->cf_init = true;
	cf_backend->cf_mp = cf->cf_mp;
	cf_backend->cf_cookie = cf->cf_cookie;
	cf_backend->cf_get_freq = cf->cf_get_freq;
	cf_backend->cf_set_freq = cf->cf_set_freq;

	(void)strlcpy(cf_backend->cf_name, cf->cf_name, sizeof(cf->cf_name));

	/*
	 * Sanity check the values and verify descending order.
	 */
	for (c = i = 0; i < cf->cf_state_count; i++) {

		CTASSERT(CPUFREQ_STATE_ENABLED != 0);
		CTASSERT(CPUFREQ_STATE_DISABLED != 0);

		if (cf->cf_state[i].cfs_freq == 0)
			continue;

		if (cf->cf_state[i].cfs_freq > 9999 &&
		    cf->cf_state[i].cfs_freq != CPUFREQ_STATE_ENABLED &&
		    cf->cf_state[i].cfs_freq != CPUFREQ_STATE_DISABLED)
			continue;

		for (j = k = 0; j < i; j++) {

			if (cf->cf_state[i].cfs_freq >=
			    cf->cf_state[j].cfs_freq) {
				k = 1;
				break;
			}
		}

		if (k != 0)
			continue;

		cf_backend->cf_state[c].cfs_index = c;
		cf_backend->cf_state[c].cfs_freq = cf->cf_state[i].cfs_freq;
		cf_backend->cf_state[c].cfs_power = cf->cf_state[i].cfs_power;

		c++;
	}

	cf_backend->cf_state_count = c;

	if (cf_backend->cf_state_count == 0) {
		mutex_exit(&cpufreq_lock);
		cpufreq_deregister();
		return EINVAL;
	}

	rv = cpufreq_latency();

	if (rv != 0) {
		mutex_exit(&cpufreq_lock);
		cpufreq_deregister();
		return rv;
	}

	m = cpufreq_get_max();
	cpufreq_set_all_raw(m);
	mutex_exit(&cpufreq_lock);

	return 0;
}

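/*
 * Deregister the backend by zeroing its state; subsequent calls into
 * the framework become no-ops until a new backend is registered.
 */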
void
cpufreq_deregister(void)
{

	mutex_enter(&cpufreq_lock);
	memset(cf_backend, 0, sizeof(*cf_backend));
	mutex_exit(&cpufreq_lock);
}

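/*
 * Estimate the transition latency of each state by timing repeated
 * transitions into it and averaging the results, in nanoseconds.
 */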
static int
cpufreq_latency(void)
{
	struct cpufreq *cf = cf_backend;
	struct timespec nta, ntb;
	const uint32_t n = 10;
	uint32_t i, j, k, l, m;
	uint64_t s;

	l = cpufreq_get_min();
	m = cpufreq_get_max();

	/*
	 * For each state, sample the average transition
	 * latency required to set the state for all CPUs.
	 */
	for (i = 0; i < cf->cf_state_count; i++) {

		for (s = 0, k = 0, j = 0; j < n; j++) {

			/*
			 * Attempt to exclude possible
			 * caching done by the backend.
			 */
			if (i == 0)
				cpufreq_set_all_raw(l);
			else
				cpufreq_set_all_raw(m);

			nta.tv_sec = nta.tv_nsec = 0;
			ntb.tv_sec = ntb.tv_nsec = 0;

			nanotime(&nta);
			cpufreq_set_all_raw(cf->cf_state[i].cfs_freq);
			nanotime(&ntb);
			timespecsub(&ntb, &nta, &ntb);

			if (ntb.tv_sec != 0 ||
			    ntb.tv_nsec > CPUFREQ_LATENCY_MAX)
				continue;

			if (s >= UINT64_MAX - CPUFREQ_LATENCY_MAX)
				break;

			s += ntb.tv_nsec;
			k++;
		}

		/*
		 * Consider the backend unsuitable if none of
		 * the sampled transitions completed within
		 * the latency limit.
		 */
		if (k == 0)
			return EMSGSIZE;

		/* Average over the accepted samples only. */
		cf->cf_state[i].cfs_latency = s / k;
	}

	return 0;
}

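/*
 * Drop to the minimum frequency for suspend, saving the current
 * frequency so that cpufreq_resume() can restore it.
 */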
void
cpufreq_suspend(struct cpu_info *ci)
{
	struct cpufreq *cf = cf_backend;
	uint32_t l, s;

	mutex_enter(&cpufreq_lock);

	if (cf->cf_init != true) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	l = cpufreq_get_min();
	s = cpufreq_get_raw(ci);

	cpufreq_set_raw(ci, l);
	cf->cf_state_saved = s;

	mutex_exit(&cpufreq_lock);
}

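/*
 * Restore the frequency that was active before suspend.
 */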
void
cpufreq_resume(struct cpu_info *ci)
{
	struct cpufreq *cf = cf_backend;

	mutex_enter(&cpufreq_lock);

	if (cf->cf_init != true || cf->cf_state_saved == 0) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_set_raw(ci, cf->cf_state_saved);
	mutex_exit(&cpufreq_lock);
}

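/*
 * Return the current frequency of the given CPU, or zero if no
 * backend has been registered.
 */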
uint32_t
cpufreq_get(struct cpu_info *ci)
{
	struct cpufreq *cf = cf_backend;
	uint32_t freq;

	mutex_enter(&cpufreq_lock);

	if (cf->cf_init != true) {
		mutex_exit(&cpufreq_lock);
		return 0;
	}

	freq = cpufreq_get_raw(ci);
	mutex_exit(&cpufreq_lock);

	return freq;
}

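/*
 * The state table is kept in descending order of frequency: the
 * maximum is the first entry and the minimum is the last one.
 */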
static uint32_t
cpufreq_get_max(void)
{
	struct cpufreq *cf = cf_backend;

	KASSERT(cf->cf_init != false);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	return cf->cf_state[0].cfs_freq;
}

static uint32_t
cpufreq_get_min(void)
{
	struct cpufreq *cf = cf_backend;

	KASSERT(cf->cf_init != false);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	return cf->cf_state[cf->cf_state_count - 1].cfs_freq;
}

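/*
 * Query the frequency by cross-calling the backend's get-method on
 * the target CPU and waiting for the call to complete.
 */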
static uint32_t
cpufreq_get_raw(struct cpu_info *ci)
{
	struct cpufreq *cf = cf_backend;
	uint32_t freq = 0;
	uint64_t xc;

	KASSERT(cf->cf_init != false);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	xc = xc_unicast(0, (*cf->cf_get_freq), cf->cf_cookie, &freq, ci);
	xc_wait(xc);

	return freq;
}

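/*
 * Copy the full backend description, including the state table, into
 * the caller-supplied buffer.
 */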
int
cpufreq_get_backend(struct cpufreq *dst)
{
	struct cpufreq *cf = cf_backend;

	mutex_enter(&cpufreq_lock);

	if (cf->cf_init != true || dst == NULL) {
		mutex_exit(&cpufreq_lock);
		return ENODEV;
	}

	memcpy(dst, cf, sizeof(*cf));
	mutex_exit(&cpufreq_lock);

	return 0;
}

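/*
 * Look up the state for the given frequency and copy it to the
 * caller-supplied buffer.
 */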
int
cpufreq_get_state(uint32_t freq, struct cpufreq_state *cfs)
{
	struct cpufreq *cf = cf_backend;

	mutex_enter(&cpufreq_lock);

	if (cf->cf_init != true || cfs == NULL) {
		mutex_exit(&cpufreq_lock);
		return ENODEV;
	}

	cpufreq_get_state_raw(freq, cfs);
	mutex_exit(&cpufreq_lock);

	return 0;
}

int
cpufreq_get_state_index(uint32_t index, struct cpufreq_state *cfs)
{
	struct cpufreq *cf = cf_backend;

	mutex_enter(&cpufreq_lock);

	if (cf->cf_init != true || cfs == NULL) {
		mutex_exit(&cpufreq_lock);
		return ENODEV;
	}

	if (index >= cf->cf_state_count) {
		mutex_exit(&cpufreq_lock);
		return EINVAL;
	}

	memcpy(cfs, &cf->cf_state[index], sizeof(*cfs));
	mutex_exit(&cpufreq_lock);

	return 0;
}

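/*
 * Locate the state with a binary search over the table, which is
 * sorted in descending order; if the frequency has no exact match,
 * a neighboring state is returned.
 */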
static void
cpufreq_get_state_raw(uint32_t freq, struct cpufreq_state *cfs)
{
	struct cpufreq *cf = cf_backend;
	uint32_t f, hi, i = 0, lo = 0;

	KASSERT(mutex_owned(&cpufreq_lock) != 0);
	KASSERT(cf->cf_init != false && cfs != NULL);

	hi = cf->cf_state_count;

	while (lo < hi) {

		i = (lo + hi) >> 1;
		f = cf->cf_state[i].cfs_freq;

		if (freq == f)
			break;
		else if (freq > f)
			hi = i;
		else
			lo = i + 1;
	}

	memcpy(cfs, &cf->cf_state[i], sizeof(*cfs));
}

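/*
 * Set the frequency of the given CPU.  The request is silently
 * ignored if no backend has been registered.
 */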
void
cpufreq_set(struct cpu_info *ci, uint32_t freq)
{
	struct cpufreq *cf = cf_backend;

	mutex_enter(&cpufreq_lock);

	if (__predict_false(cf->cf_init != true)) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_set_raw(ci, freq);
	mutex_exit(&cpufreq_lock);
}

static void
cpufreq_set_raw(struct cpu_info *ci, uint32_t freq)
{
	struct cpufreq *cf = cf_backend;
	uint64_t xc;

	KASSERT(cf->cf_init != false);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	xc = xc_unicast(0, (*cf->cf_set_freq), cf->cf_cookie, &freq, ci);
	xc_wait(xc);
}

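/*
 * Set the given frequency on all CPUs by broadcasting a cross-call
 * to the backend's set-method.
 */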
void
cpufreq_set_all(uint32_t freq)
{
	struct cpufreq *cf = cf_backend;

	mutex_enter(&cpufreq_lock);

	if (__predict_false(cf->cf_init != true)) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_set_all_raw(freq);
	mutex_exit(&cpufreq_lock);
}

static void
cpufreq_set_all_raw(uint32_t freq)
{
	struct cpufreq *cf = cf_backend;
	uint64_t xc;

	KASSERT(cf->cf_init != false);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	xc = xc_broadcast(0, (*cf->cf_set_freq), cf->cf_cookie, &freq);
	xc_wait(xc);
}

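/*
 * Helpers to step one state up or down from the current one; since
 * the table is in descending order, a negative step selects a higher
 * frequency.  Not yet enabled.
 */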
#ifdef notyet
static void	 cpufreq_set_step(struct cpu_info *, int32_t);

void
cpufreq_set_higher(struct cpu_info *ci)
{
	cpufreq_set_step(ci, -1);
}

void
cpufreq_set_lower(struct cpu_info *ci)
{
	cpufreq_set_step(ci, 1);
}

static void
cpufreq_set_step(struct cpu_info *ci, int32_t step)
{
	struct cpufreq *cf = cf_backend;
	struct cpufreq_state cfs;
	uint32_t freq;
	int32_t index;

	mutex_enter(&cpufreq_lock);

	if (__predict_false(cf->cf_init != true)) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	freq = cpufreq_get_raw(ci);

	if (__predict_false(freq == 0)) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_get_state_raw(freq, &cfs);
	index = cfs.cfs_index + step;

	if (index < 0 || index >= (int32_t)cf->cf_state_count) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_set_raw(ci, cf->cf_state[index].cfs_freq);
	mutex_exit(&cpufreq_lock);
}
#endif