/*	$NetBSD: subr_cpufreq.c,v 1.7 2011/10/25 18:26:09 christos Exp $	*/

/*-
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jukka Ruohonen.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_cpufreq.c,v 1.7 2011/10/25 18:26:09 christos Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/cpufreq.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/time.h>
#include <sys/xcall.h>

static int cpufreq_latency(void);
static uint32_t cpufreq_get_max(void);
static uint32_t cpufreq_get_min(void);
static uint32_t cpufreq_get_raw(struct cpu_info *);
static void cpufreq_get_state_raw(uint32_t, struct cpufreq_state *);
static void cpufreq_set_raw(struct cpu_info *, uint32_t);
static void cpufreq_set_all_raw(uint32_t);

static kmutex_t cpufreq_lock __cacheline_aligned;
static struct cpufreq *cf_backend __read_mostly = NULL;

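/*
 * Initialize the framework lock and allocate the backend container.
 * This must run before any backend registers; cf_backend is always
 * non-NULL after this point.
 */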
void
cpufreq_init(void)
{

	mutex_init(&cpufreq_lock, MUTEX_DEFAULT, IPL_NONE);
	cf_backend = kmem_zalloc(sizeof(*cf_backend), KM_SLEEP);
}

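/*
 * Register a frequency scaling backend.  Only one backend can be
 * registered at a time.  The caller's state table is sanity checked
 * and copied into cf_backend in strictly descending order of
 * frequency, and the average transition latency of each state is
 * measured before the call returns.
 *
 * A backend would typically fill in a struct cpufreq and register it
 * from its attach routine, roughly as follows (names and values are
 * illustrative only):
 *
 *	struct cpufreq cf;
 *
 *	memset(&cf, 0, sizeof(cf));
 *	cf.cf_cookie = sc;
 *	cf.cf_get_freq = mydrv_get_freq;
 *	cf.cf_set_freq = mydrv_set_freq;
 *	cf.cf_state_count = 2;
 *	cf.cf_state[0].cfs_freq = 2000;
 *	cf.cf_state[1].cfs_freq = 1000;
 *	(void)strlcpy(cf.cf_name, "mydrv", sizeof(cf.cf_name));
 *	error = cpufreq_register(&cf);
 *
 * Returns zero on success and an errno on failure.
 */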
int
cpufreq_register(struct cpufreq *cf)
{
	uint32_t c, i, j, k;
	int rv;

	KASSERT(cf != NULL);
	KASSERT(cf_backend != NULL);
	KASSERT(cf->cf_get_freq != NULL);
	KASSERT(cf->cf_set_freq != NULL);
	KASSERT(cf->cf_state_count > 0);
	KASSERT(cf->cf_state_count < CPUFREQ_STATE_MAX);

	mutex_enter(&cpufreq_lock);

	if (cf_backend->cf_init != false) {
		mutex_exit(&cpufreq_lock);
		return EALREADY;
	}

	cf_backend->cf_init = true;
	cf_backend->cf_mp = cf->cf_mp;
	cf_backend->cf_cookie = cf->cf_cookie;
	cf_backend->cf_get_freq = cf->cf_get_freq;
	cf_backend->cf_set_freq = cf->cf_set_freq;

	(void)strlcpy(cf_backend->cf_name, cf->cf_name, sizeof(cf->cf_name));

	/*
	 * Sanity check the values and verify descending order.
	 */
	for (c = i = 0; i < cf->cf_state_count; i++) {

		CTASSERT(CPUFREQ_STATE_ENABLED != 0);
		CTASSERT(CPUFREQ_STATE_DISABLED != 0);

		if (cf->cf_state[i].cfs_freq == 0)
			continue;

		if (cf->cf_state[i].cfs_freq > 9999 &&
		    cf->cf_state[i].cfs_freq != CPUFREQ_STATE_ENABLED &&
		    cf->cf_state[i].cfs_freq != CPUFREQ_STATE_DISABLED)
			continue;

		for (j = k = 0; j < i; j++) {

			if (cf->cf_state[i].cfs_freq >=
			    cf->cf_state[j].cfs_freq) {
				k = 1;
				break;
			}
		}

		if (k != 0)
			continue;

		cf_backend->cf_state[c].cfs_index = c;
		cf_backend->cf_state[c].cfs_freq = cf->cf_state[i].cfs_freq;
		cf_backend->cf_state[c].cfs_power = cf->cf_state[i].cfs_power;

		c++;
	}

	cf_backend->cf_state_count = c;

	if (cf_backend->cf_state_count == 0) {
		mutex_exit(&cpufreq_lock);
		cpufreq_deregister();
		return EINVAL;
	}

	rv = cpufreq_latency();

	if (rv != 0) {
		mutex_exit(&cpufreq_lock);
		cpufreq_deregister();
		return rv;
	}

	mutex_exit(&cpufreq_lock);

	return 0;
}

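/*
 * Deregister the current backend by clearing its state.  The
 * container itself is not freed; a new backend may register later.
 */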
void
cpufreq_deregister(void)
{

	mutex_enter(&cpufreq_lock);
	memset(cf_backend, 0, sizeof(*cf_backend));
	mutex_exit(&cpufreq_lock);
}

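/*
 * Measure the average transition latency of each state, in
 * microseconds, by repeatedly switching all CPUs into the state.
 * Called with cpufreq_lock held during registration.
 */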
static int
cpufreq_latency(void)
{
	struct cpufreq *cf = cf_backend;
	struct timespec nta, ntb;
	const uint32_t n = 10;
	uint32_t i, j, l, m;
	uint64_t s;

	l = cpufreq_get_min();
	m = cpufreq_get_max();

	/*
	 * For each state, sample the average transition
	 * latency required to set the state for all CPUs.
	 */
	for (i = 0; i < cf->cf_state_count; i++) {

		for (s = 0, j = 0; j < n; j++) {

			/*
			 * Attempt to exclude possible
			 * caching done by the backend.
			 */
			if (i == 0)
				cpufreq_set_all_raw(l);
			else
				cpufreq_set_all_raw(m);

			nanotime(&nta);
			cpufreq_set_all_raw(cf->cf_state[i].cfs_freq);
			nanotime(&ntb);
			timespecsub(&ntb, &nta, &ntb);

			if (ntb.tv_sec != 0 ||
			    ntb.tv_nsec > CPUFREQ_LATENCY_MAX)
				continue;

			if (s >= UINT64_MAX - CPUFREQ_LATENCY_MAX)
				break;

			/* Convert to microseconds to prevent overflow */
			s += ntb.tv_nsec / 1000;
		}

		/*
		 * Consider the backend unsuitable if
		 * the transition latency was too high.
		 */
		if (s == 0)
			return EMSGSIZE;

		cf->cf_state[i].cfs_latency = s / n;
	}

	return 0;
}

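/*
 * Drop the given CPU to the minimum frequency before suspend, saving
 * the previous frequency so that cpufreq_resume() can restore it.
 */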
void
cpufreq_suspend(struct cpu_info *ci)
{
	struct cpufreq *cf = cf_backend;
	uint32_t l, s;

	mutex_enter(&cpufreq_lock);

	if (cf->cf_init != true) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	l = cpufreq_get_min();
	s = cpufreq_get_raw(ci);

	cpufreq_set_raw(ci, l);
	cf->cf_state_saved = s;

	mutex_exit(&cpufreq_lock);
}

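/*
 * Restore the frequency that was saved by cpufreq_suspend().
 */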
void
cpufreq_resume(struct cpu_info *ci)
{
	struct cpufreq *cf = cf_backend;

	mutex_enter(&cpufreq_lock);

	if (cf->cf_init != true || cf->cf_state_saved == 0) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_set_raw(ci, cf->cf_state_saved);
	mutex_exit(&cpufreq_lock);
}

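/*
 * Return the current frequency of the given CPU, or zero if no
 * backend has been registered.
 */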
uint32_t
cpufreq_get(struct cpu_info *ci)
{
	struct cpufreq *cf = cf_backend;
	uint32_t freq;

	mutex_enter(&cpufreq_lock);

	if (cf->cf_init != true) {
		mutex_exit(&cpufreq_lock);
		return 0;
	}

	freq = cpufreq_get_raw(ci);
	mutex_exit(&cpufreq_lock);

	return freq;
}

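/*
 * The state table is kept in descending order of frequency, so the
 * maximum is the first entry and the minimum is the last one.
 */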
static uint32_t
cpufreq_get_max(void)
{
	struct cpufreq *cf = cf_backend;

	KASSERT(cf->cf_init != false);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	return cf->cf_state[0].cfs_freq;
}

static uint32_t
cpufreq_get_min(void)
{
	struct cpufreq *cf = cf_backend;

	KASSERT(cf->cf_init != false);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	return cf->cf_state[cf->cf_state_count - 1].cfs_freq;
}

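/*
 * Query the backend for the current frequency by running its
 * cf_get_freq method on the target CPU via a cross-call.
 */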
static uint32_t
cpufreq_get_raw(struct cpu_info *ci)
{
	struct cpufreq *cf = cf_backend;
	uint32_t freq = 0;
	uint64_t xc;

	KASSERT(cf->cf_init != false);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	xc = xc_unicast(0, (*cf->cf_get_freq), cf->cf_cookie, &freq, ci);
	xc_wait(xc);

	return freq;
}

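/*
 * Copy the registered backend, including its state table, to the
 * caller-supplied buffer.
 */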
int
cpufreq_get_backend(struct cpufreq *dst)
{
	struct cpufreq *cf = cf_backend;

	mutex_enter(&cpufreq_lock);

	if (cf->cf_init != true || dst == NULL) {
		mutex_exit(&cpufreq_lock);
		return ENODEV;
	}

	memcpy(dst, cf, sizeof(*cf));
	mutex_exit(&cpufreq_lock);

	return 0;
}

int
cpufreq_get_state(uint32_t freq, struct cpufreq_state *cfs)
{
	struct cpufreq *cf = cf_backend;

	mutex_enter(&cpufreq_lock);

	if (cf->cf_init != true || cfs == NULL) {
		mutex_exit(&cpufreq_lock);
		return ENODEV;
	}

	cpufreq_get_state_raw(freq, cfs);
	mutex_exit(&cpufreq_lock);

	return 0;
}

int
cpufreq_get_state_index(uint32_t index, struct cpufreq_state *cfs)
{
	struct cpufreq *cf = cf_backend;

	mutex_enter(&cpufreq_lock);

	if (cf->cf_init != true || cfs == NULL) {
		mutex_exit(&cpufreq_lock);
		return ENODEV;
	}

	if (index >= cf->cf_state_count) {
		mutex_exit(&cpufreq_lock);
		return EINVAL;
	}

	memcpy(cfs, &cf->cf_state[index], sizeof(*cfs));
	mutex_exit(&cpufreq_lock);

	return 0;
}

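/*
 * Find the state matching the given frequency by binary search.
 * Because the table is sorted in descending order, higher frequencies
 * are searched towards lower indices.  If there is no exact match,
 * the entry where the search terminates is returned.
 */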
static void
cpufreq_get_state_raw(uint32_t freq, struct cpufreq_state *cfs)
{
	struct cpufreq *cf = cf_backend;
	uint32_t f, hi, i = 0, lo = 0;

	KASSERT(mutex_owned(&cpufreq_lock) != 0);
	KASSERT(cf->cf_init != false && cfs != NULL);

	hi = cf->cf_state_count;

	while (lo < hi) {

		i = (lo + hi) >> 1;
		f = cf->cf_state[i].cfs_freq;

		if (freq == f)
			break;
		else if (freq > f)
			hi = i;
		else
			lo = i + 1;
	}

	memcpy(cfs, &cf->cf_state[i], sizeof(*cfs));
}

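/*
 * Set the frequency of the given CPU.  The request is silently
 * ignored if no backend has been registered.
 */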
void
cpufreq_set(struct cpu_info *ci, uint32_t freq)
{
	struct cpufreq *cf = cf_backend;

	mutex_enter(&cpufreq_lock);

	if (__predict_false(cf->cf_init != true)) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_set_raw(ci, freq);
	mutex_exit(&cpufreq_lock);
}

static void
cpufreq_set_raw(struct cpu_info *ci, uint32_t freq)
{
	struct cpufreq *cf = cf_backend;
	uint64_t xc;

	KASSERT(cf->cf_init != false);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	xc = xc_unicast(0, (*cf->cf_set_freq), cf->cf_cookie, &freq, ci);
	xc_wait(xc);
}

void
cpufreq_set_all(uint32_t freq)
{
	struct cpufreq *cf = cf_backend;

	mutex_enter(&cpufreq_lock);

	if (__predict_false(cf->cf_init != true)) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_set_all_raw(freq);
	mutex_exit(&cpufreq_lock);
}

static void
cpufreq_set_all_raw(uint32_t freq)
{
	struct cpufreq *cf = cf_backend;
	uint64_t xc;

	KASSERT(cf->cf_init != false);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	xc = xc_broadcast(0, (*cf->cf_set_freq), cf->cf_cookie, &freq);
	xc_wait(xc);
}

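/*
 * Helpers for stepping one state up or down from the current
 * frequency of a CPU.  A negative step moves towards index zero,
 * i.e. a higher frequency, because the state table is sorted in
 * descending order.  Not yet enabled.
 */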
#ifdef notyet
static void cpufreq_set_step(struct cpu_info *, int32_t);

void
cpufreq_set_higher(struct cpu_info *ci)
{
	cpufreq_set_step(ci, -1);
}

void
cpufreq_set_lower(struct cpu_info *ci)
{
	cpufreq_set_step(ci, 1);
}

static void
cpufreq_set_step(struct cpu_info *ci, int32_t step)
{
	struct cpufreq *cf = cf_backend;
	struct cpufreq_state cfs;
	uint32_t freq;
	int32_t index;

	mutex_enter(&cpufreq_lock);

	if (__predict_false(cf->cf_init != true)) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	freq = cpufreq_get_raw(ci);

	if (__predict_false(freq == 0)) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_get_state_raw(freq, &cfs);
	index = cfs.cfs_index + step;

	if (index < 0 || index >= (int32_t)cf->cf_state_count) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_set_raw(ci, cf->cf_state[index].cfs_freq);
	mutex_exit(&cpufreq_lock);
}
#endif