/* $NetBSD: subr_cpufreq.c,v 1.2 2011/09/28 15:52:48 jruoho Exp $ */

/*-
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jukka Ruohonen.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_cpufreq.c,v 1.2 2011/09/28 15:52:48 jruoho Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/cpufreq.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/time.h>
#include <sys/xcall.h>

static int       cpufreq_latency(void);
static uint32_t  cpufreq_get_max(void);
static uint32_t  cpufreq_get_min(void);
static uint32_t  cpufreq_get_raw(struct cpu_info *);
static void      cpufreq_get_state_raw(uint32_t, struct cpufreq_state *);
static void      cpufreq_set_raw(struct cpu_info *, uint32_t);
static void      cpufreq_set_all_raw(uint32_t);

static kmutex_t  cpufreq_lock __cacheline_aligned;
static struct cpufreq *cf_backend __read_mostly = NULL;

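/*
 * Initialize the lock that serializes access to the backend. This is
 * expected to run once during bootstrap, before any machine-dependent
 * backend can register itself.
 */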
void
cpufreq_init(void)
{

        mutex_init(&cpufreq_lock, MUTEX_DEFAULT, IPL_NONE);
}

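/*
 * Register a machine-dependent backend. Only one backend may be
 * registered at a time. The caller supplies the frequency states in
 * descending order; states with a zero frequency or out of order are
 * dropped. On success the transition latencies are measured and all
 * CPUs are set to the highest available frequency.
 */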
int
cpufreq_register(struct cpufreq *cf)
{
        uint32_t count, i, j, k, m;
        int rv;

        KASSERT(cf != NULL);
        KASSERT(cf->cf_get_freq != NULL);
        KASSERT(cf->cf_set_freq != NULL);
        KASSERT(cf->cf_state_count > 0);
        KASSERT(cf->cf_state_count < CPUFREQ_STATE_MAX);

        mutex_enter(&cpufreq_lock);

        if (cf_backend != NULL) {
                mutex_exit(&cpufreq_lock);
                return EALREADY;
        }

        mutex_exit(&cpufreq_lock);
        cf_backend = kmem_zalloc(sizeof(*cf), KM_SLEEP);

        if (cf_backend == NULL)
                return ENOMEM;

        mutex_enter(&cpufreq_lock);

        cf_backend->cf_mp = cf->cf_mp;
        cf_backend->cf_cookie = cf->cf_cookie;
        cf_backend->cf_get_freq = cf->cf_get_freq;
        cf_backend->cf_set_freq = cf->cf_set_freq;

        (void)strlcpy(cf_backend->cf_name, cf->cf_name, sizeof(cf->cf_name));

        /*
         * Sanity check the values and verify descending order.
         */
        for (count = i = 0; i < cf->cf_state_count; i++) {

                CTASSERT(CPUFREQ_STATE_ENABLED != 0);
                CTASSERT(CPUFREQ_STATE_DISABLED != 0);

                if (cf->cf_state[i].cfs_freq == 0)
                        continue;

                for (j = k = 0; j < i; j++) {

                        if (cf->cf_state[i].cfs_freq >=
                            cf->cf_state[j].cfs_freq) {
                                k = 1;
                                break;
                        }
                }

                if (k != 0)
                        continue;

                /*
                 * Pack the accepted states densely so that
                 * indexes 0 .. count - 1 are always valid.
                 */
                cf_backend->cf_state[count].cfs_index = count;
                cf_backend->cf_state[count].cfs_freq = cf->cf_state[i].cfs_freq;
                cf_backend->cf_state[count].cfs_power = cf->cf_state[i].cfs_power;

                count++;
        }

        cf_backend->cf_state_count = count;

        if (cf_backend->cf_state_count == 0) {
                mutex_exit(&cpufreq_lock);
                cpufreq_deregister();
                return EINVAL;
        }

        rv = cpufreq_latency();

        if (rv != 0) {
                mutex_exit(&cpufreq_lock);
                cpufreq_deregister();
                return rv;
        }

        m = cpufreq_get_max();
        cpufreq_set_all_raw(m);
        mutex_exit(&cpufreq_lock);

        return 0;
}

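/*
 * Deregister the current backend, if any, and release its memory.
 */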
void
cpufreq_deregister(void)
{

        mutex_enter(&cpufreq_lock);

        if (cf_backend == NULL) {
                mutex_exit(&cpufreq_lock);
                return;
        }

        mutex_exit(&cpufreq_lock);
        kmem_free(cf_backend, sizeof(*cf_backend));
        cf_backend = NULL;
}

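/*
 * Measure the average latency of a state transition. For each state,
 * the frequency is first moved away from the target and the time taken
 * by cpufreq_set_all_raw() is then sampled with nanotime(). Samples
 * above CPUFREQ_LATENCY_MAX are discarded; if no sample is usable, the
 * backend is rejected.
 */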
static int
cpufreq_latency(void)
{
        struct cpufreq *cf = cf_backend;
        struct timespec nta, ntb;
        const uint32_t n = 10;
        uint32_t i, j, l, m;
        uint64_t s;

        l = cpufreq_get_min();
        m = cpufreq_get_max();

        /*
         * For each state, sample the average transition
         * latency required to set the state for all CPUs.
         */
        for (i = 0; i < cf->cf_state_count; i++) {

                for (s = 0, j = 0; j < n; j++) {

                        /*
                         * Attempt to exclude possible
                         * caching done by the backend.
                         */
                        if (i == 0)
                                cpufreq_set_all_raw(l);
                        else
                                cpufreq_set_all_raw(m);

                        nta.tv_sec = nta.tv_nsec = 0;
                        ntb.tv_sec = ntb.tv_nsec = 0;

                        nanotime(&nta);
                        cpufreq_set_all_raw(cf->cf_state[i].cfs_freq);
                        nanotime(&ntb);
                        timespecsub(&ntb, &nta, &ntb);

                        if (ntb.tv_sec != 0 ||
                            ntb.tv_nsec > CPUFREQ_LATENCY_MAX)
                                continue;

                        if (s >= UINT64_MAX - CPUFREQ_LATENCY_MAX)
                                break;

                        s += ntb.tv_nsec;
                }

                /*
                 * Consider the backend unsuitable if
                 * the transition latency was too high.
                 */
                if (s == 0)
                        return EMSGSIZE;

                cf->cf_state[i].cfs_latency = s / n;
        }

        return 0;
}

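/*
 * Save the current frequency of a CPU and drop it to the minimum,
 * typically as part of the suspend path of the backend driver.
 */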
void
cpufreq_suspend(struct cpu_info *ci)
{
        struct cpufreq *cf;
        uint32_t l, s;

        mutex_enter(&cpufreq_lock);
        cf = cf_backend;

        if (cf == NULL) {
                mutex_exit(&cpufreq_lock);
                return;
        }

        l = cpufreq_get_min();
        s = cpufreq_get_raw(ci);

        cpufreq_set_raw(ci, l);
        cf->cf_state_saved = s;

        mutex_exit(&cpufreq_lock);
}

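/*
 * Counterpart of cpufreq_suspend(): restore the saved frequency.
 * Does nothing if no state was saved or no backend is registered.
 */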
void
cpufreq_resume(struct cpu_info *ci)
{
        struct cpufreq *cf;

        mutex_enter(&cpufreq_lock);
        cf = cf_backend;

        if (cf == NULL || cf->cf_state_saved == 0) {
                mutex_exit(&cpufreq_lock);
                return;
        }

        cpufreq_set_raw(ci, cf->cf_state_saved);
        mutex_exit(&cpufreq_lock);
}

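/*
 * Return the current frequency of a CPU, or zero if no backend
 * has been registered.
 */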
uint32_t
cpufreq_get(struct cpu_info *ci)
{
        struct cpufreq *cf;
        uint32_t freq;

        mutex_enter(&cpufreq_lock);
        cf = cf_backend;

        if (cf == NULL) {
                mutex_exit(&cpufreq_lock);
                return 0;
        }

        freq = cpufreq_get_raw(ci);
        mutex_exit(&cpufreq_lock);

        return freq;
}

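/*
 * The maximum frequency is the first entry in the state table,
 * as the states are sorted in descending order.
 */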
static uint32_t
cpufreq_get_max(void)
{
        struct cpufreq *cf = cf_backend;

        KASSERT(cf != NULL);
        KASSERT(mutex_owned(&cpufreq_lock) != 0);

        return cf->cf_state[0].cfs_freq;
}

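/*
 * Conversely, the minimum frequency is the last entry in the table.
 */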
static uint32_t
cpufreq_get_min(void)
{
        struct cpufreq *cf = cf_backend;

        KASSERT(cf != NULL);
        KASSERT(mutex_owned(&cpufreq_lock) != 0);

        return cf->cf_state[cf->cf_state_count - 1].cfs_freq;
}

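/*
 * Query the frequency of a single CPU by running the backend's
 * cf_get_freq() hook on that CPU via a cross-call.
 */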
static uint32_t
cpufreq_get_raw(struct cpu_info *ci)
{
        struct cpufreq *cf = cf_backend;
        uint32_t freq = 0;
        uint64_t xc;

        KASSERT(cf != NULL);
        KASSERT(mutex_owned(&cpufreq_lock) != 0);

        xc = xc_unicast(0, (*cf->cf_get_freq), cf->cf_cookie, &freq, ci);
        xc_wait(xc);

        return freq;
}

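/*
 * Copy the registered backend descriptor to the caller.
 */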
int
cpufreq_get_backend(struct cpufreq *cf)
{

        mutex_enter(&cpufreq_lock);

        if (cf_backend == NULL || cf == NULL) {
                mutex_exit(&cpufreq_lock);
                return ENODEV;
        }

        (void)memcpy(cf, cf_backend, sizeof(*cf));
        mutex_exit(&cpufreq_lock);

        return 0;
}

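/*
 * Fill in the state that matches the given frequency.
 */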
int
cpufreq_get_state(uint32_t freq, struct cpufreq_state *cfs)
{
        struct cpufreq *cf;

        mutex_enter(&cpufreq_lock);
        cf = cf_backend;

        if (cf == NULL || cfs == NULL) {
                mutex_exit(&cpufreq_lock);
                return ENODEV;
        }

        cpufreq_get_state_raw(freq, cfs);
        mutex_exit(&cpufreq_lock);

        return 0;
}

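/*
 * Fill in the state at the given index in the state table.
 */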
int
cpufreq_get_state_index(uint32_t index, struct cpufreq_state *cfs)
{
        struct cpufreq *cf;

        mutex_enter(&cpufreq_lock);
        cf = cf_backend;

        if (cf == NULL || cfs == NULL) {
                mutex_exit(&cpufreq_lock);
                return ENODEV;
        }

        if (index >= cf->cf_state_count) {
                mutex_exit(&cpufreq_lock);
                return EINVAL;
        }

        (void)memcpy(cfs, &cf->cf_state[index], sizeof(*cfs));
        mutex_exit(&cpufreq_lock);

        return 0;
}

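/*
 * Locate a state by frequency using a binary search over the table,
 * which is sorted in descending order. If there is no exact match,
 * the state at the final probe index is used as an approximation.
 */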
static void
cpufreq_get_state_raw(uint32_t freq, struct cpufreq_state *cfs)
{
        struct cpufreq *cf = cf_backend;
        uint32_t f, hi, i = 0, lo = 0;

        KASSERT(cf != NULL && cfs != NULL);
        KASSERT(mutex_owned(&cpufreq_lock) != 0);

        hi = cf->cf_state_count;

        while (lo < hi) {

                i = (lo + hi) >> 1;
                f = cf->cf_state[i].cfs_freq;

                if (freq == f)
                        break;
                else if (freq > f)
                        hi = i;
                else
                        lo = i + 1;
        }

        (void)memcpy(cfs, &cf->cf_state[i], sizeof(*cfs));
}

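/*
 * Set the frequency of a single CPU.
 */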
void
cpufreq_set(struct cpu_info *ci, uint32_t freq)
{
        struct cpufreq *cf;

        mutex_enter(&cpufreq_lock);
        cf = cf_backend;

        if (__predict_false(cf == NULL)) {
                mutex_exit(&cpufreq_lock);
                return;
        }

        cpufreq_set_raw(ci, freq);
        mutex_exit(&cpufreq_lock);
}

static void
cpufreq_set_raw(struct cpu_info *ci, uint32_t freq)
{
        struct cpufreq *cf = cf_backend;
        uint64_t xc;

        KASSERT(cf != NULL);
        KASSERT(mutex_owned(&cpufreq_lock) != 0);

        xc = xc_unicast(0, (*cf->cf_set_freq), cf->cf_cookie, &freq, ci);
        xc_wait(xc);
}

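/*
 * Set the frequency of all CPUs. The actual work is done by
 * cpufreq_set_all_raw(), which broadcasts a cross-call to the
 * backend's cf_set_freq() hook.
 */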
void
cpufreq_set_all(uint32_t freq)
{
        struct cpufreq *cf;

        mutex_enter(&cpufreq_lock);
        cf = cf_backend;

        if (__predict_false(cf == NULL)) {
                mutex_exit(&cpufreq_lock);
                return;
        }

        cpufreq_set_all_raw(freq);
        mutex_exit(&cpufreq_lock);
}

static void
cpufreq_set_all_raw(uint32_t freq)
{
        struct cpufreq *cf = cf_backend;
        uint64_t xc;

        KASSERT(cf != NULL);
        KASSERT(mutex_owned(&cpufreq_lock) != 0);

        xc = xc_broadcast(0, (*cf->cf_set_freq), cf->cf_cookie, &freq);
        xc_wait(xc);
}

#ifdef notyet
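/*
 * The following would step a CPU one state up or down. Note that a
 * negative step means a higher frequency, as the state table is in
 * descending order. Not yet enabled.
 */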
static void cpufreq_set_step(struct cpu_info *, int32_t);

void
cpufreq_set_higher(struct cpu_info *ci)
{
        cpufreq_set_step(ci, -1);
}

void
cpufreq_set_lower(struct cpu_info *ci)
{
        cpufreq_set_step(ci, 1);
}

static void
cpufreq_set_step(struct cpu_info *ci, int32_t step)
{
        struct cpufreq_state cfs;
        struct cpufreq *cf;
        uint32_t freq;
        int32_t index;

        mutex_enter(&cpufreq_lock);
        cf = cf_backend;

        if (__predict_false(cf == NULL)) {
                mutex_exit(&cpufreq_lock);
                return;
        }

        freq = cpufreq_get_raw(ci);

        if (__predict_false(freq == 0)) {
                mutex_exit(&cpufreq_lock);
                return;
        }

        cpufreq_get_state_raw(freq, &cfs);
        index = cfs.cfs_index + step;

        if (index < 0 || index >= (int32_t)cf->cf_state_count) {
                mutex_exit(&cpufreq_lock);
                return;
        }

        cpufreq_set_raw(ci, cf->cf_state[index].cfs_freq);
        mutex_exit(&cpufreq_lock);
}
#endif