/*	$NetBSD: subr_cpufreq.c,v 1.1 2011/09/28 10:55:48 jruoho Exp $ */

/*-
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jukka Ruohonen.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_cpufreq.c,v 1.1 2011/09/28 10:55:48 jruoho Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/cpufreq.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/once.h>
#include <sys/time.h>
#include <sys/xcall.h>

static int	 cpufreq_init(void);
static int	 cpufreq_latency(void);
static uint32_t	 cpufreq_get_max(void);
static uint32_t	 cpufreq_get_min(void);
static uint32_t	 cpufreq_get_raw(struct cpu_info *);
static void	 cpufreq_get_state_raw(uint32_t, struct cpufreq_state *);
static void	 cpufreq_set_raw(struct cpu_info *, uint32_t);
static void	 cpufreq_set_all_raw(uint32_t);
#ifdef notyet
static void	 cpufreq_set_step(struct cpu_info *, int32_t);
#endif

static kmutex_t cpufreq_lock __cacheline_aligned;
static struct cpufreq *cf_backend __read_mostly = NULL;

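/*
 * One-time initialization of the framework lock, invoked through
 * RUN_ONCE() on the first call to cpufreq_register().
 */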
static int
cpufreq_init(void)
{

	mutex_init(&cpufreq_lock, MUTEX_DEFAULT, IPL_NONE);

	return 0;
}

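/*
 * Register a frequency scaling backend. Only one backend can be
 * registered at a time. The state table supplied by the driver is
 * sanity checked and copied so that the accepted states end up in
 * strictly descending order of frequency, the per-state transition
 * latencies are measured, and all CPUs are then set to the maximum
 * frequency. Returns EALREADY if a backend is already registered.
 */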
int
cpufreq_register(struct cpufreq *cf)
{
	static ONCE_DECL(cpufreq_once);
	uint32_t count, i, j, k, m;
	int rv;

	rv = RUN_ONCE(&cpufreq_once, cpufreq_init);

	KASSERT(rv == 0);
	KASSERT(cf != NULL);
	KASSERT(cf->cf_get_freq != NULL);
	KASSERT(cf->cf_set_freq != NULL);
	KASSERT(cf->cf_state_count > 0);
	KASSERT(cf->cf_state_count < CPUFREQ_STATE_MAX);

	mutex_enter(&cpufreq_lock);

	if (cf_backend != NULL) {
		mutex_exit(&cpufreq_lock);
		return EALREADY;
	}

	mutex_exit(&cpufreq_lock);
	cf_backend = kmem_zalloc(sizeof(*cf), KM_SLEEP);

	if (cf_backend == NULL)
		return ENOMEM;

	mutex_enter(&cpufreq_lock);

	cf_backend->cf_mp = cf->cf_mp;
	cf_backend->cf_cookie = cf->cf_cookie;
	cf_backend->cf_get_freq = cf->cf_get_freq;
	cf_backend->cf_set_freq = cf->cf_set_freq;

	(void)strlcpy(cf_backend->cf_name, cf->cf_name, sizeof(cf->cf_name));

	/*
	 * Sanity check the values and verify descending order.
	 * Accepted states are compacted to indices 0 .. count - 1.
	 */
	for (count = i = 0; i < cf->cf_state_count; i++) {

		CTASSERT(CPUFREQ_STATE_ENABLED != 0);
		CTASSERT(CPUFREQ_STATE_DISABLED != 0);

		if (cf->cf_state[i].cfs_freq == 0)
			continue;

		for (j = k = 0; j < i; j++) {

			if (cf->cf_state[i].cfs_freq >=
			    cf->cf_state[j].cfs_freq) {
				k = 1;
				break;
			}
		}

		if (k != 0)
			continue;

		cf_backend->cf_state[count].cfs_index = count;
		cf_backend->cf_state[count].cfs_freq = cf->cf_state[i].cfs_freq;
		cf_backend->cf_state[count].cfs_power = cf->cf_state[i].cfs_power;

		count++;
	}

	cf_backend->cf_state_count = count;

	if (cf_backend->cf_state_count == 0) {
		mutex_exit(&cpufreq_lock);
		cpufreq_deregister();
		return EINVAL;
	}

	rv = cpufreq_latency();

	if (rv != 0) {
		mutex_exit(&cpufreq_lock);
		cpufreq_deregister();
		return rv;
	}

	m = cpufreq_get_max();
	cpufreq_set_all_raw(m);
	mutex_exit(&cpufreq_lock);

	return 0;
}

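/*
 * Deregister the current backend, if any. The global backend pointer
 * is cleared while holding the lock before the descriptor is freed,
 * so that concurrent callers can no longer reach the stale backend.
 */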
void
cpufreq_deregister(void)
{
	struct cpufreq *cf;

	mutex_enter(&cpufreq_lock);
	cf = cf_backend;
	cf_backend = NULL;
	mutex_exit(&cpufreq_lock);

	if (cf != NULL)
		kmem_free(cf, sizeof(*cf));
}

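/*
 * Estimate the average transition latency of each state. Called from
 * cpufreq_register() with cpufreq_lock held; the backend is rejected
 * (EMSGSIZE) if no sample for a state stays within CPUFREQ_LATENCY_MAX.
 */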
static int
cpufreq_latency(void)
{
	struct cpufreq *cf = cf_backend;
	struct timespec nta, ntb;
	const uint32_t n = 10;
	uint32_t i, j, l, m;
	uint64_t s;

	l = cpufreq_get_min();
	m = cpufreq_get_max();

	/*
	 * For each state, sample the average transition
	 * latency required to set the state for all CPUs.
	 */
	for (i = 0; i < cf->cf_state_count; i++) {

		for (s = 0, j = 0; j < n; j++) {

			/*
			 * Attempt to exclude possible
			 * caching done by the backend.
			 */
			if (i == 0)
				cpufreq_set_all_raw(l);
			else
				cpufreq_set_all_raw(m);

			nta.tv_sec = nta.tv_nsec = 0;
			ntb.tv_sec = ntb.tv_nsec = 0;

			nanotime(&nta);
			cpufreq_set_all_raw(cf->cf_state[i].cfs_freq);
			nanotime(&ntb);
			timespecsub(&ntb, &nta, &ntb);

			if (ntb.tv_sec != 0 ||
			    ntb.tv_nsec > CPUFREQ_LATENCY_MAX)
				continue;

			if (s >= UINT64_MAX - CPUFREQ_LATENCY_MAX)
				break;

			s += ntb.tv_nsec;
		}

		/*
		 * Consider the backend unsuitable if
		 * the transition latency was too high.
		 */
		if (s == 0)
			return EMSGSIZE;

		cf->cf_state[i].cfs_latency = s / n;
	}

	return 0;
}

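/*
 * Switch the given CPU to the lowest frequency and remember the
 * frequency it was running at, so that cpufreq_resume() can restore it.
 */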
void
cpufreq_suspend(struct cpu_info *ci)
{
	struct cpufreq *cf;
	uint32_t l, s;

	mutex_enter(&cpufreq_lock);
	cf = cf_backend;

	if (cf == NULL) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	l = cpufreq_get_min();
	s = cpufreq_get_raw(ci);

	cpufreq_set_raw(ci, l);
	cf->cf_state_saved = s;

	mutex_exit(&cpufreq_lock);
}

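/*
 * Restore the frequency saved by cpufreq_suspend(), if any.
 */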
void
cpufreq_resume(struct cpu_info *ci)
{
	struct cpufreq *cf;

	mutex_enter(&cpufreq_lock);
	cf = cf_backend;

	if (cf == NULL || cf->cf_state_saved == 0) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_set_raw(ci, cf->cf_state_saved);
	mutex_exit(&cpufreq_lock);
}

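/*
 * Return the current frequency of the given CPU, or zero if no
 * backend is registered.
 */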
uint32_t
cpufreq_get(struct cpu_info *ci)
{
	struct cpufreq *cf;
	uint32_t freq;

	mutex_enter(&cpufreq_lock);
	cf = cf_backend;

	if (cf == NULL) {
		mutex_exit(&cpufreq_lock);
		return 0;
	}

	freq = cpufreq_get_raw(ci);
	mutex_exit(&cpufreq_lock);

	return freq;
}

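/*
 * The state table is sorted in descending order of frequency: the
 * maximum is at index zero and the minimum at the last valid index.
 */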
static uint32_t
cpufreq_get_max(void)
{
	struct cpufreq *cf = cf_backend;

	KASSERT(cf != NULL);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	return cf->cf_state[0].cfs_freq;
}

static uint32_t
cpufreq_get_min(void)
{
	struct cpufreq *cf = cf_backend;

	KASSERT(cf != NULL);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	return cf->cf_state[cf->cf_state_count - 1].cfs_freq;
}

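/*
 * Query the frequency by running the backend's cf_get_freq hook on
 * the target CPU via a cross-call.
 */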
static uint32_t
cpufreq_get_raw(struct cpu_info *ci)
{
	struct cpufreq *cf = cf_backend;
	uint32_t freq = 0;
	uint64_t xc;

	KASSERT(cf != NULL);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	xc = xc_unicast(0, (*cf->cf_get_freq), cf->cf_cookie, &freq, ci);
	xc_wait(xc);

	return freq;
}

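/*
 * Copy the registered backend descriptor for the caller.
 */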
int
cpufreq_get_backend(struct cpufreq *cf)
{

	mutex_enter(&cpufreq_lock);

	if (cf_backend == NULL || cf == NULL) {
		mutex_exit(&cpufreq_lock);
		return ENODEV;
	}

	(void)memcpy(cf, cf_backend, sizeof(*cf));
	mutex_exit(&cpufreq_lock);

	return 0;
}

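/*
 * Look up the state that corresponds to the given frequency.
 */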
int
cpufreq_get_state(uint32_t freq, struct cpufreq_state *cfs)
{
	struct cpufreq *cf;

	mutex_enter(&cpufreq_lock);
	cf = cf_backend;

	if (cf == NULL || cfs == NULL) {
		mutex_exit(&cpufreq_lock);
		return ENODEV;
	}

	cpufreq_get_state_raw(freq, cfs);
	mutex_exit(&cpufreq_lock);

	return 0;
}

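/*
 * Copy out the state at the given table index.
 */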
int
cpufreq_get_state_index(uint32_t index, struct cpufreq_state *cfs)
{
	struct cpufreq *cf;

	mutex_enter(&cpufreq_lock);
	cf = cf_backend;

	if (cf == NULL || cfs == NULL) {
		mutex_exit(&cpufreq_lock);
		return ENODEV;
	}

	if (index >= cf->cf_state_count) {
		mutex_exit(&cpufreq_lock);
		return EINVAL;
	}

	(void)memcpy(cfs, &cf->cf_state[index], sizeof(*cfs));
	mutex_exit(&cpufreq_lock);

	return 0;
}

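/*
 * Binary search for the state matching the given frequency. Because
 * the table is in descending order, the search moves towards lower
 * indices for higher frequencies; if there is no exact match, the
 * last state examined is returned.
 */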
static void
cpufreq_get_state_raw(uint32_t freq, struct cpufreq_state *cfs)
{
	struct cpufreq *cf = cf_backend;
	uint32_t f, hi, i = 0, lo = 0;

	KASSERT(cf != NULL && cfs != NULL);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	hi = cf->cf_state_count;

	while (lo < hi) {

		i = (lo + hi) >> 1;
		f = cf->cf_state[i].cfs_freq;

		if (freq == f)
			break;
		else if (freq > f)
			hi = i;
		else
			lo = i + 1;
	}

	(void)memcpy(cfs, &cf->cf_state[i], sizeof(*cfs));
}

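/*
 * Set the frequency of the given CPU.
 */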
void
cpufreq_set(struct cpu_info *ci, uint32_t freq)
{
	struct cpufreq *cf;

	mutex_enter(&cpufreq_lock);
	cf = cf_backend;

	if (__predict_false(cf == NULL)) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_set_raw(ci, freq);
	mutex_exit(&cpufreq_lock);
}

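/*
 * Run the backend's cf_set_freq hook on the target CPU via a cross-call.
 */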
static void
cpufreq_set_raw(struct cpu_info *ci, uint32_t freq)
{
	struct cpufreq *cf = cf_backend;
	uint64_t xc;

	KASSERT(cf != NULL);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	xc = xc_unicast(0, (*cf->cf_set_freq), cf->cf_cookie, &freq, ci);
	xc_wait(xc);
}

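/*
 * Set the frequency of all CPUs in the system.
 */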
void
cpufreq_set_all(uint32_t freq)
{
	struct cpufreq *cf;

	mutex_enter(&cpufreq_lock);
	cf = cf_backend;

	if (__predict_false(cf == NULL)) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_set_all_raw(freq);
	mutex_exit(&cpufreq_lock);
}

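/*
 * Broadcast the backend's cf_set_freq hook to all CPUs.
 */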
static void
cpufreq_set_all_raw(uint32_t freq)
{
	struct cpufreq *cf = cf_backend;
	uint64_t xc;

	KASSERT(cf != NULL);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	xc = xc_broadcast(0, (*cf->cf_set_freq), cf->cf_cookie, &freq);
	xc_wait(xc);
}

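/*
 * Stepwise transitions, not yet enabled. Because the state table is
 * in descending order of frequency, a negative step moves to a higher
 * frequency and a positive step to a lower one.
 */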
#ifdef notyet
void
cpufreq_set_higher(struct cpu_info *ci)
{
	cpufreq_set_step(ci, -1);
}

void
cpufreq_set_lower(struct cpu_info *ci)
{
	cpufreq_set_step(ci, 1);
}

static void
cpufreq_set_step(struct cpu_info *ci, int32_t step)
{
	struct cpufreq_state cfs;
	struct cpufreq *cf;
	uint32_t freq;
	int32_t index;

	mutex_enter(&cpufreq_lock);
	cf = cf_backend;

	if (__predict_false(cf == NULL)) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	freq = cpufreq_get_raw(ci);

	if (__predict_false(freq == 0)) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_get_state_raw(freq, &cfs);
	index = cfs.cfs_index + step;

	if (index < 0 || index >= (int32_t)cf->cf_state_count) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_set_raw(ci, cf->cf_state[index].cfs_freq);
	mutex_exit(&cpufreq_lock);
}
#endif