/*	$NetBSD: subr_cpufreq.c,v 1.10 2023/04/09 09:18:09 riastradh Exp $ */

/*-
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jukka Ruohonen.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_cpufreq.c,v 1.10 2023/04/09 09:18:09 riastradh Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/cpufreq.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/time.h>
#include <sys/xcall.h>

static int	 cpufreq_latency(void);
static uint32_t	 cpufreq_get_max(void);
static uint32_t	 cpufreq_get_min(void);
static uint32_t	 cpufreq_get_raw(struct cpu_info *);
static void	 cpufreq_get_state_raw(uint32_t, struct cpufreq_state *);
static void	 cpufreq_set_raw(struct cpu_info *, uint32_t);
static void	 cpufreq_set_all_raw(uint32_t);

static kmutex_t		cpufreq_lock __cacheline_aligned;
static struct cpufreq  *cf_backend __read_mostly = NULL;

void
cpufreq_init(void)
{

	mutex_init(&cpufreq_lock, MUTEX_DEFAULT, IPL_NONE);
	cf_backend = kmem_zalloc(sizeof(*cf_backend), KM_SLEEP);
}

int
cpufreq_register(struct cpufreq *cf)
{
	uint32_t c, i, j, k, m;
	int rv;

	/* Too early: the cross-calls used below need a running system. */
	if (cold != 0)
		return EBUSY;

	KASSERT(cf != NULL);
	KASSERT(cf_backend != NULL);
	KASSERT(cf->cf_get_freq != NULL);
	KASSERT(cf->cf_set_freq != NULL);
	KASSERT(cf->cf_state_count > 0);
	KASSERT(cf->cf_state_count < CPUFREQ_STATE_MAX);

	mutex_enter(&cpufreq_lock);

	if (cf_backend->cf_init != false) {
		mutex_exit(&cpufreq_lock);
		return EALREADY;
	}

	cf_backend->cf_init = true;
	cf_backend->cf_mp = cf->cf_mp;
	cf_backend->cf_cookie = cf->cf_cookie;
	cf_backend->cf_get_freq = cf->cf_get_freq;
	cf_backend->cf_set_freq = cf->cf_set_freq;

	(void)strlcpy(cf_backend->cf_name, cf->cf_name, sizeof(cf->cf_name));

	/*
	 * Sanity check the values and verify descending order.
	 */
	for (c = i = 0; i < cf->cf_state_count; i++) {

		CTASSERT(CPUFREQ_STATE_ENABLED != 0);
		CTASSERT(CPUFREQ_STATE_DISABLED != 0);

		if (cf->cf_state[i].cfs_freq == 0)
			continue;

		if (cf->cf_state[i].cfs_freq > 9999 &&
		    cf->cf_state[i].cfs_freq != CPUFREQ_STATE_ENABLED &&
		    cf->cf_state[i].cfs_freq != CPUFREQ_STATE_DISABLED)
			continue;

		for (j = k = 0; j < i; j++) {

			if (cf->cf_state[i].cfs_freq >=
			    cf->cf_state[j].cfs_freq) {
				k = 1;
				break;
			}
		}

		if (k != 0)
			continue;

		cf_backend->cf_state[c].cfs_index = c;
		cf_backend->cf_state[c].cfs_freq = cf->cf_state[i].cfs_freq;
		cf_backend->cf_state[c].cfs_power = cf->cf_state[i].cfs_power;

		c++;
	}

	cf_backend->cf_state_count = c;

	if (cf_backend->cf_state_count == 0) {
		mutex_exit(&cpufreq_lock);
		cpufreq_deregister();
		return EINVAL;
	}

	rv = cpufreq_latency();

	if (rv != 0) {
		mutex_exit(&cpufreq_lock);
		cpufreq_deregister();
		return rv;
	}

	/* Start all CPUs at the maximum available frequency. */
	m = cpufreq_get_max();
	cpufreq_set_all_raw(m);
	mutex_exit(&cpufreq_lock);

	return 0;
}
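
/*
 * Example (illustrative sketch only; the foo_* names below are
 * hypothetical and not part of this file): a machine-dependent driver
 * registers itself by filling in a struct cpufreq with its frequency
 * table, fastest state first, and calling cpufreq_register() once the
 * system is no longer cold.  The cf_get_freq and cf_set_freq hooks are
 * later run as xcall(9) handlers with (cf_cookie, &freq) as arguments,
 * as seen in cpufreq_get_raw() and cpufreq_set_raw() below.
 *
 *	static struct cpufreq foo_cf;
 *
 *	static int
 *	foo_register(device_t self)
 *	{
 *
 *		(void)strlcpy(foo_cf.cf_name, "foo", sizeof(foo_cf.cf_name));
 *		foo_cf.cf_cookie = device_private(self);
 *		foo_cf.cf_get_freq = foo_get_freq;
 *		foo_cf.cf_set_freq = foo_set_freq;
 *		foo_cf.cf_state_count = 2;
 *		foo_cf.cf_state[0].cfs_freq = 2400;	-- fastest first
 *		foo_cf.cf_state[1].cfs_freq = 1200;
 *
 *		return cpufreq_register(&foo_cf);
 *	}
 */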

void
cpufreq_deregister(void)
{

	mutex_enter(&cpufreq_lock);
	memset(cf_backend, 0, sizeof(*cf_backend));
	mutex_exit(&cpufreq_lock);
}

static int
cpufreq_latency(void)
{
	struct cpufreq *cf = cf_backend;
	struct timespec nta, ntb;
	const uint32_t n = 10;
	uint32_t i, j, l, m;
	uint64_t s;

	l = cpufreq_get_min();
	m = cpufreq_get_max();

	/*
	 * For each state, sample the average transition
	 * latency required to set the state for all CPUs.
	 */
	for (i = 0; i < cf->cf_state_count; i++) {

		for (s = 0, j = 0; j < n; j++) {

			/*
			 * Attempt to exclude possible
			 * caching done by the backend.
			 */
			if (i == 0)
				cpufreq_set_all_raw(l);
			else {
				cpufreq_set_all_raw(m);
			}

			nanotime(&nta);
			cpufreq_set_all_raw(cf->cf_state[i].cfs_freq);
			nanotime(&ntb);
			timespecsub(&ntb, &nta, &ntb);

			if (ntb.tv_sec != 0 ||
			    ntb.tv_nsec > CPUFREQ_LATENCY_MAX)
				continue;

			if (s >= UINT64_MAX - CPUFREQ_LATENCY_MAX)
				break;

			/* Convert to microseconds to prevent overflow */
			s += ntb.tv_nsec / 1000;
		}

		/*
		 * Consider the backend unsuitable if
		 * the transition latency was too high.
		 */
		if (s == 0)
			return EMSGSIZE;

		cf->cf_state[i].cfs_latency = s / n;
	}

	return 0;
}

void
cpufreq_suspend(struct cpu_info *ci)
{
	struct cpufreq *cf = cf_backend;
	uint32_t l, s;

	mutex_enter(&cpufreq_lock);

	if (cf->cf_init != true) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	/* Save the current frequency and drop to the minimum for suspend. */
	l = cpufreq_get_min();
	s = cpufreq_get_raw(ci);

	cpufreq_set_raw(ci, l);
	cf->cf_state_saved = s;

	mutex_exit(&cpufreq_lock);
}

void
cpufreq_resume(struct cpu_info *ci)
{
	struct cpufreq *cf = cf_backend;

	mutex_enter(&cpufreq_lock);

	if (cf->cf_init != true || cf->cf_state_saved == 0) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	/* Restore the frequency that was active before suspend. */
	cpufreq_set_raw(ci, cf->cf_state_saved);
	mutex_exit(&cpufreq_lock);
}

uint32_t
cpufreq_get(struct cpu_info *ci)
{
	struct cpufreq *cf = cf_backend;
	uint32_t freq;

	mutex_enter(&cpufreq_lock);

	if (cf->cf_init != true) {
		mutex_exit(&cpufreq_lock);
		return 0;
	}

	freq = cpufreq_get_raw(ci);
	mutex_exit(&cpufreq_lock);

	return freq;
}

static uint32_t
cpufreq_get_max(void)
{
	struct cpufreq *cf = cf_backend;

	KASSERT(cf->cf_init != false);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	/* The states are kept in descending order; index 0 is the fastest. */
	return cf->cf_state[0].cfs_freq;
}

static uint32_t
cpufreq_get_min(void)
{
	struct cpufreq *cf = cf_backend;

	KASSERT(cf->cf_init != false);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	/* ...and the last valid entry is the slowest. */
	return cf->cf_state[cf->cf_state_count - 1].cfs_freq;
}

static uint32_t
cpufreq_get_raw(struct cpu_info *ci)
{
	struct cpufreq *cf = cf_backend;
	uint32_t freq = 0;
	uint64_t xc;

	KASSERT(cf->cf_init != false);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	xc = xc_unicast(0, (*cf->cf_get_freq), cf->cf_cookie, &freq, ci);
	xc_wait(xc);

	return freq;
}

int
cpufreq_get_backend(struct cpufreq *dst)
{
	struct cpufreq *cf = cf_backend;

	mutex_enter(&cpufreq_lock);

	if (cf->cf_init != true || dst == NULL) {
		mutex_exit(&cpufreq_lock);
		return ENODEV;
	}

	memcpy(dst, cf, sizeof(*cf));
	mutex_exit(&cpufreq_lock);

	return 0;
}

int
cpufreq_get_state(uint32_t freq, struct cpufreq_state *cfs)
{
	struct cpufreq *cf = cf_backend;

	mutex_enter(&cpufreq_lock);

	if (cf->cf_init != true || cfs == NULL) {
		mutex_exit(&cpufreq_lock);
		return ENODEV;
	}

	cpufreq_get_state_raw(freq, cfs);
	mutex_exit(&cpufreq_lock);

	return 0;
}

int
cpufreq_get_state_index(uint32_t index, struct cpufreq_state *cfs)
{
	struct cpufreq *cf = cf_backend;

	mutex_enter(&cpufreq_lock);

	if (cf->cf_init != true || cfs == NULL) {
		mutex_exit(&cpufreq_lock);
		return ENODEV;
	}

	if (index >= cf->cf_state_count) {
		mutex_exit(&cpufreq_lock);
		return EINVAL;
	}

	memcpy(cfs, &cf->cf_state[index], sizeof(*cfs));
	mutex_exit(&cpufreq_lock);

	return 0;
}
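
/*
 * Example (illustrative sketch only): the normalized state table can be
 * enumerated with cpufreq_get_state_index(), which returns EINVAL once
 * the index runs past cf_state_count and ENODEV if no back-end has been
 * registered:
 *
 *	struct cpufreq_state cfs;
 *	uint32_t i;
 *
 *	for (i = 0; cpufreq_get_state_index(i, &cfs) == 0; i++)
 *		printf("state %u: freq %u, latency %u us\n",
 *		    cfs.cfs_index, cfs.cfs_freq, cfs.cfs_latency);
 */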

static void
cpufreq_get_state_raw(uint32_t freq, struct cpufreq_state *cfs)
{
	struct cpufreq *cf = cf_backend;
	uint32_t f, hi, i = 0, lo = 0;

	KASSERT(mutex_owned(&cpufreq_lock) != 0);
	KASSERT(cf->cf_init != false);
	KASSERT(cfs != NULL);

	/*
	 * Binary search for the state matching the requested frequency.
	 * The table is sorted in descending order, so higher frequencies
	 * are found at lower indices.  Without an exact match, the entry
	 * where the search terminates is used.
	 */
	hi = cf->cf_state_count;

	while (lo < hi) {

		i = (lo + hi) >> 1;
		f = cf->cf_state[i].cfs_freq;

		if (freq == f)
			break;
		else if (freq > f)
			hi = i;
		else {
			lo = i + 1;
		}
	}

	memcpy(cfs, &cf->cf_state[i], sizeof(*cfs));
}

void
cpufreq_set(struct cpu_info *ci, uint32_t freq)
{
	struct cpufreq *cf = cf_backend;

	mutex_enter(&cpufreq_lock);

	if (__predict_false(cf->cf_init != true)) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_set_raw(ci, freq);
	mutex_exit(&cpufreq_lock);
}

static void
cpufreq_set_raw(struct cpu_info *ci, uint32_t freq)
{
	struct cpufreq *cf = cf_backend;
	uint64_t xc;

	KASSERT(cf->cf_init != false);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	xc = xc_unicast(0, (*cf->cf_set_freq), cf->cf_cookie, &freq, ci);
	xc_wait(xc);
}

void
cpufreq_set_all(uint32_t freq)
{
	struct cpufreq *cf = cf_backend;

	mutex_enter(&cpufreq_lock);

	if (__predict_false(cf->cf_init != true)) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_set_all_raw(freq);
	mutex_exit(&cpufreq_lock);
}

static void
cpufreq_set_all_raw(uint32_t freq)
{
	struct cpufreq *cf = cf_backend;
	uint64_t xc;

	KASSERT(cf->cf_init != false);
	KASSERT(mutex_owned(&cpufreq_lock) != 0);

	xc = xc_broadcast(0, (*cf->cf_set_freq), cf->cf_cookie, &freq);
	xc_wait(xc);
}
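
/*
 * Example (illustrative sketch only): a caller can combine
 * cpufreq_get_backend() with cpufreq_set_all() to move every CPU to
 * the slowest advertised state; the table is kept in descending order,
 * so the last entry is the minimum:
 *
 *	struct cpufreq cf;
 *
 *	if (cpufreq_get_backend(&cf) == 0 && cf.cf_state_count > 0)
 *		cpufreq_set_all(cf.cf_state[cf.cf_state_count - 1].cfs_freq);
 *
 * Note that struct cpufreq embeds the whole state table, so a real
 * caller might prefer a kmem_alloc(9)'d copy over a stack variable.
 */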

#ifdef notyet
/* Not yet used: step to the adjacent higher or lower frequency state. */
void
cpufreq_set_higher(struct cpu_info *ci)
{
	cpufreq_set_step(ci, -1);
}

void
cpufreq_set_lower(struct cpu_info *ci)
{
	cpufreq_set_step(ci, 1);
}

static void
cpufreq_set_step(struct cpu_info *ci, int32_t step)
{
	struct cpufreq *cf = cf_backend;
	struct cpufreq_state cfs;
	uint32_t freq;
	int32_t index;

	mutex_enter(&cpufreq_lock);

	if (__predict_false(cf->cf_init != true)) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	freq = cpufreq_get_raw(ci);

	if (__predict_false(freq == 0)) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_get_state_raw(freq, &cfs);
	index = cfs.cfs_index + step;

	if (index < 0 || index >= (int32_t)cf->cf_state_count) {
		mutex_exit(&cpufreq_lock);
		return;
	}

	cpufreq_set_raw(ci, cf->cf_state[index].cfs_freq);
	mutex_exit(&cpufreq_lock);
}
#endif