Lines Matching refs:pd
407 struct scmi_perf_domain *pd = &sc->sc_perf_domains[domain];
427 if (pd->pd_levels == NULL) {
428 pd->pd_nlevels = pl->pl_nret + pl->pl_nrem;
429 pd->pd_levels = kmem_zalloc(pd->pd_nlevels *
435 pd->pd_levels[idx + i].pl_cost =
437 pd->pd_levels[idx + i].pl_perf =
439 pd->pd_levels[idx + i].pl_ifreq =
453 scmi_perf_limits_get(struct scmi_perf_domain *pd, uint32_t *max_level,
456 struct scmi_softc *sc = pd->pd_sc;
460 if (pd->pd_levels == NULL) {
467 shmem->message_payload[0] = pd->pd_domain_id;
479 scmi_perf_level_get(struct scmi_perf_domain *pd, uint32_t *perf_level)
481 struct scmi_softc *sc = pd->pd_sc;
485 if (pd->pd_levels == NULL) {
492 shmem->message_payload[0] = pd->pd_domain_id;
503 scmi_perf_level_set(struct scmi_perf_domain *pd, uint32_t perf_level)
505 struct scmi_softc *sc = pd->pd_sc;
509 if (pd->pd_levels == NULL) {
516 shmem->message_payload[0] = pd->pd_domain_id;
525 scmi_cpufreq_level_to_mhz(struct scmi_perf_domain *pd, uint32_t level)
529 if (pd->pd_level_index_mode) {
530 if (level < pd->pd_nlevels) {
531 return pd->pd_levels[level].pl_ifreq / 1000;
534 for (n = 0; n < pd->pd_nlevels; n++) {
535 if (pd->pd_levels[n].pl_perf == level) {
536 return pd->pd_levels[n].pl_ifreq / 1000;
545 scmi_cpufreq_set_rate(struct scmi_softc *sc, struct scmi_perf_domain *pd,
552 for (n = 0; n < pd->pd_nlevels; n++) {
553 if (pd->pd_levels[n].pl_ifreq / 1000 == freq_mhz) {
554 perf_level = pd->pd_level_index_mode ?
555 n : pd->pd_levels[n].pl_perf;
559 if (n == pd->pd_nlevels)
562 status = scmi_perf_level_set(pd, perf_level);
567 if (pd->pd_rate_limit > 0)
568 delay(pd->pd_rate_limit);
576 struct scmi_perf_domain * const pd = rnode->sysctl_data;
577 struct scmi_softc * const sc = pd->pd_sc;
587 if (rnode->sysctl_num == pd->pd_node_target) {
588 if (pd->pd_freq_target == 0) {
589 status = scmi_perf_level_get(pd, &level);
593 pd->pd_freq_target =
594 scmi_cpufreq_level_to_mhz(pd, level);
596 fq = pd->pd_freq_target;
598 status = scmi_perf_level_get(pd, &level);
602 fq = scmi_cpufreq_level_to_mhz(pd, level);
605 if (rnode->sysctl_num == pd->pd_node_target)
608 if (pd->pd_freq_target == 0)
609 pd->pd_freq_target = fq;
615 if (fq == oldfq || rnode->sysctl_num != pd->pd_node_target)
618 if (atomic_cas_uint(&pd->pd_busy, 0, 1) != 0)
621 old_target = pd->pd_freq_target;
622 pd->pd_freq_target = fq;
624 	error = scmi_cpufreq_set_rate(sc, pd,
626 pd->pd_freq_target = old_target;
629 atomic_dec_uint(&pd->pd_busy);
638 struct scmi_perf_domain *pd = &sc->sc_perf_domains[domain_id];
639 struct cpu_info *ci = pd->pd_ci;
648 status = scmi_perf_limits_get(pd, &max_level, &min_level);
660 pd->pd_freq_available = kmem_zalloc(strlen("XXXX ") *
661 pd->pd_nlevels, KM_SLEEP);
662 for (i = 0; i < pd->pd_nlevels; i++) {
664 uint32_t level = pd->pd_level_index_mode ?
665 i : pd->pd_levels[i].pl_perf;
674 pd->pd_levels[i].pl_ifreq / 1000);
675 strcat(pd->pd_freq_available, buf);
676 if (level == pd->pd_sustained_perf) {
699 scmi_cpufreq_sysctl_helper, 0, (void *)pd, 0,
703 pd->pd_node_target = node->sysctl_num;
707 scmi_cpufreq_sysctl_helper, 0, (void *)pd, 0,
711 pd->pd_node_current = node->sysctl_num;
715 NULL, 0, pd->pd_freq_available, 0,
719 pd->pd_node_available = node->sysctl_num;
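Taken together, the matches trace the driver's flow: attach caches the per-domain level table (lines 427-439), scmi_cpufreq_level_to_mhz maps a perf level to MHz (lines 525-536), and scmi_cpufreq_set_rate does the reverse lookup before calling scmi_perf_level_set (lines 552-568). The standalone C sketch below models only those two table lookups as implied by the fragments above; the struct layout is reduced to the fields visible in the matches, the kHz interpretation of pl_ifreq is inferred from the /1000 divisions, and main() with its example table is purely illustrative, not part of the driver.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Reduced to the fields visible in the matches above. */
struct scmi_perf_level {
	uint32_t pl_perf;	/* opaque performance level value */
	uint32_t pl_ifreq;	/* indicative frequency; the /1000 above suggests kHz */
};

struct scmi_perf_domain {
	struct scmi_perf_level	*pd_levels;
	uint32_t		 pd_nlevels;
	bool			 pd_level_index_mode;	/* levels are table indices */
};

/* Mirrors the lookup in scmi_cpufreq_level_to_mhz (lines 529-536). */
static uint32_t
level_to_mhz(struct scmi_perf_domain *pd, uint32_t level)
{
	uint32_t n;

	if (pd->pd_level_index_mode) {
		if (level < pd->pd_nlevels)
			return pd->pd_levels[level].pl_ifreq / 1000;
		return 0;
	}
	for (n = 0; n < pd->pd_nlevels; n++) {
		if (pd->pd_levels[n].pl_perf == level)
			return pd->pd_levels[n].pl_ifreq / 1000;
	}
	return 0;
}

/* Mirrors the reverse lookup in scmi_cpufreq_set_rate (lines 552-559). */
static int
mhz_to_level(struct scmi_perf_domain *pd, uint32_t freq_mhz, uint32_t *levelp)
{
	uint32_t n;

	for (n = 0; n < pd->pd_nlevels; n++) {
		if (pd->pd_levels[n].pl_ifreq / 1000 == freq_mhz) {
			*levelp = pd->pd_level_index_mode ?
			    n : pd->pd_levels[n].pl_perf;
			return 0;
		}
	}
	return -1;	/* no exact match; the driver bails out here (line 559) */
}

int
main(void)
{
	/* Hypothetical three-level table: 600, 800, 1000 MHz. */
	struct scmi_perf_level levels[] = {
		{ .pl_perf = 0x100, .pl_ifreq =  600000 },
		{ .pl_perf = 0x200, .pl_ifreq =  800000 },
		{ .pl_perf = 0x300, .pl_ifreq = 1000000 },
	};
	struct scmi_perf_domain pd = {
		.pd_levels = levels,
		.pd_nlevels = 3,
		.pd_level_index_mode = false,
	};
	uint32_t level;

	if (mhz_to_level(&pd, 800, &level) == 0)
		printf("800 MHz -> level 0x%x -> %u MHz\n",
		    (unsigned)level, (unsigned)level_to_mhz(&pd, level));
	return 0;
}

Both lookups branch on pd_level_index_mode because the matches show two conventions: firmware that reports levels as table indices and firmware that reports opaque pl_perf values. The sysctl handler also guards the set path with atomic_cas_uint on pd_busy (lines 618, 629) and honors an optional pd_rate_limit delay after a level change (lines 567-568); neither is modeled in the sketch.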