/*	$NetBSD: amdgpu_dm_pp_smu.c,v 1.3 2021/12/19 10:59:01 riastradh Exp $	*/

/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_dm_pp_smu.c,v 1.3 2021/12/19 10:59:01 riastradh Exp $");

#include <linux/string.h>
#include <linux/acpi.h>

#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include "dm_services.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"
#include "amdgpu_pm.h"
#include "dm_pp_smu.h"
#include "amdgpu_smu.h"

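/*
 * Push DC's display configuration into adev->pm and notify the power
 * backend: the legacy powerplay hook when one is registered, otherwise
 * the SWSMU path.  DC supplies clocks in kHz while pplib keeps them in
 * 10 kHz units, hence the divisions by 10 below.
 */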
bool dm_pp_apply_display_requirements(
		const struct dc_context *ctx,
		const struct dm_pp_display_configuration *pp_display_cfg)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct smu_context *smu = &adev->smu;
	int i;

	if (adev->pm.dpm_enabled) {

		memset(&adev->pm.pm_display_cfg, 0,
				sizeof(adev->pm.pm_display_cfg));

		adev->pm.pm_display_cfg.cpu_cc6_disable =
			pp_display_cfg->cpu_cc6_disable;

		adev->pm.pm_display_cfg.cpu_pstate_disable =
			pp_display_cfg->cpu_pstate_disable;

		adev->pm.pm_display_cfg.cpu_pstate_separation_time =
			pp_display_cfg->cpu_pstate_separation_time;

		adev->pm.pm_display_cfg.nb_pstate_switch_disable =
			pp_display_cfg->nb_pstate_switch_disable;

		adev->pm.pm_display_cfg.num_display =
				pp_display_cfg->display_count;
		adev->pm.pm_display_cfg.num_path_including_non_display =
				pp_display_cfg->display_count;

		adev->pm.pm_display_cfg.min_core_set_clock =
				pp_display_cfg->min_engine_clock_khz/10;
		adev->pm.pm_display_cfg.min_core_set_clock_in_sr =
				pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
		adev->pm.pm_display_cfg.min_mem_set_clock =
				pp_display_cfg->min_memory_clock_khz/10;

		adev->pm.pm_display_cfg.min_dcef_deep_sleep_set_clk =
				pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
		adev->pm.pm_display_cfg.min_dcef_set_clk =
				pp_display_cfg->min_dcfclock_khz/10;

		adev->pm.pm_display_cfg.multi_monitor_in_sync =
				pp_display_cfg->all_displays_in_sync;
		adev->pm.pm_display_cfg.min_vblank_time =
				pp_display_cfg->avail_mclk_switch_time_us;

		adev->pm.pm_display_cfg.display_clk =
				pp_display_cfg->disp_clk_khz/10;

		adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency =
				pp_display_cfg->avail_mclk_switch_time_in_disp_active_us;

		adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index;
		adev->pm.pm_display_cfg.line_time_in_us =
				pp_display_cfg->line_time_in_us;

		adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh;
		adev->pm.pm_display_cfg.crossfire_display_index = -1;
		adev->pm.pm_display_cfg.min_bus_bandwidth = 0;

		for (i = 0; i < pp_display_cfg->display_count; i++) {
			const struct dm_pp_single_disp_config *dc_cfg =
						&pp_display_cfg->disp_configs[i];
			adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1;
		}

		if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->display_configuration_change)
			adev->powerplay.pp_funcs->display_configuration_change(
				adev->powerplay.pp_handle,
				&adev->pm.pm_display_cfg);
		else
			smu_display_configuration_change(smu,
							 &adev->pm.pm_display_cfg);

		amdgpu_pm_compute_clocks(adev);
	}

	return true;
}
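/*
 * Fallback tables used when the clock query fails: a fixed set of
 * display, engine, and memory clock levels in kHz.
 */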
static void get_default_clock_levels(
		enum dm_pp_clock_type clk_type,
		struct dm_pp_clock_levels *clks)
{
	uint32_t disp_clks_in_khz[6] = {
			300000, 400000, 496560, 626090, 685720, 757900 };
	uint32_t sclks_in_khz[6] = {
			300000, 360000, 423530, 514290, 626090, 720000 };
	uint32_t mclks_in_khz[2] = { 333000, 800000 };

	switch (clk_type) {
	case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
		clks->num_levels = 6;
		memmove(clks->clocks_in_khz, disp_clks_in_khz,
				sizeof(disp_clks_in_khz));
		break;
	case DM_PP_CLOCK_TYPE_ENGINE_CLK:
		clks->num_levels = 6;
		memmove(clks->clocks_in_khz, sclks_in_khz,
				sizeof(sclks_in_khz));
		break;
	case DM_PP_CLOCK_TYPE_MEMORY_CLK:
		clks->num_levels = 2;
		memmove(clks->clocks_in_khz, mclks_in_khz,
				sizeof(mclks_in_khz));
		break;
	default:
		clks->num_levels = 0;
		break;
	}
}
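/* Map a DM clock type onto the SWSMU clock enumeration. */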
static enum smu_clk_type dc_to_smu_clock_type(
		enum dm_pp_clock_type dm_pp_clk_type)
{
	enum smu_clk_type smu_clk_type = SMU_CLK_COUNT;

	switch (dm_pp_clk_type) {
	case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
		smu_clk_type = SMU_DISPCLK;
		break;
	case DM_PP_CLOCK_TYPE_ENGINE_CLK:
		smu_clk_type = SMU_GFXCLK;
		break;
	case DM_PP_CLOCK_TYPE_MEMORY_CLK:
		smu_clk_type = SMU_MCLK;
		break;
	case DM_PP_CLOCK_TYPE_DCEFCLK:
		smu_clk_type = SMU_DCEFCLK;
		break;
	case DM_PP_CLOCK_TYPE_SOCCLK:
		smu_clk_type = SMU_SOCCLK;
		break;
	default:
		DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
			  dm_pp_clk_type);
		break;
	}

	return smu_clk_type;
}
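/* Map a DM clock type onto the legacy powerplay clock enumeration. */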
static enum amd_pp_clock_type dc_to_pp_clock_type(
		enum dm_pp_clock_type dm_pp_clk_type)
{
	enum amd_pp_clock_type amd_pp_clk_type = 0;

	switch (dm_pp_clk_type) {
	case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
		amd_pp_clk_type = amd_pp_disp_clock;
		break;
	case DM_PP_CLOCK_TYPE_ENGINE_CLK:
		amd_pp_clk_type = amd_pp_sys_clock;
		break;
	case DM_PP_CLOCK_TYPE_MEMORY_CLK:
		amd_pp_clk_type = amd_pp_mem_clock;
		break;
	case DM_PP_CLOCK_TYPE_DCEFCLK:
		amd_pp_clk_type = amd_pp_dcef_clock;
		break;
	case DM_PP_CLOCK_TYPE_DCFCLK:
		amd_pp_clk_type = amd_pp_dcf_clock;
		break;
	case DM_PP_CLOCK_TYPE_PIXELCLK:
		amd_pp_clk_type = amd_pp_pixel_clock;
		break;
	case DM_PP_CLOCK_TYPE_FCLK:
		amd_pp_clk_type = amd_pp_f_clock;
		break;
	case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
		amd_pp_clk_type = amd_pp_phy_clock;
		break;
	case DM_PP_CLOCK_TYPE_DPPCLK:
		amd_pp_clk_type = amd_pp_dpp_clock;
		break;
	default:
		DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
				dm_pp_clk_type);
		break;
	}

	return amd_pp_clk_type;
}
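/* Translate a pplib DAL power level into the DM clocks-state enum. */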
static enum dm_pp_clocks_state pp_to_dc_powerlevel_state(
			enum PP_DAL_POWERLEVEL max_clocks_state)
{
	switch (max_clocks_state) {
	case PP_DAL_POWERLEVEL_0:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_0;
	case PP_DAL_POWERLEVEL_1:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_1;
	case PP_DAL_POWERLEVEL_2:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_2;
	case PP_DAL_POWERLEVEL_3:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_3;
	case PP_DAL_POWERLEVEL_4:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_4;
	case PP_DAL_POWERLEVEL_5:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_5;
	case PP_DAL_POWERLEVEL_6:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_6;
	case PP_DAL_POWERLEVEL_7:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_7;
	default:
		DRM_ERROR("DM_PPLIB: invalid powerlevel state: %d!\n",
				max_clocks_state);
		return DM_PP_CLOCKS_STATE_INVALID;
	}
}
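/*
 * Copy pplib clock levels into DC's representation, clamping the count
 * to DM_PP_MAX_CLOCK_LEVELS.  The two variants that follow do the same
 * for levels carrying latency and voltage data respectively.
 */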
static void pp_to_dc_clock_levels(
		const struct amd_pp_clocks *pp_clks,
		struct dm_pp_clock_levels *dc_clks,
		enum dm_pp_clock_type dc_clk_type)
{
	uint32_t i;

	if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) {
		DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
				DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
				pp_clks->count,
				DM_PP_MAX_CLOCK_LEVELS);

		dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS;
	} else
		dc_clks->num_levels = pp_clks->count;

	DRM_INFO("DM_PPLIB: values for %s clock\n",
			DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));

	for (i = 0; i < dc_clks->num_levels; i++) {
		DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]);
		dc_clks->clocks_in_khz[i] = pp_clks->clock[i];
	}
}

static void pp_to_dc_clock_levels_with_latency(
		const struct pp_clock_levels_with_latency *pp_clks,
		struct dm_pp_clock_levels_with_latency *clk_level_info,
		enum dm_pp_clock_type dc_clk_type)
{
	uint32_t i;

	if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
		DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
				DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
				pp_clks->num_levels,
				DM_PP_MAX_CLOCK_LEVELS);

		clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
	} else
		clk_level_info->num_levels = pp_clks->num_levels;

	DRM_DEBUG("DM_PPLIB: values for %s clock\n",
			DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));

	for (i = 0; i < clk_level_info->num_levels; i++) {
		DRM_DEBUG("DM_PPLIB:\t %d in kHz\n", pp_clks->data[i].clocks_in_khz);
		clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
		clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us;
	}
}

static void pp_to_dc_clock_levels_with_voltage(
		const struct pp_clock_levels_with_voltage *pp_clks,
		struct dm_pp_clock_levels_with_voltage *clk_level_info,
		enum dm_pp_clock_type dc_clk_type)
{
	uint32_t i;

	if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
		DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
				DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
				pp_clks->num_levels,
				DM_PP_MAX_CLOCK_LEVELS);

		clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
	} else
		clk_level_info->num_levels = pp_clks->num_levels;

	DRM_INFO("DM_PPLIB: values for %s clock\n",
			DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));

	for (i = 0; i < clk_level_info->num_levels; i++) {
		DRM_INFO("DM_PPLIB:\t %d in kHz, %d in mV\n", pp_clks->data[i].clocks_in_khz,
			 pp_clks->data[i].voltage_in_mv);
		clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
		clk_level_info->data[i].voltage_in_mv = pp_clks->data[i].voltage_in_mv;
	}
}
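/*
 * Fetch the clock levels for a clock type from powerplay or SWSMU,
 * falling back to the default tables on error, then drop boosted
 * engine/memory levels that exceed the display-mode validation clocks.
 */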
bool dm_pp_get_clock_levels_by_type(
		const struct dc_context *ctx,
		enum dm_pp_clock_type clk_type,
		struct dm_pp_clock_levels *dc_clks)
{
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	struct amd_pp_clocks pp_clks = { 0 };
	struct amd_pp_simple_clock_info validation_clks = { 0 };
	uint32_t i;

	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_clock_by_type) {
		if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle,
			dc_to_pp_clock_type(clk_type), &pp_clks)) {
			/* Error in pplib. Provide default values. */
			get_default_clock_levels(clk_type, dc_clks);
			return true;
		}
	} else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type) {
		if (smu_get_clock_by_type(&adev->smu,
					  dc_to_pp_clock_type(clk_type),
					  &pp_clks)) {
			get_default_clock_levels(clk_type, dc_clks);
			return true;
		}
	}

	pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);

	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_display_mode_validation_clocks) {
		if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks(
						pp_handle, &validation_clks)) {
			/* Error in pplib. Provide default values. */
			DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
			validation_clks.engine_max_clock = 72000;
			validation_clks.memory_max_clock = 80000;
			validation_clks.level = 0;
		}
	} else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_max_high_clocks) {
		if (smu_get_max_high_clocks(&adev->smu, &validation_clks)) {
			DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
			validation_clks.engine_max_clock = 72000;
			validation_clks.memory_max_clock = 80000;
			validation_clks.level = 0;
		}
	}

	DRM_INFO("DM_PPLIB: Validation clocks:\n");
	DRM_INFO("DM_PPLIB:    engine_max_clock: %d\n",
			validation_clks.engine_max_clock);
	DRM_INFO("DM_PPLIB:    memory_max_clock: %d\n",
			validation_clks.memory_max_clock);
	DRM_INFO("DM_PPLIB:    level           : %d\n",
			validation_clks.level);

	/* Translate 10 kHz to kHz. */
	validation_clks.engine_max_clock *= 10;
	validation_clks.memory_max_clock *= 10;

	/* Determine the highest non-boosted level from the validation clocks. */
	if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) {
		for (i = 0; i < dc_clks->num_levels; i++) {
			if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
				/* This clock is higher than the validation
				 * clock, so the previous level is the highest
				 * non-boosted one. */
				DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
						dc_clks->num_levels, i);
				dc_clks->num_levels = i > 0 ? i : 1;
				break;
			}
		}
	} else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) {
		for (i = 0; i < dc_clks->num_levels; i++) {
			if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) {
				DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n",
						dc_clks->num_levels, i);
				dc_clks->num_levels = i > 0 ? i : 1;
				break;
			}
		}
	}

	return true;
}

bool dm_pp_get_clock_levels_by_type_with_latency(
	const struct dc_context *ctx,
	enum dm_pp_clock_type clk_type,
	struct dm_pp_clock_levels_with_latency *clk_level_info)
{
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	struct pp_clock_levels_with_latency pp_clks = { 0 };
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret;

	if (pp_funcs && pp_funcs->get_clock_by_type_with_latency) {
		ret = pp_funcs->get_clock_by_type_with_latency(pp_handle,
						dc_to_pp_clock_type(clk_type),
						&pp_clks);
		if (ret)
			return false;
	} else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type_with_latency) {
		if (smu_get_clock_by_type_with_latency(&adev->smu,
						       dc_to_smu_clock_type(clk_type),
						       &pp_clks))
			return false;
	}

	pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type);

	return true;
}

bool dm_pp_get_clock_levels_by_type_with_voltage(
	const struct dc_context *ctx,
	enum dm_pp_clock_type clk_type,
	struct dm_pp_clock_levels_with_voltage *clk_level_info)
{
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	struct pp_clock_levels_with_voltage pp_clk_info = {0};
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret;

	if (pp_funcs && pp_funcs->get_clock_by_type_with_voltage) {
		ret = pp_funcs->get_clock_by_type_with_voltage(pp_handle,
						dc_to_pp_clock_type(clk_type),
						&pp_clk_info);
		if (ret)
			return false;
	} else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type_with_voltage) {
		if (smu_get_clock_by_type_with_voltage(&adev->smu,
						       dc_to_pp_clock_type(clk_type),
						       &pp_clk_info))
			return false;
	}

	pp_to_dc_clock_levels_with_voltage(&pp_clk_info, clk_level_info, clk_type);

	return true;
}

bool dm_pp_notify_wm_clock_changes(
	const struct dc_context *ctx,
	struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
{
	/* TODO: to be implemented */
	return false;
}

bool dm_pp_apply_power_level_change_request(
	const struct dc_context *ctx,
	struct dm_pp_power_level_change_request *level_change_req)
{
	/* TODO: to be implemented */
	return false;
}

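/*
 * Ask the power backend to raise the voltage needed to sustain the
 * requested clock.  A clock type of zero from the translation above
 * means the request cannot be expressed, so it is rejected.
 */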
bool dm_pp_apply_clock_for_voltage_request(
	const struct dc_context *ctx,
	struct dm_pp_clock_for_voltage_req *clock_for_voltage_req)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct pp_display_clock_request pp_clock_request = {0};
	int ret = 0;

	pp_clock_request.clock_type = dc_to_pp_clock_type(clock_for_voltage_req->clk_type);
	pp_clock_request.clock_freq_in_khz = clock_for_voltage_req->clocks_in_khz;

	if (!pp_clock_request.clock_type)
		return false;

	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->display_clock_voltage_request)
		ret = adev->powerplay.pp_funcs->display_clock_voltage_request(
			adev->powerplay.pp_handle,
			&pp_clock_request);
	else if (adev->smu.ppt_funcs &&
		 adev->smu.ppt_funcs->display_clock_voltage_request)
		ret = smu_display_clock_voltage_request(&adev->smu,
							&pp_clock_request);
	if (ret)
		return false;
	return true;
}
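/*
 * Read the current clock limits.  pplib reports them in 10 kHz units,
 * so the maxima are multiplied by 10 to obtain kHz.
 */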
bool dm_pp_get_static_clocks(
	const struct dc_context *ctx,
	struct dm_pp_static_clock_info *static_clk_info)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct amd_pp_clock_info pp_clk_info = {0};
	int ret = 0;

	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_current_clocks)
		ret = adev->powerplay.pp_funcs->get_current_clocks(
			adev->powerplay.pp_handle,
			&pp_clk_info);
	else if (adev->smu.ppt_funcs)
		ret = smu_get_current_clocks(&adev->smu, &pp_clk_info);
	if (ret)
		return false;

	static_clk_info->max_clocks_state = pp_to_dc_powerlevel_state(pp_clk_info.max_clocks_state);
	static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock * 10;
	static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock * 10;

	return true;
}
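/*
 * Raven (DCN 1.x) watermark programming: translate DC's reader/writer
 * watermark sets into the SoC15 clock-range layout.  Range limits come
 * in as MHz and are converted to kHz; a wm_inst above 3 is clamped to
 * set A.
 */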
void pp_rv_set_wm_ranges(struct pp_smu *pp,
		struct pp_smu_wm_range_sets *ranges)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
	struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks = wm_with_clock_ranges.wm_dmif_clocks_ranges;
	struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks = wm_with_clock_ranges.wm_mcif_clocks_ranges;
	int32_t i;

	wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
	wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;

	for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
		if (ranges->reader_wm_sets[i].wm_inst > 3)
			wm_dce_clocks[i].wm_set_id = WM_SET_A;
		else
			wm_dce_clocks[i].wm_set_id =
					ranges->reader_wm_sets[i].wm_inst;
		wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
				ranges->reader_wm_sets[i].max_drain_clk_mhz * 1000;
		wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
				ranges->reader_wm_sets[i].min_drain_clk_mhz * 1000;
		wm_dce_clocks[i].wm_max_mem_clk_in_khz =
				ranges->reader_wm_sets[i].max_fill_clk_mhz * 1000;
		wm_dce_clocks[i].wm_min_mem_clk_in_khz =
				ranges->reader_wm_sets[i].min_fill_clk_mhz * 1000;
	}

	for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
		if (ranges->writer_wm_sets[i].wm_inst > 3)
			wm_soc_clocks[i].wm_set_id = WM_SET_A;
		else
			wm_soc_clocks[i].wm_set_id =
					ranges->writer_wm_sets[i].wm_inst;
		wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
				ranges->writer_wm_sets[i].max_fill_clk_mhz * 1000;
		wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
				ranges->writer_wm_sets[i].min_fill_clk_mhz * 1000;
		wm_soc_clocks[i].wm_max_mem_clk_in_khz =
				ranges->writer_wm_sets[i].max_drain_clk_mhz * 1000;
		wm_soc_clocks[i].wm_min_mem_clk_in_khz =
				ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000;
	}

	if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges)
		pp_funcs->set_watermarks_for_clocks_ranges(pp_handle,
							   &wm_with_clock_ranges);
	else
		smu_set_watermarks_for_clock_ranges(&adev->smu,
				&wm_with_clock_ranges);
}

void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (pp_funcs && pp_funcs->notify_smu_enable_pwe)
		pp_funcs->notify_smu_enable_pwe(pp_handle);
	else if (adev->smu.ppt_funcs)
		smu_notify_smu_enable_pwe(&adev->smu);
}

void pp_rv_set_active_display_count(struct pp_smu *pp, int count)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs || !pp_funcs->set_active_display_count)
		return;

	pp_funcs->set_active_display_count(pp_handle, count);
}

void pp_rv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int clock)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs || !pp_funcs->set_min_deep_sleep_dcefclk)
		return;

	pp_funcs->set_min_deep_sleep_dcefclk(pp_handle, clock);
}

void pp_rv_set_hard_min_dcefclk_by_freq(struct pp_smu *pp, int clock)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs || !pp_funcs->set_hard_min_dcefclk_by_freq)
		return;

	pp_funcs->set_hard_min_dcefclk_by_freq(pp_handle, clock);
}

void pp_rv_set_hard_min_fclk_by_freq(struct pp_smu *pp, int mhz)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs || !pp_funcs->set_hard_min_fclk_by_freq)
		return;

	pp_funcs->set_hard_min_fclk_by_freq(pp_handle, mhz);
}

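/*
 * Navi (DCN 2.0) watermark programming: the same translation as the
 * Raven path, but always submitted through the SWSMU interface.
 */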
enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp,
		struct pp_smu_wm_range_sets *ranges)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
	struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks =
			wm_with_clock_ranges.wm_dmif_clocks_ranges;
	struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks =
			wm_with_clock_ranges.wm_mcif_clocks_ranges;
	int32_t i;

	wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
	wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;

	for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
		if (ranges->reader_wm_sets[i].wm_inst > 3)
			wm_dce_clocks[i].wm_set_id = WM_SET_A;
		else
			wm_dce_clocks[i].wm_set_id =
					ranges->reader_wm_sets[i].wm_inst;
		wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
			ranges->reader_wm_sets[i].max_drain_clk_mhz * 1000;
		wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
			ranges->reader_wm_sets[i].min_drain_clk_mhz * 1000;
		wm_dce_clocks[i].wm_max_mem_clk_in_khz =
			ranges->reader_wm_sets[i].max_fill_clk_mhz * 1000;
		wm_dce_clocks[i].wm_min_mem_clk_in_khz =
			ranges->reader_wm_sets[i].min_fill_clk_mhz * 1000;
	}

	for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
		if (ranges->writer_wm_sets[i].wm_inst > 3)
			wm_soc_clocks[i].wm_set_id = WM_SET_A;
		else
			wm_soc_clocks[i].wm_set_id =
					ranges->writer_wm_sets[i].wm_inst;
		wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
			ranges->writer_wm_sets[i].max_fill_clk_mhz * 1000;
		wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
			ranges->writer_wm_sets[i].min_fill_clk_mhz * 1000;
		wm_soc_clocks[i].wm_max_mem_clk_in_khz =
			ranges->writer_wm_sets[i].max_drain_clk_mhz * 1000;
		wm_soc_clocks[i].wm_min_mem_clk_in_khz =
			ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000;
	}

	smu_set_watermarks_for_clock_ranges(&adev->smu, &wm_with_clock_ranges);

	return PP_SMU_RESULT_OK;
}

enum pp_smu_status pp_nv_set_pme_wa_enable(struct pp_smu *pp)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	struct smu_context *smu = &adev->smu;

	if (!smu->ppt_funcs)
		return PP_SMU_RESULT_UNSUPPORTED;

	/* 0: success, or smu.ppt_funcs->set_azalia_d3_pme is NULL; 1: failure */
	if (smu_set_azalia_d3_pme(smu))
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

enum pp_smu_status pp_nv_set_display_count(struct pp_smu *pp, int count)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	struct smu_context *smu = &adev->smu;

	if (!smu->ppt_funcs)
		return PP_SMU_RESULT_UNSUPPORTED;

	/* 0: success, or smu.ppt_funcs->set_display_count is NULL; 1: failure */
	if (smu_set_display_count(smu, count))
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

enum pp_smu_status pp_nv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int mhz)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	struct smu_context *smu = &adev->smu;

	if (!smu->ppt_funcs)
		return PP_SMU_RESULT_UNSUPPORTED;

	/* 0: success, or smu.ppt_funcs->set_deep_sleep_dcefclk is NULL; 1: failure */
	if (smu_set_deep_sleep_dcefclk(smu, mhz))
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq(
		struct pp_smu *pp, int mhz)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	struct smu_context *smu = &adev->smu;
	struct pp_display_clock_request clock_req;

	if (!smu->ppt_funcs)
		return PP_SMU_RESULT_UNSUPPORTED;

	clock_req.clock_type = amd_pp_dcef_clock;
	clock_req.clock_freq_in_khz = mhz * 1000;

	/* 0: success, or smu.ppt_funcs->display_clock_voltage_request is NULL;
	 * 1: failure
	 */
	if (smu_display_clock_voltage_request(smu, &clock_req))
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

enum pp_smu_status pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	struct smu_context *smu = &adev->smu;
	struct pp_display_clock_request clock_req;

	if (!smu->ppt_funcs)
		return PP_SMU_RESULT_UNSUPPORTED;

	clock_req.clock_type = amd_pp_mem_clock;
	clock_req.clock_freq_in_khz = mhz * 1000;

	/* 0: success, or smu.ppt_funcs->display_clock_voltage_request is NULL;
	 * 1: failure
	 */
	if (smu_display_clock_voltage_request(smu, &clock_req))
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

enum pp_smu_status pp_nv_set_pstate_handshake_support(
	struct pp_smu *pp, bool pstate_handshake_supported)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	struct smu_context *smu = &adev->smu;

	if (smu_display_disable_memory_clock_switch(smu, !pstate_handshake_supported))
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp,
		enum pp_smu_nv_clock_id clock_id, int mhz)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	struct smu_context *smu = &adev->smu;
	struct pp_display_clock_request clock_req;

	if (!smu->ppt_funcs)
		return PP_SMU_RESULT_UNSUPPORTED;

	switch (clock_id) {
	case PP_SMU_NV_DISPCLK:
		clock_req.clock_type = amd_pp_disp_clock;
		break;
	case PP_SMU_NV_PHYCLK:
		clock_req.clock_type = amd_pp_phy_clock;
		break;
	case PP_SMU_NV_PIXELCLK:
		clock_req.clock_type = amd_pp_pixel_clock;
		break;
	default:
		break;
	}
	clock_req.clock_freq_in_khz = mhz * 1000;

	/* 0: success, or smu.ppt_funcs->display_clock_voltage_request is NULL;
	 * 1: failure
	 */
	if (smu_display_clock_voltage_request(smu, &clock_req))
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

enum pp_smu_status pp_nv_get_maximum_sustainable_clocks(
		struct pp_smu *pp, struct pp_smu_nv_clock_table *max_clocks)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	struct smu_context *smu = &adev->smu;

	if (!smu->ppt_funcs)
		return PP_SMU_RESULT_UNSUPPORTED;

	if (!smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
		return PP_SMU_RESULT_UNSUPPORTED;

	if (!smu_get_max_sustainable_clocks_by_dc(smu, max_clocks))
		return PP_SMU_RESULT_OK;

	return PP_SMU_RESULT_FAIL;
}

enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp,
		unsigned int *clock_values_in_khz, unsigned int *num_states)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	struct smu_context *smu = &adev->smu;

	if (!smu->ppt_funcs)
		return PP_SMU_RESULT_UNSUPPORTED;

	if (!smu->ppt_funcs->get_uclk_dpm_states)
		return PP_SMU_RESULT_UNSUPPORTED;

	if (!smu_get_uclk_dpm_states(smu,
			clock_values_in_khz, num_states))
		return PP_SMU_RESULT_OK;

	return PP_SMU_RESULT_FAIL;
}

enum pp_smu_status pp_rn_get_dpm_clock_table(
		struct pp_smu *pp, struct dpm_clocks *clock_table)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	struct smu_context *smu = &adev->smu;

	if (!smu->ppt_funcs)
		return PP_SMU_RESULT_UNSUPPORTED;

	if (!smu->ppt_funcs->get_dpm_clock_table)
		return PP_SMU_RESULT_UNSUPPORTED;

	if (!smu_get_dpm_clock_table(smu, clock_table))
		return PP_SMU_RESULT_OK;

	return PP_SMU_RESULT_FAIL;
}

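/*
 * Renoir (DCN 2.1) watermark programming.  Unlike the RV/NV paths, the
 * range limits are copied straight from the MHz fields into the
 * *_in_khz members without a unit conversion.
 */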
enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp,
		struct pp_smu_wm_range_sets *ranges)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	struct smu_context *smu = &adev->smu;
	struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
	struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks =
			wm_with_clock_ranges.wm_dmif_clocks_ranges;
	struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks =
			wm_with_clock_ranges.wm_mcif_clocks_ranges;
	int32_t i;

	if (!smu->ppt_funcs)
		return PP_SMU_RESULT_UNSUPPORTED;

	wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
	wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;

	for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
		if (ranges->reader_wm_sets[i].wm_inst > 3)
			wm_dce_clocks[i].wm_set_id = WM_SET_A;
		else
			wm_dce_clocks[i].wm_set_id =
					ranges->reader_wm_sets[i].wm_inst;

		wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
			ranges->reader_wm_sets[i].min_drain_clk_mhz;

		wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
			ranges->reader_wm_sets[i].max_drain_clk_mhz;

		wm_dce_clocks[i].wm_min_mem_clk_in_khz =
			ranges->reader_wm_sets[i].min_fill_clk_mhz;

		wm_dce_clocks[i].wm_max_mem_clk_in_khz =
			ranges->reader_wm_sets[i].max_fill_clk_mhz;
	}

	for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
		if (ranges->writer_wm_sets[i].wm_inst > 3)
			wm_soc_clocks[i].wm_set_id = WM_SET_A;
		else
			wm_soc_clocks[i].wm_set_id =
					ranges->writer_wm_sets[i].wm_inst;
		wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
				ranges->writer_wm_sets[i].min_fill_clk_mhz;

		wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
			ranges->writer_wm_sets[i].max_fill_clk_mhz;

		wm_soc_clocks[i].wm_min_mem_clk_in_khz =
			ranges->writer_wm_sets[i].min_drain_clk_mhz;

		wm_soc_clocks[i].wm_max_mem_clk_in_khz =
			ranges->writer_wm_sets[i].max_drain_clk_mhz;
	}

	smu_set_watermarks_for_clock_ranges(&adev->smu, &wm_with_clock_ranges);

	return PP_SMU_RESULT_OK;
}
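/*
 * Populate the per-ASIC pp_smu function table handed to DC, keyed on
 * the DCN version recorded in the context.
 */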
void dm_pp_get_funcs(
		struct dc_context *ctx,
		struct pp_smu_funcs *funcs)
{
	switch (ctx->dce_version) {
	case DCN_VERSION_1_0:
	case DCN_VERSION_1_01:
		funcs->ctx.ver = PP_SMU_VER_RV;
		funcs->rv_funcs.pp_smu.dm = ctx;
		funcs->rv_funcs.set_wm_ranges = pp_rv_set_wm_ranges;
		funcs->rv_funcs.set_pme_wa_enable = pp_rv_set_pme_wa_enable;
		funcs->rv_funcs.set_display_count =
				pp_rv_set_active_display_count;
		funcs->rv_funcs.set_min_deep_sleep_dcfclk =
				pp_rv_set_min_deep_sleep_dcfclk;
		funcs->rv_funcs.set_hard_min_dcfclk_by_freq =
				pp_rv_set_hard_min_dcefclk_by_freq;
		funcs->rv_funcs.set_hard_min_fclk_by_freq =
				pp_rv_set_hard_min_fclk_by_freq;
		break;
	case DCN_VERSION_2_0:
		funcs->ctx.ver = PP_SMU_VER_NV;
		funcs->nv_funcs.pp_smu.dm = ctx;
		funcs->nv_funcs.set_display_count = pp_nv_set_display_count;
		funcs->nv_funcs.set_hard_min_dcfclk_by_freq =
				pp_nv_set_hard_min_dcefclk_by_freq;
		funcs->nv_funcs.set_min_deep_sleep_dcfclk =
				pp_nv_set_min_deep_sleep_dcfclk;
		funcs->nv_funcs.set_voltage_by_freq =
				pp_nv_set_voltage_by_freq;
		funcs->nv_funcs.set_wm_ranges = pp_nv_set_wm_ranges;

		/* TODO: set_pme_wa_enable causes a 4k@60Hz display to not light up */
		funcs->nv_funcs.set_pme_wa_enable = NULL;
		/* TODO: debug warning message */
		funcs->nv_funcs.set_hard_min_uclk_by_freq = pp_nv_set_hard_min_uclk_by_freq;
		/* TODO: compare data with the Windows driver */
		funcs->nv_funcs.get_maximum_sustainable_clocks = pp_nv_get_maximum_sustainable_clocks;
		/* TODO: compare data with the Windows driver */
		funcs->nv_funcs.get_uclk_dpm_states = pp_nv_get_uclk_dpm_states;
		funcs->nv_funcs.set_pstate_handshake_support = pp_nv_set_pstate_handshake_support;
		break;

	case DCN_VERSION_2_1:
		funcs->ctx.ver = PP_SMU_VER_RN;
		funcs->rn_funcs.pp_smu.dm = ctx;
		funcs->rn_funcs.set_wm_ranges = pp_rn_set_wm_ranges;
		funcs->rn_funcs.get_dpm_clock_table = pp_rn_get_dpm_clock_table;
		break;
	default:
		DRM_ERROR("smu version is not supported!\n");
		break;
	}
}
   1036