1 /* $NetBSD: amdgpu_renoir_ppt.c,v 1.4 2021/12/19 12:37:54 riastradh Exp $ */ 2 3 /* 4 * Copyright 2019 Advanced Micro Devices, Inc. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 * OTHER DEALINGS IN THE SOFTWARE. 23 * 24 */ 25 26 #include <sys/cdefs.h> 27 __KERNEL_RCSID(0, "$NetBSD: amdgpu_renoir_ppt.c,v 1.4 2021/12/19 12:37:54 riastradh Exp $"); 28 29 #include "amdgpu.h" 30 #include "amdgpu_smu.h" 31 #include "smu_internal.h" 32 #include "soc15_common.h" 33 #include "smu_v12_0_ppsmc.h" 34 #include "smu12_driver_if.h" 35 #include "smu_v12_0.h" 36 #include "renoir_ppt.h" 37 38 #include <linux/nbsd-namespace.h> 39 40 41 #define CLK_MAP(clk, index) \ 42 [SMU_##clk] = {1, (index)} 43 44 #define MSG_MAP(msg, index) \ 45 [SMU_MSG_##msg] = {1, (index)} 46 47 #define TAB_MAP_VALID(tab) \ 48 [SMU_TABLE_##tab] = {1, TABLE_##tab} 49 50 #define TAB_MAP_INVALID(tab) \ 51 [SMU_TABLE_##tab] = {0, TABLE_##tab} 52 53 static struct smu_12_0_cmn2aisc_mapping renoir_message_map[SMU_MSG_MAX_COUNT] = { 54 MSG_MAP(TestMessage, PPSMC_MSG_TestMessage), 55 MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion), 56 MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion), 57 MSG_MAP(PowerUpGfx, PPSMC_MSG_PowerUpGfx), 58 MSG_MAP(AllowGfxOff, PPSMC_MSG_EnableGfxOff), 59 MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisableGfxOff), 60 MSG_MAP(PowerDownIspByTile, PPSMC_MSG_PowerDownIspByTile), 61 MSG_MAP(PowerUpIspByTile, PPSMC_MSG_PowerUpIspByTile), 62 MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn), 63 MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn), 64 MSG_MAP(PowerDownSdma, PPSMC_MSG_PowerDownSdma), 65 MSG_MAP(PowerUpSdma, PPSMC_MSG_PowerUpSdma), 66 MSG_MAP(SetHardMinIspclkByFreq, PPSMC_MSG_SetHardMinIspclkByFreq), 67 MSG_MAP(SetHardMinVcn, PPSMC_MSG_SetHardMinVcn), 68 MSG_MAP(Spare1, PPSMC_MSG_spare1), 69 MSG_MAP(Spare2, PPSMC_MSG_spare2), 70 MSG_MAP(SetAllowFclkSwitch, PPSMC_MSG_SetAllowFclkSwitch), 71 MSG_MAP(SetMinVideoGfxclkFreq, PPSMC_MSG_SetMinVideoGfxclkFreq), 72 MSG_MAP(ActiveProcessNotify, PPSMC_MSG_ActiveProcessNotify), 73 MSG_MAP(SetCustomPolicy, PPSMC_MSG_SetCustomPolicy), 74 MSG_MAP(SetVideoFps, PPSMC_MSG_SetVideoFps), 75 MSG_MAP(NumOfDisplays, PPSMC_MSG_SetDisplayCount), 76 MSG_MAP(QueryPowerLimit, PPSMC_MSG_QueryPowerLimit), 77 MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh), 78 MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow), 79 MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram), 80 MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu), 81 MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDeviceDriverReset), 82 MSG_MAP(SetGfxclkOverdriveByFreqVid, PPSMC_MSG_SetGfxclkOverdriveByFreqVid), 83 MSG_MAP(SetHardMinDcfclkByFreq, PPSMC_MSG_SetHardMinDcfclkByFreq), 84 MSG_MAP(SetHardMinSocclkByFreq, PPSMC_MSG_SetHardMinSocclkByFreq), 85 MSG_MAP(ControlIgpuATS, PPSMC_MSG_ControlIgpuATS), 86 MSG_MAP(SetMinVideoFclkFreq, PPSMC_MSG_SetMinVideoFclkFreq), 87 MSG_MAP(SetMinDeepSleepDcfclk, PPSMC_MSG_SetMinDeepSleepDcfclk), 88 MSG_MAP(ForcePowerDownGfx, PPSMC_MSG_ForcePowerDownGfx), 89 MSG_MAP(SetPhyclkVoltageByFreq, PPSMC_MSG_SetPhyclkVoltageByFreq), 90 MSG_MAP(SetDppclkVoltageByFreq, PPSMC_MSG_SetDppclkVoltageByFreq), 91 MSG_MAP(SetSoftMinVcn, PPSMC_MSG_SetSoftMinVcn), 92 MSG_MAP(EnablePostCode, PPSMC_MSG_EnablePostCode), 93 MSG_MAP(GetGfxclkFrequency, PPSMC_MSG_GetGfxclkFrequency), 94 MSG_MAP(GetFclkFrequency, PPSMC_MSG_GetFclkFrequency), 95 MSG_MAP(GetMinGfxclkFrequency, PPSMC_MSG_GetMinGfxclkFrequency), 96 MSG_MAP(GetMaxGfxclkFrequency, PPSMC_MSG_GetMaxGfxclkFrequency), 97 MSG_MAP(SoftReset, PPSMC_MSG_SoftReset), 98 MSG_MAP(SetGfxCGPG, PPSMC_MSG_SetGfxCGPG), 99 MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk), 100 MSG_MAP(SetHardMinGfxClk, PPSMC_MSG_SetHardMinGfxClk), 101 MSG_MAP(SetSoftMaxSocclkByFreq, PPSMC_MSG_SetSoftMaxSocclkByFreq), 102 MSG_MAP(SetSoftMaxFclkByFreq, PPSMC_MSG_SetSoftMaxFclkByFreq), 103 MSG_MAP(SetSoftMaxVcn, PPSMC_MSG_SetSoftMaxVcn), 104 MSG_MAP(PowerGateMmHub, PPSMC_MSG_PowerGateMmHub), 105 MSG_MAP(UpdatePmeRestore, PPSMC_MSG_UpdatePmeRestore), 106 MSG_MAP(GpuChangeState, PPSMC_MSG_GpuChangeState), 107 MSG_MAP(SetPowerLimitPercentage, PPSMC_MSG_SetPowerLimitPercentage), 108 MSG_MAP(ForceGfxContentSave, PPSMC_MSG_ForceGfxContentSave), 109 MSG_MAP(EnableTmdp48MHzRefclkPwrDown, PPSMC_MSG_EnableTmdp48MHzRefclkPwrDown), 110 MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg), 111 MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg), 112 MSG_MAP(PowerGateAtHub, PPSMC_MSG_PowerGateAtHub), 113 MSG_MAP(SetSoftMinJpeg, PPSMC_MSG_SetSoftMinJpeg), 114 MSG_MAP(SetHardMinFclkByFreq, PPSMC_MSG_SetHardMinFclkByFreq), 115 }; 116 117 static struct smu_12_0_cmn2aisc_mapping renoir_clk_map[SMU_CLK_COUNT] = { 118 CLK_MAP(GFXCLK, CLOCK_GFXCLK), 119 CLK_MAP(SCLK, CLOCK_GFXCLK), 120 CLK_MAP(SOCCLK, CLOCK_SOCCLK), 121 CLK_MAP(UCLK, CLOCK_UMCCLK), 122 CLK_MAP(MCLK, CLOCK_UMCCLK), 123 }; 124 125 static struct smu_12_0_cmn2aisc_mapping renoir_table_map[SMU_TABLE_COUNT] = { 126 TAB_MAP_VALID(WATERMARKS), 127 TAB_MAP_INVALID(CUSTOM_DPM), 128 TAB_MAP_VALID(DPMCLOCKS), 129 TAB_MAP_VALID(SMU_METRICS), 130 }; 131 132 static int renoir_get_smu_msg_index(struct smu_context *smc, uint32_t index) 133 { 134 struct smu_12_0_cmn2aisc_mapping mapping; 135 136 if (index >= SMU_MSG_MAX_COUNT) 137 return -EINVAL; 138 139 mapping = renoir_message_map[index]; 140 if (!(mapping.valid_mapping)) 141 return -EINVAL; 142 143 return mapping.map_to; 144 } 145 146 static int renoir_get_smu_clk_index(struct smu_context *smc, uint32_t index) 147 { 148 struct smu_12_0_cmn2aisc_mapping mapping; 149 150 if (index >= SMU_CLK_COUNT) 151 return -EINVAL; 152 153 mapping = renoir_clk_map[index]; 154 if (!(mapping.valid_mapping)) { 155 return -EINVAL; 156 } 157 158 return mapping.map_to; 159 } 160 161 static int renoir_get_smu_table_index(struct smu_context *smc, uint32_t index) 162 { 163 struct smu_12_0_cmn2aisc_mapping mapping; 164 165 if (index >= SMU_TABLE_COUNT) 166 return -EINVAL; 167 168 mapping = renoir_table_map[index]; 169 if (!(mapping.valid_mapping)) 170 return -EINVAL; 171 172 return mapping.map_to; 173 } 174 175 static int renoir_get_metrics_table(struct smu_context *smu, 176 SmuMetrics_t *metrics_table) 177 { 178 struct smu_table_context *smu_table= &smu->smu_table; 179 int ret = 0; 180 181 mutex_lock(&smu->metrics_lock); 182 if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(100))) { 183 ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, 184 (void *)smu_table->metrics_table, false); 185 if (ret) { 186 pr_info("Failed to export SMU metrics table!\n"); 187 mutex_unlock(&smu->metrics_lock); 188 return ret; 189 } 190 smu_table->metrics_time = jiffies; 191 } 192 193 memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t)); 194 mutex_unlock(&smu->metrics_lock); 195 196 return ret; 197 } 198 199 static int renoir_tables_init(struct smu_context *smu, struct smu_table *tables) 200 { 201 struct smu_table_context *smu_table = &smu->smu_table; 202 203 SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t), 204 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 205 SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t), 206 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 207 SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t), 208 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 209 210 smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL); 211 if (!smu_table->clocks_table) 212 return -ENOMEM; 213 214 smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); 215 if (!smu_table->metrics_table) 216 return -ENOMEM; 217 smu_table->metrics_time = 0; 218 219 smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL); 220 if (!smu_table->watermarks_table) 221 return -ENOMEM; 222 223 return 0; 224 } 225 226 /** 227 * This interface just for getting uclk ultimate freq and should't introduce 228 * other likewise function result in overmuch callback. 229 */ 230 static int renoir_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type clk_type, 231 uint32_t dpm_level, uint32_t *freq) 232 { 233 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 234 235 if (!clk_table || clk_type >= SMU_CLK_COUNT) 236 return -EINVAL; 237 238 GET_DPM_CUR_FREQ(clk_table, clk_type, dpm_level, *freq); 239 240 return 0; 241 } 242 243 static int renoir_print_clk_levels(struct smu_context *smu, 244 enum smu_clk_type clk_type, char *buf) 245 { 246 int i, size = 0, ret = 0; 247 uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0; 248 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 249 SmuMetrics_t metrics; 250 251 if (!clk_table || clk_type >= SMU_CLK_COUNT) 252 return -EINVAL; 253 254 memset(&metrics, 0, sizeof(metrics)); 255 256 ret = renoir_get_metrics_table(smu, &metrics); 257 if (ret) 258 return ret; 259 260 switch (clk_type) { 261 case SMU_GFXCLK: 262 case SMU_SCLK: 263 /* retirve table returned paramters unit is MHz */ 264 cur_value = metrics.ClockFrequency[CLOCK_GFXCLK]; 265 ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min, &max, false); 266 if (!ret) { 267 /* driver only know min/max gfx_clk, Add level 1 for all other gfx clks */ 268 if (cur_value == max) 269 i = 2; 270 else if (cur_value == min) 271 i = 0; 272 else 273 i = 1; 274 275 size += sprintf(buf + size, "0: %uMhz %s\n", min, 276 i == 0 ? "*" : ""); 277 size += sprintf(buf + size, "1: %uMhz %s\n", 278 i == 1 ? cur_value : RENOIR_UMD_PSTATE_GFXCLK, 279 i == 1 ? "*" : ""); 280 size += sprintf(buf + size, "2: %uMhz %s\n", max, 281 i == 2 ? "*" : ""); 282 } 283 return size; 284 case SMU_SOCCLK: 285 count = NUM_SOCCLK_DPM_LEVELS; 286 cur_value = metrics.ClockFrequency[CLOCK_SOCCLK]; 287 break; 288 case SMU_MCLK: 289 count = NUM_MEMCLK_DPM_LEVELS; 290 cur_value = metrics.ClockFrequency[CLOCK_UMCCLK]; 291 break; 292 case SMU_DCEFCLK: 293 count = NUM_DCFCLK_DPM_LEVELS; 294 cur_value = metrics.ClockFrequency[CLOCK_DCFCLK]; 295 break; 296 case SMU_FCLK: 297 count = NUM_FCLK_DPM_LEVELS; 298 cur_value = metrics.ClockFrequency[CLOCK_FCLK]; 299 break; 300 default: 301 return -EINVAL; 302 } 303 304 for (i = 0; i < count; i++) { 305 GET_DPM_CUR_FREQ(clk_table, clk_type, i, value); 306 size += sprintf(buf + size, "%d: %uMhz %s\n", i, value, 307 cur_value == value ? "*" : ""); 308 } 309 310 return size; 311 } 312 313 static enum amd_pm_state_type renoir_get_current_power_state(struct smu_context *smu) 314 { 315 enum amd_pm_state_type pm_type; 316 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 317 318 if (!smu_dpm_ctx->dpm_context || 319 !smu_dpm_ctx->dpm_current_power_state) 320 return -EINVAL; 321 322 switch (smu_dpm_ctx->dpm_current_power_state->classification.ui_label) { 323 case SMU_STATE_UI_LABEL_BATTERY: 324 pm_type = POWER_STATE_TYPE_BATTERY; 325 break; 326 case SMU_STATE_UI_LABEL_BALLANCED: 327 pm_type = POWER_STATE_TYPE_BALANCED; 328 break; 329 case SMU_STATE_UI_LABEL_PERFORMANCE: 330 pm_type = POWER_STATE_TYPE_PERFORMANCE; 331 break; 332 default: 333 if (smu_dpm_ctx->dpm_current_power_state->classification.flags & SMU_STATE_CLASSIFICATION_FLAG_BOOT) 334 pm_type = POWER_STATE_TYPE_INTERNAL_BOOT; 335 else 336 pm_type = POWER_STATE_TYPE_DEFAULT; 337 break; 338 } 339 340 return pm_type; 341 } 342 343 static int renoir_dpm_set_uvd_enable(struct smu_context *smu, bool enable) 344 { 345 struct smu_power_context *smu_power = &smu->smu_power; 346 struct smu_power_gate *power_gate = &smu_power->power_gate; 347 int ret = 0; 348 349 if (enable) { 350 /* vcn dpm on is a prerequisite for vcn power gate messages */ 351 if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) { 352 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0); 353 if (ret) 354 return ret; 355 } 356 power_gate->vcn_gated = false; 357 } else { 358 if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) { 359 ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn); 360 if (ret) 361 return ret; 362 } 363 power_gate->vcn_gated = true; 364 } 365 366 return ret; 367 } 368 369 static int renoir_dpm_set_jpeg_enable(struct smu_context *smu, bool enable) 370 { 371 struct smu_power_context *smu_power = &smu->smu_power; 372 struct smu_power_gate *power_gate = &smu_power->power_gate; 373 int ret = 0; 374 375 if (enable) { 376 if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) { 377 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0); 378 if (ret) 379 return ret; 380 } 381 power_gate->jpeg_gated = false; 382 } else { 383 if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) { 384 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0); 385 if (ret) 386 return ret; 387 } 388 power_gate->jpeg_gated = true; 389 } 390 391 return ret; 392 } 393 394 static int renoir_get_current_clk_freq_by_table(struct smu_context *smu, 395 enum smu_clk_type clk_type, 396 uint32_t *value) 397 { 398 int ret = 0, clk_id = 0; 399 SmuMetrics_t metrics; 400 401 ret = renoir_get_metrics_table(smu, &metrics); 402 if (ret) 403 return ret; 404 405 clk_id = smu_clk_get_index(smu, clk_type); 406 if (clk_id < 0) 407 return clk_id; 408 409 *value = metrics.ClockFrequency[clk_id]; 410 411 return ret; 412 } 413 414 static int renoir_force_dpm_limit_value(struct smu_context *smu, bool highest) 415 { 416 int ret = 0, i = 0; 417 uint32_t min_freq, max_freq, force_freq; 418 enum smu_clk_type clk_type; 419 420 enum smu_clk_type clks[] = { 421 SMU_GFXCLK, 422 SMU_MCLK, 423 SMU_SOCCLK, 424 }; 425 426 for (i = 0; i < ARRAY_SIZE(clks); i++) { 427 clk_type = clks[i]; 428 ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false); 429 if (ret) 430 return ret; 431 432 force_freq = highest ? max_freq : min_freq; 433 ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq); 434 if (ret) 435 return ret; 436 } 437 438 return ret; 439 } 440 441 static int renoir_unforce_dpm_levels(struct smu_context *smu) { 442 443 int ret = 0, i = 0; 444 uint32_t min_freq, max_freq; 445 enum smu_clk_type clk_type; 446 447 struct clk_feature_map { 448 enum smu_clk_type clk_type; 449 uint32_t feature; 450 } clk_feature_map[] = { 451 {SMU_GFXCLK, SMU_FEATURE_DPM_GFXCLK_BIT}, 452 {SMU_MCLK, SMU_FEATURE_DPM_UCLK_BIT}, 453 {SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT}, 454 }; 455 456 for (i = 0; i < ARRAY_SIZE(clk_feature_map); i++) { 457 if (!smu_feature_is_enabled(smu, clk_feature_map[i].feature)) 458 continue; 459 460 clk_type = clk_feature_map[i].clk_type; 461 462 ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false); 463 if (ret) 464 return ret; 465 466 ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq); 467 if (ret) 468 return ret; 469 } 470 471 return ret; 472 } 473 474 static int renoir_get_gpu_temperature(struct smu_context *smu, uint32_t *value) 475 { 476 int ret = 0; 477 SmuMetrics_t metrics; 478 479 if (!value) 480 return -EINVAL; 481 482 ret = renoir_get_metrics_table(smu, &metrics); 483 if (ret) 484 return ret; 485 486 *value = (metrics.GfxTemperature / 100) * 487 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 488 489 return 0; 490 } 491 492 static int renoir_get_current_activity_percent(struct smu_context *smu, 493 enum amd_pp_sensors sensor, 494 uint32_t *value) 495 { 496 int ret = 0; 497 SmuMetrics_t metrics; 498 499 if (!value) 500 return -EINVAL; 501 502 ret = renoir_get_metrics_table(smu, &metrics); 503 if (ret) 504 return ret; 505 506 switch (sensor) { 507 case AMDGPU_PP_SENSOR_GPU_LOAD: 508 *value = metrics.AverageGfxActivity / 100; 509 break; 510 default: 511 pr_err("Invalid sensor for retrieving clock activity\n"); 512 return -EINVAL; 513 } 514 515 return 0; 516 } 517 518 static int renoir_get_workload_type(struct smu_context *smu, uint32_t profile) 519 { 520 521 uint32_t pplib_workload = 0; 522 523 switch (profile) { 524 case PP_SMC_POWER_PROFILE_FULLSCREEN3D: 525 pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT; 526 break; 527 case PP_SMC_POWER_PROFILE_CUSTOM: 528 pplib_workload = WORKLOAD_PPLIB_COUNT; 529 break; 530 case PP_SMC_POWER_PROFILE_VIDEO: 531 pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT; 532 break; 533 case PP_SMC_POWER_PROFILE_VR: 534 pplib_workload = WORKLOAD_PPLIB_VR_BIT; 535 break; 536 case PP_SMC_POWER_PROFILE_COMPUTE: 537 pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT; 538 break; 539 default: 540 return -EINVAL; 541 } 542 543 return pplib_workload; 544 } 545 546 static int renoir_get_profiling_clk_mask(struct smu_context *smu, 547 enum amd_dpm_forced_level level, 548 uint32_t *sclk_mask, 549 uint32_t *mclk_mask, 550 uint32_t *soc_mask) 551 { 552 553 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) { 554 if (sclk_mask) 555 *sclk_mask = 0; 556 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) { 557 if (mclk_mask) 558 *mclk_mask = 0; 559 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { 560 if(sclk_mask) 561 /* The sclk as gfxclk and has three level about max/min/current */ 562 *sclk_mask = 3 - 1; 563 564 if(mclk_mask) 565 *mclk_mask = NUM_MEMCLK_DPM_LEVELS - 1; 566 567 if(soc_mask) 568 *soc_mask = NUM_SOCCLK_DPM_LEVELS - 1; 569 } 570 571 return 0; 572 } 573 574 /** 575 * This interface get dpm clock table for dc 576 */ 577 static int renoir_get_dpm_clock_table(struct smu_context *smu, struct dpm_clocks *clock_table) 578 { 579 DpmClocks_t *table = smu->smu_table.clocks_table; 580 int i; 581 582 if (!clock_table || !table) 583 return -EINVAL; 584 585 for (i = 0; i < NUM_DCFCLK_DPM_LEVELS; i++) { 586 clock_table->DcfClocks[i].Freq = table->DcfClocks[i].Freq; 587 clock_table->DcfClocks[i].Vol = table->DcfClocks[i].Vol; 588 } 589 590 for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) { 591 clock_table->SocClocks[i].Freq = table->SocClocks[i].Freq; 592 clock_table->SocClocks[i].Vol = table->SocClocks[i].Vol; 593 } 594 595 for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) { 596 clock_table->FClocks[i].Freq = table->FClocks[i].Freq; 597 clock_table->FClocks[i].Vol = table->FClocks[i].Vol; 598 } 599 600 for (i = 0; i< NUM_MEMCLK_DPM_LEVELS; i++) { 601 clock_table->MemClocks[i].Freq = table->MemClocks[i].Freq; 602 clock_table->MemClocks[i].Vol = table->MemClocks[i].Vol; 603 } 604 605 return 0; 606 } 607 608 static int renoir_force_clk_levels(struct smu_context *smu, 609 enum smu_clk_type clk_type, uint32_t mask) 610 { 611 612 int ret = 0 ; 613 uint32_t soft_min_level = 0, soft_max_level = 0, min_freq = 0, max_freq = 0; 614 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 615 616 soft_min_level = mask ? (ffs(mask) - 1) : 0; 617 soft_max_level = mask ? (fls(mask) - 1) : 0; 618 619 switch (clk_type) { 620 case SMU_GFXCLK: 621 case SMU_SCLK: 622 if (soft_min_level > 2 || soft_max_level > 2) { 623 pr_info("Currently sclk only support 3 levels on APU\n"); 624 return -EINVAL; 625 } 626 627 ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min_freq, &max_freq, false); 628 if (ret) 629 return ret; 630 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, 631 soft_max_level == 0 ? min_freq : 632 soft_max_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : max_freq); 633 if (ret) 634 return ret; 635 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, 636 soft_min_level == 2 ? max_freq : 637 soft_min_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : min_freq); 638 if (ret) 639 return ret; 640 break; 641 case SMU_SOCCLK: 642 GET_DPM_CUR_FREQ(clk_table, clk_type, soft_min_level, min_freq); 643 GET_DPM_CUR_FREQ(clk_table, clk_type, soft_max_level, max_freq); 644 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max_freq); 645 if (ret) 646 return ret; 647 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min_freq); 648 if (ret) 649 return ret; 650 break; 651 case SMU_MCLK: 652 case SMU_FCLK: 653 GET_DPM_CUR_FREQ(clk_table, clk_type, soft_min_level, min_freq); 654 GET_DPM_CUR_FREQ(clk_table, clk_type, soft_max_level, max_freq); 655 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max_freq); 656 if (ret) 657 return ret; 658 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min_freq); 659 if (ret) 660 return ret; 661 break; 662 default: 663 break; 664 } 665 666 return ret; 667 } 668 669 static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size) 670 { 671 int workload_type, ret; 672 uint32_t profile_mode = input[size]; 673 674 if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { 675 pr_err("Invalid power profile mode %d\n", smu->power_profile_mode); 676 return -EINVAL; 677 } 678 679 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ 680 workload_type = smu_workload_get_type(smu, smu->power_profile_mode); 681 if (workload_type < 0) { 682 pr_err("Unsupported power profile mode %d on RENOIR\n",smu->power_profile_mode); 683 return -EINVAL; 684 } 685 686 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, 687 1 << workload_type); 688 if (ret) { 689 pr_err("Fail to set workload type %d\n", workload_type); 690 return ret; 691 } 692 693 smu->power_profile_mode = profile_mode; 694 695 return 0; 696 } 697 698 static int renoir_set_peak_clock_by_device(struct smu_context *smu) 699 { 700 int ret = 0; 701 uint32_t sclk_freq = 0, uclk_freq = 0; 702 703 ret = smu_get_dpm_freq_range(smu, SMU_SCLK, NULL, &sclk_freq, false); 704 if (ret) 705 return ret; 706 707 ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq); 708 if (ret) 709 return ret; 710 711 ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &uclk_freq, false); 712 if (ret) 713 return ret; 714 715 ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq); 716 if (ret) 717 return ret; 718 719 return ret; 720 } 721 722 static int renoir_set_performance_level(struct smu_context *smu, 723 enum amd_dpm_forced_level level) 724 { 725 int ret = 0; 726 uint32_t sclk_mask, mclk_mask, soc_mask; 727 728 switch (level) { 729 case AMD_DPM_FORCED_LEVEL_HIGH: 730 ret = smu_force_dpm_limit_value(smu, true); 731 break; 732 case AMD_DPM_FORCED_LEVEL_LOW: 733 ret = smu_force_dpm_limit_value(smu, false); 734 break; 735 case AMD_DPM_FORCED_LEVEL_AUTO: 736 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: 737 ret = smu_unforce_dpm_levels(smu); 738 break; 739 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: 740 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: 741 ret = smu_get_profiling_clk_mask(smu, level, 742 &sclk_mask, 743 &mclk_mask, 744 &soc_mask); 745 if (ret) 746 return ret; 747 smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false); 748 smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false); 749 smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false); 750 break; 751 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: 752 ret = renoir_set_peak_clock_by_device(smu); 753 break; 754 case AMD_DPM_FORCED_LEVEL_MANUAL: 755 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: 756 default: 757 break; 758 } 759 return ret; 760 } 761 762 /* save watermark settings into pplib smu structure, 763 * also pass data to smu controller 764 */ 765 static int renoir_set_watermarks_table( 766 struct smu_context *smu, 767 void *watermarks, 768 struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges) 769 { 770 int i; 771 int ret = 0; 772 Watermarks_t *table = watermarks; 773 774 if (!table || !clock_ranges) 775 return -EINVAL; 776 777 if (clock_ranges->num_wm_dmif_sets > 4 || 778 clock_ranges->num_wm_mcif_sets > 4) 779 return -EINVAL; 780 781 /* save into smu->smu_table.tables[SMU_TABLE_WATERMARKS]->cpu_addr*/ 782 for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) { 783 table->WatermarkRow[WM_DCFCLK][i].MinClock = 784 cpu_to_le16((uint16_t) 785 (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz)); 786 table->WatermarkRow[WM_DCFCLK][i].MaxClock = 787 cpu_to_le16((uint16_t) 788 (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz)); 789 table->WatermarkRow[WM_DCFCLK][i].MinMclk = 790 cpu_to_le16((uint16_t) 791 (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz)); 792 table->WatermarkRow[WM_DCFCLK][i].MaxMclk = 793 cpu_to_le16((uint16_t) 794 (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz)); 795 table->WatermarkRow[WM_DCFCLK][i].WmSetting = (uint8_t) 796 clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id; 797 } 798 799 for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) { 800 table->WatermarkRow[WM_SOCCLK][i].MinClock = 801 cpu_to_le16((uint16_t) 802 (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz)); 803 table->WatermarkRow[WM_SOCCLK][i].MaxClock = 804 cpu_to_le16((uint16_t) 805 (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz)); 806 table->WatermarkRow[WM_SOCCLK][i].MinMclk = 807 cpu_to_le16((uint16_t) 808 (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz)); 809 table->WatermarkRow[WM_SOCCLK][i].MaxMclk = 810 cpu_to_le16((uint16_t) 811 (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz)); 812 table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t) 813 clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id; 814 } 815 816 /* pass data to smu controller */ 817 if ((smu->watermarks_bitmap & WATERMARKS_EXIST) && 818 !(smu->watermarks_bitmap & WATERMARKS_LOADED)) { 819 ret = smu_write_watermarks_table(smu); 820 if (ret) { 821 pr_err("Failed to update WMTABLE!"); 822 return ret; 823 } 824 smu->watermarks_bitmap |= WATERMARKS_LOADED; 825 } 826 827 return 0; 828 } 829 830 static int renoir_get_power_profile_mode(struct smu_context *smu, 831 char *buf) 832 { 833 static const char *profile_name[] = { 834 "BOOTUP_DEFAULT", 835 "3D_FULL_SCREEN", 836 "POWER_SAVING", 837 "VIDEO", 838 "VR", 839 "COMPUTE", 840 "CUSTOM"}; 841 uint32_t i, size = 0; 842 int16_t workload_type = 0; 843 844 if (!smu->pm_enabled || !buf) 845 return -EINVAL; 846 847 for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) { 848 /* 849 * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT 850 * Not all profile modes are supported on arcturus. 851 */ 852 workload_type = smu_workload_get_type(smu, i); 853 if (workload_type < 0) 854 continue; 855 856 size += sprintf(buf + size, "%2d %14s%s\n", 857 i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " "); 858 } 859 860 return size; 861 } 862 863 static int renoir_read_sensor(struct smu_context *smu, 864 enum amd_pp_sensors sensor, 865 void *data, uint32_t *size) 866 { 867 int ret = 0; 868 869 if (!data || !size) 870 return -EINVAL; 871 872 mutex_lock(&smu->sensor_lock); 873 switch (sensor) { 874 case AMDGPU_PP_SENSOR_GPU_LOAD: 875 ret = renoir_get_current_activity_percent(smu, sensor, (uint32_t *)data); 876 *size = 4; 877 break; 878 case AMDGPU_PP_SENSOR_GPU_TEMP: 879 ret = renoir_get_gpu_temperature(smu, (uint32_t *)data); 880 *size = 4; 881 break; 882 default: 883 ret = smu_v12_0_read_sensor(smu, sensor, data, size); 884 } 885 mutex_unlock(&smu->sensor_lock); 886 887 return ret; 888 } 889 890 static const struct pptable_funcs renoir_ppt_funcs = { 891 .get_smu_msg_index = renoir_get_smu_msg_index, 892 .get_smu_clk_index = renoir_get_smu_clk_index, 893 .get_smu_table_index = renoir_get_smu_table_index, 894 .tables_init = renoir_tables_init, 895 .set_power_state = NULL, 896 .get_dpm_clk_limited = renoir_get_dpm_clk_limited, 897 .print_clk_levels = renoir_print_clk_levels, 898 .get_current_power_state = renoir_get_current_power_state, 899 .dpm_set_uvd_enable = renoir_dpm_set_uvd_enable, 900 .dpm_set_jpeg_enable = renoir_dpm_set_jpeg_enable, 901 .get_current_clk_freq_by_table = renoir_get_current_clk_freq_by_table, 902 .force_dpm_limit_value = renoir_force_dpm_limit_value, 903 .unforce_dpm_levels = renoir_unforce_dpm_levels, 904 .get_workload_type = renoir_get_workload_type, 905 .get_profiling_clk_mask = renoir_get_profiling_clk_mask, 906 .force_clk_levels = renoir_force_clk_levels, 907 .set_power_profile_mode = renoir_set_power_profile_mode, 908 .set_performance_level = renoir_set_performance_level, 909 .get_dpm_clock_table = renoir_get_dpm_clock_table, 910 .set_watermarks_table = renoir_set_watermarks_table, 911 .get_power_profile_mode = renoir_get_power_profile_mode, 912 .read_sensor = renoir_read_sensor, 913 .check_fw_status = smu_v12_0_check_fw_status, 914 .check_fw_version = smu_v12_0_check_fw_version, 915 .powergate_sdma = smu_v12_0_powergate_sdma, 916 .powergate_vcn = smu_v12_0_powergate_vcn, 917 .powergate_jpeg = smu_v12_0_powergate_jpeg, 918 .send_smc_msg_with_param = smu_v12_0_send_msg_with_param, 919 .read_smc_arg = smu_v12_0_read_arg, 920 .set_gfx_cgpg = smu_v12_0_set_gfx_cgpg, 921 .gfx_off_control = smu_v12_0_gfx_off_control, 922 .init_smc_tables = smu_v12_0_init_smc_tables, 923 .fini_smc_tables = smu_v12_0_fini_smc_tables, 924 .populate_smc_tables = smu_v12_0_populate_smc_tables, 925 .get_enabled_mask = smu_v12_0_get_enabled_mask, 926 .get_current_clk_freq = smu_v12_0_get_current_clk_freq, 927 .get_dpm_ultimate_freq = smu_v12_0_get_dpm_ultimate_freq, 928 .mode2_reset = smu_v12_0_mode2_reset, 929 .set_soft_freq_limited_range = smu_v12_0_set_soft_freq_limited_range, 930 .set_driver_table_location = smu_v12_0_set_driver_table_location, 931 }; 932 933 void renoir_set_ppt_funcs(struct smu_context *smu) 934 { 935 smu->ppt_funcs = &renoir_ppt_funcs; 936 smu->smc_if_version = SMU12_DRIVER_IF_VERSION; 937 smu->is_apu = true; 938 } 939