/*	$NetBSD: amdgpu_smu_v11_0.c,v 1.3 2021/12/19 12:21:29 riastradh Exp $	*/

/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_smu_v11_0.c,v 1.3 2021/12/19 12:21:29 riastradh Exp $");

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#define SMU_11_0_PARTIAL_PPTABLE

#include "pp_debug.h"
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "smu_v11_0.h"
#include "smu_v11_0_pptable.h"
#include "soc15_common.h"
#include "atom.h"
#include "amd_pcie.h"
#include "amdgpu_ras.h"

#include "asic_reg/thm/thm_11_0_2_offset.h"
#include "asic_reg/thm/thm_11_0_2_sh_mask.h"
#include "asic_reg/mp/mp_11_0_offset.h"
#include "asic_reg/mp/mp_11_0_sh_mask.h"
#include "asic_reg/nbio/nbio_7_4_offset.h"
#include "asic_reg/nbio/nbio_7_4_sh_mask.h"
#include "asic_reg/smuio/smuio_11_0_0_offset.h"
#include "asic_reg/smuio/smuio_11_0_0_sh_mask.h"

#include <linux/nbsd-namespace.h>

MODULE_FIRMWARE("amdgpu/vega20_smc.bin");
MODULE_FIRMWARE("amdgpu/arcturus_smc.bin");
MODULE_FIRMWARE("amdgpu/navi10_smc.bin");
MODULE_FIRMWARE("amdgpu/navi14_smc.bin");
MODULE_FIRMWARE("amdgpu/navi12_smc.bin");

#define SMU11_VOLTAGE_SCALE 4

/*
 * Driver-to-SMU messages are exchanged through three MP1 scratch registers:
 * C2PMSG_66 carries the message index, C2PMSG_82 the 32-bit argument (and
 * any returned value), and C2PMSG_90 the response status, where 0x1 means
 * success.
 */
static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
					      uint16_t msg)
{
	struct amdgpu_device *adev = smu->adev;
	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
	return 0;
}

int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
{
	struct amdgpu_device *adev = smu->adev;

	*arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
	return 0;
}

static int smu_v11_0_wait_for_response(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t cur_value, i, timeout = adev->usec_timeout * 10;

	for (i = 0; i < timeout; i++) {
		cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
		if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
			return cur_value == 0x1 ?
				0 : -EIO;

		udelay(1);
	}

	/* a timeout here means the SMU never posted a response */
	return -ETIME;
}

int
smu_v11_0_send_msg_with_param(struct smu_context *smu,
			      enum smu_message_type msg,
			      uint32_t param)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0, index = 0;

	index = smu_msg_get_index(smu, msg);
	if (index < 0)
		return index;

	ret = smu_v11_0_wait_for_response(smu);
	if (ret) {
		pr_err("Msg issuing pre-check failed and "
		       "SMU may not be in the right state!\n");
		return ret;
	}

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);

	smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);

	ret = smu_v11_0_wait_for_response(smu);
	if (ret)
		pr_err("failed send message: %10s (%d) \tparam: 0x%08x response %#x\n",
		       smu_get_message_name(smu, msg), index, param, ret);

	return ret;
}

int smu_v11_0_init_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	const char *chip_name;
	char fw_name[30];
	int err = 0;
	const struct smc_firmware_header_v1_0 *hdr;
	const struct common_firmware_header *header;
	struct amdgpu_firmware_info *ucode = NULL;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
		chip_name = "vega20";
		break;
	case CHIP_ARCTURUS:
		chip_name = "arcturus";
		break;
	case CHIP_NAVI10:
		chip_name = "navi10";
		break;
	case CHIP_NAVI14:
		chip_name = "navi14";
		break;
	case CHIP_NAVI12:
		chip_name = "navi12";
		break;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);

	err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->pm.fw);
	if (err)
		goto out;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	amdgpu_ucode_print_smc_hdr(&hdr->header);
	adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
		ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
		ucode->fw = adev->pm.fw;
		header = (const struct common_firmware_header *)ucode->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
	}

out:
	if (err) {
		DRM_ERROR("smu_v11_0: Failed to load firmware \"%s\"\n",
			  fw_name);
		release_firmware(adev->pm.fw);
		adev->pm.fw = NULL;
	}
	return err;
}

int smu_v11_0_load_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	const uint32_t *src;
	const struct smc_firmware_header_v1_0 *hdr;
	uint32_t addr_start = MP1_SRAM;
	uint32_t i;
	uint32_t mp1_fw_flags;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	src = (const uint32_t *)(adev->pm.fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	for (i = 1; i < MP1_SMC_SIZE/4 - 1; i++) {
		WREG32_PCIE(addr_start, src[i]);
		addr_start += 4;
	}

	WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
		1 & MP1_SMN_PUB_CTRL__RESET_MASK);
	WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
		1 & ~MP1_SMN_PUB_CTRL__RESET_MASK);

	for (i = 0; i < adev->usec_timeout; i++) {
		mp1_fw_flags = RREG32_PCIE(MP1_Public |
			(smnMP1_FIRMWARE_FLAGS & 0xffffffff));
		if ((mp1_fw_flags &
		     MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
		    MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
			break;
		udelay(1);
	}

	if (i == adev->usec_timeout)
		return -ETIME;

	return 0;
}

int smu_v11_0_check_fw_status(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t mp1_fw_flags;

	mp1_fw_flags = RREG32_PCIE(MP1_Public |
				   (smnMP1_FIRMWARE_FLAGS & 0xffffffff));

	if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
	    MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
		return 0;

	return -EIO;
}

int smu_v11_0_check_fw_version(struct smu_context *smu)
{
	uint32_t if_version = 0xff, smu_version = 0xff;
	uint16_t smu_major;
	uint8_t smu_minor, smu_debug;
	int ret = 0;

	ret = smu_get_smc_version(smu, &if_version, &smu_version);
	if (ret)
		return ret;

	smu_major = (smu_version >> 16) & 0xffff;
	smu_minor = (smu_version >> 8) & 0xff;
	smu_debug = (smu_version >> 0) & 0xff;

	switch (smu->adev->asic_type) {
	case CHIP_VEGA20:
		smu->smc_if_version = SMU11_DRIVER_IF_VERSION_VG20;
		break;
	case CHIP_ARCTURUS:
		smu->smc_if_version = SMU11_DRIVER_IF_VERSION_ARCT;
		break;
	case CHIP_NAVI10:
		smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV10;
		break;
	case CHIP_NAVI14:
		smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV14;
		break;
	default:
		pr_err("smu unsupported asic type:%d.\n", smu->adev->asic_type);
		smu->smc_if_version = SMU11_DRIVER_IF_VERSION_INV;
		break;
	}

	/*
	 * 1. if_version mismatch is not critical as our fw is designed
	 * to be backward compatible.
	 * 2. New fw usually brings some optimizations. But that's visible
	 * only on the paired driver.
	 * Considering the above, we just leave the user a warning message
	 * instead of halting driver load.
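	 *
	 * For reference (illustrative values only), the 32-bit smu_version
	 * read above packs the firmware release as major.minor.debug, e.g.
	 * 0x002A3200 decodes to 42.50.0:
	 *
	 *   (0x002A3200 >> 16) & 0xffff == 0x002A == 42  (smu_major)
	 *   (0x002A3200 >>  8) & 0xff   == 0x32   == 50  (smu_minor)
	 *   (0x002A3200 >>  0) & 0xff   == 0x00   ==  0  (smu_debug)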
	 */
	if (if_version != smu->smc_if_version) {
		pr_info("smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
			"smu fw version = 0x%08x (%d.%d.%d)\n",
			smu->smc_if_version, if_version,
			smu_version, smu_major, smu_minor, smu_debug);
		pr_warn("SMU driver if version not matched\n");
	}

	return ret;
}

static int smu_v11_0_set_pptable_v2_0(struct smu_context *smu, const void **table, uint32_t *size)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t ppt_offset_bytes;
	const struct smc_firmware_header_v2_0 *v2;

	v2 = (const struct smc_firmware_header_v2_0 *) adev->pm.fw->data;

	ppt_offset_bytes = le32_to_cpu(v2->ppt_offset_bytes);
	*size = le32_to_cpu(v2->ppt_size_bytes);
	*table = (const uint8_t *)v2 + ppt_offset_bytes;

	return 0;
}

static int smu_v11_0_set_pptable_v2_1(struct smu_context *smu, const void **table,
				      uint32_t *size, uint32_t pptable_id)
{
	struct amdgpu_device *adev = smu->adev;
	const struct smc_firmware_header_v2_1 *v2_1;
	const struct smc_soft_pptable_entry *entries;
	uint32_t pptable_count = 0;
	int i = 0;

	v2_1 = (const struct smc_firmware_header_v2_1 *) adev->pm.fw->data;
	entries = (const struct smc_soft_pptable_entry *)
		((const uint8_t *)v2_1 + le32_to_cpu(v2_1->pptable_entry_offset));
	pptable_count = le32_to_cpu(v2_1->pptable_count);
	for (i = 0; i < pptable_count; i++) {
		if (le32_to_cpu(entries[i].id) == pptable_id) {
			*table = ((const uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes));
			*size = le32_to_cpu(entries[i].ppt_size_bytes);
			break;
		}
	}

	if (i == pptable_count)
		return -EINVAL;

	return 0;
}

int smu_v11_0_setup_pptable(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	const struct smc_firmware_header_v1_0 *hdr;
	int ret, index;
	uint32_t size = 0;
	uint16_t atom_table_size;
	uint8_t frev, crev;
	const void *table;
	uint16_t version_major, version_minor;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	version_major = le16_to_cpu(hdr->header.header_version_major);
	version_minor = le16_to_cpu(hdr->header.header_version_minor);
	if (version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) {
		pr_info("use driver provided pptable %d\n", smu->smu_table.boot_values.pp_table_id);
		switch (version_minor) {
		case 0:
			ret = smu_v11_0_set_pptable_v2_0(smu, &table, &size);
			break;
		case 1:
			ret = smu_v11_0_set_pptable_v2_1(smu, &table, &size,
							 smu->smu_table.boot_values.pp_table_id);
			break;
		default:
			ret = -EINVAL;
			break;
		}
		if (ret)
			return ret;

	} else {
		pr_info("use vbios provided pptable\n");
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    powerplayinfo);

		ret = smu_get_atom_data_table(smu, index, &atom_table_size, &frev, &crev,
					      (void *)&table);
		if (ret)
			return ret;
		size = atom_table_size;
	}

	if (!smu->smu_table.power_play_table)
		smu->smu_table.power_play_table = table;
	if (!smu->smu_table.power_play_table_size)
		smu->smu_table.power_play_table_size = size;

	return 0;
}

static int smu_v11_0_init_dpm_context(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	if (smu_dpm->dpm_context || smu_dpm->dpm_context_size != 0)
		return -EINVAL;

	return smu_alloc_dpm_context(smu);
}

static int smu_v11_0_fini_dpm_context(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	if (!smu_dpm->dpm_context || smu_dpm->dpm_context_size == 0)
		return -EINVAL;

	kfree(smu_dpm->dpm_context);
	kfree(smu_dpm->golden_dpm_context);
	kfree(smu_dpm->dpm_current_power_state);
	kfree(smu_dpm->dpm_request_power_state);
	smu_dpm->dpm_context = NULL;
	smu_dpm->golden_dpm_context = NULL;
	smu_dpm->dpm_context_size = 0;
	smu_dpm->dpm_current_power_state = NULL;
	smu_dpm->dpm_request_power_state = NULL;

	return 0;
}

int smu_v11_0_init_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = NULL;
	int ret = 0;

	if (smu_table->tables)
		return -EINVAL;

	tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
			 GFP_KERNEL);
	if (!tables)
		return -ENOMEM;

	smu_table->tables = tables;

	ret = smu_tables_init(smu, tables);
	if (ret)
		return ret;

	ret = smu_v11_0_init_dpm_context(smu);
	if (ret)
		return ret;

	return 0;
}

int smu_v11_0_fini_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	int ret = 0;

	if (!smu_table->tables)
		return -EINVAL;

	kfree(smu_table->tables);
	kfree(smu_table->metrics_table);
	kfree(smu_table->watermarks_table);
	smu_table->tables = NULL;
	smu_table->metrics_table = NULL;
	smu_table->watermarks_table = NULL;
	smu_table->metrics_time = 0;

	ret = smu_v11_0_fini_dpm_context(smu);
	if (ret)
		return ret;
	return 0;
}

int smu_v11_0_init_power(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;

	if (!smu->pm_enabled)
		return 0;
	if (smu_power->power_context || smu_power->power_context_size != 0)
		return -EINVAL;

	smu_power->power_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
					   GFP_KERNEL);
	if (!smu_power->power_context)
		return -ENOMEM;
	smu_power->power_context_size = sizeof(struct smu_11_0_dpm_context);

	return 0;
}

int smu_v11_0_fini_power(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;

	if (!smu->pm_enabled)
		return 0;
	if (!smu_power->power_context || smu_power->power_context_size == 0)
		return -EINVAL;

	kfree(smu_power->power_context);
	smu_power->power_context = NULL;
	smu_power->power_context_size = 0;

	return 0;
}

int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
{
	int ret, index;
	uint16_t size;
	uint8_t frev, crev;
	struct atom_common_table_header *header;
	struct atom_firmware_info_v3_3 *v_3_3;
	struct atom_firmware_info_v3_1 *v_3_1;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	ret = smu_get_atom_data_table(smu, index, &size, &frev, &crev,
				      (uint8_t **)&header);
	if (ret)
		return ret;

	if (header->format_revision != 3) {
		pr_err("unknown atom_firmware_info version! for smu11\n");
		return -EINVAL;
	}

	switch (header->content_revision) {
	case 0:
	case 1:
	case 2:
		v_3_1 = (struct atom_firmware_info_v3_1 *)header;
		smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = 0;
		break;
	case 3:
	default:
		v_3_3 = (struct atom_firmware_info_v3_3 *)header;
		smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
	}

	smu->smu_table.boot_values.format_revision = header->format_revision;
	smu->smu_table.boot_values.content_revision = header->content_revision;

	return 0;
}

int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu)
{
	int ret, index;
	struct amdgpu_device *adev = smu->adev;
	struct atom_get_smu_clock_info_parameters_v3_1 input = {0};
	struct atom_get_smu_clock_info_output_parameters_v3_1 *output;

	input.clk_id = SMU11_SYSPLL0_SOCCLK_ID;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	smu->smu_table.boot_values.socclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	memset(&input, 0, sizeof(input));
	input.clk_id = SMU11_SYSPLL0_DCEFCLK_ID;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	smu->smu_table.boot_values.dcefclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	memset(&input, 0, sizeof(input));
	input.clk_id = SMU11_SYSPLL0_ECLK_ID;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	smu->smu_table.boot_values.eclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	memset(&input, 0, sizeof(input));
	input.clk_id = SMU11_SYSPLL0_VCLK_ID;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	smu->smu_table.boot_values.vclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	memset(&input, 0, sizeof(input));
	input.clk_id = SMU11_SYSPLL0_DCLK_ID;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	smu->smu_table.boot_values.dclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	if ((smu->smu_table.boot_values.format_revision == 3) &&
	    (smu->smu_table.boot_values.content_revision >= 2)) {
		memset(&input, 0, sizeof(input));
		input.clk_id = SMU11_SYSPLL1_0_FCLK_ID;
		input.syspll_id = SMU11_SYSPLL1_2_ID;
		input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
		index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
						    getsmuclockinfo);

		ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
						(uint32_t *)&input);
		if (ret)
			return -EINVAL;

		output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
		smu->smu_table.boot_values.fclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
	}

	return 0;
}

int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	int ret = 0;
	uint64_t address;
	uint32_t address_low, address_high;

	if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
		return ret;

	address = (uintptr_t)memory_pool->cpu_addr;
	address_high = (uint32_t)upper_32_bits(address);
	address_low = (uint32_t)lower_32_bits(address);

	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_SetSystemVirtualDramAddrHigh,
					  address_high);
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_SetSystemVirtualDramAddrLow,
					  address_low);
	if (ret)
		return ret;

	address = memory_pool->mc_address;
	address_high = (uint32_t)upper_32_bits(address);
	address_low = (uint32_t)lower_32_bits(address);

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
					  address_high);
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
					  address_low);
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
					  (uint32_t)memory_pool->size);
	if (ret)
		return ret;

	return ret;
}

int smu_v11_0_check_pptable(struct smu_context *smu)
{
	int ret;

	ret = smu_check_powerplay_table(smu);
	return ret;
}

int smu_v11_0_parse_pptable(struct smu_context *smu)
{
	int ret;

	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_table *table = &table_context->tables[SMU_TABLE_PPTABLE];

	if (table_context->driver_pptable)
		return -EINVAL;

	table_context->driver_pptable = kzalloc(table->size, GFP_KERNEL);

	if (!table_context->driver_pptable)
		return -ENOMEM;

	ret = smu_store_powerplay_table(smu);
	if (ret)
		return -EINVAL;

	ret = smu_append_powerplay_table(smu);

	return ret;
}

int smu_v11_0_populate_smc_pptable(struct smu_context *smu)
{
	int ret;

	ret = smu_set_default_dpm_table(smu);

	return ret;
}

int smu_v11_0_write_pptable(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;
	int ret = 0;

	ret = smu_update_table(smu, SMU_TABLE_PPTABLE, 0,
			       table_context->driver_pptable, true);

	return ret;
}

int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
{
	int ret;

	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_SetMinDeepSleepDcefclk, clk);
	if (ret)
		pr_err("SMU11 attempt to set divider for DCEFCLK Failed!");

	return ret;
}

int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;

	if (!smu->pm_enabled)
		return 0;
	if (!table_context)
		return -EINVAL;

	return smu_v11_0_set_deep_sleep_dcefclk(smu, table_context->boot_values.dcefclk / 100);
}

int smu_v11_0_set_driver_table_location(struct smu_context *smu)
{
	struct smu_table *driver_table = &smu->smu_table.driver_table;
	int ret = 0;

	if (driver_table->mc_address) {
		ret = smu_send_smc_msg_with_param(smu,
				SMU_MSG_SetDriverDramAddrHigh,
				upper_32_bits(driver_table->mc_address));
		if (!ret)
			ret = smu_send_smc_msg_with_param(smu,
					SMU_MSG_SetDriverDramAddrLow,
					lower_32_bits(driver_table->mc_address));
	}

	return ret;
}

int smu_v11_0_set_tool_table_location(struct smu_context *smu)
{
	int ret = 0;
	struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];

	if (tool_table->mc_address) {
		ret = smu_send_smc_msg_with_param(smu,
				SMU_MSG_SetToolsDramAddrHigh,
				upper_32_bits(tool_table->mc_address));
		if (!ret)
			ret = smu_send_smc_msg_with_param(smu,
					SMU_MSG_SetToolsDramAddrLow,
					lower_32_bits(tool_table->mc_address));
	}

	return ret;
}

int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return ret;

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count);
	return ret;
}


int smu_v11_0_set_allowed_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t feature_mask[2];

	mutex_lock(&feature->mutex);
	if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64)
		goto failed;

	bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
					  feature_mask[1]);
	if (ret)
		goto failed;

	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_SetAllowedFeaturesMaskLow,
					  feature_mask[0]);
	if (ret)
		goto failed;

failed:
	mutex_unlock(&feature->mutex);
	return ret;
}

int smu_v11_0_get_enabled_mask(struct smu_context *smu,
			       uint32_t *feature_mask, uint32_t num)
{
	uint32_t feature_mask_high = 0, feature_mask_low = 0;
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;

	if (!feature_mask || num < 2)
		return -EINVAL;

	if (bitmap_empty(feature->enabled, feature->feature_num)) {
		ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
		if (ret)
			return ret;
		ret = smu_read_smc_arg(smu, &feature_mask_high);
		if (ret)
			return ret;

		ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
		if (ret)
			return ret;
		ret = smu_read_smc_arg(smu, &feature_mask_low);
		if (ret)
			return ret;

		feature_mask[0] = feature_mask_low;
		feature_mask[1] = feature_mask_high;
	} else {
		bitmap_copy((unsigned long *)feature_mask, feature->enabled,
			    feature->feature_num);
	}

	return ret;
}

int smu_v11_0_system_features_control(struct smu_context *smu,
				      bool en)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t feature_mask[2];
	int ret = 0;

	ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
				     SMU_MSG_DisableAllSmuFeatures));
	if (ret)
		return ret;

	bitmap_zero(feature->enabled, feature->feature_num);
	bitmap_zero(feature->supported, feature->feature_num);

	if (en) {
		ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
		if (ret)
			return ret;

		bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
			    feature->feature_num);
		bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
			    feature->feature_num);
	}

	return ret;
}

int smu_v11_0_notify_display_change(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return ret;
	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
	    smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1);

	return ret;
}

static int
smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
				    enum smu_clk_type clock_select)
{
	int ret = 0;
	int clk_id;

	if (!smu->pm_enabled)
		return ret;

	if ((smu_msg_get_index(smu, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
	    (smu_msg_get_index(smu, SMU_MSG_GetMaxDpmFreq) < 0))
		return 0;

	clk_id = smu_clk_get_index(smu, clock_select);
	if (clk_id < 0)
		return -EINVAL;

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
					  clk_id << 16);
	if (ret) {
		pr_err("[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
		return ret;
	}

	ret = smu_read_smc_arg(smu, clock);
	if (ret)
		return ret;

	if (*clock != 0)
		return 0;

	/* if DC limit is zero, return AC limit */
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
					  clk_id << 16);
	if (ret) {
		pr_err("[GetMaxSustainableClock] failed to get max AC clock from SMC!");
		return ret;
	}

	ret = smu_read_smc_arg(smu, clock);

	return ret;
}

int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
{
	struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks;
	int ret = 0;

	max_sustainable_clocks =
		kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks),
			GFP_KERNEL);
	smu->smu_table.max_sustainable_clocks = (void *)max_sustainable_clocks;

	max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
	max_sustainable_clocks->soc_clock = smu->smu_table.boot_values.socclk / 100;
	max_sustainable_clocks->dcef_clock = smu->smu_table.boot_values.dcefclk / 100;
	max_sustainable_clocks->display_clock = 0xFFFFFFFF;
	max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
	max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;

	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->uclock),
							  SMU_UCLK);
		if (ret) {
			pr_err("[%s] failed to get max UCLK from SMC!",
			       __func__);
			return ret;
		}
	}

	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->soc_clock),
							  SMU_SOCCLK);
		if (ret) {
			pr_err("[%s] failed to get max SOCCLK from SMC!",
			       __func__);
			return ret;
		}
	}

	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->dcef_clock),
							  SMU_DCEFCLK);
		if (ret) {
			pr_err("[%s] failed to get max DCEFCLK from SMC!",
			       __func__);
			return ret;
		}

		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->display_clock),
							  SMU_DISPCLK);
		if (ret) {
			pr_err("[%s] failed to get max DISPCLK from SMC!",
			       __func__);
			return ret;
		}
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->phy_clock),
							  SMU_PHYCLK);
		if (ret) {
			pr_err("[%s] failed to get max PHYCLK from SMC!",
			       __func__);
			return ret;
		}
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->pixel_clock),
							  SMU_PIXCLK);
		if (ret) {
			pr_err("[%s] failed to get max PIXCLK from SMC!",
			       __func__);
			return ret;
		}
	}

	if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock)
		max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock;

	return 0;
}

uint32_t smu_v11_0_get_max_power_limit(struct smu_context *smu) {
	uint32_t od_limit, max_power_limit;
	const struct smu_11_0_powerplay_table *powerplay_table = NULL;
	struct smu_table_context *table_context = &smu->smu_table;
	powerplay_table = table_context->power_play_table;

	max_power_limit = smu_get_pptable_power_limit(smu);

	if (!max_power_limit) {
		// If we couldn't get the table limit, fall back on first-read value
		if (!smu->default_power_limit)
			smu->default_power_limit = smu->power_limit;
		max_power_limit = smu->default_power_limit;
	}

	if (smu->od_enabled) {
		od_limit = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);

		pr_debug("ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_limit, smu->default_power_limit);

		max_power_limit *= (100 + od_limit);
		max_power_limit /= 100;
	}

	return max_power_limit;
}

int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
{
	int ret = 0;
	uint32_t max_power_limit;

	max_power_limit = smu_v11_0_get_max_power_limit(smu);

	if (n > max_power_limit) {
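		/*
		 * Illustrative only: max_power_limit comes from the pptable
		 * (or the first-read default) and, with overdrive enabled,
		 * is scaled by ODSETTING_POWERPERCENTAGE above, e.g. a base
		 * limit of 250 with 20% OD headroom allows requests up to
		 * 250 * (100 + 20) / 100 = 300.
		 */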
		pr_err("New power limit (%d) is over the max allowed %d\n",
		       n,
		       max_power_limit);
		return -EINVAL;
	}

	if (n == 0)
		n = smu->default_power_limit;

	if (!smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
		pr_err("Setting new power limit is not supported!\n");
		return -EOPNOTSUPP;
	}

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n);
	if (ret) {
		pr_err("[%s] Set power limit Failed!\n", __func__);
		return ret;
	}
	smu->power_limit = n;

	return 0;
}

int smu_v11_0_get_current_clk_freq(struct smu_context *smu,
				   enum smu_clk_type clk_id,
				   uint32_t *value)
{
	int ret = 0;
	uint32_t freq = 0;
	int asic_clk_id;

	if (clk_id >= SMU_CLK_COUNT || !value)
		return -EINVAL;

	asic_clk_id = smu_clk_get_index(smu, clk_id);
	if (asic_clk_id < 0)
		return -EINVAL;

	/* if the GetDpmClockFreq message is not supported, read the current clock from the SmuMetrics_t table */
	if (smu_msg_get_index(smu, SMU_MSG_GetDpmClockFreq) < 0)
		ret = smu_get_current_clk_freq_by_table(smu, clk_id, &freq);
	else {
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmClockFreq,
						  (asic_clk_id << 16));
		if (ret)
			return ret;

		ret = smu_read_smc_arg(smu, &freq);
		if (ret)
			return ret;
	}

	freq *= 100;
	*value = freq;

	return ret;
}

static int smu_v11_0_set_thermal_range(struct smu_context *smu,
					struct smu_temperature_range range)
{
	struct amdgpu_device *adev = smu->adev;
	int low = SMU_THERMAL_MINIMUM_ALERT_TEMP;
	int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP;
	uint32_t val;
	struct smu_table_context *table_context = &smu->smu_table;
	const struct smu_11_0_powerplay_table *powerplay_table = table_context->power_play_table;

	low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP,
		  range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);
	high = min((uint16_t)SMU_THERMAL_MAXIMUM_ALERT_TEMP, powerplay_table->software_shutdown_temp);

	if (low > high)
		return -EINVAL;

	val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0);
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0);
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff));
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff));
	val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);

	WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);

	return 0;
}

static int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t val = 0;

	val |= (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
	val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
	val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);

	WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val);

	return 0;
}

int smu_v11_0_start_thermal_control(struct smu_context *smu)
{
	int ret = 0;
	struct smu_temperature_range range;
	struct amdgpu_device *adev = smu->adev;

	if (!smu->pm_enabled)
		return ret;

	memcpy(&range, &smu11_thermal_policy[0], sizeof(struct
							smu_temperature_range));

	ret = smu_get_thermal_temperature_range(smu, &range);
	if (ret)
		return ret;

	if (smu->smu_table.thermal_controller_type) {
		ret = smu_v11_0_set_thermal_range(smu, range);
		if (ret)
			return ret;

		ret = smu_v11_0_enable_thermal_alert(smu);
		if (ret)
			return ret;

		ret = smu_set_thermal_fan_table(smu);
		if (ret)
			return ret;
	}

	adev->pm.dpm.thermal.min_temp = range.min;
	adev->pm.dpm.thermal.max_temp = range.max;
	adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max;
	adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min;
	adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max;
	adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max;
	adev->pm.dpm.thermal.min_mem_temp = range.mem_min;
	adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max;
	adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max;

	return ret;
}

int smu_v11_0_stop_thermal_control(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, 0);

	return 0;
}

/* SVI telemetry VID to voltage: 6200/25 encode 1.55 V max and 6.25 mV per
 * step in quarter-millivolt units; dividing by SMU11_VOLTAGE_SCALE yields mV. */
static uint16_t convert_to_vddc(uint8_t vid)
{
	return (uint16_t) ((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE);
}

static int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t vdd = 0, val_vid = 0;

	if (!value)
		return -EINVAL;
	val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) &
		SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >>
		SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT;

	vdd = (uint32_t)convert_to_vddc((uint8_t)val_vid);

	*value = vdd;

	return 0;

}

int smu_v11_0_read_sensor(struct smu_context *smu,
			  enum amd_pp_sensors sensor,
			  void *data, uint32_t *size)
{
	int ret = 0;

	if(!data || !size)
		return -EINVAL;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		ret = smu_get_current_clk_freq(smu, SMU_UCLK, (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		ret = smu_get_current_clk_freq(smu, SMU_GFXCLK, (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDGFX:
		ret = smu_v11_0_get_gfx_vdd(smu, (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
		*(uint32_t *)data = 0;
		*size = 4;
		break;
	default:
		ret = smu_common_read_sensor(smu, sensor, data, size);
		break;
	}

	if (ret)
		*size = 0;

	return ret;
}

int
smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
					struct pp_display_clock_request
					*clock_req)
{
	enum amd_pp_clock_type clk_type = clock_req->clock_type;
	int ret = 0;
	enum smu_clk_type clk_select = 0;
	uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;

	if (!smu->pm_enabled)
		return -EINVAL;

	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
	    smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		switch (clk_type) {
		case amd_pp_dcef_clock:
			clk_select = SMU_DCEFCLK;
			break;
		case amd_pp_disp_clock:
			clk_select = SMU_DISPCLK;
			break;
		case amd_pp_pixel_clock:
			clk_select = SMU_PIXCLK;
			break;
		case amd_pp_phy_clock:
			clk_select = SMU_PHYCLK;
			break;
		case amd_pp_mem_clock:
			clk_select = SMU_UCLK;
			break;
		default:
			pr_info("[%s] Invalid Clock Type!", __func__);
			ret = -EINVAL;
			break;
		}

		if (ret)
			goto failed;

		if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
			return 0;

		ret = smu_set_hard_freq_range(smu, clk_select, clk_freq, 0);

		if(clk_select == SMU_UCLK)
			smu->hard_min_uclk_req_from_dal = clk_freq;
	}

failed:
	return ret;
}

int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
{
	int ret = 0;
	struct amdgpu_device *adev = smu->adev;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
		break;
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
			return 0;
		if (enable)
			ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff);
		else
			ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff);
		break;
	default:
		break;
	}

	return ret;
}

uint32_t
smu_v11_0_get_fan_control_mode(struct smu_context *smu)
{
	if (!smu_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
		return AMD_FAN_CTRL_MANUAL;
	else
		return AMD_FAN_CTRL_AUTO;
}

static int
smu_v11_0_auto_fan_control(struct smu_context *smu, bool auto_fan_control)
{
	int ret = 0;

	if (!smu_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
		return 0;

	ret = smu_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control);
	if (ret)
		pr_err("[%s]%s smc FAN CONTROL feature failed!",
		       __func__, (auto_fan_control ? "Start" : "Stop"));

	return ret;
}

static int
smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
				   CG_FDO_CTRL2, TMIN, 0));
	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
				   CG_FDO_CTRL2, FDO_PWM_MODE, mode));

	return 0;
}

int
smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t duty100, duty;
	uint64_t tmp64;

	if (speed > 100)
		speed = 100;

	if (smu_v11_0_auto_fan_control(smu, 0))
		return -EINVAL;

	duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
				CG_FDO_CTRL1, FMAX_DUTY100);
	if (!duty100)
		return -EINVAL;

	tmp64 = (uint64_t)speed * duty100;
	do_div(tmp64, 100);
	duty = (uint32_t)tmp64;

	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0),
				   CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));

	return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
}

int
smu_v11_0_set_fan_control_mode(struct smu_context *smu,
			       uint32_t mode)
{
	int ret = 0;

	switch (mode) {
	case AMD_FAN_CTRL_NONE:
		ret = smu_v11_0_set_fan_speed_percent(smu, 100);
		break;
	case AMD_FAN_CTRL_MANUAL:
		ret = smu_v11_0_auto_fan_control(smu, 0);
		break;
	case AMD_FAN_CTRL_AUTO:
		ret = smu_v11_0_auto_fan_control(smu, 1);
		break;
	default:
		break;
	}

	if (ret) {
		pr_err("[%s]Set fan control mode failed!", __func__);
		return -EINVAL;
	}

	return ret;
}

int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
				uint32_t speed)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;
	uint32_t tach_period, crystal_clock_freq;

	if (!speed)
		return -EINVAL;

	ret = smu_v11_0_auto_fan_control(smu, 0);
	if (ret)
		return ret;

	crystal_clock_freq = amdgpu_asic_get_xclk(adev);
	tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
	WREG32_SOC15(THM, 0, mmCG_TACH_CTRL,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
				   CG_TACH_CTRL, TARGET_PERIOD,
				   tach_period));

	ret = smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);

	return ret;
}

int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
			      uint32_t pstate)
{
	int ret = 0;
	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_SetXgmiMode,
					  pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3);
	return ret;
}

#define THM_11_0__SRCID__THM_DIG_THERM_L2H	0	/* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
#define THM_11_0__SRCID__THM_DIG_THERM_H2L	1	/* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */

static int smu_v11_0_irq_process(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	uint32_t client_id = entry->client_id;
	uint32_t src_id = entry->src_id;

	if (client_id == SOC15_IH_CLIENTID_THM) {
		switch (src_id) {
		case THM_11_0__SRCID__THM_DIG_THERM_L2H:
			pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
				PCI_BUS_NUM(adev->pdev->devfn),
				PCI_SLOT(adev->pdev->devfn),
				PCI_FUNC(adev->pdev->devfn));
			break;
		case THM_11_0__SRCID__THM_DIG_THERM_H2L:
			pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
				PCI_BUS_NUM(adev->pdev->devfn),
				PCI_SLOT(adev->pdev->devfn),
				PCI_FUNC(adev->pdev->devfn));
			break;
		default:
			pr_warn("GPU under temperature range unknown src id (%d), detected on PCIe %d:%d.%d!\n",
				src_id,
				PCI_BUS_NUM(adev->pdev->devfn),
				PCI_SLOT(adev->pdev->devfn),
				PCI_FUNC(adev->pdev->devfn));
			break;

		}
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs smu_v11_0_irq_funcs =
{
	.process = smu_v11_0_irq_process,
};

int smu_v11_0_register_irq_handler(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct amdgpu_irq_src *irq_src = smu->irq_source;
	int ret = 0;

	/* already registered */
	if (irq_src)
		return 0;

	irq_src = kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
	if (!irq_src)
		return -ENOMEM;
	smu->irq_source = irq_src;

	irq_src->funcs = &smu_v11_0_irq_funcs;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
				THM_11_0__SRCID__THM_DIG_THERM_L2H,
				irq_src);
	if (ret)
		return ret;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
				THM_11_0__SRCID__THM_DIG_THERM_H2L,
				irq_src);
	if (ret)
		return ret;

	return ret;
}

int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
					       struct pp_smu_nv_clock_table *max_clocks)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_11_0_max_sustainable_clocks *sustainable_clocks = NULL;

	if (!max_clocks ||
	    !table_context->max_sustainable_clocks)
		return -EINVAL;

	sustainable_clocks = table_context->max_sustainable_clocks;

	max_clocks->dcfClockInKhz =
		(unsigned int) sustainable_clocks->dcef_clock * 1000;
	max_clocks->displayClockInKhz =
		(unsigned int) sustainable_clocks->display_clock * 1000;
	max_clocks->phyClockInKhz =
		(unsigned int) sustainable_clocks->phy_clock * 1000;
	max_clocks->pixelClockInKhz =
		(unsigned int) sustainable_clocks->pixel_clock * 1000;
	max_clocks->uClockInKhz =
		(unsigned int) sustainable_clocks->uclock * 1000;
	max_clocks->socClockInKhz =
		(unsigned int) sustainable_clocks->soc_clock * 1000;
	max_clocks->dscClockInKhz = 0;
	max_clocks->dppClockInKhz = 0;
	max_clocks->fabricClockInKhz = 0;

	return 0;
}

int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME);

	return ret;
}

static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v11_0_baco_seq baco_seq)
{
	return smu_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq);
}

bool smu_v11_0_baco_is_support(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	uint32_t val;
	bool baco_support;

	mutex_lock(&smu_baco->mutex);
	baco_support = smu_baco->platform_support;
	mutex_unlock(&smu_baco->mutex);

	if (!baco_support)
		return false;

	/* Arcturus does not support this bit mask */
	if (smu_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
	    !smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
		return false;

	val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
	if (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK)
		return true;

	return false;
}

enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	enum smu_baco_state baco_state;

	mutex_lock(&smu_baco->mutex);
	baco_state = smu_baco->state;
	mutex_unlock(&smu_baco->mutex);

	return baco_state;
}

int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
{

	struct smu_baco_context *smu_baco = &smu->smu_baco;
	struct amdgpu_device *adev = smu->adev;
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	uint32_t bif_doorbell_intr_cntl;
	uint32_t data;
	int ret = 0;

	if (smu_v11_0_baco_get_state(smu) == state)
		return 0;

	mutex_lock(&smu_baco->mutex);

	bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);

	if (state == SMU_BACO_STATE_ENTER) {
		bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
					BIF_DOORBELL_INT_CNTL,
					DOORBELL_INTERRUPT_DISABLE, 1);
		WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);

		if (!ras || !ras->supported) {
			data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
			data |= 0x80000000;
			WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);

			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0);
		} else {
			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1);
		}
	} else {
		ret = smu_send_smc_msg(smu, SMU_MSG_ExitBaco);
		if (ret)
			goto out;

		bif_doorbell_intr_cntl =
					REG_SET_FIELD(bif_doorbell_intr_cntl,
					BIF_DOORBELL_INT_CNTL,
					DOORBELL_INTERRUPT_DISABLE, 0);
		WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);

		/* clear vbios scratch 6 and 7 for coming asic reinit */
		WREG32(adev->bios_scratch_reg_offset + 6, 0);
		WREG32(adev->bios_scratch_reg_offset + 7, 0);
	}
	if (ret)
		goto out;

	smu_baco->state = state;
out:
	mutex_unlock(&smu_baco->mutex);
	return ret;
}

int smu_v11_0_baco_enter(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	/* Arcturus does not need this audio workaround */
	if (adev->asic_type != CHIP_ARCTURUS) {
		ret = smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
		if (ret)
			return ret;
	}

	ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_ENTER);
	if (ret)
		return ret;

	msleep(10);

	return ret;
}

int smu_v11_0_baco_exit(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_EXIT);
	if (ret)
		return ret;

	return ret;
}

int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
				    uint32_t *min, uint32_t *max)
{
	int ret = 0, clk_id = 0;
	uint32_t param = 0;

	clk_id = smu_clk_get_index(smu, clk_type);
	if (clk_id < 0) {
		ret = -EINVAL;
		goto failed;
	}
	param = (clk_id & 0xffff) << 16;

	if (max) {
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param);
		if (ret)
			goto failed;
		ret = smu_read_smc_arg(smu, max);
		if (ret)
			goto failed;
	}

	if (min) {
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param);
		if (ret)
			goto failed;
		ret = smu_read_smc_arg(smu, min);
		if (ret)
			goto failed;
	}

failed:
	return ret;
}

int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
					  uint32_t min, uint32_t max)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	clk_id = smu_clk_get_index(smu, clk_type);
	if (clk_id < 0)
		return clk_id;

	if (max > 0) {
		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
						  param);
		if (ret)
			return ret;
	}

	if (min > 0) {
		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
						  param);
		if (ret)
			return ret;
	}

	return ret;
}

int smu_v11_0_override_pcie_parameters(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t pcie_gen = 0, pcie_width = 0;
	int ret;

	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		pcie_gen = 3;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		pcie_gen = 2;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		pcie_gen = 1;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
		pcie_gen = 0;

	/* Bit 31:16: LCLK DPM level.
	 * 0 is DPM0, and 1 is DPM1
	 * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
	 * Bit 7:0: PCIE lane width, 1 to 7 corresponds to x1 to x32
	 */
	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
		pcie_width = 6;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
		pcie_width = 5;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
		pcie_width = 4;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
		pcie_width = 3;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
		pcie_width = 2;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
		pcie_width = 1;

	ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);

	if (ret)
		pr_err("[%s] Attempt to override pcie params failed!\n", __func__);

	return ret;

}

int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize, size_t overdrive_table_size)
{
	struct smu_table_context *table_context = &smu->smu_table;
	int ret = 0;

	if (initialize) {
		if (table_context->overdrive_table) {
			return -EINVAL;
		}
		table_context->overdrive_table = kzalloc(overdrive_table_size, GFP_KERNEL);
		if (!table_context->overdrive_table) {
			return -ENOMEM;
		}
		ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, false);
		if (ret) {
			pr_err("Failed to export overdrive table!\n");
			return ret;
		}
		if (!table_context->boot_overdrive_table) {
			table_context->boot_overdrive_table = kmemdup(table_context->overdrive_table, overdrive_table_size, GFP_KERNEL);
			if (!table_context->boot_overdrive_table) {
				return -ENOMEM;
			}
		}
	}
	ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, true);
	if (ret) {
		pr_err("Failed to import overdrive table!\n");
		return ret;
	}
	return ret;
}

int smu_v11_0_set_performance_level(struct smu_context *smu,
				    enum amd_dpm_forced_level level)
{
	int ret = 0;
	uint32_t sclk_mask, mclk_mask, soc_mask;

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
		ret = smu_force_dpm_limit_value(smu, true);
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
		ret = smu_force_dpm_limit_value(smu, false);
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		ret = smu_unforce_dpm_levels(smu);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		ret = smu_get_profiling_clk_mask(smu, level,
						 &sclk_mask,
						 &mclk_mask,
						 &soc_mask);
		if (ret)
			return ret;
		smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false);
		smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false);
		smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false);
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
	default:
		break;
	}
	return ret;
}