/* $NetBSD: amdgpu_smu.c,v 1.5 2021/12/19 12:37:54 riastradh Exp $ */

/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
23 1.1 riastrad */ 24 1.1 riastrad 25 1.1 riastrad #include <sys/cdefs.h> 26 1.5 riastrad __KERNEL_RCSID(0, "$NetBSD: amdgpu_smu.c,v 1.5 2021/12/19 12:37:54 riastradh Exp $"); 27 1.1 riastrad 28 1.1 riastrad #include <linux/firmware.h> 29 1.1 riastrad #include <linux/pci.h> 30 1.1 riastrad 31 1.1 riastrad #include "pp_debug.h" 32 1.1 riastrad #include "amdgpu.h" 33 1.1 riastrad #include "amdgpu_smu.h" 34 1.1 riastrad #include "smu_internal.h" 35 1.1 riastrad #include "soc15_common.h" 36 1.1 riastrad #include "smu_v11_0.h" 37 1.1 riastrad #include "smu_v12_0.h" 38 1.1 riastrad #include "atom.h" 39 1.1 riastrad #include "amd_pcie.h" 40 1.1 riastrad #include "vega20_ppt.h" 41 1.1 riastrad #include "arcturus_ppt.h" 42 1.1 riastrad #include "navi10_ppt.h" 43 1.1 riastrad #include "renoir_ppt.h" 44 1.1 riastrad 45 1.3 riastrad #include <linux/nbsd-namespace.h> 46 1.3 riastrad 47 1.1 riastrad #undef __SMU_DUMMY_MAP 48 1.1 riastrad #define __SMU_DUMMY_MAP(type) #type 49 1.1 riastrad static const char* __smu_message_names[] = { 50 1.1 riastrad SMU_MESSAGE_TYPES 51 1.1 riastrad }; 52 1.1 riastrad 53 1.1 riastrad const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type) 54 1.1 riastrad { 55 1.1 riastrad if (type < 0 || type >= SMU_MSG_MAX_COUNT) 56 1.1 riastrad return "unknown smu message"; 57 1.1 riastrad return __smu_message_names[type]; 58 1.1 riastrad } 59 1.1 riastrad 60 1.1 riastrad #undef __SMU_DUMMY_MAP 61 1.1 riastrad #define __SMU_DUMMY_MAP(fea) #fea 62 1.1 riastrad static const char* __smu_feature_names[] = { 63 1.1 riastrad SMU_FEATURE_MASKS 64 1.1 riastrad }; 65 1.1 riastrad 66 1.1 riastrad const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature) 67 1.1 riastrad { 68 1.1 riastrad if (feature < 0 || feature >= SMU_FEATURE_COUNT) 69 1.1 riastrad return "unknown smu feature"; 70 1.1 riastrad return __smu_feature_names[feature]; 71 1.1 riastrad } 72 1.1 riastrad 73 1.1 riastrad size_t 
smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf) 74 1.1 riastrad { 75 1.1 riastrad size_t size = 0; 76 1.1 riastrad int ret = 0, i = 0; 77 1.1 riastrad uint32_t feature_mask[2] = { 0 }; 78 1.1 riastrad int32_t feature_index = 0; 79 1.1 riastrad uint32_t count = 0; 80 1.1 riastrad uint32_t sort_feature[SMU_FEATURE_COUNT]; 81 1.1 riastrad uint64_t hw_feature_count = 0; 82 1.1 riastrad 83 1.1 riastrad mutex_lock(&smu->mutex); 84 1.1 riastrad 85 1.1 riastrad ret = smu_feature_get_enabled_mask(smu, feature_mask, 2); 86 1.1 riastrad if (ret) 87 1.1 riastrad goto failed; 88 1.1 riastrad 89 1.5 riastrad size = sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n", 90 1.1 riastrad feature_mask[1], feature_mask[0]); 91 1.1 riastrad 92 1.1 riastrad for (i = 0; i < SMU_FEATURE_COUNT; i++) { 93 1.1 riastrad feature_index = smu_feature_get_index(smu, i); 94 1.1 riastrad if (feature_index < 0) 95 1.1 riastrad continue; 96 1.1 riastrad sort_feature[feature_index] = i; 97 1.1 riastrad hw_feature_count++; 98 1.1 riastrad } 99 1.1 riastrad 100 1.1 riastrad for (i = 0; i < hw_feature_count; i++) { 101 1.5 riastrad size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n", 102 1.1 riastrad count++, 103 1.1 riastrad smu_get_feature_name(smu, sort_feature[i]), 104 1.1 riastrad i, 105 1.1 riastrad !!smu_feature_is_enabled(smu, sort_feature[i]) ? 
106 1.1 riastrad "enabled" : "disabled"); 107 1.1 riastrad } 108 1.1 riastrad 109 1.1 riastrad failed: 110 1.1 riastrad mutex_unlock(&smu->mutex); 111 1.1 riastrad 112 1.1 riastrad return size; 113 1.1 riastrad } 114 1.1 riastrad 115 1.1 riastrad static int smu_feature_update_enable_state(struct smu_context *smu, 116 1.1 riastrad uint64_t feature_mask, 117 1.1 riastrad bool enabled) 118 1.1 riastrad { 119 1.1 riastrad struct smu_feature *feature = &smu->smu_feature; 120 1.1 riastrad uint32_t feature_low = 0, feature_high = 0; 121 1.1 riastrad int ret = 0; 122 1.1 riastrad 123 1.1 riastrad if (!smu->pm_enabled) 124 1.1 riastrad return ret; 125 1.1 riastrad 126 1.1 riastrad feature_low = (feature_mask >> 0 ) & 0xffffffff; 127 1.1 riastrad feature_high = (feature_mask >> 32) & 0xffffffff; 128 1.1 riastrad 129 1.1 riastrad if (enabled) { 130 1.1 riastrad ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow, 131 1.1 riastrad feature_low); 132 1.1 riastrad if (ret) 133 1.1 riastrad return ret; 134 1.1 riastrad ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh, 135 1.1 riastrad feature_high); 136 1.1 riastrad if (ret) 137 1.1 riastrad return ret; 138 1.1 riastrad } else { 139 1.1 riastrad ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow, 140 1.1 riastrad feature_low); 141 1.1 riastrad if (ret) 142 1.1 riastrad return ret; 143 1.1 riastrad ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh, 144 1.1 riastrad feature_high); 145 1.1 riastrad if (ret) 146 1.1 riastrad return ret; 147 1.1 riastrad } 148 1.1 riastrad 149 1.1 riastrad mutex_lock(&feature->mutex); 150 1.1 riastrad if (enabled) 151 1.1 riastrad bitmap_or(feature->enabled, feature->enabled, 152 1.1 riastrad (unsigned long *)(&feature_mask), SMU_FEATURE_MAX); 153 1.1 riastrad else 154 1.1 riastrad bitmap_andnot(feature->enabled, feature->enabled, 155 1.1 riastrad (unsigned long *)(&feature_mask), SMU_FEATURE_MAX); 156 1.1 riastrad 
mutex_unlock(&feature->mutex); 157 1.1 riastrad 158 1.1 riastrad return ret; 159 1.1 riastrad } 160 1.1 riastrad 161 1.1 riastrad int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask) 162 1.1 riastrad { 163 1.1 riastrad int ret = 0; 164 1.1 riastrad uint32_t feature_mask[2] = { 0 }; 165 1.1 riastrad uint64_t feature_2_enabled = 0; 166 1.1 riastrad uint64_t feature_2_disabled = 0; 167 1.1 riastrad uint64_t feature_enables = 0; 168 1.1 riastrad 169 1.1 riastrad mutex_lock(&smu->mutex); 170 1.1 riastrad 171 1.1 riastrad ret = smu_feature_get_enabled_mask(smu, feature_mask, 2); 172 1.1 riastrad if (ret) 173 1.1 riastrad goto out; 174 1.1 riastrad 175 1.1 riastrad feature_enables = ((uint64_t)feature_mask[1] << 32 | (uint64_t)feature_mask[0]); 176 1.1 riastrad 177 1.1 riastrad feature_2_enabled = ~feature_enables & new_mask; 178 1.1 riastrad feature_2_disabled = feature_enables & ~new_mask; 179 1.1 riastrad 180 1.1 riastrad if (feature_2_enabled) { 181 1.1 riastrad ret = smu_feature_update_enable_state(smu, feature_2_enabled, true); 182 1.1 riastrad if (ret) 183 1.1 riastrad goto out; 184 1.1 riastrad } 185 1.1 riastrad if (feature_2_disabled) { 186 1.1 riastrad ret = smu_feature_update_enable_state(smu, feature_2_disabled, false); 187 1.1 riastrad if (ret) 188 1.1 riastrad goto out; 189 1.1 riastrad } 190 1.1 riastrad 191 1.1 riastrad out: 192 1.1 riastrad mutex_unlock(&smu->mutex); 193 1.1 riastrad 194 1.1 riastrad return ret; 195 1.1 riastrad } 196 1.1 riastrad 197 1.1 riastrad int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version) 198 1.1 riastrad { 199 1.1 riastrad int ret = 0; 200 1.1 riastrad 201 1.1 riastrad if (!if_version && !smu_version) 202 1.1 riastrad return -EINVAL; 203 1.1 riastrad 204 1.1 riastrad if (if_version) { 205 1.1 riastrad ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion); 206 1.1 riastrad if (ret) 207 1.1 riastrad return ret; 208 1.1 riastrad 209 1.1 riastrad ret = 
smu_read_smc_arg(smu, if_version); 210 1.1 riastrad if (ret) 211 1.1 riastrad return ret; 212 1.1 riastrad } 213 1.1 riastrad 214 1.1 riastrad if (smu_version) { 215 1.1 riastrad ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion); 216 1.1 riastrad if (ret) 217 1.1 riastrad return ret; 218 1.1 riastrad 219 1.1 riastrad ret = smu_read_smc_arg(smu, smu_version); 220 1.1 riastrad if (ret) 221 1.1 riastrad return ret; 222 1.1 riastrad } 223 1.1 riastrad 224 1.1 riastrad return ret; 225 1.1 riastrad } 226 1.1 riastrad 227 1.1 riastrad int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, 228 1.1 riastrad uint32_t min, uint32_t max) 229 1.1 riastrad { 230 1.1 riastrad int ret = 0; 231 1.1 riastrad 232 1.1 riastrad if (min <= 0 && max <= 0) 233 1.1 riastrad return -EINVAL; 234 1.1 riastrad 235 1.1 riastrad if (!smu_clk_dpm_is_enabled(smu, clk_type)) 236 1.1 riastrad return 0; 237 1.1 riastrad 238 1.1 riastrad ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max); 239 1.1 riastrad return ret; 240 1.1 riastrad } 241 1.1 riastrad 242 1.1 riastrad int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, 243 1.1 riastrad uint32_t min, uint32_t max) 244 1.1 riastrad { 245 1.1 riastrad int ret = 0, clk_id = 0; 246 1.1 riastrad uint32_t param; 247 1.1 riastrad 248 1.1 riastrad if (min <= 0 && max <= 0) 249 1.1 riastrad return -EINVAL; 250 1.1 riastrad 251 1.1 riastrad if (!smu_clk_dpm_is_enabled(smu, clk_type)) 252 1.1 riastrad return 0; 253 1.1 riastrad 254 1.1 riastrad clk_id = smu_clk_get_index(smu, clk_type); 255 1.1 riastrad if (clk_id < 0) 256 1.1 riastrad return clk_id; 257 1.1 riastrad 258 1.1 riastrad if (max > 0) { 259 1.1 riastrad param = (uint32_t)((clk_id << 16) | (max & 0xffff)); 260 1.1 riastrad ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq, 261 1.1 riastrad param); 262 1.1 riastrad if (ret) 263 1.1 riastrad return ret; 264 1.1 riastrad } 265 1.1 riastrad 266 1.1 riastrad if (min > 0) { 
267 1.1 riastrad param = (uint32_t)((clk_id << 16) | (min & 0xffff)); 268 1.1 riastrad ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq, 269 1.1 riastrad param); 270 1.1 riastrad if (ret) 271 1.1 riastrad return ret; 272 1.1 riastrad } 273 1.1 riastrad 274 1.1 riastrad 275 1.1 riastrad return ret; 276 1.1 riastrad } 277 1.1 riastrad 278 1.1 riastrad int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, 279 1.1 riastrad uint32_t *min, uint32_t *max, bool lock_needed) 280 1.1 riastrad { 281 1.1 riastrad uint32_t clock_limit; 282 1.1 riastrad int ret = 0; 283 1.1 riastrad 284 1.1 riastrad if (!min && !max) 285 1.1 riastrad return -EINVAL; 286 1.1 riastrad 287 1.1 riastrad if (lock_needed) 288 1.1 riastrad mutex_lock(&smu->mutex); 289 1.1 riastrad 290 1.1 riastrad if (!smu_clk_dpm_is_enabled(smu, clk_type)) { 291 1.1 riastrad switch (clk_type) { 292 1.1 riastrad case SMU_MCLK: 293 1.1 riastrad case SMU_UCLK: 294 1.1 riastrad clock_limit = smu->smu_table.boot_values.uclk; 295 1.1 riastrad break; 296 1.1 riastrad case SMU_GFXCLK: 297 1.1 riastrad case SMU_SCLK: 298 1.1 riastrad clock_limit = smu->smu_table.boot_values.gfxclk; 299 1.1 riastrad break; 300 1.1 riastrad case SMU_SOCCLK: 301 1.1 riastrad clock_limit = smu->smu_table.boot_values.socclk; 302 1.1 riastrad break; 303 1.1 riastrad default: 304 1.1 riastrad clock_limit = 0; 305 1.1 riastrad break; 306 1.1 riastrad } 307 1.1 riastrad 308 1.1 riastrad /* clock in Mhz unit */ 309 1.1 riastrad if (min) 310 1.1 riastrad *min = clock_limit / 100; 311 1.1 riastrad if (max) 312 1.1 riastrad *max = clock_limit / 100; 313 1.1 riastrad } else { 314 1.1 riastrad /* 315 1.1 riastrad * Todo: Use each asic(ASIC_ppt funcs) control the callbacks exposed to the 316 1.1 riastrad * core driver and then have helpers for stuff that is common(SMU_v11_x | SMU_v12_x funcs). 
317 1.1 riastrad */ 318 1.1 riastrad ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max); 319 1.1 riastrad } 320 1.1 riastrad 321 1.1 riastrad if (lock_needed) 322 1.1 riastrad mutex_unlock(&smu->mutex); 323 1.1 riastrad 324 1.1 riastrad return ret; 325 1.1 riastrad } 326 1.1 riastrad 327 1.1 riastrad int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type, 328 1.1 riastrad uint16_t level, uint32_t *value) 329 1.1 riastrad { 330 1.1 riastrad int ret = 0, clk_id = 0; 331 1.1 riastrad uint32_t param; 332 1.1 riastrad 333 1.1 riastrad if (!value) 334 1.1 riastrad return -EINVAL; 335 1.1 riastrad 336 1.1 riastrad if (!smu_clk_dpm_is_enabled(smu, clk_type)) 337 1.1 riastrad return 0; 338 1.1 riastrad 339 1.1 riastrad clk_id = smu_clk_get_index(smu, clk_type); 340 1.1 riastrad if (clk_id < 0) 341 1.1 riastrad return clk_id; 342 1.1 riastrad 343 1.1 riastrad param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff)); 344 1.1 riastrad 345 1.1 riastrad ret = smu_send_smc_msg_with_param(smu,SMU_MSG_GetDpmFreqByIndex, 346 1.1 riastrad param); 347 1.1 riastrad if (ret) 348 1.1 riastrad return ret; 349 1.1 riastrad 350 1.1 riastrad ret = smu_read_smc_arg(smu, ¶m); 351 1.1 riastrad if (ret) 352 1.1 riastrad return ret; 353 1.1 riastrad 354 1.1 riastrad /* BIT31: 0 - Fine grained DPM, 1 - Dicrete DPM 355 1.1 riastrad * now, we un-support it */ 356 1.1 riastrad *value = param & 0x7fffffff; 357 1.1 riastrad 358 1.1 riastrad return ret; 359 1.1 riastrad } 360 1.1 riastrad 361 1.1 riastrad int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type, 362 1.1 riastrad uint32_t *value) 363 1.1 riastrad { 364 1.1 riastrad return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value); 365 1.1 riastrad } 366 1.1 riastrad 367 1.1 riastrad int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type, 368 1.1 riastrad uint32_t *min_value, uint32_t *max_value) 369 1.1 riastrad { 370 1.1 riastrad int ret = 0; 371 1.1 
riastrad uint32_t level_count = 0; 372 1.1 riastrad 373 1.1 riastrad if (!min_value && !max_value) 374 1.1 riastrad return -EINVAL; 375 1.1 riastrad 376 1.1 riastrad if (min_value) { 377 1.1 riastrad /* by default, level 0 clock value as min value */ 378 1.1 riastrad ret = smu_get_dpm_freq_by_index(smu, clk_type, 0, min_value); 379 1.1 riastrad if (ret) 380 1.1 riastrad return ret; 381 1.1 riastrad } 382 1.1 riastrad 383 1.1 riastrad if (max_value) { 384 1.1 riastrad ret = smu_get_dpm_level_count(smu, clk_type, &level_count); 385 1.1 riastrad if (ret) 386 1.1 riastrad return ret; 387 1.1 riastrad 388 1.1 riastrad ret = smu_get_dpm_freq_by_index(smu, clk_type, level_count - 1, max_value); 389 1.1 riastrad if (ret) 390 1.1 riastrad return ret; 391 1.1 riastrad } 392 1.1 riastrad 393 1.1 riastrad return ret; 394 1.1 riastrad } 395 1.1 riastrad 396 1.1 riastrad bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type) 397 1.1 riastrad { 398 1.1 riastrad enum smu_feature_mask feature_id = 0; 399 1.1 riastrad 400 1.1 riastrad switch (clk_type) { 401 1.1 riastrad case SMU_MCLK: 402 1.1 riastrad case SMU_UCLK: 403 1.1 riastrad feature_id = SMU_FEATURE_DPM_UCLK_BIT; 404 1.1 riastrad break; 405 1.1 riastrad case SMU_GFXCLK: 406 1.1 riastrad case SMU_SCLK: 407 1.1 riastrad feature_id = SMU_FEATURE_DPM_GFXCLK_BIT; 408 1.1 riastrad break; 409 1.1 riastrad case SMU_SOCCLK: 410 1.1 riastrad feature_id = SMU_FEATURE_DPM_SOCCLK_BIT; 411 1.1 riastrad break; 412 1.1 riastrad default: 413 1.1 riastrad return true; 414 1.1 riastrad } 415 1.1 riastrad 416 1.1 riastrad if(!smu_feature_is_enabled(smu, feature_id)) { 417 1.1 riastrad return false; 418 1.1 riastrad } 419 1.1 riastrad 420 1.1 riastrad return true; 421 1.1 riastrad } 422 1.1 riastrad 423 1.1 riastrad /** 424 1.1 riastrad * smu_dpm_set_power_gate - power gate/ungate the specific IP block 425 1.1 riastrad * 426 1.1 riastrad * @smu: smu_context pointer 427 1.1 riastrad * @block_type: the IP block to power 
gate/ungate 428 1.1 riastrad * @gate: to power gate if true, ungate otherwise 429 1.1 riastrad * 430 1.1 riastrad * This API uses no smu->mutex lock protection due to: 431 1.1 riastrad * 1. It is either called by other IP block(gfx/sdma/vcn/uvd/vce). 432 1.1 riastrad * This is guarded to be race condition free by the caller. 433 1.1 riastrad * 2. Or get called on user setting request of power_dpm_force_performance_level. 434 1.1 riastrad * Under this case, the smu->mutex lock protection is already enforced on 435 1.1 riastrad * the parent API smu_force_performance_level of the call path. 436 1.1 riastrad */ 437 1.1 riastrad int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type, 438 1.1 riastrad bool gate) 439 1.1 riastrad { 440 1.1 riastrad int ret = 0; 441 1.1 riastrad 442 1.1 riastrad switch (block_type) { 443 1.1 riastrad case AMD_IP_BLOCK_TYPE_UVD: 444 1.1 riastrad ret = smu_dpm_set_uvd_enable(smu, !gate); 445 1.1 riastrad break; 446 1.1 riastrad case AMD_IP_BLOCK_TYPE_VCE: 447 1.1 riastrad ret = smu_dpm_set_vce_enable(smu, !gate); 448 1.1 riastrad break; 449 1.1 riastrad case AMD_IP_BLOCK_TYPE_GFX: 450 1.1 riastrad ret = smu_gfx_off_control(smu, gate); 451 1.1 riastrad break; 452 1.1 riastrad case AMD_IP_BLOCK_TYPE_SDMA: 453 1.1 riastrad ret = smu_powergate_sdma(smu, gate); 454 1.1 riastrad break; 455 1.1 riastrad case AMD_IP_BLOCK_TYPE_JPEG: 456 1.1 riastrad ret = smu_dpm_set_jpeg_enable(smu, !gate); 457 1.1 riastrad break; 458 1.1 riastrad default: 459 1.1 riastrad break; 460 1.1 riastrad } 461 1.1 riastrad 462 1.1 riastrad return ret; 463 1.1 riastrad } 464 1.1 riastrad 465 1.1 riastrad int smu_get_power_num_states(struct smu_context *smu, 466 1.1 riastrad struct pp_states_info *state_info) 467 1.1 riastrad { 468 1.1 riastrad if (!state_info) 469 1.1 riastrad return -EINVAL; 470 1.1 riastrad 471 1.1 riastrad /* not support power state */ 472 1.1 riastrad memset(state_info, 0, sizeof(struct pp_states_info)); 473 1.1 riastrad 
state_info->nums = 1; 474 1.1 riastrad state_info->states[0] = POWER_STATE_TYPE_DEFAULT; 475 1.1 riastrad 476 1.1 riastrad return 0; 477 1.1 riastrad } 478 1.1 riastrad 479 1.1 riastrad int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor, 480 1.1 riastrad void *data, uint32_t *size) 481 1.1 riastrad { 482 1.1 riastrad struct smu_power_context *smu_power = &smu->smu_power; 483 1.1 riastrad struct smu_power_gate *power_gate = &smu_power->power_gate; 484 1.1 riastrad int ret = 0; 485 1.1 riastrad 486 1.1 riastrad if(!data || !size) 487 1.1 riastrad return -EINVAL; 488 1.1 riastrad 489 1.1 riastrad switch (sensor) { 490 1.1 riastrad case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK: 491 1.1 riastrad *((uint32_t *)data) = smu->pstate_sclk; 492 1.1 riastrad *size = 4; 493 1.1 riastrad break; 494 1.1 riastrad case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK: 495 1.1 riastrad *((uint32_t *)data) = smu->pstate_mclk; 496 1.1 riastrad *size = 4; 497 1.1 riastrad break; 498 1.1 riastrad case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK: 499 1.1 riastrad ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2); 500 1.1 riastrad *size = 8; 501 1.1 riastrad break; 502 1.1 riastrad case AMDGPU_PP_SENSOR_UVD_POWER: 503 1.1 riastrad *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0; 504 1.1 riastrad *size = 4; 505 1.1 riastrad break; 506 1.1 riastrad case AMDGPU_PP_SENSOR_VCE_POWER: 507 1.1 riastrad *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0; 508 1.1 riastrad *size = 4; 509 1.1 riastrad break; 510 1.1 riastrad case AMDGPU_PP_SENSOR_VCN_POWER_STATE: 511 1.1 riastrad *(uint32_t *)data = power_gate->vcn_gated ? 
0 : 1; 512 1.1 riastrad *size = 4; 513 1.1 riastrad break; 514 1.1 riastrad default: 515 1.1 riastrad ret = -EINVAL; 516 1.1 riastrad break; 517 1.1 riastrad } 518 1.1 riastrad 519 1.1 riastrad if (ret) 520 1.1 riastrad *size = 0; 521 1.1 riastrad 522 1.1 riastrad return ret; 523 1.1 riastrad } 524 1.1 riastrad 525 1.1 riastrad int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument, 526 1.1 riastrad void *table_data, bool drv2smu) 527 1.1 riastrad { 528 1.1 riastrad struct smu_table_context *smu_table = &smu->smu_table; 529 1.1 riastrad struct amdgpu_device *adev = smu->adev; 530 1.1 riastrad struct smu_table *table = &smu_table->driver_table; 531 1.1 riastrad int table_id = smu_table_get_index(smu, table_index); 532 1.1 riastrad uint32_t table_size; 533 1.1 riastrad int ret = 0; 534 1.1 riastrad 535 1.1 riastrad if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0) 536 1.1 riastrad return -EINVAL; 537 1.1 riastrad 538 1.1 riastrad table_size = smu_table->tables[table_index].size; 539 1.1 riastrad 540 1.1 riastrad if (drv2smu) { 541 1.1 riastrad memcpy(table->cpu_addr, table_data, table_size); 542 1.1 riastrad /* 543 1.1 riastrad * Flush hdp cache: to guard the content seen by 544 1.1 riastrad * GPU is consitent with CPU. 545 1.1 riastrad */ 546 1.1 riastrad amdgpu_asic_flush_hdp(adev, NULL); 547 1.1 riastrad } 548 1.1 riastrad 549 1.1 riastrad ret = smu_send_smc_msg_with_param(smu, drv2smu ? 
550 1.1 riastrad SMU_MSG_TransferTableDram2Smu : 551 1.1 riastrad SMU_MSG_TransferTableSmu2Dram, 552 1.1 riastrad table_id | ((argument & 0xFFFF) << 16)); 553 1.1 riastrad if (ret) 554 1.1 riastrad return ret; 555 1.1 riastrad 556 1.1 riastrad if (!drv2smu) { 557 1.1 riastrad amdgpu_asic_flush_hdp(adev, NULL); 558 1.1 riastrad memcpy(table_data, table->cpu_addr, table_size); 559 1.1 riastrad } 560 1.1 riastrad 561 1.1 riastrad return ret; 562 1.1 riastrad } 563 1.1 riastrad 564 1.1 riastrad bool is_support_sw_smu(struct amdgpu_device *adev) 565 1.1 riastrad { 566 1.1 riastrad if (adev->asic_type == CHIP_VEGA20) 567 1.1 riastrad return (amdgpu_dpm == 2) ? true : false; 568 1.1 riastrad else if (adev->asic_type >= CHIP_ARCTURUS) { 569 1.1 riastrad if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev)) 570 1.1 riastrad return false; 571 1.1 riastrad else 572 1.1 riastrad return true; 573 1.1 riastrad } else 574 1.1 riastrad return false; 575 1.1 riastrad } 576 1.1 riastrad 577 1.1 riastrad bool is_support_sw_smu_xgmi(struct amdgpu_device *adev) 578 1.1 riastrad { 579 1.1 riastrad if (!is_support_sw_smu(adev)) 580 1.1 riastrad return false; 581 1.1 riastrad 582 1.1 riastrad if (adev->asic_type == CHIP_VEGA20) 583 1.1 riastrad return true; 584 1.1 riastrad 585 1.1 riastrad return false; 586 1.1 riastrad } 587 1.1 riastrad 588 1.3 riastrad int smu_sys_get_pp_table(struct smu_context *smu, const void **table) 589 1.1 riastrad { 590 1.1 riastrad struct smu_table_context *smu_table = &smu->smu_table; 591 1.1 riastrad uint32_t powerplay_table_size; 592 1.1 riastrad 593 1.1 riastrad if (!smu_table->power_play_table && !smu_table->hardcode_pptable) 594 1.1 riastrad return -EINVAL; 595 1.1 riastrad 596 1.1 riastrad mutex_lock(&smu->mutex); 597 1.1 riastrad 598 1.1 riastrad if (smu_table->hardcode_pptable) 599 1.1 riastrad *table = smu_table->hardcode_pptable; 600 1.1 riastrad else 601 1.1 riastrad *table = smu_table->power_play_table; 602 1.1 riastrad 603 1.1 riastrad 
powerplay_table_size = smu_table->power_play_table_size; 604 1.1 riastrad 605 1.1 riastrad mutex_unlock(&smu->mutex); 606 1.1 riastrad 607 1.1 riastrad return powerplay_table_size; 608 1.1 riastrad } 609 1.1 riastrad 610 1.1 riastrad int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size) 611 1.1 riastrad { 612 1.1 riastrad struct smu_table_context *smu_table = &smu->smu_table; 613 1.1 riastrad ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf; 614 1.1 riastrad int ret = 0; 615 1.1 riastrad 616 1.1 riastrad if (!smu->pm_enabled) 617 1.1 riastrad return -EINVAL; 618 1.1 riastrad if (header->usStructureSize != size) { 619 1.1 riastrad pr_err("pp table size not matched !\n"); 620 1.1 riastrad return -EIO; 621 1.1 riastrad } 622 1.1 riastrad 623 1.1 riastrad mutex_lock(&smu->mutex); 624 1.1 riastrad if (!smu_table->hardcode_pptable) 625 1.1 riastrad smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL); 626 1.1 riastrad if (!smu_table->hardcode_pptable) { 627 1.1 riastrad ret = -ENOMEM; 628 1.1 riastrad goto failed; 629 1.1 riastrad } 630 1.1 riastrad 631 1.1 riastrad memcpy(smu_table->hardcode_pptable, buf, size); 632 1.1 riastrad smu_table->power_play_table = smu_table->hardcode_pptable; 633 1.1 riastrad smu_table->power_play_table_size = size; 634 1.1 riastrad 635 1.1 riastrad /* 636 1.1 riastrad * Special hw_fini action(for Navi1x, the DPMs disablement will be 637 1.1 riastrad * skipped) may be needed for custom pptable uploading. 
638 1.1 riastrad */ 639 1.1 riastrad smu->uploading_custom_pp_table = true; 640 1.1 riastrad 641 1.1 riastrad ret = smu_reset(smu); 642 1.1 riastrad if (ret) 643 1.1 riastrad pr_info("smu reset failed, ret = %d\n", ret); 644 1.1 riastrad 645 1.1 riastrad smu->uploading_custom_pp_table = false; 646 1.1 riastrad 647 1.1 riastrad failed: 648 1.1 riastrad mutex_unlock(&smu->mutex); 649 1.1 riastrad return ret; 650 1.1 riastrad } 651 1.1 riastrad 652 1.1 riastrad int smu_feature_init_dpm(struct smu_context *smu) 653 1.1 riastrad { 654 1.1 riastrad struct smu_feature *feature = &smu->smu_feature; 655 1.1 riastrad int ret = 0; 656 1.1 riastrad uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32]; 657 1.1 riastrad 658 1.1 riastrad if (!smu->pm_enabled) 659 1.1 riastrad return ret; 660 1.1 riastrad mutex_lock(&feature->mutex); 661 1.1 riastrad bitmap_zero(feature->allowed, SMU_FEATURE_MAX); 662 1.1 riastrad mutex_unlock(&feature->mutex); 663 1.1 riastrad 664 1.1 riastrad ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask, 665 1.1 riastrad SMU_FEATURE_MAX/32); 666 1.1 riastrad if (ret) 667 1.1 riastrad return ret; 668 1.1 riastrad 669 1.1 riastrad mutex_lock(&feature->mutex); 670 1.1 riastrad bitmap_or(feature->allowed, feature->allowed, 671 1.1 riastrad (unsigned long *)allowed_feature_mask, 672 1.1 riastrad feature->feature_num); 673 1.1 riastrad mutex_unlock(&feature->mutex); 674 1.1 riastrad 675 1.1 riastrad return ret; 676 1.1 riastrad } 677 1.1 riastrad 678 1.1 riastrad 679 1.1 riastrad int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask) 680 1.1 riastrad { 681 1.1 riastrad struct smu_feature *feature = &smu->smu_feature; 682 1.1 riastrad int feature_id; 683 1.1 riastrad int ret = 0; 684 1.1 riastrad 685 1.1 riastrad if (smu->is_apu) 686 1.1 riastrad return 1; 687 1.1 riastrad 688 1.1 riastrad feature_id = smu_feature_get_index(smu, mask); 689 1.1 riastrad if (feature_id < 0) 690 1.1 riastrad return 0; 691 1.1 riastrad 692 1.1 riastrad 
WARN_ON(feature_id > feature->feature_num); 693 1.1 riastrad 694 1.1 riastrad mutex_lock(&feature->mutex); 695 1.1 riastrad ret = test_bit(feature_id, feature->enabled); 696 1.1 riastrad mutex_unlock(&feature->mutex); 697 1.1 riastrad 698 1.1 riastrad return ret; 699 1.1 riastrad } 700 1.1 riastrad 701 1.1 riastrad int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask, 702 1.1 riastrad bool enable) 703 1.1 riastrad { 704 1.1 riastrad struct smu_feature *feature = &smu->smu_feature; 705 1.1 riastrad int feature_id; 706 1.1 riastrad 707 1.1 riastrad feature_id = smu_feature_get_index(smu, mask); 708 1.1 riastrad if (feature_id < 0) 709 1.1 riastrad return -EINVAL; 710 1.1 riastrad 711 1.1 riastrad WARN_ON(feature_id > feature->feature_num); 712 1.1 riastrad 713 1.1 riastrad return smu_feature_update_enable_state(smu, 714 1.1 riastrad 1ULL << feature_id, 715 1.1 riastrad enable); 716 1.1 riastrad } 717 1.1 riastrad 718 1.1 riastrad int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask) 719 1.1 riastrad { 720 1.1 riastrad struct smu_feature *feature = &smu->smu_feature; 721 1.1 riastrad int feature_id; 722 1.1 riastrad int ret = 0; 723 1.1 riastrad 724 1.1 riastrad feature_id = smu_feature_get_index(smu, mask); 725 1.1 riastrad if (feature_id < 0) 726 1.1 riastrad return 0; 727 1.1 riastrad 728 1.1 riastrad WARN_ON(feature_id > feature->feature_num); 729 1.1 riastrad 730 1.1 riastrad mutex_lock(&feature->mutex); 731 1.1 riastrad ret = test_bit(feature_id, feature->supported); 732 1.1 riastrad mutex_unlock(&feature->mutex); 733 1.1 riastrad 734 1.1 riastrad return ret; 735 1.1 riastrad } 736 1.1 riastrad 737 1.1 riastrad int smu_feature_set_supported(struct smu_context *smu, 738 1.1 riastrad enum smu_feature_mask mask, 739 1.1 riastrad bool enable) 740 1.1 riastrad { 741 1.1 riastrad struct smu_feature *feature = &smu->smu_feature; 742 1.1 riastrad int feature_id; 743 1.1 riastrad int ret = 0; 744 1.1 riastrad 745 1.1 
riastrad feature_id = smu_feature_get_index(smu, mask); 746 1.1 riastrad if (feature_id < 0) 747 1.1 riastrad return -EINVAL; 748 1.1 riastrad 749 1.1 riastrad WARN_ON(feature_id > feature->feature_num); 750 1.1 riastrad 751 1.1 riastrad mutex_lock(&feature->mutex); 752 1.1 riastrad if (enable) 753 1.1 riastrad test_and_set_bit(feature_id, feature->supported); 754 1.1 riastrad else 755 1.1 riastrad test_and_clear_bit(feature_id, feature->supported); 756 1.1 riastrad mutex_unlock(&feature->mutex); 757 1.1 riastrad 758 1.1 riastrad return ret; 759 1.1 riastrad } 760 1.1 riastrad 761 1.1 riastrad static int smu_set_funcs(struct amdgpu_device *adev) 762 1.1 riastrad { 763 1.1 riastrad struct smu_context *smu = &adev->smu; 764 1.1 riastrad 765 1.1 riastrad if (adev->pm.pp_feature & PP_OVERDRIVE_MASK) 766 1.1 riastrad smu->od_enabled = true; 767 1.1 riastrad 768 1.1 riastrad switch (adev->asic_type) { 769 1.1 riastrad case CHIP_VEGA20: 770 1.1 riastrad adev->pm.pp_feature &= ~PP_GFXOFF_MASK; 771 1.1 riastrad vega20_set_ppt_funcs(smu); 772 1.1 riastrad break; 773 1.1 riastrad case CHIP_NAVI10: 774 1.1 riastrad case CHIP_NAVI14: 775 1.1 riastrad case CHIP_NAVI12: 776 1.1 riastrad navi10_set_ppt_funcs(smu); 777 1.1 riastrad break; 778 1.1 riastrad case CHIP_ARCTURUS: 779 1.1 riastrad adev->pm.pp_feature &= ~PP_GFXOFF_MASK; 780 1.1 riastrad arcturus_set_ppt_funcs(smu); 781 1.1 riastrad /* OD is not supported on Arcturus */ 782 1.1 riastrad smu->od_enabled =false; 783 1.1 riastrad break; 784 1.1 riastrad case CHIP_RENOIR: 785 1.1 riastrad renoir_set_ppt_funcs(smu); 786 1.1 riastrad break; 787 1.1 riastrad default: 788 1.1 riastrad return -EINVAL; 789 1.1 riastrad } 790 1.1 riastrad 791 1.1 riastrad return 0; 792 1.1 riastrad } 793 1.1 riastrad 794 1.1 riastrad static int smu_early_init(void *handle) 795 1.1 riastrad { 796 1.1 riastrad struct amdgpu_device *adev = (struct amdgpu_device *)handle; 797 1.1 riastrad struct smu_context *smu = &adev->smu; 798 1.1 riastrad 799 1.1 
riastrad smu->adev = adev; 800 1.1 riastrad smu->pm_enabled = !!amdgpu_dpm; 801 1.1 riastrad smu->is_apu = false; 802 1.1 riastrad mutex_init(&smu->mutex); 803 1.1 riastrad 804 1.1 riastrad return smu_set_funcs(adev); 805 1.1 riastrad } 806 1.1 riastrad 807 1.1 riastrad static int smu_late_init(void *handle) 808 1.1 riastrad { 809 1.1 riastrad struct amdgpu_device *adev = (struct amdgpu_device *)handle; 810 1.1 riastrad struct smu_context *smu = &adev->smu; 811 1.1 riastrad 812 1.1 riastrad if (!smu->pm_enabled) 813 1.1 riastrad return 0; 814 1.1 riastrad 815 1.1 riastrad smu_handle_task(&adev->smu, 816 1.1 riastrad smu->smu_dpm.dpm_level, 817 1.1 riastrad AMD_PP_TASK_COMPLETE_INIT, 818 1.1 riastrad false); 819 1.1 riastrad 820 1.1 riastrad return 0; 821 1.1 riastrad } 822 1.1 riastrad 823 1.1 riastrad int smu_get_atom_data_table(struct smu_context *smu, uint32_t table, 824 1.1 riastrad uint16_t *size, uint8_t *frev, uint8_t *crev, 825 1.1 riastrad uint8_t **addr) 826 1.1 riastrad { 827 1.1 riastrad struct amdgpu_device *adev = smu->adev; 828 1.1 riastrad uint16_t data_start; 829 1.1 riastrad 830 1.1 riastrad if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table, 831 1.1 riastrad size, frev, crev, &data_start)) 832 1.1 riastrad return -EINVAL; 833 1.1 riastrad 834 1.1 riastrad *addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start; 835 1.1 riastrad 836 1.1 riastrad return 0; 837 1.1 riastrad } 838 1.1 riastrad 839 1.1 riastrad static int smu_initialize_pptable(struct smu_context *smu) 840 1.1 riastrad { 841 1.1 riastrad /* TODO */ 842 1.1 riastrad return 0; 843 1.1 riastrad } 844 1.1 riastrad 845 1.1 riastrad static int smu_smc_table_sw_init(struct smu_context *smu) 846 1.1 riastrad { 847 1.1 riastrad int ret; 848 1.1 riastrad 849 1.1 riastrad ret = smu_initialize_pptable(smu); 850 1.1 riastrad if (ret) { 851 1.1 riastrad pr_err("Failed to init smu_initialize_pptable!\n"); 852 1.1 riastrad return ret; 853 1.1 riastrad } 854 1.1 riastrad 
855 1.1 riastrad /** 856 1.1 riastrad * Create smu_table structure, and init smc tables such as 857 1.1 riastrad * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, and etc. 858 1.1 riastrad */ 859 1.1 riastrad ret = smu_init_smc_tables(smu); 860 1.1 riastrad if (ret) { 861 1.1 riastrad pr_err("Failed to init smc tables!\n"); 862 1.1 riastrad return ret; 863 1.1 riastrad } 864 1.1 riastrad 865 1.1 riastrad /** 866 1.1 riastrad * Create smu_power_context structure, and allocate smu_dpm_context and 867 1.1 riastrad * context size to fill the smu_power_context data. 868 1.1 riastrad */ 869 1.1 riastrad ret = smu_init_power(smu); 870 1.1 riastrad if (ret) { 871 1.1 riastrad pr_err("Failed to init smu_init_power!\n"); 872 1.1 riastrad return ret; 873 1.1 riastrad } 874 1.1 riastrad 875 1.1 riastrad return 0; 876 1.1 riastrad } 877 1.1 riastrad 878 1.1 riastrad static int smu_smc_table_sw_fini(struct smu_context *smu) 879 1.1 riastrad { 880 1.1 riastrad int ret; 881 1.1 riastrad 882 1.1 riastrad ret = smu_fini_smc_tables(smu); 883 1.1 riastrad if (ret) { 884 1.1 riastrad pr_err("Failed to smu_fini_smc_tables!\n"); 885 1.1 riastrad return ret; 886 1.1 riastrad } 887 1.1 riastrad 888 1.1 riastrad return 0; 889 1.1 riastrad } 890 1.1 riastrad 891 1.1 riastrad static int smu_sw_init(void *handle) 892 1.1 riastrad { 893 1.1 riastrad struct amdgpu_device *adev = (struct amdgpu_device *)handle; 894 1.1 riastrad struct smu_context *smu = &adev->smu; 895 1.1 riastrad int ret; 896 1.1 riastrad 897 1.1 riastrad smu->pool_size = adev->pm.smu_prv_buffer_size; 898 1.1 riastrad smu->smu_feature.feature_num = SMU_FEATURE_MAX; 899 1.1 riastrad mutex_init(&smu->smu_feature.mutex); 900 1.1 riastrad bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX); 901 1.1 riastrad bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX); 902 1.1 riastrad bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX); 903 1.1 riastrad 904 1.1 riastrad mutex_init(&smu->smu_baco.mutex); 905 1.1 riastrad 
smu->smu_baco.state = SMU_BACO_STATE_EXIT; 906 1.1 riastrad smu->smu_baco.platform_support = false; 907 1.1 riastrad 908 1.1 riastrad mutex_init(&smu->sensor_lock); 909 1.1 riastrad mutex_init(&smu->metrics_lock); 910 1.1 riastrad 911 1.1 riastrad smu->watermarks_bitmap = 0; 912 1.1 riastrad smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; 913 1.1 riastrad smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; 914 1.1 riastrad 915 1.1 riastrad smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT]; 916 1.1 riastrad smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0; 917 1.1 riastrad smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1; 918 1.1 riastrad smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2; 919 1.1 riastrad smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3; 920 1.1 riastrad smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4; 921 1.1 riastrad smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5; 922 1.1 riastrad smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6; 923 1.1 riastrad 924 1.1 riastrad smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; 925 1.1 riastrad smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D; 926 1.1 riastrad smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING; 927 1.1 riastrad smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO; 928 1.1 riastrad smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR; 929 1.1 riastrad smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE; 930 1.1 riastrad smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM; 931 1.1 riastrad smu->display_config = &adev->pm.pm_display_cfg; 932 1.1 riastrad 933 1.1 riastrad smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO; 934 1.1 riastrad smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO; 935 1.1 riastrad ret = smu_init_microcode(smu); 936 1.1 riastrad if (ret) { 937 1.1 riastrad pr_err("Failed to load smu firmware!\n"); 
938 1.1 riastrad return ret; 939 1.1 riastrad } 940 1.1 riastrad 941 1.1 riastrad ret = smu_smc_table_sw_init(smu); 942 1.1 riastrad if (ret) { 943 1.1 riastrad pr_err("Failed to sw init smc table!\n"); 944 1.1 riastrad return ret; 945 1.1 riastrad } 946 1.1 riastrad 947 1.1 riastrad ret = smu_register_irq_handler(smu); 948 1.1 riastrad if (ret) { 949 1.1 riastrad pr_err("Failed to register smc irq handler!\n"); 950 1.1 riastrad return ret; 951 1.1 riastrad } 952 1.1 riastrad 953 1.1 riastrad return 0; 954 1.1 riastrad } 955 1.1 riastrad 956 1.1 riastrad static int smu_sw_fini(void *handle) 957 1.1 riastrad { 958 1.1 riastrad struct amdgpu_device *adev = (struct amdgpu_device *)handle; 959 1.1 riastrad struct smu_context *smu = &adev->smu; 960 1.1 riastrad int ret; 961 1.1 riastrad 962 1.1 riastrad kfree(smu->irq_source); 963 1.1 riastrad smu->irq_source = NULL; 964 1.1 riastrad 965 1.1 riastrad ret = smu_smc_table_sw_fini(smu); 966 1.1 riastrad if (ret) { 967 1.1 riastrad pr_err("Failed to sw fini smc table!\n"); 968 1.1 riastrad return ret; 969 1.1 riastrad } 970 1.1 riastrad 971 1.1 riastrad ret = smu_fini_power(smu); 972 1.1 riastrad if (ret) { 973 1.1 riastrad pr_err("Failed to init smu_fini_power!\n"); 974 1.1 riastrad return ret; 975 1.1 riastrad } 976 1.1 riastrad 977 1.4 riastrad mutex_destroy(&smu->metrics_lock); 978 1.4 riastrad mutex_destroy(&smu->sensor_lock); 979 1.4 riastrad mutex_destroy(&smu->smu_baco.mutex); 980 1.4 riastrad mutex_destroy(&smu->smu_feature.mutex); 981 1.4 riastrad mutex_destroy(&smu->mutex); 982 1.4 riastrad 983 1.1 riastrad return 0; 984 1.1 riastrad } 985 1.1 riastrad 986 1.1 riastrad static int smu_init_fb_allocations(struct smu_context *smu) 987 1.1 riastrad { 988 1.1 riastrad struct amdgpu_device *adev = smu->adev; 989 1.1 riastrad struct smu_table_context *smu_table = &smu->smu_table; 990 1.1 riastrad struct smu_table *tables = smu_table->tables; 991 1.1 riastrad struct smu_table *driver_table = &(smu_table->driver_table); 
992 1.1 riastrad uint32_t max_table_size = 0; 993 1.1 riastrad int ret, i; 994 1.1 riastrad 995 1.1 riastrad /* VRAM allocation for tool table */ 996 1.1 riastrad if (tables[SMU_TABLE_PMSTATUSLOG].size) { 997 1.1 riastrad ret = amdgpu_bo_create_kernel(adev, 998 1.1 riastrad tables[SMU_TABLE_PMSTATUSLOG].size, 999 1.1 riastrad tables[SMU_TABLE_PMSTATUSLOG].align, 1000 1.1 riastrad tables[SMU_TABLE_PMSTATUSLOG].domain, 1001 1.1 riastrad &tables[SMU_TABLE_PMSTATUSLOG].bo, 1002 1.1 riastrad &tables[SMU_TABLE_PMSTATUSLOG].mc_address, 1003 1.1 riastrad &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr); 1004 1.1 riastrad if (ret) { 1005 1.1 riastrad pr_err("VRAM allocation for tool table failed!\n"); 1006 1.1 riastrad return ret; 1007 1.1 riastrad } 1008 1.1 riastrad } 1009 1.1 riastrad 1010 1.1 riastrad /* VRAM allocation for driver table */ 1011 1.1 riastrad for (i = 0; i < SMU_TABLE_COUNT; i++) { 1012 1.1 riastrad if (tables[i].size == 0) 1013 1.1 riastrad continue; 1014 1.1 riastrad 1015 1.1 riastrad if (i == SMU_TABLE_PMSTATUSLOG) 1016 1.1 riastrad continue; 1017 1.1 riastrad 1018 1.1 riastrad if (max_table_size < tables[i].size) 1019 1.1 riastrad max_table_size = tables[i].size; 1020 1.1 riastrad } 1021 1.1 riastrad 1022 1.1 riastrad driver_table->size = max_table_size; 1023 1.1 riastrad driver_table->align = PAGE_SIZE; 1024 1.1 riastrad driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM; 1025 1.1 riastrad 1026 1.1 riastrad ret = amdgpu_bo_create_kernel(adev, 1027 1.1 riastrad driver_table->size, 1028 1.1 riastrad driver_table->align, 1029 1.1 riastrad driver_table->domain, 1030 1.1 riastrad &driver_table->bo, 1031 1.1 riastrad &driver_table->mc_address, 1032 1.1 riastrad &driver_table->cpu_addr); 1033 1.1 riastrad if (ret) { 1034 1.1 riastrad pr_err("VRAM allocation for driver table failed!\n"); 1035 1.1 riastrad if (tables[SMU_TABLE_PMSTATUSLOG].mc_address) 1036 1.1 riastrad amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo, 1037 1.1 riastrad 
&tables[SMU_TABLE_PMSTATUSLOG].mc_address, 1038 1.1 riastrad &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr); 1039 1.1 riastrad } 1040 1.1 riastrad 1041 1.1 riastrad return ret; 1042 1.1 riastrad } 1043 1.1 riastrad 1044 1.1 riastrad static int smu_fini_fb_allocations(struct smu_context *smu) 1045 1.1 riastrad { 1046 1.1 riastrad struct smu_table_context *smu_table = &smu->smu_table; 1047 1.1 riastrad struct smu_table *tables = smu_table->tables; 1048 1.1 riastrad struct smu_table *driver_table = &(smu_table->driver_table); 1049 1.1 riastrad 1050 1.1 riastrad if (!tables) 1051 1.1 riastrad return 0; 1052 1.1 riastrad 1053 1.1 riastrad if (tables[SMU_TABLE_PMSTATUSLOG].mc_address) 1054 1.1 riastrad amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo, 1055 1.1 riastrad &tables[SMU_TABLE_PMSTATUSLOG].mc_address, 1056 1.1 riastrad &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr); 1057 1.1 riastrad 1058 1.1 riastrad amdgpu_bo_free_kernel(&driver_table->bo, 1059 1.1 riastrad &driver_table->mc_address, 1060 1.1 riastrad &driver_table->cpu_addr); 1061 1.1 riastrad 1062 1.1 riastrad return 0; 1063 1.1 riastrad } 1064 1.1 riastrad 1065 1.1 riastrad static int smu_smc_table_hw_init(struct smu_context *smu, 1066 1.1 riastrad bool initialize) 1067 1.1 riastrad { 1068 1.1 riastrad struct amdgpu_device *adev = smu->adev; 1069 1.1 riastrad int ret; 1070 1.1 riastrad 1071 1.1 riastrad if (smu_is_dpm_running(smu) && adev->in_suspend) { 1072 1.1 riastrad pr_info("dpm has been enabled\n"); 1073 1.1 riastrad return 0; 1074 1.1 riastrad } 1075 1.1 riastrad 1076 1.1 riastrad if (adev->asic_type != CHIP_ARCTURUS) { 1077 1.1 riastrad ret = smu_init_display_count(smu, 0); 1078 1.1 riastrad if (ret) 1079 1.1 riastrad return ret; 1080 1.1 riastrad } 1081 1.1 riastrad 1082 1.1 riastrad if (initialize) { 1083 1.1 riastrad /* get boot_values from vbios to set revision, gfxclk, and etc. 
*/ 1084 1.1 riastrad ret = smu_get_vbios_bootup_values(smu); 1085 1.1 riastrad if (ret) 1086 1.1 riastrad return ret; 1087 1.1 riastrad 1088 1.1 riastrad ret = smu_setup_pptable(smu); 1089 1.1 riastrad if (ret) 1090 1.1 riastrad return ret; 1091 1.1 riastrad 1092 1.1 riastrad ret = smu_get_clk_info_from_vbios(smu); 1093 1.1 riastrad if (ret) 1094 1.1 riastrad return ret; 1095 1.1 riastrad 1096 1.1 riastrad /* 1097 1.1 riastrad * check if the format_revision in vbios is up to pptable header 1098 1.1 riastrad * version, and the structure size is not 0. 1099 1.1 riastrad */ 1100 1.1 riastrad ret = smu_check_pptable(smu); 1101 1.1 riastrad if (ret) 1102 1.1 riastrad return ret; 1103 1.1 riastrad 1104 1.1 riastrad /* 1105 1.1 riastrad * allocate vram bos to store smc table contents. 1106 1.1 riastrad */ 1107 1.1 riastrad ret = smu_init_fb_allocations(smu); 1108 1.1 riastrad if (ret) 1109 1.1 riastrad return ret; 1110 1.1 riastrad 1111 1.1 riastrad /* 1112 1.1 riastrad * Parse pptable format and fill PPTable_t smc_pptable to 1113 1.1 riastrad * smu_table_context structure. And read the smc_dpm_table from vbios, 1114 1.1 riastrad * then fill it into smc_pptable. 1115 1.1 riastrad */ 1116 1.1 riastrad ret = smu_parse_pptable(smu); 1117 1.1 riastrad if (ret) 1118 1.1 riastrad return ret; 1119 1.1 riastrad 1120 1.1 riastrad /* 1121 1.1 riastrad * Send msg GetDriverIfVersion to check if the return value is equal 1122 1.1 riastrad * with DRIVER_IF_VERSION of smc header. 
1123 1.1 riastrad */ 1124 1.1 riastrad ret = smu_check_fw_version(smu); 1125 1.1 riastrad if (ret) 1126 1.1 riastrad return ret; 1127 1.1 riastrad } 1128 1.1 riastrad 1129 1.1 riastrad /* smu_dump_pptable(smu); */ 1130 1.1 riastrad if (!amdgpu_sriov_vf(adev)) { 1131 1.1 riastrad ret = smu_set_driver_table_location(smu); 1132 1.1 riastrad if (ret) 1133 1.1 riastrad return ret; 1134 1.1 riastrad 1135 1.1 riastrad /* 1136 1.1 riastrad * Copy pptable bo in the vram to smc with SMU MSGs such as 1137 1.1 riastrad * SetDriverDramAddr and TransferTableDram2Smu. 1138 1.1 riastrad */ 1139 1.1 riastrad ret = smu_write_pptable(smu); 1140 1.1 riastrad if (ret) 1141 1.1 riastrad return ret; 1142 1.1 riastrad 1143 1.1 riastrad /* issue Run*Btc msg */ 1144 1.1 riastrad ret = smu_run_btc(smu); 1145 1.1 riastrad if (ret) 1146 1.1 riastrad return ret; 1147 1.1 riastrad ret = smu_feature_set_allowed_mask(smu); 1148 1.1 riastrad if (ret) 1149 1.1 riastrad return ret; 1150 1.1 riastrad 1151 1.1 riastrad ret = smu_system_features_control(smu, true); 1152 1.1 riastrad if (ret) 1153 1.1 riastrad return ret; 1154 1.1 riastrad 1155 1.1 riastrad if (adev->asic_type == CHIP_NAVI10) { 1156 1.1 riastrad if ((adev->pdev->device == 0x731f && (adev->pdev->revision == 0xc2 || 1157 1.1 riastrad adev->pdev->revision == 0xc3 || 1158 1.1 riastrad adev->pdev->revision == 0xca || 1159 1.1 riastrad adev->pdev->revision == 0xcb)) || 1160 1.1 riastrad (adev->pdev->device == 0x66af && (adev->pdev->revision == 0xf3 || 1161 1.1 riastrad adev->pdev->revision == 0xf4 || 1162 1.1 riastrad adev->pdev->revision == 0xf5 || 1163 1.1 riastrad adev->pdev->revision == 0xf6))) { 1164 1.1 riastrad ret = smu_disable_umc_cdr_12gbps_workaround(smu); 1165 1.1 riastrad if (ret) { 1166 1.1 riastrad pr_err("Workaround failed to disable UMC CDR feature on 12Gbps SKU!\n"); 1167 1.1 riastrad return ret; 1168 1.1 riastrad } 1169 1.1 riastrad } 1170 1.1 riastrad } 1171 1.1 riastrad } 1172 1.1 riastrad if (adev->asic_type != 
CHIP_ARCTURUS) { 1173 1.1 riastrad ret = smu_notify_display_change(smu); 1174 1.1 riastrad if (ret) 1175 1.1 riastrad return ret; 1176 1.1 riastrad 1177 1.1 riastrad /* 1178 1.1 riastrad * Set min deep sleep dce fclk with bootup value from vbios via 1179 1.1 riastrad * SetMinDeepSleepDcefclk MSG. 1180 1.1 riastrad */ 1181 1.1 riastrad ret = smu_set_min_dcef_deep_sleep(smu); 1182 1.1 riastrad if (ret) 1183 1.1 riastrad return ret; 1184 1.1 riastrad } 1185 1.1 riastrad 1186 1.1 riastrad /* 1187 1.1 riastrad * Set initialized values (get from vbios) to dpm tables context such as 1188 1.1 riastrad * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each 1189 1.1 riastrad * type of clks. 1190 1.1 riastrad */ 1191 1.1 riastrad if (initialize) { 1192 1.1 riastrad ret = smu_populate_smc_tables(smu); 1193 1.1 riastrad if (ret) 1194 1.1 riastrad return ret; 1195 1.1 riastrad 1196 1.1 riastrad ret = smu_init_max_sustainable_clocks(smu); 1197 1.1 riastrad if (ret) 1198 1.1 riastrad return ret; 1199 1.1 riastrad } 1200 1.1 riastrad 1201 1.1 riastrad if (adev->asic_type != CHIP_ARCTURUS) { 1202 1.1 riastrad ret = smu_override_pcie_parameters(smu); 1203 1.1 riastrad if (ret) 1204 1.1 riastrad return ret; 1205 1.1 riastrad } 1206 1.1 riastrad 1207 1.1 riastrad ret = smu_set_default_od_settings(smu, initialize); 1208 1.1 riastrad if (ret) 1209 1.1 riastrad return ret; 1210 1.1 riastrad 1211 1.1 riastrad if (initialize) { 1212 1.1 riastrad ret = smu_populate_umd_state_clk(smu); 1213 1.1 riastrad if (ret) 1214 1.1 riastrad return ret; 1215 1.1 riastrad 1216 1.1 riastrad ret = smu_get_power_limit(smu, &smu->default_power_limit, false, false); 1217 1.1 riastrad if (ret) 1218 1.1 riastrad return ret; 1219 1.1 riastrad } 1220 1.1 riastrad 1221 1.1 riastrad /* 1222 1.1 riastrad * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools. 
1223 1.1 riastrad */ 1224 1.1 riastrad if (!amdgpu_sriov_vf(adev)) { 1225 1.1 riastrad ret = smu_set_tool_table_location(smu); 1226 1.1 riastrad } 1227 1.1 riastrad if (!smu_is_dpm_running(smu)) 1228 1.1 riastrad pr_info("dpm has been disabled\n"); 1229 1.1 riastrad 1230 1.1 riastrad return ret; 1231 1.1 riastrad } 1232 1.1 riastrad 1233 1.1 riastrad /** 1234 1.1 riastrad * smu_alloc_memory_pool - allocate memory pool in the system memory 1235 1.1 riastrad * 1236 1.1 riastrad * @smu: amdgpu_device pointer 1237 1.1 riastrad * 1238 1.1 riastrad * This memory pool will be used for SMC use and msg SetSystemVirtualDramAddr 1239 1.1 riastrad * and DramLogSetDramAddr can notify it changed. 1240 1.1 riastrad * 1241 1.1 riastrad * Returns 0 on success, error on failure. 1242 1.1 riastrad */ 1243 1.1 riastrad static int smu_alloc_memory_pool(struct smu_context *smu) 1244 1.1 riastrad { 1245 1.1 riastrad struct amdgpu_device *adev = smu->adev; 1246 1.1 riastrad struct smu_table_context *smu_table = &smu->smu_table; 1247 1.1 riastrad struct smu_table *memory_pool = &smu_table->memory_pool; 1248 1.1 riastrad uint64_t pool_size = smu->pool_size; 1249 1.1 riastrad int ret = 0; 1250 1.1 riastrad 1251 1.1 riastrad if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO) 1252 1.1 riastrad return ret; 1253 1.1 riastrad 1254 1.1 riastrad memory_pool->size = pool_size; 1255 1.1 riastrad memory_pool->align = PAGE_SIZE; 1256 1.1 riastrad memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT; 1257 1.1 riastrad 1258 1.1 riastrad switch (pool_size) { 1259 1.1 riastrad case SMU_MEMORY_POOL_SIZE_256_MB: 1260 1.1 riastrad case SMU_MEMORY_POOL_SIZE_512_MB: 1261 1.1 riastrad case SMU_MEMORY_POOL_SIZE_1_GB: 1262 1.1 riastrad case SMU_MEMORY_POOL_SIZE_2_GB: 1263 1.1 riastrad ret = amdgpu_bo_create_kernel(adev, 1264 1.1 riastrad memory_pool->size, 1265 1.1 riastrad memory_pool->align, 1266 1.1 riastrad memory_pool->domain, 1267 1.1 riastrad &memory_pool->bo, 1268 1.1 riastrad &memory_pool->mc_address, 1269 1.1 riastrad 
&memory_pool->cpu_addr); 1270 1.1 riastrad break; 1271 1.1 riastrad default: 1272 1.1 riastrad break; 1273 1.1 riastrad } 1274 1.1 riastrad 1275 1.1 riastrad return ret; 1276 1.1 riastrad } 1277 1.1 riastrad 1278 1.1 riastrad static int smu_free_memory_pool(struct smu_context *smu) 1279 1.1 riastrad { 1280 1.1 riastrad struct smu_table_context *smu_table = &smu->smu_table; 1281 1.1 riastrad struct smu_table *memory_pool = &smu_table->memory_pool; 1282 1.1 riastrad 1283 1.1 riastrad if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO) 1284 1.1 riastrad return 0; 1285 1.1 riastrad 1286 1.1 riastrad amdgpu_bo_free_kernel(&memory_pool->bo, 1287 1.1 riastrad &memory_pool->mc_address, 1288 1.1 riastrad &memory_pool->cpu_addr); 1289 1.1 riastrad 1290 1.1 riastrad memset(memory_pool, 0, sizeof(struct smu_table)); 1291 1.1 riastrad 1292 1.1 riastrad return 0; 1293 1.1 riastrad } 1294 1.1 riastrad 1295 1.1 riastrad static int smu_start_smc_engine(struct smu_context *smu) 1296 1.1 riastrad { 1297 1.1 riastrad struct amdgpu_device *adev = smu->adev; 1298 1.1 riastrad int ret = 0; 1299 1.1 riastrad 1300 1.1 riastrad if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { 1301 1.1 riastrad if (adev->asic_type < CHIP_NAVI10) { 1302 1.1 riastrad if (smu->ppt_funcs->load_microcode) { 1303 1.1 riastrad ret = smu->ppt_funcs->load_microcode(smu); 1304 1.1 riastrad if (ret) 1305 1.1 riastrad return ret; 1306 1.1 riastrad } 1307 1.1 riastrad } 1308 1.1 riastrad } 1309 1.1 riastrad 1310 1.1 riastrad if (smu->ppt_funcs->check_fw_status) { 1311 1.1 riastrad ret = smu->ppt_funcs->check_fw_status(smu); 1312 1.1 riastrad if (ret) 1313 1.1 riastrad pr_err("SMC is not ready\n"); 1314 1.1 riastrad } 1315 1.1 riastrad 1316 1.1 riastrad return ret; 1317 1.1 riastrad } 1318 1.1 riastrad 1319 1.1 riastrad static int smu_hw_init(void *handle) 1320 1.1 riastrad { 1321 1.1 riastrad int ret; 1322 1.1 riastrad struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1323 1.1 riastrad struct smu_context 
*smu = &adev->smu; 1324 1.1 riastrad 1325 1.1 riastrad ret = smu_start_smc_engine(smu); 1326 1.1 riastrad if (ret) { 1327 1.1 riastrad pr_err("SMU is not ready yet!\n"); 1328 1.1 riastrad return ret; 1329 1.1 riastrad } 1330 1.1 riastrad 1331 1.1 riastrad if (smu->is_apu) { 1332 1.1 riastrad smu_powergate_sdma(&adev->smu, false); 1333 1.1 riastrad smu_powergate_vcn(&adev->smu, false); 1334 1.1 riastrad smu_powergate_jpeg(&adev->smu, false); 1335 1.1 riastrad smu_set_gfx_cgpg(&adev->smu, true); 1336 1.1 riastrad } 1337 1.1 riastrad 1338 1.1 riastrad if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1339 1.1 riastrad return 0; 1340 1.1 riastrad 1341 1.1 riastrad if (!smu->pm_enabled) 1342 1.1 riastrad return 0; 1343 1.1 riastrad 1344 1.1 riastrad ret = smu_feature_init_dpm(smu); 1345 1.1 riastrad if (ret) 1346 1.1 riastrad goto failed; 1347 1.1 riastrad 1348 1.1 riastrad ret = smu_smc_table_hw_init(smu, true); 1349 1.1 riastrad if (ret) 1350 1.1 riastrad goto failed; 1351 1.1 riastrad 1352 1.1 riastrad ret = smu_alloc_memory_pool(smu); 1353 1.1 riastrad if (ret) 1354 1.1 riastrad goto failed; 1355 1.1 riastrad 1356 1.1 riastrad /* 1357 1.1 riastrad * Use msg SetSystemVirtualDramAddr and DramLogSetDramAddr can notify 1358 1.1 riastrad * pool location. 
1359 1.1 riastrad */ 1360 1.1 riastrad ret = smu_notify_memory_pool_location(smu); 1361 1.1 riastrad if (ret) 1362 1.1 riastrad goto failed; 1363 1.1 riastrad 1364 1.1 riastrad ret = smu_start_thermal_control(smu); 1365 1.1 riastrad if (ret) 1366 1.1 riastrad goto failed; 1367 1.1 riastrad 1368 1.1 riastrad if (!smu->pm_enabled) 1369 1.1 riastrad adev->pm.dpm_enabled = false; 1370 1.1 riastrad else 1371 1.1 riastrad adev->pm.dpm_enabled = true; /* TODO: will set dpm_enabled flag while VCN and DAL DPM is workable */ 1372 1.1 riastrad 1373 1.1 riastrad pr_info("SMU is initialized successfully!\n"); 1374 1.1 riastrad 1375 1.1 riastrad return 0; 1376 1.1 riastrad 1377 1.1 riastrad failed: 1378 1.1 riastrad return ret; 1379 1.1 riastrad } 1380 1.1 riastrad 1381 1.1 riastrad static int smu_stop_dpms(struct smu_context *smu) 1382 1.1 riastrad { 1383 1.1 riastrad return smu_system_features_control(smu, false); 1384 1.1 riastrad } 1385 1.1 riastrad 1386 1.1 riastrad static int smu_hw_fini(void *handle) 1387 1.1 riastrad { 1388 1.1 riastrad struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1389 1.1 riastrad struct smu_context *smu = &adev->smu; 1390 1.1 riastrad struct smu_table_context *table_context = &smu->smu_table; 1391 1.1 riastrad int ret = 0; 1392 1.1 riastrad 1393 1.1 riastrad if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev)) 1394 1.1 riastrad return 0; 1395 1.1 riastrad 1396 1.1 riastrad if (smu->is_apu) { 1397 1.1 riastrad smu_powergate_sdma(&adev->smu, true); 1398 1.1 riastrad smu_powergate_vcn(&adev->smu, true); 1399 1.1 riastrad smu_powergate_jpeg(&adev->smu, true); 1400 1.1 riastrad } 1401 1.1 riastrad 1402 1.1 riastrad if (!smu->pm_enabled) 1403 1.1 riastrad return 0; 1404 1.1 riastrad 1405 1.1 riastrad if (!amdgpu_sriov_vf(adev)){ 1406 1.1 riastrad ret = smu_stop_thermal_control(smu); 1407 1.1 riastrad if (ret) { 1408 1.1 riastrad pr_warn("Fail to stop thermal control!\n"); 1409 1.1 riastrad return ret; 1410 1.1 riastrad } 1411 1.1 
riastrad 1412 1.1 riastrad /* 1413 1.1 riastrad * For custom pptable uploading, skip the DPM features 1414 1.1 riastrad * disable process on Navi1x ASICs. 1415 1.1 riastrad * - As the gfx related features are under control of 1416 1.1 riastrad * RLC on those ASICs. RLC reinitialization will be 1417 1.1 riastrad * needed to reenable them. That will cost much more 1418 1.1 riastrad * efforts. 1419 1.1 riastrad * 1420 1.1 riastrad * - SMU firmware can handle the DPM reenablement 1421 1.1 riastrad * properly. 1422 1.1 riastrad */ 1423 1.1 riastrad if (!smu->uploading_custom_pp_table || 1424 1.1 riastrad !((adev->asic_type >= CHIP_NAVI10) && 1425 1.1 riastrad (adev->asic_type <= CHIP_NAVI12))) { 1426 1.1 riastrad ret = smu_stop_dpms(smu); 1427 1.1 riastrad if (ret) { 1428 1.1 riastrad pr_warn("Fail to stop Dpms!\n"); 1429 1.1 riastrad return ret; 1430 1.1 riastrad } 1431 1.1 riastrad } 1432 1.1 riastrad } 1433 1.1 riastrad 1434 1.1 riastrad kfree(table_context->driver_pptable); 1435 1.1 riastrad table_context->driver_pptable = NULL; 1436 1.1 riastrad 1437 1.1 riastrad kfree(table_context->max_sustainable_clocks); 1438 1.1 riastrad table_context->max_sustainable_clocks = NULL; 1439 1.1 riastrad 1440 1.1 riastrad kfree(table_context->overdrive_table); 1441 1.1 riastrad table_context->overdrive_table = NULL; 1442 1.1 riastrad 1443 1.1 riastrad ret = smu_fini_fb_allocations(smu); 1444 1.1 riastrad if (ret) 1445 1.1 riastrad return ret; 1446 1.1 riastrad 1447 1.1 riastrad ret = smu_free_memory_pool(smu); 1448 1.1 riastrad if (ret) 1449 1.1 riastrad return ret; 1450 1.1 riastrad 1451 1.1 riastrad return 0; 1452 1.1 riastrad } 1453 1.1 riastrad 1454 1.1 riastrad int smu_reset(struct smu_context *smu) 1455 1.1 riastrad { 1456 1.1 riastrad struct amdgpu_device *adev = smu->adev; 1457 1.1 riastrad int ret = 0; 1458 1.1 riastrad 1459 1.1 riastrad ret = smu_hw_fini(adev); 1460 1.1 riastrad if (ret) 1461 1.1 riastrad return ret; 1462 1.1 riastrad 1463 1.1 riastrad ret = 
smu_hw_init(adev); 1464 1.1 riastrad if (ret) 1465 1.1 riastrad return ret; 1466 1.1 riastrad 1467 1.1 riastrad return ret; 1468 1.1 riastrad } 1469 1.1 riastrad 1470 1.1 riastrad static int smu_suspend(void *handle) 1471 1.1 riastrad { 1472 1.1 riastrad int ret; 1473 1.1 riastrad struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1474 1.1 riastrad struct smu_context *smu = &adev->smu; 1475 1.1 riastrad bool baco_feature_is_enabled = false; 1476 1.1 riastrad 1477 1.1 riastrad if (!smu->pm_enabled) 1478 1.1 riastrad return 0; 1479 1.1 riastrad 1480 1.1 riastrad if(!smu->is_apu) 1481 1.1 riastrad baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT); 1482 1.1 riastrad 1483 1.1 riastrad ret = smu_system_features_control(smu, false); 1484 1.1 riastrad if (ret) 1485 1.1 riastrad return ret; 1486 1.1 riastrad 1487 1.1 riastrad if (baco_feature_is_enabled) { 1488 1.1 riastrad ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true); 1489 1.1 riastrad if (ret) { 1490 1.1 riastrad pr_warn("set BACO feature enabled failed, return %d\n", ret); 1491 1.1 riastrad return ret; 1492 1.1 riastrad } 1493 1.1 riastrad } 1494 1.1 riastrad 1495 1.1 riastrad smu->watermarks_bitmap &= ~(WATERMARKS_LOADED); 1496 1.1 riastrad 1497 1.1 riastrad if (adev->asic_type >= CHIP_NAVI10 && 1498 1.1 riastrad adev->gfx.rlc.funcs->stop) 1499 1.1 riastrad adev->gfx.rlc.funcs->stop(adev); 1500 1.1 riastrad if (smu->is_apu) 1501 1.1 riastrad smu_set_gfx_cgpg(&adev->smu, false); 1502 1.1 riastrad 1503 1.1 riastrad return 0; 1504 1.1 riastrad } 1505 1.1 riastrad 1506 1.1 riastrad static int smu_resume(void *handle) 1507 1.1 riastrad { 1508 1.1 riastrad int ret; 1509 1.1 riastrad struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1510 1.1 riastrad struct smu_context *smu = &adev->smu; 1511 1.1 riastrad 1512 1.1 riastrad if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev)) 1513 1.1 riastrad return 0; 1514 1.1 riastrad 1515 1.1 riastrad if 
(!smu->pm_enabled) 1516 1.1 riastrad return 0; 1517 1.1 riastrad 1518 1.1 riastrad pr_info("SMU is resuming...\n"); 1519 1.1 riastrad 1520 1.1 riastrad ret = smu_start_smc_engine(smu); 1521 1.1 riastrad if (ret) { 1522 1.1 riastrad pr_err("SMU is not ready yet!\n"); 1523 1.1 riastrad goto failed; 1524 1.1 riastrad } 1525 1.1 riastrad 1526 1.1 riastrad ret = smu_smc_table_hw_init(smu, false); 1527 1.1 riastrad if (ret) 1528 1.1 riastrad goto failed; 1529 1.1 riastrad 1530 1.1 riastrad ret = smu_start_thermal_control(smu); 1531 1.1 riastrad if (ret) 1532 1.1 riastrad goto failed; 1533 1.1 riastrad 1534 1.1 riastrad if (smu->is_apu) 1535 1.1 riastrad smu_set_gfx_cgpg(&adev->smu, true); 1536 1.1 riastrad 1537 1.1 riastrad smu->disable_uclk_switch = 0; 1538 1.1 riastrad 1539 1.1 riastrad pr_info("SMU is resumed successfully!\n"); 1540 1.1 riastrad 1541 1.1 riastrad return 0; 1542 1.1 riastrad 1543 1.1 riastrad failed: 1544 1.1 riastrad return ret; 1545 1.1 riastrad } 1546 1.1 riastrad 1547 1.1 riastrad int smu_display_configuration_change(struct smu_context *smu, 1548 1.1 riastrad const struct amd_pp_display_configuration *display_config) 1549 1.1 riastrad { 1550 1.1 riastrad int index = 0; 1551 1.1 riastrad int num_of_active_display = 0; 1552 1.1 riastrad 1553 1.1 riastrad if (!smu->pm_enabled || !is_support_sw_smu(smu->adev)) 1554 1.1 riastrad return -EINVAL; 1555 1.1 riastrad 1556 1.1 riastrad if (!display_config) 1557 1.1 riastrad return -EINVAL; 1558 1.1 riastrad 1559 1.1 riastrad mutex_lock(&smu->mutex); 1560 1.1 riastrad 1561 1.1 riastrad if (smu->ppt_funcs->set_deep_sleep_dcefclk) 1562 1.1 riastrad smu->ppt_funcs->set_deep_sleep_dcefclk(smu, 1563 1.1 riastrad display_config->min_dcef_deep_sleep_set_clk / 100); 1564 1.1 riastrad 1565 1.1 riastrad for (index = 0; index < display_config->num_path_including_non_display; index++) { 1566 1.1 riastrad if (display_config->displays[index].controller_id != 0) 1567 1.1 riastrad num_of_active_display++; 1568 1.1 riastrad } 
1569 1.1 riastrad 1570 1.1 riastrad smu_set_active_display_count(smu, num_of_active_display); 1571 1.1 riastrad 1572 1.1 riastrad smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time, 1573 1.1 riastrad display_config->cpu_cc6_disable, 1574 1.1 riastrad display_config->cpu_pstate_disable, 1575 1.1 riastrad display_config->nb_pstate_switch_disable); 1576 1.1 riastrad 1577 1.1 riastrad mutex_unlock(&smu->mutex); 1578 1.1 riastrad 1579 1.1 riastrad return 0; 1580 1.1 riastrad } 1581 1.1 riastrad 1582 1.1 riastrad static int smu_get_clock_info(struct smu_context *smu, 1583 1.1 riastrad struct smu_clock_info *clk_info, 1584 1.1 riastrad enum smu_perf_level_designation designation) 1585 1.1 riastrad { 1586 1.1 riastrad int ret; 1587 1.1 riastrad struct smu_performance_level level = {0}; 1588 1.1 riastrad 1589 1.1 riastrad if (!clk_info) 1590 1.1 riastrad return -EINVAL; 1591 1.1 riastrad 1592 1.1 riastrad ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level); 1593 1.1 riastrad if (ret) 1594 1.1 riastrad return -EINVAL; 1595 1.1 riastrad 1596 1.1 riastrad clk_info->min_mem_clk = level.memory_clock; 1597 1.1 riastrad clk_info->min_eng_clk = level.core_clock; 1598 1.1 riastrad clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width; 1599 1.1 riastrad 1600 1.1 riastrad ret = smu_get_perf_level(smu, designation, &level); 1601 1.1 riastrad if (ret) 1602 1.1 riastrad return -EINVAL; 1603 1.1 riastrad 1604 1.1 riastrad clk_info->min_mem_clk = level.memory_clock; 1605 1.1 riastrad clk_info->min_eng_clk = level.core_clock; 1606 1.1 riastrad clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width; 1607 1.1 riastrad 1608 1.1 riastrad return 0; 1609 1.1 riastrad } 1610 1.1 riastrad 1611 1.1 riastrad int smu_get_current_clocks(struct smu_context *smu, 1612 1.1 riastrad struct amd_pp_clock_info *clocks) 1613 1.1 riastrad { 1614 1.1 riastrad struct amd_pp_simple_clock_info simple_clocks = {0}; 1615 1.1 riastrad 
struct smu_clock_info hw_clocks; 1616 1.1 riastrad int ret = 0; 1617 1.1 riastrad 1618 1.1 riastrad if (!is_support_sw_smu(smu->adev)) 1619 1.1 riastrad return -EINVAL; 1620 1.1 riastrad 1621 1.1 riastrad mutex_lock(&smu->mutex); 1622 1.1 riastrad 1623 1.1 riastrad smu_get_dal_power_level(smu, &simple_clocks); 1624 1.1 riastrad 1625 1.1 riastrad if (smu->support_power_containment) 1626 1.1 riastrad ret = smu_get_clock_info(smu, &hw_clocks, 1627 1.1 riastrad PERF_LEVEL_POWER_CONTAINMENT); 1628 1.1 riastrad else 1629 1.1 riastrad ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY); 1630 1.1 riastrad 1631 1.1 riastrad if (ret) { 1632 1.1 riastrad pr_err("Error in smu_get_clock_info\n"); 1633 1.1 riastrad goto failed; 1634 1.1 riastrad } 1635 1.1 riastrad 1636 1.1 riastrad clocks->min_engine_clock = hw_clocks.min_eng_clk; 1637 1.1 riastrad clocks->max_engine_clock = hw_clocks.max_eng_clk; 1638 1.1 riastrad clocks->min_memory_clock = hw_clocks.min_mem_clk; 1639 1.1 riastrad clocks->max_memory_clock = hw_clocks.max_mem_clk; 1640 1.1 riastrad clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth; 1641 1.1 riastrad clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth; 1642 1.1 riastrad clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk; 1643 1.1 riastrad clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk; 1644 1.1 riastrad 1645 1.1 riastrad if (simple_clocks.level == 0) 1646 1.1 riastrad clocks->max_clocks_state = PP_DAL_POWERLEVEL_7; 1647 1.1 riastrad else 1648 1.1 riastrad clocks->max_clocks_state = simple_clocks.level; 1649 1.1 riastrad 1650 1.1 riastrad if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) { 1651 1.1 riastrad clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk; 1652 1.1 riastrad clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk; 1653 1.1 riastrad } 1654 1.1 riastrad 1655 1.1 riastrad failed: 1656 1.1 riastrad mutex_unlock(&smu->mutex); 1657 1.1 riastrad return ret; 1658 1.1 riastrad } 1659 1.1 riastrad 1660 1.1 
riastrad static int smu_set_clockgating_state(void *handle, 1661 1.1 riastrad enum amd_clockgating_state state) 1662 1.1 riastrad { 1663 1.1 riastrad return 0; 1664 1.1 riastrad } 1665 1.1 riastrad 1666 1.1 riastrad static int smu_set_powergating_state(void *handle, 1667 1.1 riastrad enum amd_powergating_state state) 1668 1.1 riastrad { 1669 1.1 riastrad return 0; 1670 1.1 riastrad } 1671 1.1 riastrad 1672 1.1 riastrad static int smu_enable_umd_pstate(void *handle, 1673 1.1 riastrad enum amd_dpm_forced_level *level) 1674 1.1 riastrad { 1675 1.1 riastrad uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | 1676 1.1 riastrad AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | 1677 1.1 riastrad AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | 1678 1.1 riastrad AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; 1679 1.1 riastrad 1680 1.1 riastrad struct smu_context *smu = (struct smu_context*)(handle); 1681 1.1 riastrad struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 1682 1.1 riastrad 1683 1.1 riastrad if (!smu->is_apu && (!smu->pm_enabled || !smu_dpm_ctx->dpm_context)) 1684 1.1 riastrad return -EINVAL; 1685 1.1 riastrad 1686 1.1 riastrad if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) { 1687 1.1 riastrad /* enter umd pstate, save current level, disable gfx cg*/ 1688 1.1 riastrad if (*level & profile_mode_mask) { 1689 1.1 riastrad smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level; 1690 1.1 riastrad smu_dpm_ctx->enable_umd_pstate = true; 1691 1.1 riastrad amdgpu_device_ip_set_clockgating_state(smu->adev, 1692 1.1 riastrad AMD_IP_BLOCK_TYPE_GFX, 1693 1.1 riastrad AMD_CG_STATE_UNGATE); 1694 1.1 riastrad amdgpu_device_ip_set_powergating_state(smu->adev, 1695 1.1 riastrad AMD_IP_BLOCK_TYPE_GFX, 1696 1.1 riastrad AMD_PG_STATE_UNGATE); 1697 1.1 riastrad } 1698 1.1 riastrad } else { 1699 1.1 riastrad /* exit umd pstate, restore level, enable gfx cg*/ 1700 1.1 riastrad if (!(*level & profile_mode_mask)) { 1701 1.1 riastrad if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT) 1702 1.1 
riastrad *level = smu_dpm_ctx->saved_dpm_level; 1703 1.1 riastrad smu_dpm_ctx->enable_umd_pstate = false; 1704 1.1 riastrad amdgpu_device_ip_set_clockgating_state(smu->adev, 1705 1.1 riastrad AMD_IP_BLOCK_TYPE_GFX, 1706 1.1 riastrad AMD_CG_STATE_GATE); 1707 1.1 riastrad amdgpu_device_ip_set_powergating_state(smu->adev, 1708 1.1 riastrad AMD_IP_BLOCK_TYPE_GFX, 1709 1.1 riastrad AMD_PG_STATE_GATE); 1710 1.1 riastrad } 1711 1.1 riastrad } 1712 1.1 riastrad 1713 1.1 riastrad return 0; 1714 1.1 riastrad } 1715 1.1 riastrad 1716 1.1 riastrad int smu_adjust_power_state_dynamic(struct smu_context *smu, 1717 1.1 riastrad enum amd_dpm_forced_level level, 1718 1.1 riastrad bool skip_display_settings) 1719 1.1 riastrad { 1720 1.1 riastrad int ret = 0; 1721 1.1 riastrad int index = 0; 1722 1.1 riastrad long workload; 1723 1.1 riastrad struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 1724 1.1 riastrad 1725 1.1 riastrad if (!smu->pm_enabled) 1726 1.1 riastrad return -EINVAL; 1727 1.1 riastrad 1728 1.1 riastrad if (!skip_display_settings) { 1729 1.1 riastrad ret = smu_display_config_changed(smu); 1730 1.1 riastrad if (ret) { 1731 1.1 riastrad pr_err("Failed to change display config!"); 1732 1.1 riastrad return ret; 1733 1.1 riastrad } 1734 1.1 riastrad } 1735 1.1 riastrad 1736 1.1 riastrad ret = smu_apply_clocks_adjust_rules(smu); 1737 1.1 riastrad if (ret) { 1738 1.1 riastrad pr_err("Failed to apply clocks adjust rules!"); 1739 1.1 riastrad return ret; 1740 1.1 riastrad } 1741 1.1 riastrad 1742 1.1 riastrad if (!skip_display_settings) { 1743 1.1 riastrad ret = smu_notify_smc_display_config(smu); 1744 1.1 riastrad if (ret) { 1745 1.1 riastrad pr_err("Failed to notify smc display config!"); 1746 1.1 riastrad return ret; 1747 1.1 riastrad } 1748 1.1 riastrad } 1749 1.1 riastrad 1750 1.1 riastrad if (smu_dpm_ctx->dpm_level != level) { 1751 1.1 riastrad ret = smu_asic_set_performance_level(smu, level); 1752 1.1 riastrad if (ret) { 1753 1.1 riastrad pr_err("Failed to set 
performance level!"); 1754 1.1 riastrad return ret; 1755 1.1 riastrad } 1756 1.1 riastrad 1757 1.1 riastrad /* update the saved copy */ 1758 1.1 riastrad smu_dpm_ctx->dpm_level = level; 1759 1.1 riastrad } 1760 1.1 riastrad 1761 1.1 riastrad if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { 1762 1.1 riastrad index = fls(smu->workload_mask); 1763 1.1 riastrad index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; 1764 1.1 riastrad workload = smu->workload_setting[index]; 1765 1.1 riastrad 1766 1.1 riastrad if (smu->power_profile_mode != workload) 1767 1.1 riastrad smu_set_power_profile_mode(smu, &workload, 0, false); 1768 1.1 riastrad } 1769 1.1 riastrad 1770 1.1 riastrad return ret; 1771 1.1 riastrad } 1772 1.1 riastrad 1773 1.1 riastrad int smu_handle_task(struct smu_context *smu, 1774 1.1 riastrad enum amd_dpm_forced_level level, 1775 1.1 riastrad enum amd_pp_task task_id, 1776 1.1 riastrad bool lock_needed) 1777 1.1 riastrad { 1778 1.1 riastrad int ret = 0; 1779 1.1 riastrad 1780 1.1 riastrad if (lock_needed) 1781 1.1 riastrad mutex_lock(&smu->mutex); 1782 1.1 riastrad 1783 1.1 riastrad switch (task_id) { 1784 1.1 riastrad case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE: 1785 1.1 riastrad ret = smu_pre_display_config_changed(smu); 1786 1.1 riastrad if (ret) 1787 1.1 riastrad goto out; 1788 1.1 riastrad ret = smu_set_cpu_power_state(smu); 1789 1.1 riastrad if (ret) 1790 1.1 riastrad goto out; 1791 1.1 riastrad ret = smu_adjust_power_state_dynamic(smu, level, false); 1792 1.1 riastrad break; 1793 1.1 riastrad case AMD_PP_TASK_COMPLETE_INIT: 1794 1.1 riastrad case AMD_PP_TASK_READJUST_POWER_STATE: 1795 1.1 riastrad ret = smu_adjust_power_state_dynamic(smu, level, true); 1796 1.1 riastrad break; 1797 1.1 riastrad default: 1798 1.1 riastrad break; 1799 1.1 riastrad } 1800 1.1 riastrad 1801 1.1 riastrad out: 1802 1.1 riastrad if (lock_needed) 1803 1.1 riastrad mutex_unlock(&smu->mutex); 1804 1.1 riastrad 1805 1.1 riastrad return ret; 1806 1.1 riastrad } 
1807 1.1 riastrad 1808 1.1 riastrad int smu_switch_power_profile(struct smu_context *smu, 1809 1.1 riastrad enum PP_SMC_POWER_PROFILE type, 1810 1.1 riastrad bool en) 1811 1.1 riastrad { 1812 1.1 riastrad struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 1813 1.1 riastrad long workload; 1814 1.1 riastrad uint32_t index; 1815 1.1 riastrad 1816 1.1 riastrad if (!smu->pm_enabled) 1817 1.1 riastrad return -EINVAL; 1818 1.1 riastrad 1819 1.1 riastrad if (!(type < PP_SMC_POWER_PROFILE_CUSTOM)) 1820 1.1 riastrad return -EINVAL; 1821 1.1 riastrad 1822 1.1 riastrad mutex_lock(&smu->mutex); 1823 1.1 riastrad 1824 1.1 riastrad if (!en) { 1825 1.1 riastrad smu->workload_mask &= ~(1 << smu->workload_prority[type]); 1826 1.1 riastrad index = fls(smu->workload_mask); 1827 1.1 riastrad index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; 1828 1.1 riastrad workload = smu->workload_setting[index]; 1829 1.1 riastrad } else { 1830 1.1 riastrad smu->workload_mask |= (1 << smu->workload_prority[type]); 1831 1.1 riastrad index = fls(smu->workload_mask); 1832 1.1 riastrad index = index <= WORKLOAD_POLICY_MAX ? 
index - 1 : 0; 1833 1.1 riastrad workload = smu->workload_setting[index]; 1834 1.1 riastrad } 1835 1.1 riastrad 1836 1.1 riastrad if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) 1837 1.1 riastrad smu_set_power_profile_mode(smu, &workload, 0, false); 1838 1.1 riastrad 1839 1.1 riastrad mutex_unlock(&smu->mutex); 1840 1.1 riastrad 1841 1.1 riastrad return 0; 1842 1.1 riastrad } 1843 1.1 riastrad 1844 1.1 riastrad enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu) 1845 1.1 riastrad { 1846 1.1 riastrad struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 1847 1.1 riastrad enum amd_dpm_forced_level level; 1848 1.1 riastrad 1849 1.1 riastrad if (!smu->is_apu && !smu_dpm_ctx->dpm_context) 1850 1.1 riastrad return -EINVAL; 1851 1.1 riastrad 1852 1.1 riastrad mutex_lock(&(smu->mutex)); 1853 1.1 riastrad level = smu_dpm_ctx->dpm_level; 1854 1.1 riastrad mutex_unlock(&(smu->mutex)); 1855 1.1 riastrad 1856 1.1 riastrad return level; 1857 1.1 riastrad } 1858 1.1 riastrad 1859 1.1 riastrad int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level) 1860 1.1 riastrad { 1861 1.1 riastrad struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 1862 1.1 riastrad int ret = 0; 1863 1.1 riastrad 1864 1.1 riastrad if (!smu->is_apu && !smu_dpm_ctx->dpm_context) 1865 1.1 riastrad return -EINVAL; 1866 1.1 riastrad 1867 1.1 riastrad mutex_lock(&smu->mutex); 1868 1.1 riastrad 1869 1.1 riastrad ret = smu_enable_umd_pstate(smu, &level); 1870 1.1 riastrad if (ret) { 1871 1.1 riastrad mutex_unlock(&smu->mutex); 1872 1.1 riastrad return ret; 1873 1.1 riastrad } 1874 1.1 riastrad 1875 1.1 riastrad ret = smu_handle_task(smu, level, 1876 1.1 riastrad AMD_PP_TASK_READJUST_POWER_STATE, 1877 1.1 riastrad false); 1878 1.1 riastrad 1879 1.1 riastrad mutex_unlock(&smu->mutex); 1880 1.1 riastrad 1881 1.1 riastrad return ret; 1882 1.1 riastrad } 1883 1.1 riastrad 1884 1.1 riastrad int smu_set_display_count(struct smu_context *smu, uint32_t 
count) 1885 1.1 riastrad { 1886 1.1 riastrad int ret = 0; 1887 1.1 riastrad 1888 1.1 riastrad mutex_lock(&smu->mutex); 1889 1.1 riastrad ret = smu_init_display_count(smu, count); 1890 1.1 riastrad mutex_unlock(&smu->mutex); 1891 1.1 riastrad 1892 1.1 riastrad return ret; 1893 1.1 riastrad } 1894 1.1 riastrad 1895 1.1 riastrad int smu_force_clk_levels(struct smu_context *smu, 1896 1.1 riastrad enum smu_clk_type clk_type, 1897 1.1 riastrad uint32_t mask, 1898 1.1 riastrad bool lock_needed) 1899 1.1 riastrad { 1900 1.1 riastrad struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 1901 1.1 riastrad int ret = 0; 1902 1.1 riastrad 1903 1.1 riastrad if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { 1904 1.1 riastrad pr_debug("force clock level is for dpm manual mode only.\n"); 1905 1.1 riastrad return -EINVAL; 1906 1.1 riastrad } 1907 1.1 riastrad 1908 1.1 riastrad if (lock_needed) 1909 1.1 riastrad mutex_lock(&smu->mutex); 1910 1.1 riastrad 1911 1.1 riastrad if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) 1912 1.1 riastrad ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask); 1913 1.1 riastrad 1914 1.1 riastrad if (lock_needed) 1915 1.1 riastrad mutex_unlock(&smu->mutex); 1916 1.1 riastrad 1917 1.1 riastrad return ret; 1918 1.1 riastrad } 1919 1.1 riastrad 1920 1.1 riastrad int smu_set_mp1_state(struct smu_context *smu, 1921 1.1 riastrad enum pp_mp1_state mp1_state) 1922 1.1 riastrad { 1923 1.1 riastrad uint16_t msg; 1924 1.1 riastrad int ret; 1925 1.1 riastrad 1926 1.1 riastrad /* 1927 1.1 riastrad * The SMC is not fully ready. That may be 1928 1.1 riastrad * expected as the IP may be masked. 1929 1.1 riastrad * So, just return without error. 
1930 1.1 riastrad */ 1931 1.1 riastrad if (!smu->pm_enabled) 1932 1.1 riastrad return 0; 1933 1.1 riastrad 1934 1.1 riastrad mutex_lock(&smu->mutex); 1935 1.1 riastrad 1936 1.1 riastrad switch (mp1_state) { 1937 1.1 riastrad case PP_MP1_STATE_SHUTDOWN: 1938 1.1 riastrad msg = SMU_MSG_PrepareMp1ForShutdown; 1939 1.1 riastrad break; 1940 1.1 riastrad case PP_MP1_STATE_UNLOAD: 1941 1.1 riastrad msg = SMU_MSG_PrepareMp1ForUnload; 1942 1.1 riastrad break; 1943 1.1 riastrad case PP_MP1_STATE_RESET: 1944 1.1 riastrad msg = SMU_MSG_PrepareMp1ForReset; 1945 1.1 riastrad break; 1946 1.1 riastrad case PP_MP1_STATE_NONE: 1947 1.1 riastrad default: 1948 1.1 riastrad mutex_unlock(&smu->mutex); 1949 1.1 riastrad return 0; 1950 1.1 riastrad } 1951 1.1 riastrad 1952 1.1 riastrad /* some asics may not support those messages */ 1953 1.1 riastrad if (smu_msg_get_index(smu, msg) < 0) { 1954 1.1 riastrad mutex_unlock(&smu->mutex); 1955 1.1 riastrad return 0; 1956 1.1 riastrad } 1957 1.1 riastrad 1958 1.1 riastrad ret = smu_send_smc_msg(smu, msg); 1959 1.1 riastrad if (ret) 1960 1.1 riastrad pr_err("[PrepareMp1] Failed!\n"); 1961 1.1 riastrad 1962 1.1 riastrad mutex_unlock(&smu->mutex); 1963 1.1 riastrad 1964 1.1 riastrad return ret; 1965 1.1 riastrad } 1966 1.1 riastrad 1967 1.1 riastrad int smu_set_df_cstate(struct smu_context *smu, 1968 1.1 riastrad enum pp_df_cstate state) 1969 1.1 riastrad { 1970 1.1 riastrad int ret = 0; 1971 1.1 riastrad 1972 1.1 riastrad /* 1973 1.1 riastrad * The SMC is not fully ready. That may be 1974 1.1 riastrad * expected as the IP may be masked. 1975 1.1 riastrad * So, just return without error. 
1976 1.1 riastrad */ 1977 1.1 riastrad if (!smu->pm_enabled) 1978 1.1 riastrad return 0; 1979 1.1 riastrad 1980 1.1 riastrad if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate) 1981 1.1 riastrad return 0; 1982 1.1 riastrad 1983 1.1 riastrad mutex_lock(&smu->mutex); 1984 1.1 riastrad 1985 1.1 riastrad ret = smu->ppt_funcs->set_df_cstate(smu, state); 1986 1.1 riastrad if (ret) 1987 1.1 riastrad pr_err("[SetDfCstate] failed!\n"); 1988 1.1 riastrad 1989 1.1 riastrad mutex_unlock(&smu->mutex); 1990 1.1 riastrad 1991 1.1 riastrad return ret; 1992 1.1 riastrad } 1993 1.1 riastrad 1994 1.1 riastrad int smu_write_watermarks_table(struct smu_context *smu) 1995 1.1 riastrad { 1996 1.1 riastrad void *watermarks_table = smu->smu_table.watermarks_table; 1997 1.1 riastrad 1998 1.1 riastrad if (!watermarks_table) 1999 1.1 riastrad return -EINVAL; 2000 1.1 riastrad 2001 1.1 riastrad return smu_update_table(smu, 2002 1.1 riastrad SMU_TABLE_WATERMARKS, 2003 1.1 riastrad 0, 2004 1.1 riastrad watermarks_table, 2005 1.1 riastrad true); 2006 1.1 riastrad } 2007 1.1 riastrad 2008 1.1 riastrad int smu_set_watermarks_for_clock_ranges(struct smu_context *smu, 2009 1.1 riastrad struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges) 2010 1.1 riastrad { 2011 1.1 riastrad void *table = smu->smu_table.watermarks_table; 2012 1.1 riastrad 2013 1.1 riastrad if (!table) 2014 1.1 riastrad return -EINVAL; 2015 1.1 riastrad 2016 1.1 riastrad mutex_lock(&smu->mutex); 2017 1.1 riastrad 2018 1.1 riastrad if (!smu->disable_watermark && 2019 1.1 riastrad smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) && 2020 1.1 riastrad smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { 2021 1.1 riastrad smu_set_watermarks_table(smu, table, clock_ranges); 2022 1.1 riastrad smu->watermarks_bitmap |= WATERMARKS_EXIST; 2023 1.1 riastrad smu->watermarks_bitmap &= ~WATERMARKS_LOADED; 2024 1.1 riastrad } 2025 1.1 riastrad 2026 1.1 riastrad mutex_unlock(&smu->mutex); 2027 1.1 riastrad 2028 1.1 riastrad 
return 0; 2029 1.1 riastrad } 2030 1.1 riastrad 2031 1.1 riastrad const struct amd_ip_funcs smu_ip_funcs = { 2032 1.1 riastrad .name = "smu", 2033 1.1 riastrad .early_init = smu_early_init, 2034 1.1 riastrad .late_init = smu_late_init, 2035 1.1 riastrad .sw_init = smu_sw_init, 2036 1.1 riastrad .sw_fini = smu_sw_fini, 2037 1.1 riastrad .hw_init = smu_hw_init, 2038 1.1 riastrad .hw_fini = smu_hw_fini, 2039 1.1 riastrad .suspend = smu_suspend, 2040 1.1 riastrad .resume = smu_resume, 2041 1.1 riastrad .is_idle = NULL, 2042 1.1 riastrad .check_soft_reset = NULL, 2043 1.1 riastrad .wait_for_idle = NULL, 2044 1.1 riastrad .soft_reset = NULL, 2045 1.1 riastrad .set_clockgating_state = smu_set_clockgating_state, 2046 1.1 riastrad .set_powergating_state = smu_set_powergating_state, 2047 1.1 riastrad .enable_umd_pstate = smu_enable_umd_pstate, 2048 1.1 riastrad }; 2049 1.1 riastrad 2050 1.1 riastrad const struct amdgpu_ip_block_version smu_v11_0_ip_block = 2051 1.1 riastrad { 2052 1.1 riastrad .type = AMD_IP_BLOCK_TYPE_SMC, 2053 1.1 riastrad .major = 11, 2054 1.1 riastrad .minor = 0, 2055 1.1 riastrad .rev = 0, 2056 1.1 riastrad .funcs = &smu_ip_funcs, 2057 1.1 riastrad }; 2058 1.1 riastrad 2059 1.1 riastrad const struct amdgpu_ip_block_version smu_v12_0_ip_block = 2060 1.1 riastrad { 2061 1.1 riastrad .type = AMD_IP_BLOCK_TYPE_SMC, 2062 1.1 riastrad .major = 12, 2063 1.1 riastrad .minor = 0, 2064 1.1 riastrad .rev = 0, 2065 1.1 riastrad .funcs = &smu_ip_funcs, 2066 1.1 riastrad }; 2067 1.1 riastrad 2068 1.1 riastrad int smu_load_microcode(struct smu_context *smu) 2069 1.1 riastrad { 2070 1.1 riastrad int ret = 0; 2071 1.1 riastrad 2072 1.1 riastrad mutex_lock(&smu->mutex); 2073 1.1 riastrad 2074 1.1 riastrad if (smu->ppt_funcs->load_microcode) 2075 1.1 riastrad ret = smu->ppt_funcs->load_microcode(smu); 2076 1.1 riastrad 2077 1.1 riastrad mutex_unlock(&smu->mutex); 2078 1.1 riastrad 2079 1.1 riastrad return ret; 2080 1.1 riastrad } 2081 1.1 riastrad 2082 1.1 riastrad int 
smu_check_fw_status(struct smu_context *smu) 2083 1.1 riastrad { 2084 1.1 riastrad int ret = 0; 2085 1.1 riastrad 2086 1.1 riastrad mutex_lock(&smu->mutex); 2087 1.1 riastrad 2088 1.1 riastrad if (smu->ppt_funcs->check_fw_status) 2089 1.1 riastrad ret = smu->ppt_funcs->check_fw_status(smu); 2090 1.1 riastrad 2091 1.1 riastrad mutex_unlock(&smu->mutex); 2092 1.1 riastrad 2093 1.1 riastrad return ret; 2094 1.1 riastrad } 2095 1.1 riastrad 2096 1.1 riastrad int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled) 2097 1.1 riastrad { 2098 1.1 riastrad int ret = 0; 2099 1.1 riastrad 2100 1.1 riastrad mutex_lock(&smu->mutex); 2101 1.1 riastrad 2102 1.1 riastrad if (smu->ppt_funcs->set_gfx_cgpg) 2103 1.1 riastrad ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled); 2104 1.1 riastrad 2105 1.1 riastrad mutex_unlock(&smu->mutex); 2106 1.1 riastrad 2107 1.1 riastrad return ret; 2108 1.1 riastrad } 2109 1.1 riastrad 2110 1.1 riastrad int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed) 2111 1.1 riastrad { 2112 1.1 riastrad int ret = 0; 2113 1.1 riastrad 2114 1.1 riastrad mutex_lock(&smu->mutex); 2115 1.1 riastrad 2116 1.1 riastrad if (smu->ppt_funcs->set_fan_speed_rpm) 2117 1.1 riastrad ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed); 2118 1.1 riastrad 2119 1.1 riastrad mutex_unlock(&smu->mutex); 2120 1.1 riastrad 2121 1.1 riastrad return ret; 2122 1.1 riastrad } 2123 1.1 riastrad 2124 1.1 riastrad int smu_get_power_limit(struct smu_context *smu, 2125 1.1 riastrad uint32_t *limit, 2126 1.1 riastrad bool def, 2127 1.1 riastrad bool lock_needed) 2128 1.1 riastrad { 2129 1.1 riastrad int ret = 0; 2130 1.1 riastrad 2131 1.1 riastrad if (lock_needed) 2132 1.1 riastrad mutex_lock(&smu->mutex); 2133 1.1 riastrad 2134 1.1 riastrad if (smu->ppt_funcs->get_power_limit) 2135 1.1 riastrad ret = smu->ppt_funcs->get_power_limit(smu, limit, def); 2136 1.1 riastrad 2137 1.1 riastrad if (lock_needed) 2138 1.1 riastrad mutex_unlock(&smu->mutex); 2139 1.1 riastrad 2140 1.1 
riastrad return ret; 2141 1.1 riastrad } 2142 1.1 riastrad 2143 1.1 riastrad int smu_set_power_limit(struct smu_context *smu, uint32_t limit) 2144 1.1 riastrad { 2145 1.1 riastrad int ret = 0; 2146 1.1 riastrad 2147 1.1 riastrad mutex_lock(&smu->mutex); 2148 1.1 riastrad 2149 1.1 riastrad if (smu->ppt_funcs->set_power_limit) 2150 1.1 riastrad ret = smu->ppt_funcs->set_power_limit(smu, limit); 2151 1.1 riastrad 2152 1.1 riastrad mutex_unlock(&smu->mutex); 2153 1.1 riastrad 2154 1.1 riastrad return ret; 2155 1.1 riastrad } 2156 1.1 riastrad 2157 1.1 riastrad int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) 2158 1.1 riastrad { 2159 1.1 riastrad int ret = 0; 2160 1.1 riastrad 2161 1.1 riastrad mutex_lock(&smu->mutex); 2162 1.1 riastrad 2163 1.1 riastrad if (smu->ppt_funcs->print_clk_levels) 2164 1.1 riastrad ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf); 2165 1.1 riastrad 2166 1.1 riastrad mutex_unlock(&smu->mutex); 2167 1.1 riastrad 2168 1.1 riastrad return ret; 2169 1.1 riastrad } 2170 1.1 riastrad 2171 1.1 riastrad int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type) 2172 1.1 riastrad { 2173 1.1 riastrad int ret = 0; 2174 1.1 riastrad 2175 1.1 riastrad mutex_lock(&smu->mutex); 2176 1.1 riastrad 2177 1.1 riastrad if (smu->ppt_funcs->get_od_percentage) 2178 1.1 riastrad ret = smu->ppt_funcs->get_od_percentage(smu, type); 2179 1.1 riastrad 2180 1.1 riastrad mutex_unlock(&smu->mutex); 2181 1.1 riastrad 2182 1.1 riastrad return ret; 2183 1.1 riastrad } 2184 1.1 riastrad 2185 1.1 riastrad int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value) 2186 1.1 riastrad { 2187 1.1 riastrad int ret = 0; 2188 1.1 riastrad 2189 1.1 riastrad mutex_lock(&smu->mutex); 2190 1.1 riastrad 2191 1.1 riastrad if (smu->ppt_funcs->set_od_percentage) 2192 1.1 riastrad ret = smu->ppt_funcs->set_od_percentage(smu, type, value); 2193 1.1 riastrad 2194 1.1 riastrad mutex_unlock(&smu->mutex); 
2195 1.1 riastrad 2196 1.1 riastrad return ret; 2197 1.1 riastrad } 2198 1.1 riastrad 2199 1.1 riastrad int smu_od_edit_dpm_table(struct smu_context *smu, 2200 1.1 riastrad enum PP_OD_DPM_TABLE_COMMAND type, 2201 1.1 riastrad long *input, uint32_t size) 2202 1.1 riastrad { 2203 1.1 riastrad int ret = 0; 2204 1.1 riastrad 2205 1.1 riastrad mutex_lock(&smu->mutex); 2206 1.1 riastrad 2207 1.1 riastrad if (smu->ppt_funcs->od_edit_dpm_table) 2208 1.1 riastrad ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size); 2209 1.1 riastrad 2210 1.1 riastrad mutex_unlock(&smu->mutex); 2211 1.1 riastrad 2212 1.1 riastrad return ret; 2213 1.1 riastrad } 2214 1.1 riastrad 2215 1.1 riastrad int smu_read_sensor(struct smu_context *smu, 2216 1.1 riastrad enum amd_pp_sensors sensor, 2217 1.1 riastrad void *data, uint32_t *size) 2218 1.1 riastrad { 2219 1.1 riastrad int ret = 0; 2220 1.1 riastrad 2221 1.1 riastrad mutex_lock(&smu->mutex); 2222 1.1 riastrad 2223 1.1 riastrad if (smu->ppt_funcs->read_sensor) 2224 1.1 riastrad ret = smu->ppt_funcs->read_sensor(smu, sensor, data, size); 2225 1.1 riastrad 2226 1.1 riastrad mutex_unlock(&smu->mutex); 2227 1.1 riastrad 2228 1.1 riastrad return ret; 2229 1.1 riastrad } 2230 1.1 riastrad 2231 1.1 riastrad int smu_get_power_profile_mode(struct smu_context *smu, char *buf) 2232 1.1 riastrad { 2233 1.1 riastrad int ret = 0; 2234 1.1 riastrad 2235 1.1 riastrad mutex_lock(&smu->mutex); 2236 1.1 riastrad 2237 1.1 riastrad if (smu->ppt_funcs->get_power_profile_mode) 2238 1.1 riastrad ret = smu->ppt_funcs->get_power_profile_mode(smu, buf); 2239 1.1 riastrad 2240 1.1 riastrad mutex_unlock(&smu->mutex); 2241 1.1 riastrad 2242 1.1 riastrad return ret; 2243 1.1 riastrad } 2244 1.1 riastrad 2245 1.1 riastrad int smu_set_power_profile_mode(struct smu_context *smu, 2246 1.1 riastrad long *param, 2247 1.1 riastrad uint32_t param_size, 2248 1.1 riastrad bool lock_needed) 2249 1.1 riastrad { 2250 1.1 riastrad int ret = 0; 2251 1.1 riastrad 2252 1.1 
riastrad if (lock_needed) 2253 1.1 riastrad mutex_lock(&smu->mutex); 2254 1.1 riastrad 2255 1.1 riastrad if (smu->ppt_funcs->set_power_profile_mode) 2256 1.1 riastrad ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size); 2257 1.1 riastrad 2258 1.1 riastrad if (lock_needed) 2259 1.1 riastrad mutex_unlock(&smu->mutex); 2260 1.1 riastrad 2261 1.1 riastrad return ret; 2262 1.1 riastrad } 2263 1.1 riastrad 2264 1.1 riastrad 2265 1.1 riastrad int smu_get_fan_control_mode(struct smu_context *smu) 2266 1.1 riastrad { 2267 1.1 riastrad int ret = 0; 2268 1.1 riastrad 2269 1.1 riastrad mutex_lock(&smu->mutex); 2270 1.1 riastrad 2271 1.1 riastrad if (smu->ppt_funcs->get_fan_control_mode) 2272 1.1 riastrad ret = smu->ppt_funcs->get_fan_control_mode(smu); 2273 1.1 riastrad 2274 1.1 riastrad mutex_unlock(&smu->mutex); 2275 1.1 riastrad 2276 1.1 riastrad return ret; 2277 1.1 riastrad } 2278 1.1 riastrad 2279 1.1 riastrad int smu_set_fan_control_mode(struct smu_context *smu, int value) 2280 1.1 riastrad { 2281 1.1 riastrad int ret = 0; 2282 1.1 riastrad 2283 1.1 riastrad mutex_lock(&smu->mutex); 2284 1.1 riastrad 2285 1.1 riastrad if (smu->ppt_funcs->set_fan_control_mode) 2286 1.1 riastrad ret = smu->ppt_funcs->set_fan_control_mode(smu, value); 2287 1.1 riastrad 2288 1.1 riastrad mutex_unlock(&smu->mutex); 2289 1.1 riastrad 2290 1.1 riastrad return ret; 2291 1.1 riastrad } 2292 1.1 riastrad 2293 1.1 riastrad int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed) 2294 1.1 riastrad { 2295 1.1 riastrad int ret = 0; 2296 1.1 riastrad 2297 1.1 riastrad mutex_lock(&smu->mutex); 2298 1.1 riastrad 2299 1.1 riastrad if (smu->ppt_funcs->get_fan_speed_percent) 2300 1.1 riastrad ret = smu->ppt_funcs->get_fan_speed_percent(smu, speed); 2301 1.1 riastrad 2302 1.1 riastrad mutex_unlock(&smu->mutex); 2303 1.1 riastrad 2304 1.1 riastrad return ret; 2305 1.1 riastrad } 2306 1.1 riastrad 2307 1.1 riastrad int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t 
speed) 2308 1.1 riastrad { 2309 1.1 riastrad int ret = 0; 2310 1.1 riastrad 2311 1.1 riastrad mutex_lock(&smu->mutex); 2312 1.1 riastrad 2313 1.1 riastrad if (smu->ppt_funcs->set_fan_speed_percent) 2314 1.1 riastrad ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed); 2315 1.1 riastrad 2316 1.1 riastrad mutex_unlock(&smu->mutex); 2317 1.1 riastrad 2318 1.1 riastrad return ret; 2319 1.1 riastrad } 2320 1.1 riastrad 2321 1.1 riastrad int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed) 2322 1.1 riastrad { 2323 1.1 riastrad int ret = 0; 2324 1.1 riastrad 2325 1.1 riastrad mutex_lock(&smu->mutex); 2326 1.1 riastrad 2327 1.1 riastrad if (smu->ppt_funcs->get_fan_speed_rpm) 2328 1.1 riastrad ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed); 2329 1.1 riastrad 2330 1.1 riastrad mutex_unlock(&smu->mutex); 2331 1.1 riastrad 2332 1.1 riastrad return ret; 2333 1.1 riastrad } 2334 1.1 riastrad 2335 1.1 riastrad int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk) 2336 1.1 riastrad { 2337 1.1 riastrad int ret = 0; 2338 1.1 riastrad 2339 1.1 riastrad mutex_lock(&smu->mutex); 2340 1.1 riastrad 2341 1.1 riastrad if (smu->ppt_funcs->set_deep_sleep_dcefclk) 2342 1.1 riastrad ret = smu->ppt_funcs->set_deep_sleep_dcefclk(smu, clk); 2343 1.1 riastrad 2344 1.1 riastrad mutex_unlock(&smu->mutex); 2345 1.1 riastrad 2346 1.1 riastrad return ret; 2347 1.1 riastrad } 2348 1.1 riastrad 2349 1.1 riastrad int smu_set_active_display_count(struct smu_context *smu, uint32_t count) 2350 1.1 riastrad { 2351 1.1 riastrad int ret = 0; 2352 1.1 riastrad 2353 1.1 riastrad if (smu->ppt_funcs->set_active_display_count) 2354 1.1 riastrad ret = smu->ppt_funcs->set_active_display_count(smu, count); 2355 1.1 riastrad 2356 1.1 riastrad return ret; 2357 1.1 riastrad } 2358 1.1 riastrad 2359 1.1 riastrad int smu_get_clock_by_type(struct smu_context *smu, 2360 1.1 riastrad enum amd_pp_clock_type type, 2361 1.1 riastrad struct amd_pp_clocks *clocks) 2362 1.1 riastrad { 2363 1.1 
riastrad int ret = 0; 2364 1.1 riastrad 2365 1.1 riastrad mutex_lock(&smu->mutex); 2366 1.1 riastrad 2367 1.1 riastrad if (smu->ppt_funcs->get_clock_by_type) 2368 1.1 riastrad ret = smu->ppt_funcs->get_clock_by_type(smu, type, clocks); 2369 1.1 riastrad 2370 1.1 riastrad mutex_unlock(&smu->mutex); 2371 1.1 riastrad 2372 1.1 riastrad return ret; 2373 1.1 riastrad } 2374 1.1 riastrad 2375 1.1 riastrad int smu_get_max_high_clocks(struct smu_context *smu, 2376 1.1 riastrad struct amd_pp_simple_clock_info *clocks) 2377 1.1 riastrad { 2378 1.1 riastrad int ret = 0; 2379 1.1 riastrad 2380 1.1 riastrad mutex_lock(&smu->mutex); 2381 1.1 riastrad 2382 1.1 riastrad if (smu->ppt_funcs->get_max_high_clocks) 2383 1.1 riastrad ret = smu->ppt_funcs->get_max_high_clocks(smu, clocks); 2384 1.1 riastrad 2385 1.1 riastrad mutex_unlock(&smu->mutex); 2386 1.1 riastrad 2387 1.1 riastrad return ret; 2388 1.1 riastrad } 2389 1.1 riastrad 2390 1.1 riastrad int smu_get_clock_by_type_with_latency(struct smu_context *smu, 2391 1.1 riastrad enum smu_clk_type clk_type, 2392 1.1 riastrad struct pp_clock_levels_with_latency *clocks) 2393 1.1 riastrad { 2394 1.1 riastrad int ret = 0; 2395 1.1 riastrad 2396 1.1 riastrad mutex_lock(&smu->mutex); 2397 1.1 riastrad 2398 1.1 riastrad if (smu->ppt_funcs->get_clock_by_type_with_latency) 2399 1.1 riastrad ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks); 2400 1.1 riastrad 2401 1.1 riastrad mutex_unlock(&smu->mutex); 2402 1.1 riastrad 2403 1.1 riastrad return ret; 2404 1.1 riastrad } 2405 1.1 riastrad 2406 1.1 riastrad int smu_get_clock_by_type_with_voltage(struct smu_context *smu, 2407 1.1 riastrad enum amd_pp_clock_type type, 2408 1.1 riastrad struct pp_clock_levels_with_voltage *clocks) 2409 1.1 riastrad { 2410 1.1 riastrad int ret = 0; 2411 1.1 riastrad 2412 1.1 riastrad mutex_lock(&smu->mutex); 2413 1.1 riastrad 2414 1.1 riastrad if (smu->ppt_funcs->get_clock_by_type_with_voltage) 2415 1.1 riastrad ret = 
smu->ppt_funcs->get_clock_by_type_with_voltage(smu, type, clocks); 2416 1.1 riastrad 2417 1.1 riastrad mutex_unlock(&smu->mutex); 2418 1.1 riastrad 2419 1.1 riastrad return ret; 2420 1.1 riastrad } 2421 1.1 riastrad 2422 1.1 riastrad 2423 1.1 riastrad int smu_display_clock_voltage_request(struct smu_context *smu, 2424 1.1 riastrad struct pp_display_clock_request *clock_req) 2425 1.1 riastrad { 2426 1.1 riastrad int ret = 0; 2427 1.1 riastrad 2428 1.1 riastrad mutex_lock(&smu->mutex); 2429 1.1 riastrad 2430 1.1 riastrad if (smu->ppt_funcs->display_clock_voltage_request) 2431 1.1 riastrad ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req); 2432 1.1 riastrad 2433 1.1 riastrad mutex_unlock(&smu->mutex); 2434 1.1 riastrad 2435 1.1 riastrad return ret; 2436 1.1 riastrad } 2437 1.1 riastrad 2438 1.1 riastrad 2439 1.1 riastrad int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch) 2440 1.1 riastrad { 2441 1.1 riastrad int ret = -EINVAL; 2442 1.1 riastrad 2443 1.1 riastrad mutex_lock(&smu->mutex); 2444 1.1 riastrad 2445 1.1 riastrad if (smu->ppt_funcs->display_disable_memory_clock_switch) 2446 1.1 riastrad ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch); 2447 1.1 riastrad 2448 1.1 riastrad mutex_unlock(&smu->mutex); 2449 1.1 riastrad 2450 1.1 riastrad return ret; 2451 1.1 riastrad } 2452 1.1 riastrad 2453 1.1 riastrad int smu_notify_smu_enable_pwe(struct smu_context *smu) 2454 1.1 riastrad { 2455 1.1 riastrad int ret = 0; 2456 1.1 riastrad 2457 1.1 riastrad mutex_lock(&smu->mutex); 2458 1.1 riastrad 2459 1.1 riastrad if (smu->ppt_funcs->notify_smu_enable_pwe) 2460 1.1 riastrad ret = smu->ppt_funcs->notify_smu_enable_pwe(smu); 2461 1.1 riastrad 2462 1.1 riastrad mutex_unlock(&smu->mutex); 2463 1.1 riastrad 2464 1.1 riastrad return ret; 2465 1.1 riastrad } 2466 1.1 riastrad 2467 1.1 riastrad int smu_set_xgmi_pstate(struct smu_context *smu, 2468 1.1 riastrad uint32_t 
pstate) 2469 1.1 riastrad { 2470 1.1 riastrad int ret = 0; 2471 1.1 riastrad 2472 1.1 riastrad mutex_lock(&smu->mutex); 2473 1.1 riastrad 2474 1.1 riastrad if (smu->ppt_funcs->set_xgmi_pstate) 2475 1.1 riastrad ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate); 2476 1.1 riastrad 2477 1.1 riastrad mutex_unlock(&smu->mutex); 2478 1.1 riastrad 2479 1.1 riastrad return ret; 2480 1.1 riastrad } 2481 1.1 riastrad 2482 1.1 riastrad int smu_set_azalia_d3_pme(struct smu_context *smu) 2483 1.1 riastrad { 2484 1.1 riastrad int ret = 0; 2485 1.1 riastrad 2486 1.1 riastrad mutex_lock(&smu->mutex); 2487 1.1 riastrad 2488 1.1 riastrad if (smu->ppt_funcs->set_azalia_d3_pme) 2489 1.1 riastrad ret = smu->ppt_funcs->set_azalia_d3_pme(smu); 2490 1.1 riastrad 2491 1.1 riastrad mutex_unlock(&smu->mutex); 2492 1.1 riastrad 2493 1.1 riastrad return ret; 2494 1.1 riastrad } 2495 1.1 riastrad 2496 1.1 riastrad bool smu_baco_is_support(struct smu_context *smu) 2497 1.1 riastrad { 2498 1.1 riastrad bool ret = false; 2499 1.1 riastrad 2500 1.1 riastrad mutex_lock(&smu->mutex); 2501 1.1 riastrad 2502 1.1 riastrad if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support) 2503 1.1 riastrad ret = smu->ppt_funcs->baco_is_support(smu); 2504 1.1 riastrad 2505 1.1 riastrad mutex_unlock(&smu->mutex); 2506 1.1 riastrad 2507 1.1 riastrad return ret; 2508 1.1 riastrad } 2509 1.1 riastrad 2510 1.1 riastrad int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state) 2511 1.1 riastrad { 2512 1.1 riastrad if (smu->ppt_funcs->baco_get_state) 2513 1.1 riastrad return -EINVAL; 2514 1.1 riastrad 2515 1.1 riastrad mutex_lock(&smu->mutex); 2516 1.1 riastrad *state = smu->ppt_funcs->baco_get_state(smu); 2517 1.1 riastrad mutex_unlock(&smu->mutex); 2518 1.1 riastrad 2519 1.1 riastrad return 0; 2520 1.1 riastrad } 2521 1.1 riastrad 2522 1.1 riastrad int smu_baco_enter(struct smu_context *smu) 2523 1.1 riastrad { 2524 1.1 riastrad int ret = 0; 2525 1.1 riastrad 2526 1.1 riastrad mutex_lock(&smu->mutex); 
2527 1.1 riastrad 2528 1.1 riastrad if (smu->ppt_funcs->baco_enter) 2529 1.1 riastrad ret = smu->ppt_funcs->baco_enter(smu); 2530 1.1 riastrad 2531 1.1 riastrad mutex_unlock(&smu->mutex); 2532 1.1 riastrad 2533 1.1 riastrad return ret; 2534 1.1 riastrad } 2535 1.1 riastrad 2536 1.1 riastrad int smu_baco_exit(struct smu_context *smu) 2537 1.1 riastrad { 2538 1.1 riastrad int ret = 0; 2539 1.1 riastrad 2540 1.1 riastrad mutex_lock(&smu->mutex); 2541 1.1 riastrad 2542 1.1 riastrad if (smu->ppt_funcs->baco_exit) 2543 1.1 riastrad ret = smu->ppt_funcs->baco_exit(smu); 2544 1.1 riastrad 2545 1.1 riastrad mutex_unlock(&smu->mutex); 2546 1.1 riastrad 2547 1.1 riastrad return ret; 2548 1.1 riastrad } 2549 1.1 riastrad 2550 1.1 riastrad int smu_mode2_reset(struct smu_context *smu) 2551 1.1 riastrad { 2552 1.1 riastrad int ret = 0; 2553 1.1 riastrad 2554 1.1 riastrad mutex_lock(&smu->mutex); 2555 1.1 riastrad 2556 1.1 riastrad if (smu->ppt_funcs->mode2_reset) 2557 1.1 riastrad ret = smu->ppt_funcs->mode2_reset(smu); 2558 1.1 riastrad 2559 1.1 riastrad mutex_unlock(&smu->mutex); 2560 1.1 riastrad 2561 1.1 riastrad return ret; 2562 1.1 riastrad } 2563 1.1 riastrad 2564 1.1 riastrad int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu, 2565 1.1 riastrad struct pp_smu_nv_clock_table *max_clocks) 2566 1.1 riastrad { 2567 1.1 riastrad int ret = 0; 2568 1.1 riastrad 2569 1.1 riastrad mutex_lock(&smu->mutex); 2570 1.1 riastrad 2571 1.1 riastrad if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc) 2572 1.1 riastrad ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks); 2573 1.1 riastrad 2574 1.1 riastrad mutex_unlock(&smu->mutex); 2575 1.1 riastrad 2576 1.1 riastrad return ret; 2577 1.1 riastrad } 2578 1.1 riastrad 2579 1.1 riastrad int smu_get_uclk_dpm_states(struct smu_context *smu, 2580 1.1 riastrad unsigned int *clock_values_in_khz, 2581 1.1 riastrad unsigned int *num_states) 2582 1.1 riastrad { 2583 1.1 riastrad int ret = 0; 2584 1.1 riastrad 2585 
1.1 riastrad mutex_lock(&smu->mutex); 2586 1.1 riastrad 2587 1.1 riastrad if (smu->ppt_funcs->get_uclk_dpm_states) 2588 1.1 riastrad ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states); 2589 1.1 riastrad 2590 1.1 riastrad mutex_unlock(&smu->mutex); 2591 1.1 riastrad 2592 1.1 riastrad return ret; 2593 1.1 riastrad } 2594 1.1 riastrad 2595 1.1 riastrad enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu) 2596 1.1 riastrad { 2597 1.1 riastrad enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT; 2598 1.1 riastrad 2599 1.1 riastrad mutex_lock(&smu->mutex); 2600 1.1 riastrad 2601 1.1 riastrad if (smu->ppt_funcs->get_current_power_state) 2602 1.1 riastrad pm_state = smu->ppt_funcs->get_current_power_state(smu); 2603 1.1 riastrad 2604 1.1 riastrad mutex_unlock(&smu->mutex); 2605 1.1 riastrad 2606 1.1 riastrad return pm_state; 2607 1.1 riastrad } 2608 1.1 riastrad 2609 1.1 riastrad int smu_get_dpm_clock_table(struct smu_context *smu, 2610 1.1 riastrad struct dpm_clocks *clock_table) 2611 1.1 riastrad { 2612 1.1 riastrad int ret = 0; 2613 1.1 riastrad 2614 1.1 riastrad mutex_lock(&smu->mutex); 2615 1.1 riastrad 2616 1.1 riastrad if (smu->ppt_funcs->get_dpm_clock_table) 2617 1.1 riastrad ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table); 2618 1.1 riastrad 2619 1.1 riastrad mutex_unlock(&smu->mutex); 2620 1.1 riastrad 2621 1.1 riastrad return ret; 2622 1.1 riastrad } 2623 1.1 riastrad 2624 1.1 riastrad uint32_t smu_get_pptable_power_limit(struct smu_context *smu) 2625 1.1 riastrad { 2626 1.1 riastrad uint32_t ret = 0; 2627 1.1 riastrad 2628 1.1 riastrad if (smu->ppt_funcs->get_pptable_power_limit) 2629 1.1 riastrad ret = smu->ppt_funcs->get_pptable_power_limit(smu); 2630 1.1 riastrad 2631 1.1 riastrad return ret; 2632 1.1 riastrad } 2633 1.1 riastrad 2634 1.1 riastrad int smu_send_smc_msg(struct smu_context *smu, 2635 1.1 riastrad enum smu_message_type msg) 2636 1.1 riastrad { 2637 1.1 riastrad int ret; 2638 
1.1 riastrad 2639 1.1 riastrad ret = smu_send_smc_msg_with_param(smu, msg, 0); 2640 1.1 riastrad return ret; 2641 1.1 riastrad } 2642