      1 /*	$NetBSD: amdgpu_navi10_ppt.c,v 1.4 2021/12/19 12:21:29 riastradh Exp $	*/
      2 
      3 /*
      4  * Copyright 2019 Advanced Micro Devices, Inc.
      5  *
      6  * Permission is hereby granted, free of charge, to any person obtaining a
      7  * copy of this software and associated documentation files (the "Software"),
      8  * to deal in the Software without restriction, including without limitation
      9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
     10  * and/or sell copies of the Software, and to permit persons to whom the
     11  * Software is furnished to do so, subject to the following conditions:
     12  *
     13  * The above copyright notice and this permission notice shall be included in
     14  * all copies or substantial portions of the Software.
     15  *
     16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     22  * OTHER DEALINGS IN THE SOFTWARE.
     23  *
     24  */
     25 
     26 #include <sys/cdefs.h>
     27 __KERNEL_RCSID(0, "$NetBSD: amdgpu_navi10_ppt.c,v 1.4 2021/12/19 12:21:29 riastradh Exp $");
     28 
     29 #include "pp_debug.h"
     30 #include <linux/firmware.h>
     31 #include <linux/pci.h>
     32 #include "amdgpu.h"
     33 #include "amdgpu_smu.h"
     34 #include "smu_internal.h"
     35 #include "atomfirmware.h"
     36 #include "amdgpu_atomfirmware.h"
     37 #include "smu_v11_0.h"
     38 #include "smu11_driver_if_navi10.h"
     39 #include "soc15_common.h"
     40 #include "atom.h"
     41 #include "navi10_ppt.h"
     42 #include "smu_v11_0_pptable.h"
     43 #include "smu_v11_0_ppsmc.h"
     44 #include "nbio/nbio_7_4_sh_mask.h"
     45 
     46 #include "asic_reg/mp/mp_11_0_sh_mask.h"
     47 
      48 /* XXX NetBSD: map upstream's sprintf calls onto snprintf. */
     49 #define	sprintf(buf, fmt, ...)						      \
     50 	snprintf(buf, (size_t)-1, fmt, ##__VA_ARGS__)
     51 
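         /*
          * FEATURE_MASK() turns a FEATURE_*_BIT index into a 64-bit mask.
          * SMC_DPM_FEATURE collects the DPM-related feature bits and is used
          * by navi10_is_dpm_running() to decide whether any DPM is active.
          */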
     52 #define FEATURE_MASK(feature) (1ULL << feature)
     53 #define SMC_DPM_FEATURE ( \
     54 	FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT) | \
     55 	FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT)	 | \
     56 	FEATURE_MASK(FEATURE_DPM_GFX_PACE_BIT)	 | \
     57 	FEATURE_MASK(FEATURE_DPM_UCLK_BIT)	 | \
     58 	FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT)	 | \
     59 	FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT)	 | \
     60 	FEATURE_MASK(FEATURE_DPM_LINK_BIT)	 | \
     61 	FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT))
     62 
     63 #define MSG_MAP(msg, index) \
     64 	[SMU_MSG_##msg] = {1, (index)}
     65 
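         /*
          * Translate the driver's generic SMU_MSG_* indices into the Navi10
          * PPSMC_MSG_* message numbers understood by the SMU firmware; the
          * leading "1" in MSG_MAP() marks the entry as a valid mapping.
          */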
     66 static struct smu_11_0_cmn2aisc_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = {
     67 	MSG_MAP(TestMessage,			PPSMC_MSG_TestMessage),
     68 	MSG_MAP(GetSmuVersion,			PPSMC_MSG_GetSmuVersion),
     69 	MSG_MAP(GetDriverIfVersion,		PPSMC_MSG_GetDriverIfVersion),
     70 	MSG_MAP(SetAllowedFeaturesMaskLow,	PPSMC_MSG_SetAllowedFeaturesMaskLow),
     71 	MSG_MAP(SetAllowedFeaturesMaskHigh,	PPSMC_MSG_SetAllowedFeaturesMaskHigh),
     72 	MSG_MAP(EnableAllSmuFeatures,		PPSMC_MSG_EnableAllSmuFeatures),
     73 	MSG_MAP(DisableAllSmuFeatures,		PPSMC_MSG_DisableAllSmuFeatures),
     74 	MSG_MAP(EnableSmuFeaturesLow,		PPSMC_MSG_EnableSmuFeaturesLow),
     75 	MSG_MAP(EnableSmuFeaturesHigh,		PPSMC_MSG_EnableSmuFeaturesHigh),
     76 	MSG_MAP(DisableSmuFeaturesLow,		PPSMC_MSG_DisableSmuFeaturesLow),
     77 	MSG_MAP(DisableSmuFeaturesHigh,		PPSMC_MSG_DisableSmuFeaturesHigh),
     78 	MSG_MAP(GetEnabledSmuFeaturesLow,	PPSMC_MSG_GetEnabledSmuFeaturesLow),
     79 	MSG_MAP(GetEnabledSmuFeaturesHigh,	PPSMC_MSG_GetEnabledSmuFeaturesHigh),
     80 	MSG_MAP(SetWorkloadMask,		PPSMC_MSG_SetWorkloadMask),
     81 	MSG_MAP(SetPptLimit,			PPSMC_MSG_SetPptLimit),
     82 	MSG_MAP(SetDriverDramAddrHigh,		PPSMC_MSG_SetDriverDramAddrHigh),
     83 	MSG_MAP(SetDriverDramAddrLow,		PPSMC_MSG_SetDriverDramAddrLow),
     84 	MSG_MAP(SetToolsDramAddrHigh,		PPSMC_MSG_SetToolsDramAddrHigh),
     85 	MSG_MAP(SetToolsDramAddrLow,		PPSMC_MSG_SetToolsDramAddrLow),
     86 	MSG_MAP(TransferTableSmu2Dram,		PPSMC_MSG_TransferTableSmu2Dram),
     87 	MSG_MAP(TransferTableDram2Smu,		PPSMC_MSG_TransferTableDram2Smu),
     88 	MSG_MAP(UseDefaultPPTable,		PPSMC_MSG_UseDefaultPPTable),
     89 	MSG_MAP(UseBackupPPTable,		PPSMC_MSG_UseBackupPPTable),
     90 	MSG_MAP(RunBtc,				PPSMC_MSG_RunBtc),
     91 	MSG_MAP(EnterBaco,			PPSMC_MSG_EnterBaco),
     92 	MSG_MAP(SetSoftMinByFreq,		PPSMC_MSG_SetSoftMinByFreq),
     93 	MSG_MAP(SetSoftMaxByFreq,		PPSMC_MSG_SetSoftMaxByFreq),
     94 	MSG_MAP(SetHardMinByFreq,		PPSMC_MSG_SetHardMinByFreq),
     95 	MSG_MAP(SetHardMaxByFreq,		PPSMC_MSG_SetHardMaxByFreq),
     96 	MSG_MAP(GetMinDpmFreq,			PPSMC_MSG_GetMinDpmFreq),
     97 	MSG_MAP(GetMaxDpmFreq,			PPSMC_MSG_GetMaxDpmFreq),
     98 	MSG_MAP(GetDpmFreqByIndex,		PPSMC_MSG_GetDpmFreqByIndex),
     99 	MSG_MAP(SetMemoryChannelConfig,		PPSMC_MSG_SetMemoryChannelConfig),
    100 	MSG_MAP(SetGeminiMode,			PPSMC_MSG_SetGeminiMode),
    101 	MSG_MAP(SetGeminiApertureHigh,		PPSMC_MSG_SetGeminiApertureHigh),
    102 	MSG_MAP(SetGeminiApertureLow,		PPSMC_MSG_SetGeminiApertureLow),
    103 	MSG_MAP(OverridePcieParameters,		PPSMC_MSG_OverridePcieParameters),
    104 	MSG_MAP(SetMinDeepSleepDcefclk,		PPSMC_MSG_SetMinDeepSleepDcefclk),
    105 	MSG_MAP(ReenableAcDcInterrupt,		PPSMC_MSG_ReenableAcDcInterrupt),
    106 	MSG_MAP(NotifyPowerSource,		PPSMC_MSG_NotifyPowerSource),
    107 	MSG_MAP(SetUclkFastSwitch,		PPSMC_MSG_SetUclkFastSwitch),
    108 	MSG_MAP(SetVideoFps,			PPSMC_MSG_SetVideoFps),
    109 	MSG_MAP(PrepareMp1ForUnload,		PPSMC_MSG_PrepareMp1ForUnload),
    110 	MSG_MAP(DramLogSetDramAddrHigh,		PPSMC_MSG_DramLogSetDramAddrHigh),
    111 	MSG_MAP(DramLogSetDramAddrLow,		PPSMC_MSG_DramLogSetDramAddrLow),
    112 	MSG_MAP(DramLogSetDramSize,		PPSMC_MSG_DramLogSetDramSize),
    113 	MSG_MAP(ConfigureGfxDidt,		PPSMC_MSG_ConfigureGfxDidt),
    114 	MSG_MAP(NumOfDisplays,			PPSMC_MSG_NumOfDisplays),
    115 	MSG_MAP(SetSystemVirtualDramAddrHigh,	PPSMC_MSG_SetSystemVirtualDramAddrHigh),
    116 	MSG_MAP(SetSystemVirtualDramAddrLow,	PPSMC_MSG_SetSystemVirtualDramAddrLow),
    117 	MSG_MAP(AllowGfxOff,			PPSMC_MSG_AllowGfxOff),
    118 	MSG_MAP(DisallowGfxOff,			PPSMC_MSG_DisallowGfxOff),
    119 	MSG_MAP(GetPptLimit,			PPSMC_MSG_GetPptLimit),
    120 	MSG_MAP(GetDcModeMaxDpmFreq,		PPSMC_MSG_GetDcModeMaxDpmFreq),
    121 	MSG_MAP(GetDebugData,			PPSMC_MSG_GetDebugData),
    122 	MSG_MAP(ExitBaco,			PPSMC_MSG_ExitBaco),
    123 	MSG_MAP(PrepareMp1ForReset,		PPSMC_MSG_PrepareMp1ForReset),
    124 	MSG_MAP(PrepareMp1ForShutdown,		PPSMC_MSG_PrepareMp1ForShutdown),
    125 	MSG_MAP(PowerUpVcn,		PPSMC_MSG_PowerUpVcn),
    126 	MSG_MAP(PowerDownVcn,		PPSMC_MSG_PowerDownVcn),
    127 	MSG_MAP(PowerUpJpeg,		PPSMC_MSG_PowerUpJpeg),
    128 	MSG_MAP(PowerDownJpeg,		PPSMC_MSG_PowerDownJpeg),
    129 	MSG_MAP(BacoAudioD3PME,		PPSMC_MSG_BacoAudioD3PME),
    130 	MSG_MAP(ArmD3,			PPSMC_MSG_ArmD3),
    131 	MSG_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE,PPSMC_MSG_DALDisableDummyPstateChange),
    132 	MSG_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE,	PPSMC_MSG_DALEnableDummyPstateChange),
    133 	MSG_MAP(GetVoltageByDpm,		     PPSMC_MSG_GetVoltageByDpm),
    134 	MSG_MAP(GetVoltageByDpmOverdrive,	     PPSMC_MSG_GetVoltageByDpmOverdrive),
    135 };
    136 
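         /*
          * Clock-type mapping.  SCLK aliases GFXCLK, MCLK aliases UCLK, and
          * FCLK is reported through the SOCCLK domain on this ASIC.
          */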
    137 static struct smu_11_0_cmn2aisc_mapping navi10_clk_map[SMU_CLK_COUNT] = {
    138 	CLK_MAP(GFXCLK, PPCLK_GFXCLK),
    139 	CLK_MAP(SCLK,	PPCLK_GFXCLK),
    140 	CLK_MAP(SOCCLK, PPCLK_SOCCLK),
    141 	CLK_MAP(FCLK, PPCLK_SOCCLK),
    142 	CLK_MAP(UCLK, PPCLK_UCLK),
    143 	CLK_MAP(MCLK, PPCLK_UCLK),
    144 	CLK_MAP(DCLK, PPCLK_DCLK),
    145 	CLK_MAP(VCLK, PPCLK_VCLK),
    146 	CLK_MAP(DCEFCLK, PPCLK_DCEFCLK),
    147 	CLK_MAP(DISPCLK, PPCLK_DISPCLK),
    148 	CLK_MAP(PIXCLK, PPCLK_PIXCLK),
    149 	CLK_MAP(PHYCLK, PPCLK_PHYCLK),
    150 };
    151 
    152 static struct smu_11_0_cmn2aisc_mapping navi10_feature_mask_map[SMU_FEATURE_COUNT] = {
    153 	FEA_MAP(DPM_PREFETCHER),
    154 	FEA_MAP(DPM_GFXCLK),
    155 	FEA_MAP(DPM_GFX_PACE),
    156 	FEA_MAP(DPM_UCLK),
    157 	FEA_MAP(DPM_SOCCLK),
    158 	FEA_MAP(DPM_MP0CLK),
    159 	FEA_MAP(DPM_LINK),
    160 	FEA_MAP(DPM_DCEFCLK),
    161 	FEA_MAP(MEM_VDDCI_SCALING),
    162 	FEA_MAP(MEM_MVDD_SCALING),
    163 	FEA_MAP(DS_GFXCLK),
    164 	FEA_MAP(DS_SOCCLK),
    165 	FEA_MAP(DS_LCLK),
    166 	FEA_MAP(DS_DCEFCLK),
    167 	FEA_MAP(DS_UCLK),
    168 	FEA_MAP(GFX_ULV),
    169 	FEA_MAP(FW_DSTATE),
    170 	FEA_MAP(GFXOFF),
    171 	FEA_MAP(BACO),
    172 	FEA_MAP(VCN_PG),
    173 	FEA_MAP(JPEG_PG),
    174 	FEA_MAP(USB_PG),
    175 	FEA_MAP(RSMU_SMN_CG),
    176 	FEA_MAP(PPT),
    177 	FEA_MAP(TDC),
    178 	FEA_MAP(GFX_EDC),
    179 	FEA_MAP(APCC_PLUS),
    180 	FEA_MAP(GTHR),
    181 	FEA_MAP(ACDC),
    182 	FEA_MAP(VR0HOT),
    183 	FEA_MAP(VR1HOT),
    184 	FEA_MAP(FW_CTF),
    185 	FEA_MAP(FAN_CONTROL),
    186 	FEA_MAP(THERMAL),
    187 	FEA_MAP(GFX_DCS),
    188 	FEA_MAP(RM),
    189 	FEA_MAP(LED_DISPLAY),
    190 	FEA_MAP(GFX_SS),
    191 	FEA_MAP(OUT_OF_BAND_MONITOR),
    192 	FEA_MAP(TEMP_DEPENDENT_VMIN),
    193 	FEA_MAP(MMHUB_PG),
    194 	FEA_MAP(ATHUB_PG),
    195 	FEA_MAP(APCC_DFLL),
    196 };
    197 
    198 static struct smu_11_0_cmn2aisc_mapping navi10_table_map[SMU_TABLE_COUNT] = {
    199 	TAB_MAP(PPTABLE),
    200 	TAB_MAP(WATERMARKS),
    201 	TAB_MAP(AVFS),
    202 	TAB_MAP(AVFS_PSM_DEBUG),
    203 	TAB_MAP(AVFS_FUSE_OVERRIDE),
    204 	TAB_MAP(PMSTATUSLOG),
    205 	TAB_MAP(SMU_METRICS),
    206 	TAB_MAP(DRIVER_SMU_CONFIG),
    207 	TAB_MAP(ACTIVITY_MONITOR_COEFF),
    208 	TAB_MAP(OVERDRIVE),
    209 	TAB_MAP(I2C_COMMANDS),
    210 	TAB_MAP(PACE),
    211 };
    212 
    213 static struct smu_11_0_cmn2aisc_mapping navi10_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
    214 	PWR_MAP(AC),
    215 	PWR_MAP(DC),
    216 };
    217 
    218 static struct smu_11_0_cmn2aisc_mapping navi10_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
    219 	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT,	WORKLOAD_PPLIB_DEFAULT_BIT),
    220 	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D,		WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
    221 	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING,		WORKLOAD_PPLIB_POWER_SAVING_BIT),
    222 	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO,		WORKLOAD_PPLIB_VIDEO_BIT),
    223 	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR,			WORKLOAD_PPLIB_VR_BIT),
    224 	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE,		WORKLOAD_PPLIB_COMPUTE_BIT),
    225 	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM,		WORKLOAD_PPLIB_CUSTOM_BIT),
    226 };
    227 
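         /*
          * The *_get_*_index() helpers below share one pattern: bounds-check
          * the generic index, then return the ASIC-specific value from the
          * mapping table, or -EINVAL if the entry was never marked valid.
          */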
    228 static int navi10_get_smu_msg_index(struct smu_context *smc, uint32_t index)
    229 {
    230 	struct smu_11_0_cmn2aisc_mapping mapping;
    231 
    232 	if (index >= SMU_MSG_MAX_COUNT)
    233 		return -EINVAL;
    234 
    235 	mapping = navi10_message_map[index];
    236 	if (!(mapping.valid_mapping)) {
    237 		return -EINVAL;
    238 	}
    239 
    240 	return mapping.map_to;
    241 }
    242 
    243 static int navi10_get_smu_clk_index(struct smu_context *smc, uint32_t index)
    244 {
    245 	struct smu_11_0_cmn2aisc_mapping mapping;
    246 
    247 	if (index >= SMU_CLK_COUNT)
    248 		return -EINVAL;
    249 
    250 	mapping = navi10_clk_map[index];
    251 	if (!(mapping.valid_mapping)) {
    252 		return -EINVAL;
    253 	}
    254 
    255 	return mapping.map_to;
    256 }
    257 
    258 static int navi10_get_smu_feature_index(struct smu_context *smc, uint32_t index)
    259 {
    260 	struct smu_11_0_cmn2aisc_mapping mapping;
    261 
    262 	if (index >= SMU_FEATURE_COUNT)
    263 		return -EINVAL;
    264 
    265 	mapping = navi10_feature_mask_map[index];
    266 	if (!(mapping.valid_mapping)) {
    267 		return -EINVAL;
    268 	}
    269 
    270 	return mapping.map_to;
    271 }
    272 
    273 static int navi10_get_smu_table_index(struct smu_context *smc, uint32_t index)
    274 {
    275 	struct smu_11_0_cmn2aisc_mapping mapping;
    276 
    277 	if (index >= SMU_TABLE_COUNT)
    278 		return -EINVAL;
    279 
    280 	mapping = navi10_table_map[index];
    281 	if (!(mapping.valid_mapping)) {
    282 		return -EINVAL;
    283 	}
    284 
    285 	return mapping.map_to;
    286 }
    287 
    288 static int navi10_get_pwr_src_index(struct smu_context *smc, uint32_t index)
    289 {
    290 	struct smu_11_0_cmn2aisc_mapping mapping;
    291 
    292 	if (index >= SMU_POWER_SOURCE_COUNT)
    293 		return -EINVAL;
    294 
    295 	mapping = navi10_pwr_src_map[index];
    296 	if (!(mapping.valid_mapping)) {
    297 		return -EINVAL;
    298 	}
    299 
    300 	return mapping.map_to;
    301 }
    302 
    303 
    304 static int navi10_get_workload_type(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile)
    305 {
    306 	struct smu_11_0_cmn2aisc_mapping mapping;
    307 
    308 	if (profile > PP_SMC_POWER_PROFILE_CUSTOM)
    309 		return -EINVAL;
    310 
    311 	mapping = navi10_workload_map[profile];
    312 	if (!(mapping.valid_mapping)) {
    313 		return -EINVAL;
    314 	}
    315 
    316 	return mapping.map_to;
    317 }
    318 
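         /*
          * Read the MP0 firmware interface register through the indirect PCIE
          * register path; if bit 19 is clear the part is treated as non-secure.
          */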
    319 static bool is_asic_secure(struct smu_context *smu)
    320 {
    321 	struct amdgpu_device *adev = smu->adev;
    322 	bool is_secure = true;
    323 	uint32_t mp0_fw_intf;
    324 
    325 	mp0_fw_intf = RREG32_PCIE(MP0_Public |
    326 				   (smnMP0_FW_INTF & 0xffffffff));
    327 
    328 	if (!(mp0_fw_intf & (1 << 19)))
    329 		is_secure = false;
    330 
    331 	return is_secure;
    332 }
    333 
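         /*
          * Build the mask of SMU features the driver allows the firmware to
          * enable.  The two 32-bit words are filled as a single 64-bit bitmap,
          * so callers are expected to pass num == 2; optional features are
          * added based on adev->pm.pp_feature and adev->pg_flags.
          */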
    334 static int
    335 navi10_get_allowed_feature_mask(struct smu_context *smu,
    336 				  uint32_t *feature_mask, uint32_t num)
    337 {
    338 	struct amdgpu_device *adev = smu->adev;
    339 
    340 	if (num > 2)
    341 		return -EINVAL;
    342 
    343 	memset(feature_mask, 0, sizeof(uint32_t) * num);
    344 
    345 	*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT)
    346 				| FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT)
    347 				| FEATURE_MASK(FEATURE_RSMU_SMN_CG_BIT)
    348 				| FEATURE_MASK(FEATURE_DS_SOCCLK_BIT)
    349 				| FEATURE_MASK(FEATURE_PPT_BIT)
    350 				| FEATURE_MASK(FEATURE_TDC_BIT)
    351 				| FEATURE_MASK(FEATURE_GFX_EDC_BIT)
    352 				| FEATURE_MASK(FEATURE_APCC_PLUS_BIT)
    353 				| FEATURE_MASK(FEATURE_VR0HOT_BIT)
    354 				| FEATURE_MASK(FEATURE_FAN_CONTROL_BIT)
    355 				| FEATURE_MASK(FEATURE_THERMAL_BIT)
    356 				| FEATURE_MASK(FEATURE_LED_DISPLAY_BIT)
    357 				| FEATURE_MASK(FEATURE_DS_LCLK_BIT)
    358 				| FEATURE_MASK(FEATURE_DS_DCEFCLK_BIT)
    359 				| FEATURE_MASK(FEATURE_FW_DSTATE_BIT)
    360 				| FEATURE_MASK(FEATURE_BACO_BIT)
    361 				| FEATURE_MASK(FEATURE_ACDC_BIT)
    362 				| FEATURE_MASK(FEATURE_GFX_SS_BIT)
    363 				| FEATURE_MASK(FEATURE_APCC_DFLL_BIT)
    364 				| FEATURE_MASK(FEATURE_FW_CTF_BIT)
    365 				| FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT);
    366 
    367 	if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)
    368 		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
    369 
    370 	if (adev->pm.pp_feature & PP_SCLK_DPM_MASK)
    371 		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
    372 
    373 	if (adev->pm.pp_feature & PP_PCIE_DPM_MASK)
    374 		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_LINK_BIT);
    375 
    376 	if (adev->pm.pp_feature & PP_DCEFCLK_DPM_MASK)
    377 		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT);
    378 
    379 	if (adev->pm.pp_feature & PP_MCLK_DPM_MASK)
    380 		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT)
    381 				| FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT)
    382 				| FEATURE_MASK(FEATURE_MEM_MVDD_SCALING_BIT);
    383 
    384 	if (adev->pm.pp_feature & PP_ULV_MASK)
    385 		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_ULV_BIT);
    386 
    387 	if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
    388 		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);
    389 
    390 	if (adev->pm.pp_feature & PP_GFXOFF_MASK)
    391 		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXOFF_BIT);
    392 
    393 	if (smu->adev->pg_flags & AMD_PG_SUPPORT_MMHUB)
    394 		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MMHUB_PG_BIT);
    395 
    396 	if (smu->adev->pg_flags & AMD_PG_SUPPORT_ATHUB)
    397 		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ATHUB_PG_BIT);
    398 
    399 	if (smu->adev->pg_flags & AMD_PG_SUPPORT_VCN)
    400 		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VCN_PG_BIT);
    401 
    402 	if (smu->adev->pg_flags & AMD_PG_SUPPORT_JPEG)
    403 		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_JPEG_PG_BIT);
    404 
    405 	/* disable DPM UCLK and DS SOCCLK on navi10 A0 secure board */
    406 	if (is_asic_secure(smu)) {
    407 		/* only for navi10 A0 */
    408 		if ((adev->asic_type == CHIP_NAVI10) &&
    409 			(adev->rev_id == 0)) {
    410 			*(uint64_t *)feature_mask &=
    411 					~(FEATURE_MASK(FEATURE_DPM_UCLK_BIT)
    412 					  | FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT)
    413 					  | FEATURE_MASK(FEATURE_MEM_MVDD_SCALING_BIT));
    414 			*(uint64_t *)feature_mask &=
    415 					~FEATURE_MASK(FEATURE_DS_SOCCLK_BIT);
    416 		}
    417 	}
    418 
    419 	return 0;
    420 }
    421 
    422 static int navi10_check_powerplay_table(struct smu_context *smu)
    423 {
    424 	return 0;
    425 }
    426 
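         /*
          * Pull the board-specific smc_dpm_info (atom_smc_dpm_info_v4_5) out
          * of the VBIOS and copy its board parameters (VR mappings, telemetry,
          * GPIOs, LED pins, spread spectrum, ...) into the driver PPTable.
          */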
    427 static int navi10_append_powerplay_table(struct smu_context *smu)
    428 {
    429 	struct amdgpu_device *adev = smu->adev;
    430 	struct smu_table_context *table_context = &smu->smu_table;
    431 	PPTable_t *smc_pptable = table_context->driver_pptable;
    432 	struct atom_smc_dpm_info_v4_5 *smc_dpm_table;
    433 	int index, ret;
    434 
    435 	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
    436 					   smc_dpm_info);
    437 
    438 	ret = smu_get_atom_data_table(smu, index, NULL, NULL, NULL,
    439 				      (uint8_t **)&smc_dpm_table);
    440 	if (ret)
    441 		return ret;
    442 
    443 	memcpy(smc_pptable->I2cControllers, smc_dpm_table->I2cControllers,
    444 	       sizeof(I2cControllerConfig_t) * NUM_I2C_CONTROLLERS);
    445 
    446 	/* SVI2 Board Parameters */
    447 	smc_pptable->MaxVoltageStepGfx = smc_dpm_table->MaxVoltageStepGfx;
    448 	smc_pptable->MaxVoltageStepSoc = smc_dpm_table->MaxVoltageStepSoc;
    449 	smc_pptable->VddGfxVrMapping = smc_dpm_table->VddGfxVrMapping;
    450 	smc_pptable->VddSocVrMapping = smc_dpm_table->VddSocVrMapping;
    451 	smc_pptable->VddMem0VrMapping = smc_dpm_table->VddMem0VrMapping;
    452 	smc_pptable->VddMem1VrMapping = smc_dpm_table->VddMem1VrMapping;
    453 	smc_pptable->GfxUlvPhaseSheddingMask = smc_dpm_table->GfxUlvPhaseSheddingMask;
    454 	smc_pptable->SocUlvPhaseSheddingMask = smc_dpm_table->SocUlvPhaseSheddingMask;
    455 	smc_pptable->ExternalSensorPresent = smc_dpm_table->ExternalSensorPresent;
    456 	smc_pptable->Padding8_V = smc_dpm_table->Padding8_V;
    457 
    458 	/* Telemetry Settings */
    459 	smc_pptable->GfxMaxCurrent = smc_dpm_table->GfxMaxCurrent;
    460 	smc_pptable->GfxOffset = smc_dpm_table->GfxOffset;
    461 	smc_pptable->Padding_TelemetryGfx = smc_dpm_table->Padding_TelemetryGfx;
    462 	smc_pptable->SocMaxCurrent = smc_dpm_table->SocMaxCurrent;
    463 	smc_pptable->SocOffset = smc_dpm_table->SocOffset;
    464 	smc_pptable->Padding_TelemetrySoc = smc_dpm_table->Padding_TelemetrySoc;
    465 	smc_pptable->Mem0MaxCurrent = smc_dpm_table->Mem0MaxCurrent;
    466 	smc_pptable->Mem0Offset = smc_dpm_table->Mem0Offset;
    467 	smc_pptable->Padding_TelemetryMem0 = smc_dpm_table->Padding_TelemetryMem0;
    468 	smc_pptable->Mem1MaxCurrent = smc_dpm_table->Mem1MaxCurrent;
    469 	smc_pptable->Mem1Offset = smc_dpm_table->Mem1Offset;
    470 	smc_pptable->Padding_TelemetryMem1 = smc_dpm_table->Padding_TelemetryMem1;
    471 
    472 	/* GPIO Settings */
    473 	smc_pptable->AcDcGpio = smc_dpm_table->AcDcGpio;
    474 	smc_pptable->AcDcPolarity = smc_dpm_table->AcDcPolarity;
    475 	smc_pptable->VR0HotGpio = smc_dpm_table->VR0HotGpio;
    476 	smc_pptable->VR0HotPolarity = smc_dpm_table->VR0HotPolarity;
    477 	smc_pptable->VR1HotGpio = smc_dpm_table->VR1HotGpio;
    478 	smc_pptable->VR1HotPolarity = smc_dpm_table->VR1HotPolarity;
    479 	smc_pptable->GthrGpio = smc_dpm_table->GthrGpio;
    480 	smc_pptable->GthrPolarity = smc_dpm_table->GthrPolarity;
    481 
    482 	/* LED Display Settings */
    483 	smc_pptable->LedPin0 = smc_dpm_table->LedPin0;
    484 	smc_pptable->LedPin1 = smc_dpm_table->LedPin1;
    485 	smc_pptable->LedPin2 = smc_dpm_table->LedPin2;
    486 	smc_pptable->padding8_4 = smc_dpm_table->padding8_4;
    487 
    488 	/* GFXCLK PLL Spread Spectrum */
    489 	smc_pptable->PllGfxclkSpreadEnabled = smc_dpm_table->PllGfxclkSpreadEnabled;
    490 	smc_pptable->PllGfxclkSpreadPercent = smc_dpm_table->PllGfxclkSpreadPercent;
    491 	smc_pptable->PllGfxclkSpreadFreq = smc_dpm_table->PllGfxclkSpreadFreq;
    492 
    493 	/* GFXCLK DFLL Spread Spectrum */
    494 	smc_pptable->DfllGfxclkSpreadEnabled = smc_dpm_table->DfllGfxclkSpreadEnabled;
    495 	smc_pptable->DfllGfxclkSpreadPercent = smc_dpm_table->DfllGfxclkSpreadPercent;
    496 	smc_pptable->DfllGfxclkSpreadFreq = smc_dpm_table->DfllGfxclkSpreadFreq;
    497 
    498 	/* UCLK Spread Spectrum */
    499 	smc_pptable->UclkSpreadEnabled = smc_dpm_table->UclkSpreadEnabled;
    500 	smc_pptable->UclkSpreadPercent = smc_dpm_table->UclkSpreadPercent;
    501 	smc_pptable->UclkSpreadFreq = smc_dpm_table->UclkSpreadFreq;
    502 
    503 	/* SOCCLK Spread Spectrum */
    504 	smc_pptable->SoclkSpreadEnabled = smc_dpm_table->SoclkSpreadEnabled;
    505 	smc_pptable->SocclkSpreadPercent = smc_dpm_table->SocclkSpreadPercent;
    506 	smc_pptable->SocclkSpreadFreq = smc_dpm_table->SocclkSpreadFreq;
    507 
    508 	/* Total board power */
    509 	smc_pptable->TotalBoardPower = smc_dpm_table->TotalBoardPower;
    510 	smc_pptable->BoardPadding = smc_dpm_table->BoardPadding;
    511 
    512 	/* Mvdd Svi2 Div Ratio Setting */
    513 	smc_pptable->MvddRatio = smc_dpm_table->MvddRatio;
    514 
    515 	if (adev->pm.pp_feature & PP_GFXOFF_MASK) {
    516 		/* TODO: remove it once SMU fw fix it */
     517 		/* TODO: remove this once the SMU firmware fixes it */
    518 	}
    519 
    520 	return 0;
    521 }
    522 
    523 static int navi10_store_powerplay_table(struct smu_context *smu)
    524 {
    525 	const struct smu_11_0_powerplay_table *powerplay_table = NULL;
    526 	struct smu_table_context *table_context = &smu->smu_table;
    527 	struct smu_baco_context *smu_baco = &smu->smu_baco;
    528 
    529 	if (!table_context->power_play_table)
    530 		return -EINVAL;
    531 
    532 	powerplay_table = table_context->power_play_table;
    533 
    534 	memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable,
    535 	       sizeof(PPTable_t));
    536 
    537 	table_context->thermal_controller_type = powerplay_table->thermal_controller_type;
    538 
    539 	mutex_lock(&smu_baco->mutex);
    540 	if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO ||
    541 	    powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO)
    542 		smu_baco->platform_support = true;
    543 	mutex_unlock(&smu_baco->mutex);
    544 
    545 	return 0;
    546 }
    547 
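         /*
          * Declare the SMU tables that live in VRAM and allocate host-side
          * shadow copies of the metrics and watermarks tables.
          */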
    548 static int navi10_tables_init(struct smu_context *smu, struct smu_table *tables)
    549 {
    550 	struct smu_table_context *smu_table = &smu->smu_table;
    551 
    552 	SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
    553 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
    554 	SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
    555 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
    556 	SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
    557 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
    558 	SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t),
    559 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
    560 	SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE,
    561 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
    562 	SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF,
    563 		       sizeof(DpmActivityMonitorCoeffInt_t), PAGE_SIZE,
    564 		       AMDGPU_GEM_DOMAIN_VRAM);
    565 
    566 	smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
    567 	if (!smu_table->metrics_table)
    568 		return -ENOMEM;
    569 	smu_table->metrics_time = 0;
    570 
    571 	smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
    572 	if (!smu_table->watermarks_table)
    573 		return -ENOMEM;
    574 
    575 	return 0;
    576 }
    577 
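         /*
          * Return a copy of the SMU metrics table.  The table is re-fetched
          * from the SMU at most once every 100 ms (tracked via jiffies) under
          * metrics_lock.
          */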
    578 static int navi10_get_metrics_table(struct smu_context *smu,
    579 				    SmuMetrics_t *metrics_table)
    580 {
     581 	struct smu_table_context *smu_table = &smu->smu_table;
    582 	int ret = 0;
    583 
    584 	mutex_lock(&smu->metrics_lock);
    585 	if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(100))) {
    586 		ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
    587 				(void *)smu_table->metrics_table, false);
    588 		if (ret) {
    589 			pr_info("Failed to export SMU metrics table!\n");
    590 			mutex_unlock(&smu->metrics_lock);
    591 			return ret;
    592 		}
    593 		smu_table->metrics_time = jiffies;
    594 	}
    595 
    596 	memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));
    597 	mutex_unlock(&smu->metrics_lock);
    598 
    599 	return ret;
    600 }
    601 
    602 static int navi10_allocate_dpm_context(struct smu_context *smu)
    603 {
    604 	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
    605 
    606 	if (smu_dpm->dpm_context)
    607 		return -EINVAL;
    608 
    609 	smu_dpm->dpm_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
    610 				       GFP_KERNEL);
    611 	if (!smu_dpm->dpm_context)
    612 		return -ENOMEM;
    613 
    614 	smu_dpm->dpm_context_size = sizeof(struct smu_11_0_dpm_context);
    615 
    616 	return 0;
    617 }
    618 
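         /*
          * Seed each DPM table's min/max from the first and last entries of
          * the corresponding frequency table in the driver PPTable, and copy
          * the PCIe gen/lane capabilities.
          */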
    619 static int navi10_set_default_dpm_table(struct smu_context *smu)
    620 {
    621 	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
    622 	struct smu_table_context *table_context = &smu->smu_table;
    623 	struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
    624 	PPTable_t *driver_ppt = NULL;
    625 	int i;
    626 
    627 	driver_ppt = table_context->driver_pptable;
    628 
    629 	dpm_context->dpm_tables.soc_table.min = driver_ppt->FreqTableSocclk[0];
    630 	dpm_context->dpm_tables.soc_table.max = driver_ppt->FreqTableSocclk[NUM_SOCCLK_DPM_LEVELS - 1];
    631 
    632 	dpm_context->dpm_tables.gfx_table.min = driver_ppt->FreqTableGfx[0];
    633 	dpm_context->dpm_tables.gfx_table.max = driver_ppt->FreqTableGfx[NUM_GFXCLK_DPM_LEVELS - 1];
    634 
    635 	dpm_context->dpm_tables.uclk_table.min = driver_ppt->FreqTableUclk[0];
    636 	dpm_context->dpm_tables.uclk_table.max = driver_ppt->FreqTableUclk[NUM_UCLK_DPM_LEVELS - 1];
    637 
    638 	dpm_context->dpm_tables.vclk_table.min = driver_ppt->FreqTableVclk[0];
    639 	dpm_context->dpm_tables.vclk_table.max = driver_ppt->FreqTableVclk[NUM_VCLK_DPM_LEVELS - 1];
    640 
    641 	dpm_context->dpm_tables.dclk_table.min = driver_ppt->FreqTableDclk[0];
    642 	dpm_context->dpm_tables.dclk_table.max = driver_ppt->FreqTableDclk[NUM_DCLK_DPM_LEVELS - 1];
    643 
    644 	dpm_context->dpm_tables.dcef_table.min = driver_ppt->FreqTableDcefclk[0];
    645 	dpm_context->dpm_tables.dcef_table.max = driver_ppt->FreqTableDcefclk[NUM_DCEFCLK_DPM_LEVELS - 1];
    646 
    647 	dpm_context->dpm_tables.pixel_table.min = driver_ppt->FreqTablePixclk[0];
    648 	dpm_context->dpm_tables.pixel_table.max = driver_ppt->FreqTablePixclk[NUM_PIXCLK_DPM_LEVELS - 1];
    649 
    650 	dpm_context->dpm_tables.display_table.min = driver_ppt->FreqTableDispclk[0];
    651 	dpm_context->dpm_tables.display_table.max = driver_ppt->FreqTableDispclk[NUM_DISPCLK_DPM_LEVELS - 1];
    652 
    653 	dpm_context->dpm_tables.phy_table.min = driver_ppt->FreqTablePhyclk[0];
    654 	dpm_context->dpm_tables.phy_table.max = driver_ppt->FreqTablePhyclk[NUM_PHYCLK_DPM_LEVELS - 1];
    655 
    656 	for (i = 0; i < MAX_PCIE_CONF; i++) {
    657 		dpm_context->dpm_tables.pcie_table.pcie_gen[i] = driver_ppt->PcieGenSpeed[i];
    658 		dpm_context->dpm_tables.pcie_table.pcie_lane[i] = driver_ppt->PcieLaneCount[i];
    659 	}
    660 
    661 	return 0;
    662 }
    663 
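         /*
          * VCN/JPEG power gating: the PowerUp/PowerDown messages are only sent
          * when the corresponding PG feature is enabled; the *_gated flags
          * track the resulting state either way.
          */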
    664 static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
    665 {
    666 	struct smu_power_context *smu_power = &smu->smu_power;
    667 	struct smu_power_gate *power_gate = &smu_power->power_gate;
    668 	int ret = 0;
    669 
    670 	if (enable) {
     671 		/* VCN DPM on is a prerequisite for VCN power-gate messages */
    672 		if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
    673 			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1);
    674 			if (ret)
    675 				return ret;
    676 		}
    677 		power_gate->vcn_gated = false;
    678 	} else {
    679 		if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
    680 			ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
    681 			if (ret)
    682 				return ret;
    683 		}
    684 		power_gate->vcn_gated = true;
    685 	}
    686 
    687 	return ret;
    688 }
    689 
    690 static int navi10_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
    691 {
    692 	struct smu_power_context *smu_power = &smu->smu_power;
    693 	struct smu_power_gate *power_gate = &smu_power->power_gate;
    694 	int ret = 0;
    695 
    696 	if (enable) {
    697 		if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
    698 			ret = smu_send_smc_msg(smu, SMU_MSG_PowerUpJpeg);
    699 			if (ret)
    700 				return ret;
    701 		}
    702 		power_gate->jpeg_gated = false;
    703 	} else {
    704 		if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
    705 			ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownJpeg);
    706 			if (ret)
    707 				return ret;
    708 		}
    709 		power_gate->jpeg_gated = true;
    710 	}
    711 
    712 	return ret;
    713 }
    714 
    715 static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
    716 				       enum smu_clk_type clk_type,
    717 				       uint32_t *value)
    718 {
    719 	int ret = 0, clk_id = 0;
    720 	SmuMetrics_t metrics;
    721 
    722 	ret = navi10_get_metrics_table(smu, &metrics);
    723 	if (ret)
    724 		return ret;
    725 
    726 	clk_id = smu_clk_get_index(smu, clk_type);
    727 	if (clk_id < 0)
    728 		return clk_id;
    729 
    730 	*value = metrics.CurrClock[clk_id];
    731 
    732 	return ret;
    733 }
    734 
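         /*
          * A clock domain with SnapToDiscrete == 0 supports fine-grained DPM:
          * the firmware exposes only a min/max range, and the print/force
          * paths below synthesize a two- or three-level view on top of it.
          */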
    735 static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu_clk_type clk_type)
    736 {
    737 	PPTable_t *pptable = smu->smu_table.driver_pptable;
    738 	DpmDescriptor_t *dpm_desc = NULL;
    739 	uint32_t clk_index = 0;
    740 
    741 	clk_index = smu_clk_get_index(smu, clk_type);
    742 	dpm_desc = &pptable->DpmDescriptor[clk_index];
    743 
     744 	/* 0 - Fine-grained DPM, 1 - Discrete DPM */
     745 	return dpm_desc->SnapToDiscrete == 0;
    746 }
    747 
    748 static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_CAP cap)
    749 {
    750 	return od_table->cap[cap];
    751 }
    752 
    753 static void navi10_od_setting_get_range(struct smu_11_0_overdrive_table *od_table,
    754 					enum SMU_11_0_ODSETTING_ID setting,
    755 					uint32_t *min, uint32_t *max)
    756 {
    757 	if (min)
    758 		*min = od_table->min[setting];
    759 	if (max)
    760 		*max = od_table->max[setting];
    761 }
    762 
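         /*
          * Format the sysfs clock-level listings.  The current clock frequency
          * comes back from the SMU in 10 kHz units and is converted to MHz;
          * for fine-grained clocks a synthetic min/current/max view is printed
          * instead of a full level table.
          */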
    763 static int navi10_print_clk_levels(struct smu_context *smu,
    764 			enum smu_clk_type clk_type, char *buf)
    765 {
    766 	uint16_t *curve_settings;
    767 	int i, size = 0, ret = 0;
    768 	uint32_t cur_value = 0, value = 0, count = 0;
    769 	uint32_t freq_values[3] = {0};
    770 	uint32_t mark_index = 0;
    771 	struct smu_table_context *table_context = &smu->smu_table;
    772 	uint32_t gen_speed, lane_width;
    773 	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
    774 	struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
    775 	struct amdgpu_device *adev = smu->adev;
    776 	PPTable_t *pptable = (PPTable_t *)table_context->driver_pptable;
    777 	OverDriveTable_t *od_table =
    778 		(OverDriveTable_t *)table_context->overdrive_table;
    779 	struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
    780 	uint32_t min_value, max_value;
    781 
    782 	switch (clk_type) {
    783 	case SMU_GFXCLK:
    784 	case SMU_SCLK:
    785 	case SMU_SOCCLK:
    786 	case SMU_MCLK:
    787 	case SMU_UCLK:
    788 	case SMU_FCLK:
    789 	case SMU_DCEFCLK:
    790 		ret = smu_get_current_clk_freq(smu, clk_type, &cur_value);
    791 		if (ret)
    792 			return size;
    793 
     794 		/* 10 kHz -> MHz */
    795 		cur_value = cur_value / 100;
    796 
    797 		ret = smu_get_dpm_level_count(smu, clk_type, &count);
    798 		if (ret)
    799 			return size;
    800 
    801 		if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) {
    802 			for (i = 0; i < count; i++) {
    803 				ret = smu_get_dpm_freq_by_index(smu, clk_type, i, &value);
    804 				if (ret)
    805 					return size;
    806 
    807 				size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
    808 						cur_value == value ? "*" : "");
    809 			}
    810 		} else {
    811 			ret = smu_get_dpm_freq_by_index(smu, clk_type, 0, &freq_values[0]);
    812 			if (ret)
    813 				return size;
    814 			ret = smu_get_dpm_freq_by_index(smu, clk_type, count - 1, &freq_values[2]);
    815 			if (ret)
    816 				return size;
    817 
    818 			freq_values[1] = cur_value;
    819 			mark_index = cur_value == freq_values[0] ? 0 :
    820 				     cur_value == freq_values[2] ? 2 : 1;
    821 			if (mark_index != 1)
    822 				freq_values[1] = (freq_values[0] + freq_values[2]) / 2;
    823 
    824 			for (i = 0; i < 3; i++) {
    825 				size += sprintf(buf + size, "%d: %uMhz %s\n", i, freq_values[i],
    826 						i == mark_index ? "*" : "");
    827 			}
    828 
    829 		}
    830 		break;
    831 	case SMU_PCIE:
    832 		gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
    833 			     PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
    834 			>> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
    835 		lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
    836 			      PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
    837 			>> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
    838 		for (i = 0; i < NUM_LINK_LEVELS; i++)
    839 			size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
    840 					(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 0) ? "2.5GT/s," :
    841 					(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 1) ? "5.0GT/s," :
    842 					(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 2) ? "8.0GT/s," :
    843 					(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 3) ? "16.0GT/s," : "",
    844 					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 1) ? "x1" :
    845 					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 2) ? "x2" :
    846 					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 3) ? "x4" :
    847 					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 4) ? "x8" :
    848 					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 5) ? "x12" :
    849 					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 6) ? "x16" : "",
    850 					pptable->LclkFreq[i],
    851 					(gen_speed == dpm_context->dpm_tables.pcie_table.pcie_gen[i]) &&
    852 					(lane_width == dpm_context->dpm_tables.pcie_table.pcie_lane[i]) ?
    853 					"*" : "");
    854 		break;
    855 	case SMU_OD_SCLK:
    856 		if (!smu->od_enabled || !od_table || !od_settings)
    857 			break;
    858 		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS))
    859 			break;
    860 		size += sprintf(buf + size, "OD_SCLK:\n");
    861 		size += sprintf(buf + size, "0: %uMhz\n1: %uMhz\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
    862 		break;
    863 	case SMU_OD_MCLK:
    864 		if (!smu->od_enabled || !od_table || !od_settings)
    865 			break;
    866 		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX))
    867 			break;
    868 		size += sprintf(buf + size, "OD_MCLK:\n");
    869 		size += sprintf(buf + size, "1: %uMHz\n", od_table->UclkFmax);
    870 		break;
    871 	case SMU_OD_VDDC_CURVE:
    872 		if (!smu->od_enabled || !od_table || !od_settings)
    873 			break;
    874 		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE))
    875 			break;
    876 		size += sprintf(buf + size, "OD_VDDC_CURVE:\n");
    877 		for (i = 0; i < 3; i++) {
    878 			switch (i) {
    879 			case 0:
    880 				curve_settings = &od_table->GfxclkFreq1;
    881 				break;
    882 			case 1:
    883 				curve_settings = &od_table->GfxclkFreq2;
    884 				break;
    885 			case 2:
    886 				curve_settings = &od_table->GfxclkFreq3;
    887 				break;
    888 			default:
    889 				break;
    890 			}
    891 			size += sprintf(buf + size, "%d: %uMHz @ %umV\n", i, curve_settings[0], curve_settings[1] / NAVI10_VOLTAGE_SCALE);
    892 		}
    893 		break;
    894 	case SMU_OD_RANGE:
    895 		if (!smu->od_enabled || !od_table || !od_settings)
    896 			break;
    897 		size = sprintf(buf, "%s:\n", "OD_RANGE");
    898 
    899 		if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
    900 			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMIN,
    901 						    &min_value, NULL);
    902 			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMAX,
    903 						    NULL, &max_value);
    904 			size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
    905 					min_value, max_value);
    906 		}
    907 
    908 		if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX)) {
    909 			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_UCLKFMAX,
    910 						    &min_value, &max_value);
    911 			size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
    912 					min_value, max_value);
    913 		}
    914 
    915 		if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE)) {
    916 			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1,
    917 						    &min_value, &max_value);
    918 			size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
    919 					min_value, max_value);
    920 			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1,
    921 						    &min_value, &max_value);
    922 			size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
    923 					min_value, max_value);
    924 			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2,
    925 						    &min_value, &max_value);
    926 			size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
    927 					min_value, max_value);
    928 			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2,
    929 						    &min_value, &max_value);
    930 			size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
    931 					min_value, max_value);
    932 			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3,
    933 						    &min_value, &max_value);
    934 			size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
    935 					min_value, max_value);
    936 			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3,
    937 						    &min_value, &max_value);
    938 			size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
    939 					min_value, max_value);
    940 		}
    941 
    942 		break;
    943 	default:
    944 		break;
    945 	}
    946 
    947 	return size;
    948 }
    949 
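         /*
          * Translate the user-supplied level mask into a soft min/max
          * frequency range: the lowest set bit selects the minimum level and
          * the highest set bit the maximum.
          */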
    950 static int navi10_force_clk_levels(struct smu_context *smu,
    951 				   enum smu_clk_type clk_type, uint32_t mask)
    952 {
    953 
    954 	int ret = 0, size = 0;
    955 	uint32_t soft_min_level = 0, soft_max_level = 0, min_freq = 0, max_freq = 0;
    956 
    957 	soft_min_level = mask ? (ffs(mask) - 1) : 0;
    958 	soft_max_level = mask ? (fls(mask) - 1) : 0;
    959 
    960 	switch (clk_type) {
    961 	case SMU_GFXCLK:
    962 	case SMU_SCLK:
    963 	case SMU_SOCCLK:
    964 	case SMU_MCLK:
    965 	case SMU_UCLK:
    966 	case SMU_DCEFCLK:
    967 	case SMU_FCLK:
     968 		/* There are only 2 levels for fine-grained DPM */
    969 		if (navi10_is_support_fine_grained_dpm(smu, clk_type)) {
    970 			soft_max_level = (soft_max_level >= 1 ? 1 : 0);
    971 			soft_min_level = (soft_min_level >= 1 ? 1 : 0);
    972 		}
    973 
    974 		ret = smu_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
    975 		if (ret)
    976 			return size;
    977 
    978 		ret = smu_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
    979 		if (ret)
    980 			return size;
    981 
    982 		ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq);
    983 		if (ret)
    984 			return size;
    985 		break;
    986 	default:
    987 		break;
    988 	}
    989 
    990 	return size;
    991 }
    992 
    993 static int navi10_populate_umd_state_clk(struct smu_context *smu)
    994 {
    995 	int ret = 0;
    996 	uint32_t min_sclk_freq = 0, min_mclk_freq = 0;
    997 
    998 	ret = smu_get_dpm_freq_range(smu, SMU_SCLK, &min_sclk_freq, NULL, false);
    999 	if (ret)
   1000 		return ret;
   1001 
   1002 	smu->pstate_sclk = min_sclk_freq * 100;
   1003 
   1004 	ret = smu_get_dpm_freq_range(smu, SMU_MCLK, &min_mclk_freq, NULL, false);
   1005 	if (ret)
   1006 		return ret;
   1007 
   1008 	smu->pstate_mclk = min_mclk_freq * 100;
   1009 
   1010 	return ret;
   1011 }
   1012 
   1013 static int navi10_get_clock_by_type_with_latency(struct smu_context *smu,
   1014 						 enum smu_clk_type clk_type,
   1015 						 struct pp_clock_levels_with_latency *clocks)
   1016 {
   1017 	int ret = 0, i = 0;
   1018 	uint32_t level_count = 0, freq = 0;
   1019 
   1020 	switch (clk_type) {
   1021 	case SMU_GFXCLK:
   1022 	case SMU_DCEFCLK:
   1023 	case SMU_SOCCLK:
   1024 	case SMU_MCLK:
   1025 	case SMU_UCLK:
   1026 		ret = smu_get_dpm_level_count(smu, clk_type, &level_count);
   1027 		if (ret)
   1028 			return ret;
   1029 
   1030 		level_count = min(level_count, (uint32_t)MAX_NUM_CLOCKS);
   1031 		clocks->num_levels = level_count;
   1032 
   1033 		for (i = 0; i < level_count; i++) {
   1034 			ret = smu_get_dpm_freq_by_index(smu, clk_type, i, &freq);
   1035 			if (ret)
   1036 				return ret;
   1037 
   1038 			clocks->data[i].clocks_in_khz = freq * 1000;
   1039 			clocks->data[i].latency_in_us = 0;
   1040 		}
   1041 		break;
   1042 	default:
   1043 		break;
   1044 	}
   1045 
   1046 	return ret;
   1047 }
   1048 
   1049 static int navi10_pre_display_config_changed(struct smu_context *smu)
   1050 {
   1051 	int ret = 0;
   1052 	uint32_t max_freq = 0;
   1053 
   1054 	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0);
   1055 	if (ret)
   1056 		return ret;
   1057 
   1058 	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
   1059 		ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &max_freq, false);
   1060 		if (ret)
   1061 			return ret;
   1062 		ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, max_freq);
   1063 		if (ret)
   1064 			return ret;
   1065 	}
   1066 
   1067 	return ret;
   1068 }
   1069 
   1070 static int navi10_display_config_changed(struct smu_context *smu)
   1071 {
   1072 	int ret = 0;
   1073 
   1074 	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
   1075 	    !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
   1076 		ret = smu_write_watermarks_table(smu);
   1077 		if (ret)
   1078 			return ret;
   1079 
   1080 		smu->watermarks_bitmap |= WATERMARKS_LOADED;
   1081 	}
   1082 
   1083 	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
   1084 	    smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
   1085 	    smu_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
   1086 		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
   1087 						  smu->display_config->num_display);
   1088 		if (ret)
   1089 			return ret;
   1090 	}
   1091 
   1092 	return ret;
   1093 }
   1094 
   1095 static int navi10_force_dpm_limit_value(struct smu_context *smu, bool highest)
   1096 {
   1097 	int ret = 0, i = 0;
   1098 	uint32_t min_freq, max_freq, force_freq;
   1099 	enum smu_clk_type clk_type;
   1100 
   1101 	enum smu_clk_type clks[] = {
   1102 		SMU_GFXCLK,
   1103 		SMU_MCLK,
   1104 		SMU_SOCCLK,
   1105 	};
   1106 
   1107 	for (i = 0; i < ARRAY_SIZE(clks); i++) {
   1108 		clk_type = clks[i];
   1109 		ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false);
   1110 		if (ret)
   1111 			return ret;
   1112 
   1113 		force_freq = highest ? max_freq : min_freq;
   1114 		ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq);
   1115 		if (ret)
   1116 			return ret;
   1117 	}
   1118 
   1119 	return ret;
   1120 }
   1121 
   1122 static int navi10_unforce_dpm_levels(struct smu_context *smu)
   1123 {
   1124 	int ret = 0, i = 0;
   1125 	uint32_t min_freq, max_freq;
   1126 	enum smu_clk_type clk_type;
   1127 
   1128 	enum smu_clk_type clks[] = {
   1129 		SMU_GFXCLK,
   1130 		SMU_MCLK,
   1131 		SMU_SOCCLK,
   1132 	};
   1133 
   1134 	for (i = 0; i < ARRAY_SIZE(clks); i++) {
   1135 		clk_type = clks[i];
   1136 		ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false);
   1137 		if (ret)
   1138 			return ret;
   1139 
   1140 		ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq);
   1141 		if (ret)
   1142 			return ret;
   1143 	}
   1144 
   1145 	return ret;
   1146 }
   1147 
   1148 static int navi10_get_gpu_power(struct smu_context *smu, uint32_t *value)
   1149 {
   1150 	int ret = 0;
   1151 	SmuMetrics_t metrics;
   1152 
   1153 	if (!value)
   1154 		return -EINVAL;
   1155 
   1156 	ret = navi10_get_metrics_table(smu, &metrics);
   1157 	if (ret)
   1158 		return ret;
   1159 
   1160 	*value = metrics.AverageSocketPower << 8;
   1161 
   1162 	return 0;
   1163 }
   1164 
   1165 static int navi10_get_current_activity_percent(struct smu_context *smu,
   1166 					       enum amd_pp_sensors sensor,
   1167 					       uint32_t *value)
   1168 {
   1169 	int ret = 0;
   1170 	SmuMetrics_t metrics;
   1171 
   1172 	if (!value)
   1173 		return -EINVAL;
   1174 
   1175 	ret = navi10_get_metrics_table(smu, &metrics);
   1176 	if (ret)
   1177 		return ret;
   1178 
   1179 	switch (sensor) {
   1180 	case AMDGPU_PP_SENSOR_GPU_LOAD:
   1181 		*value = metrics.AverageGfxActivity;
   1182 		break;
   1183 	case AMDGPU_PP_SENSOR_MEM_LOAD:
   1184 		*value = metrics.AverageUclkActivity;
   1185 		break;
   1186 	default:
   1187 		pr_err("Invalid sensor for retrieving clock activity\n");
   1188 		return -EINVAL;
   1189 	}
   1190 
   1191 	return 0;
   1192 }
   1193 
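         /* DPM is considered running if any bit of SMC_DPM_FEATURE is enabled. */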
   1194 static bool navi10_is_dpm_running(struct smu_context *smu)
   1195 {
   1196 	int ret __unused = 0;
   1197 	uint32_t feature_mask[2];
   1198 	unsigned long feature_enabled;
   1199 	ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
   1200 	feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
   1201 			   ((uint64_t)feature_mask[1] << 32));
   1202 	return !!(feature_enabled & SMC_DPM_FEATURE);
   1203 }
   1204 
   1205 static int navi10_get_fan_speed_rpm(struct smu_context *smu,
   1206 				    uint32_t *speed)
   1207 {
   1208 	SmuMetrics_t metrics;
   1209 	int ret = 0;
   1210 
   1211 	if (!speed)
   1212 		return -EINVAL;
   1213 
   1214 	ret = navi10_get_metrics_table(smu, &metrics);
   1215 	if (ret)
   1216 		return ret;
   1217 
   1218 	*speed = metrics.CurrFanSpeed;
   1219 
   1220 	return ret;
   1221 }
   1222 
   1223 static int navi10_get_fan_speed_percent(struct smu_context *smu,
   1224 					uint32_t *speed)
   1225 {
   1226 	int ret = 0;
   1227 	uint32_t percent = 0;
   1228 	uint32_t current_rpm;
   1229 	PPTable_t *pptable = smu->smu_table.driver_pptable;
   1230 
   1231 	ret = navi10_get_fan_speed_rpm(smu, &current_rpm);
   1232 	if (ret)
   1233 		return ret;
   1234 
   1235 	percent = current_rpm * 100 / pptable->FanMaximumRpm;
   1236 	*speed = percent > 100 ? 100 : percent;
   1237 
   1238 	return ret;
   1239 }
   1240 
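         /*
          * Dump the activity-monitor coefficients for every workload profile
          * (pp_power_profile_mode format); the active profile is marked '*'.
          */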
   1241 static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
   1242 {
   1243 	DpmActivityMonitorCoeffInt_t activity_monitor;
   1244 	uint32_t i, size = 0;
   1245 	int16_t workload_type = 0;
   1246 	static const char *profile_name[] = {
   1247 					"BOOTUP_DEFAULT",
   1248 					"3D_FULL_SCREEN",
   1249 					"POWER_SAVING",
   1250 					"VIDEO",
   1251 					"VR",
   1252 					"COMPUTE",
   1253 					"CUSTOM"};
   1254 	static const char *title[] = {
   1255 			"PROFILE_INDEX(NAME)",
   1256 			"CLOCK_TYPE(NAME)",
   1257 			"FPS",
   1258 			"MinFreqType",
   1259 			"MinActiveFreqType",
   1260 			"MinActiveFreq",
   1261 			"BoosterFreqType",
   1262 			"BoosterFreq",
   1263 			"PD_Data_limit_c",
   1264 			"PD_Data_error_coeff",
   1265 			"PD_Data_error_rate_coeff"};
   1266 	int result = 0;
   1267 
   1268 	if (!buf)
   1269 		return -EINVAL;
   1270 
   1271 	size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
   1272 			title[0], title[1], title[2], title[3], title[4], title[5],
   1273 			title[6], title[7], title[8], title[9], title[10]);
   1274 
   1275 	for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
   1276 		/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
   1277 		workload_type = smu_workload_get_type(smu, i);
   1278 		if (workload_type < 0)
   1279 			return -EINVAL;
   1280 
   1281 		result = smu_update_table(smu,
   1282 					  SMU_TABLE_ACTIVITY_MONITOR_COEFF, workload_type,
   1283 					  (void *)(&activity_monitor), false);
   1284 		if (result) {
   1285 			pr_err("[%s] Failed to get activity monitor!", __func__);
   1286 			return result;
   1287 		}
   1288 
   1289 		size += sprintf(buf + size, "%2d %14s%s:\n",
   1290 			i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
   1291 
   1292 		size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
   1293 			" ",
   1294 			0,
   1295 			"GFXCLK",
   1296 			activity_monitor.Gfx_FPS,
   1297 			activity_monitor.Gfx_MinFreqStep,
   1298 			activity_monitor.Gfx_MinActiveFreqType,
   1299 			activity_monitor.Gfx_MinActiveFreq,
   1300 			activity_monitor.Gfx_BoosterFreqType,
   1301 			activity_monitor.Gfx_BoosterFreq,
   1302 			activity_monitor.Gfx_PD_Data_limit_c,
   1303 			activity_monitor.Gfx_PD_Data_error_coeff,
   1304 			activity_monitor.Gfx_PD_Data_error_rate_coeff);
   1305 
   1306 		size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
   1307 			" ",
   1308 			1,
   1309 			"SOCCLK",
   1310 			activity_monitor.Soc_FPS,
   1311 			activity_monitor.Soc_MinFreqStep,
   1312 			activity_monitor.Soc_MinActiveFreqType,
   1313 			activity_monitor.Soc_MinActiveFreq,
   1314 			activity_monitor.Soc_BoosterFreqType,
   1315 			activity_monitor.Soc_BoosterFreq,
   1316 			activity_monitor.Soc_PD_Data_limit_c,
   1317 			activity_monitor.Soc_PD_Data_error_coeff,
   1318 			activity_monitor.Soc_PD_Data_error_rate_coeff);
   1319 
   1320 		size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
   1321 			" ",
   1322 			2,
   1323 			"MEMLK",
   1324 			activity_monitor.Mem_FPS,
   1325 			activity_monitor.Mem_MinFreqStep,
   1326 			activity_monitor.Mem_MinActiveFreqType,
   1327 			activity_monitor.Mem_MinActiveFreq,
   1328 			activity_monitor.Mem_BoosterFreqType,
   1329 			activity_monitor.Mem_BoosterFreq,
   1330 			activity_monitor.Mem_PD_Data_limit_c,
   1331 			activity_monitor.Mem_PD_Data_error_coeff,
   1332 			activity_monitor.Mem_PD_Data_error_rate_coeff);
   1333 	}
   1334 
   1335 	return size;
   1336 }
   1337 
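         /*
          * Select a power profile.  For the CUSTOM profile, input[0] picks the
          * clock (0 = Gfxclk, 1 = Socclk, 2 = Memlk) and input[1..9] supply
          * the activity-monitor coefficients; the chosen workload is then
          * applied through SMU_MSG_SetWorkloadMask.
          */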
   1338 static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
   1339 {
   1340 	DpmActivityMonitorCoeffInt_t activity_monitor;
   1341 	int workload_type, ret = 0;
   1342 
   1343 	smu->power_profile_mode = input[size];
   1344 
   1345 	if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
   1346 		pr_err("Invalid power profile mode %d\n", smu->power_profile_mode);
   1347 		return -EINVAL;
   1348 	}
   1349 
   1350 	if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
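         		/* XXX size is unsigned, so this check can never fire */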
   1351 		if (size < 0)
   1352 			return -EINVAL;
   1353 
   1354 		ret = smu_update_table(smu,
   1355 				       SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
   1356 				       (void *)(&activity_monitor), false);
   1357 		if (ret) {
   1358 			pr_err("[%s] Failed to get activity monitor!", __func__);
   1359 			return ret;
   1360 		}
   1361 
   1362 		switch (input[0]) {
   1363 		case 0: /* Gfxclk */
   1364 			activity_monitor.Gfx_FPS = input[1];
   1365 			activity_monitor.Gfx_MinFreqStep = input[2];
   1366 			activity_monitor.Gfx_MinActiveFreqType = input[3];
   1367 			activity_monitor.Gfx_MinActiveFreq = input[4];
   1368 			activity_monitor.Gfx_BoosterFreqType = input[5];
   1369 			activity_monitor.Gfx_BoosterFreq = input[6];
   1370 			activity_monitor.Gfx_PD_Data_limit_c = input[7];
   1371 			activity_monitor.Gfx_PD_Data_error_coeff = input[8];
   1372 			activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
   1373 			break;
   1374 		case 1: /* Socclk */
   1375 			activity_monitor.Soc_FPS = input[1];
   1376 			activity_monitor.Soc_MinFreqStep = input[2];
   1377 			activity_monitor.Soc_MinActiveFreqType = input[3];
   1378 			activity_monitor.Soc_MinActiveFreq = input[4];
   1379 			activity_monitor.Soc_BoosterFreqType = input[5];
   1380 			activity_monitor.Soc_BoosterFreq = input[6];
   1381 			activity_monitor.Soc_PD_Data_limit_c = input[7];
   1382 			activity_monitor.Soc_PD_Data_error_coeff = input[8];
   1383 			activity_monitor.Soc_PD_Data_error_rate_coeff = input[9];
   1384 			break;
   1385 		case 2: /* Memlk */
   1386 			activity_monitor.Mem_FPS = input[1];
   1387 			activity_monitor.Mem_MinFreqStep = input[2];
   1388 			activity_monitor.Mem_MinActiveFreqType = input[3];
   1389 			activity_monitor.Mem_MinActiveFreq = input[4];
   1390 			activity_monitor.Mem_BoosterFreqType = input[5];
   1391 			activity_monitor.Mem_BoosterFreq = input[6];
   1392 			activity_monitor.Mem_PD_Data_limit_c = input[7];
   1393 			activity_monitor.Mem_PD_Data_error_coeff = input[8];
   1394 			activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
   1395 			break;
   1396 		}
   1397 
   1398 		ret = smu_update_table(smu,
   1399 				       SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
   1400 				       (void *)(&activity_monitor), true);
   1401 		if (ret) {
   1402 			pr_err("[%s] Failed to set activity monitor!", __func__);
   1403 			return ret;
   1404 		}
   1405 	}
   1406 
   1407 	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
   1408 	workload_type = smu_workload_get_type(smu, smu->power_profile_mode);
   1409 	if (workload_type < 0)
   1410 		return -EINVAL;
   1411 	smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
   1412 				    1 << workload_type);
   1413 
   1414 	return ret;
   1415 }
   1416 
   1417 static int navi10_get_profiling_clk_mask(struct smu_context *smu,
   1418 					 enum amd_dpm_forced_level level,
   1419 					 uint32_t *sclk_mask,
   1420 					 uint32_t *mclk_mask,
   1421 					 uint32_t *soc_mask)
   1422 {
   1423 	int ret = 0;
   1424 	uint32_t level_count = 0;
   1425 
   1426 	if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
   1427 		if (sclk_mask)
   1428 			*sclk_mask = 0;
   1429 	} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
   1430 		if (mclk_mask)
   1431 			*mclk_mask = 0;
   1432 	} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
    1433 		if (sclk_mask) {
   1434 			ret = smu_get_dpm_level_count(smu, SMU_SCLK, &level_count);
   1435 			if (ret)
   1436 				return ret;
   1437 			*sclk_mask = level_count - 1;
   1438 		}
   1439 
    1440 		if (mclk_mask) {
   1441 			ret = smu_get_dpm_level_count(smu, SMU_MCLK, &level_count);
   1442 			if (ret)
   1443 				return ret;
   1444 			*mclk_mask = level_count - 1;
   1445 		}
   1446 
    1447 		if (soc_mask) {
   1448 			ret = smu_get_dpm_level_count(smu, SMU_SOCCLK, &level_count);
   1449 			if (ret)
   1450 				return ret;
   1451 			*soc_mask = level_count - 1;
   1452 		}
   1453 	}
   1454 
   1455 	return ret;
   1456 }
   1457 
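         /*
          * Forward the display configuration's minimum clock requirements to
          * the SMU: a hard minimum for DCEFCLK via a display clock/voltage
          * request, a deep-sleep DCEFCLK floor when DS_DCEFCLK is supported,
          * and a hard minimum UCLK when UCLK DPM is enabled.
          */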
   1458 static int navi10_notify_smc_display_config(struct smu_context *smu)
   1459 {
   1460 	struct smu_clocks min_clocks = {0};
   1461 	struct pp_display_clock_request clock_req;
   1462 	int ret = 0;
   1463 
   1464 	min_clocks.dcef_clock = smu->display_config->min_dcef_set_clk;
   1465 	min_clocks.dcef_clock_in_sr = smu->display_config->min_dcef_deep_sleep_set_clk;
   1466 	min_clocks.memory_clock = smu->display_config->min_mem_set_clock;
   1467 
   1468 	if (smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
   1469 		clock_req.clock_type = amd_pp_dcef_clock;
   1470 		clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10;
   1471 
   1472 		ret = smu_v11_0_display_clock_voltage_request(smu, &clock_req);
   1473 		if (!ret) {
   1474 			if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
   1475 				ret = smu_send_smc_msg_with_param(smu,
   1476 								  SMU_MSG_SetMinDeepSleepDcefclk,
   1477 								  min_clocks.dcef_clock_in_sr/100);
   1478 				if (ret) {
   1479 					pr_err("Attempt to set divider for DCEFCLK Failed!");
   1480 					return ret;
   1481 				}
   1482 			}
   1483 		} else {
   1484 			pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
   1485 		}
   1486 	}
   1487 
   1488 	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
   1489 		ret = smu_set_hard_freq_range(smu, SMU_UCLK, min_clocks.memory_clock/100, 0);
   1490 		if (ret) {
   1491 			pr_err("[%s] Set hard min uclk failed!", __func__);
   1492 			return ret;
   1493 		}
   1494 	}
   1495 
   1496 	return 0;
   1497 }
   1498 
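         /*
          * Fill the SMU Watermarks_t table from the DM-provided clock ranges:
          * row 1 holds the (at most four) DMIF DCFCLK/UCLK ranges and row 0
          * the MCIF SOCCLK/UCLK ranges, with each clock converted from kHz to
          * MHz and stored little-endian.
          */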
   1499 static int navi10_set_watermarks_table(struct smu_context *smu,
    1500 				       void *watermarks,
    1501 				       struct dm_pp_wm_sets_with_clock_ranges_soc15
    1502 				       *clock_ranges)
   1503 {
   1504 	int i;
   1505 	Watermarks_t *table = watermarks;
   1506 
   1507 	if (!table || !clock_ranges)
   1508 		return -EINVAL;
   1509 
   1510 	if (clock_ranges->num_wm_dmif_sets > 4 ||
   1511 	    clock_ranges->num_wm_mcif_sets > 4)
   1512 		return -EINVAL;
   1513 
   1514 	for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
   1515 		table->WatermarkRow[1][i].MinClock =
   1516 			cpu_to_le16((uint16_t)
   1517 			(clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
   1518 			1000));
   1519 		table->WatermarkRow[1][i].MaxClock =
   1520 			cpu_to_le16((uint16_t)
   1521 			(clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
   1522 			1000));
   1523 		table->WatermarkRow[1][i].MinUclk =
   1524 			cpu_to_le16((uint16_t)
   1525 			(clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
   1526 			1000));
   1527 		table->WatermarkRow[1][i].MaxUclk =
   1528 			cpu_to_le16((uint16_t)
   1529 			(clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
   1530 			1000));
   1531 		table->WatermarkRow[1][i].WmSetting = (uint8_t)
   1532 				clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
   1533 	}
   1534 
   1535 	for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
   1536 		table->WatermarkRow[0][i].MinClock =
   1537 			cpu_to_le16((uint16_t)
   1538 			(clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
   1539 			1000));
   1540 		table->WatermarkRow[0][i].MaxClock =
   1541 			cpu_to_le16((uint16_t)
   1542 			(clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
   1543 			1000));
   1544 		table->WatermarkRow[0][i].MinUclk =
   1545 			cpu_to_le16((uint16_t)
   1546 			(clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
   1547 			1000));
   1548 		table->WatermarkRow[0][i].MaxUclk =
   1549 			cpu_to_le16((uint16_t)
   1550 			(clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
   1551 			1000));
   1552 		table->WatermarkRow[0][i].WmSetting = (uint8_t)
   1553 				clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
   1554 	}
   1555 
   1556 	return 0;
   1557 }
   1558 
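         /*
          * Read the hotspot, edge, or memory temperature from the cached SMU
          * metrics table and scale it by SMU_TEMPERATURE_UNITS_PER_CENTIGRADES
          * for the powerplay sensor interface.
          */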
   1559 static int navi10_thermal_get_temperature(struct smu_context *smu,
   1560 					     enum amd_pp_sensors sensor,
   1561 					     uint32_t *value)
   1562 {
   1563 	SmuMetrics_t metrics;
   1564 	int ret = 0;
   1565 
   1566 	if (!value)
   1567 		return -EINVAL;
   1568 
   1569 	ret = navi10_get_metrics_table(smu, &metrics);
   1570 	if (ret)
   1571 		return ret;
   1572 
   1573 	switch (sensor) {
   1574 	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
   1575 		*value = metrics.TemperatureHotspot *
   1576 			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
   1577 		break;
   1578 	case AMDGPU_PP_SENSOR_EDGE_TEMP:
   1579 		*value = metrics.TemperatureEdge *
   1580 			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
   1581 		break;
   1582 	case AMDGPU_PP_SENSOR_MEM_TEMP:
   1583 		*value = metrics.TemperatureMem *
   1584 			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
   1585 		break;
   1586 	default:
   1587 		pr_err("Invalid sensor for retrieving temp\n");
   1588 		return -EINVAL;
   1589 	}
   1590 
   1591 	return 0;
   1592 }
   1593 
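         /*
          * Sensor read entry point.  Sensors with a Navi1x-specific source
          * (fan limit from the pptable, GPU/MEM load, GPU power, temperatures)
          * are handled here under sensor_lock; everything else falls through
          * to the generic smu_v11_0_read_sensor() helper.
          */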
   1594 static int navi10_read_sensor(struct smu_context *smu,
   1595 				 enum amd_pp_sensors sensor,
   1596 				 void *data, uint32_t *size)
   1597 {
   1598 	int ret = 0;
   1599 	struct smu_table_context *table_context = &smu->smu_table;
   1600 	PPTable_t *pptable = table_context->driver_pptable;
   1601 
    1602 	if (!data || !size)
   1603 		return -EINVAL;
   1604 
   1605 	mutex_lock(&smu->sensor_lock);
   1606 	switch (sensor) {
   1607 	case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
   1608 		*(uint32_t *)data = pptable->FanMaximumRpm;
   1609 		*size = 4;
   1610 		break;
   1611 	case AMDGPU_PP_SENSOR_MEM_LOAD:
   1612 	case AMDGPU_PP_SENSOR_GPU_LOAD:
   1613 		ret = navi10_get_current_activity_percent(smu, sensor, (uint32_t *)data);
   1614 		*size = 4;
   1615 		break;
   1616 	case AMDGPU_PP_SENSOR_GPU_POWER:
   1617 		ret = navi10_get_gpu_power(smu, (uint32_t *)data);
   1618 		*size = 4;
   1619 		break;
   1620 	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
   1621 	case AMDGPU_PP_SENSOR_EDGE_TEMP:
   1622 	case AMDGPU_PP_SENSOR_MEM_TEMP:
   1623 		ret = navi10_thermal_get_temperature(smu, sensor, (uint32_t *)data);
   1624 		*size = 4;
   1625 		break;
   1626 	default:
   1627 		ret = smu_v11_0_read_sensor(smu, sensor, data, size);
   1628 	}
   1629 	mutex_unlock(&smu->sensor_lock);
   1630 
   1631 	return ret;
   1632 }
   1633 
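         /*
          * Report the discrete UCLK DPM levels from the driver pptable,
          * converting each entry from MHz to kHz into the caller-provided
          * array.  Only the level count is returned in *num_states; the
          * caller is expected to supply room for all of them.
          */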
   1634 static int navi10_get_uclk_dpm_states(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states)
   1635 {
   1636 	uint32_t num_discrete_levels = 0;
   1637 	uint16_t *dpm_levels = NULL;
   1638 	uint16_t i = 0;
   1639 	struct smu_table_context *table_context = &smu->smu_table;
   1640 	PPTable_t *driver_ppt = NULL;
   1641 
   1642 	if (!clocks_in_khz || !num_states || !table_context->driver_pptable)
   1643 		return -EINVAL;
   1644 
   1645 	driver_ppt = table_context->driver_pptable;
   1646 	num_discrete_levels = driver_ppt->DpmDescriptor[PPCLK_UCLK].NumDiscreteLevels;
   1647 	dpm_levels = driver_ppt->FreqTableUclk;
   1648 
   1649 	if (num_discrete_levels == 0 || dpm_levels == NULL)
   1650 		return -EINVAL;
   1651 
   1652 	*num_states = num_discrete_levels;
   1653 	for (i = 0; i < num_discrete_levels; i++) {
   1654 		/* convert to khz */
   1655 		*clocks_in_khz = (*dpm_levels) * 1000;
   1656 		clocks_in_khz++;
   1657 		dpm_levels++;
   1658 	}
   1659 
   1660 	return 0;
   1661 }
   1662 
   1663 static int navi10_set_performance_level(struct smu_context *smu,
   1664 					enum amd_dpm_forced_level level);
   1665 
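         /*
          * Program the "standard" UMD pstate: pin SCLK and UCLK to the
          * per-ASIC profiling frequencies for Navi10/Navi14; other ASICs fall
          * back to the AUTO performance level.
          */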
   1666 static int navi10_set_standard_performance_level(struct smu_context *smu)
   1667 {
   1668 	struct amdgpu_device *adev = smu->adev;
   1669 	int ret = 0;
   1670 	uint32_t sclk_freq = 0, uclk_freq = 0;
   1671 
   1672 	switch (adev->asic_type) {
   1673 	case CHIP_NAVI10:
   1674 		sclk_freq = NAVI10_UMD_PSTATE_PROFILING_GFXCLK;
   1675 		uclk_freq = NAVI10_UMD_PSTATE_PROFILING_MEMCLK;
   1676 		break;
   1677 	case CHIP_NAVI14:
   1678 		sclk_freq = NAVI14_UMD_PSTATE_PROFILING_GFXCLK;
   1679 		uclk_freq = NAVI14_UMD_PSTATE_PROFILING_MEMCLK;
   1680 		break;
   1681 	default:
    1682 		/* by default, this is the same as the auto performance level */
   1683 		return navi10_set_performance_level(smu, AMD_DPM_FORCED_LEVEL_AUTO);
   1684 	}
   1685 
   1686 	ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
   1687 	if (ret)
   1688 		return ret;
   1689 	ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
   1690 	if (ret)
   1691 		return ret;
   1692 
   1693 	return ret;
   1694 }
   1695 
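         /*
          * Program the "peak" UMD pstate: choose a peak GFXCLK per SKU (keyed
          * on the PCI revision ID for Navi10/Navi14, a fixed value for
          * Navi12, otherwise the limit reported by smu_get_dpm_level_range()),
          * pair it with the UCLK limit from the same helper, and pin both
          * clocks by setting a soft min == max range.
          */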
   1696 static int navi10_set_peak_performance_level(struct smu_context *smu)
   1697 {
   1698 	struct amdgpu_device *adev = smu->adev;
   1699 	int ret = 0;
   1700 	uint32_t sclk_freq = 0, uclk_freq = 0;
   1701 
   1702 	switch (adev->asic_type) {
   1703 	case CHIP_NAVI10:
   1704 		switch (adev->pdev->revision) {
   1705 		case 0xf0: /* XTX */
   1706 		case 0xc0:
   1707 			sclk_freq = NAVI10_PEAK_SCLK_XTX;
   1708 			break;
   1709 		case 0xf1: /* XT */
   1710 		case 0xc1:
   1711 			sclk_freq = NAVI10_PEAK_SCLK_XT;
   1712 			break;
   1713 		default: /* XL */
   1714 			sclk_freq = NAVI10_PEAK_SCLK_XL;
   1715 			break;
   1716 		}
   1717 		break;
   1718 	case CHIP_NAVI14:
   1719 		switch (adev->pdev->revision) {
   1720 		case 0xc7: /* XT */
   1721 		case 0xf4:
   1722 			sclk_freq = NAVI14_UMD_PSTATE_PEAK_XT_GFXCLK;
   1723 			break;
   1724 		case 0xc1: /* XTM */
   1725 		case 0xf2:
   1726 			sclk_freq = NAVI14_UMD_PSTATE_PEAK_XTM_GFXCLK;
   1727 			break;
   1728 		case 0xc3: /* XLM */
   1729 		case 0xf3:
   1730 			sclk_freq = NAVI14_UMD_PSTATE_PEAK_XLM_GFXCLK;
   1731 			break;
   1732 		case 0xc5: /* XTX */
   1733 		case 0xf6:
   1734 			sclk_freq = NAVI14_UMD_PSTATE_PEAK_XLM_GFXCLK;
   1735 			break;
   1736 		default: /* XL */
   1737 			sclk_freq = NAVI14_UMD_PSTATE_PEAK_XL_GFXCLK;
   1738 			break;
   1739 		}
   1740 		break;
   1741 	case CHIP_NAVI12:
   1742 		sclk_freq = NAVI12_UMD_PSTATE_PEAK_GFXCLK;
   1743 		break;
   1744 	default:
   1745 		ret = smu_get_dpm_level_range(smu, SMU_SCLK, NULL, &sclk_freq);
   1746 		if (ret)
   1747 			return ret;
   1748 	}
   1749 
   1750 	ret = smu_get_dpm_level_range(smu, SMU_UCLK, NULL, &uclk_freq);
   1751 	if (ret)
   1752 		return ret;
   1753 
   1754 	ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
   1755 	if (ret)
   1756 		return ret;
   1757 	ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
   1758 	if (ret)
   1759 		return ret;
   1760 
   1761 	return ret;
   1762 }
   1763 
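         /*
          * Dispatch the forced performance levels: HIGH/LOW clamp DPM to its
          * limit values, AUTO unforces all levels, PROFILE_STANDARD and
          * PROFILE_PEAK use the helpers above, PROFILE_MIN_SCLK/MIN_MCLK
          * force the level masks from navi10_get_profiling_clk_mask(), and
          * MANUAL/PROFILE_EXIT are no-ops here.
          */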
   1764 static int navi10_set_performance_level(struct smu_context *smu,
   1765 					enum amd_dpm_forced_level level)
   1766 {
   1767 	int ret = 0;
   1768 	uint32_t sclk_mask, mclk_mask, soc_mask;
   1769 
   1770 	switch (level) {
   1771 	case AMD_DPM_FORCED_LEVEL_HIGH:
   1772 		ret = smu_force_dpm_limit_value(smu, true);
   1773 		break;
   1774 	case AMD_DPM_FORCED_LEVEL_LOW:
   1775 		ret = smu_force_dpm_limit_value(smu, false);
   1776 		break;
   1777 	case AMD_DPM_FORCED_LEVEL_AUTO:
   1778 		ret = smu_unforce_dpm_levels(smu);
   1779 		break;
   1780 	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
   1781 		ret = navi10_set_standard_performance_level(smu);
   1782 		break;
   1783 	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
   1784 	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
   1785 		ret = smu_get_profiling_clk_mask(smu, level,
   1786 						 &sclk_mask,
   1787 						 &mclk_mask,
   1788 						 &soc_mask);
   1789 		if (ret)
   1790 			return ret;
   1791 		smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false);
   1792 		smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false);
   1793 		smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false);
   1794 		break;
   1795 	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
   1796 		ret = navi10_set_peak_performance_level(smu);
   1797 		break;
   1798 	case AMD_DPM_FORCED_LEVEL_MANUAL:
   1799 	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
   1800 	default:
   1801 		break;
   1802 	}
   1803 	return ret;
   1804 }
   1805 
   1806 static int navi10_get_thermal_temperature_range(struct smu_context *smu,
   1807 						struct smu_temperature_range *range)
   1808 {
   1809 	struct smu_table_context *table_context = &smu->smu_table;
   1810 	const struct smu_11_0_powerplay_table *powerplay_table = table_context->power_play_table;
   1811 
   1812 	if (!range || !powerplay_table)
   1813 		return -EINVAL;
   1814 
   1815 	range->max = powerplay_table->software_shutdown_temp *
   1816 		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
   1817 
   1818 	return 0;
   1819 }
   1820 
   1821 static int navi10_display_disable_memory_clock_switch(struct smu_context *smu,
   1822 						bool disable_memory_clock_switch)
   1823 {
   1824 	int ret = 0;
   1825 	struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks =
   1826 		(struct smu_11_0_max_sustainable_clocks *)
   1827 			smu->smu_table.max_sustainable_clocks;
   1828 	uint32_t min_memory_clock = smu->hard_min_uclk_req_from_dal;
   1829 	uint32_t max_memory_clock = max_sustainable_clocks->uclock;
   1830 
    1831 	if (smu->disable_uclk_switch == disable_memory_clock_switch)
   1832 		return 0;
   1833 
    1834 	if (disable_memory_clock_switch)
   1835 		ret = smu_set_hard_freq_range(smu, SMU_UCLK, max_memory_clock, 0);
   1836 	else
   1837 		ret = smu_set_hard_freq_range(smu, SMU_UCLK, min_memory_clock, 0);
   1838 
    1839 	if (!ret)
   1840 		smu->disable_uclk_switch = disable_memory_clock_switch;
   1841 
   1842 	return ret;
   1843 }
   1844 
   1845 static uint32_t navi10_get_pptable_power_limit(struct smu_context *smu)
   1846 {
   1847 	PPTable_t *pptable = smu->smu_table.driver_pptable;
   1848 	return pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
   1849 }
   1850 
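         /*
          * Return the power limit in use.  If none has been cached yet, query
          * the AC PPT limit from the SMU when the PPT feature is enabled,
          * otherwise fall back to SocketPowerLimitAc[PPT0] from the driver
          * pptable.  With "cap" set, the maximum settable limit is returned
          * instead.
          */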
   1851 static int navi10_get_power_limit(struct smu_context *smu,
   1852 				     uint32_t *limit,
   1853 				     bool cap)
   1854 {
   1855 	PPTable_t *pptable = smu->smu_table.driver_pptable;
   1856 	uint32_t asic_default_power_limit = 0;
   1857 	int ret = 0;
   1858 	int power_src;
   1859 
   1860 	if (!smu->power_limit) {
   1861 		if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
   1862 			power_src = smu_power_get_index(smu, SMU_POWER_SOURCE_AC);
   1863 			if (power_src < 0)
   1864 				return -EINVAL;
   1865 
   1866 			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit,
   1867 				power_src << 16);
   1868 			if (ret) {
   1869 				pr_err("[%s] get PPT limit failed!", __func__);
   1870 				return ret;
   1871 			}
   1872 			smu_read_smc_arg(smu, &asic_default_power_limit);
   1873 		} else {
   1874 			/* the last hope to figure out the ppt limit */
   1875 			if (!pptable) {
   1876 				pr_err("Cannot get PPT limit due to pptable missing!");
   1877 				return -EINVAL;
   1878 			}
   1879 			asic_default_power_limit =
   1880 				pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
   1881 		}
   1882 
   1883 		smu->power_limit = asic_default_power_limit;
   1884 	}
   1885 
   1886 	if (cap)
   1887 		*limit = smu_v11_0_get_max_power_limit(smu);
   1888 	else
   1889 		*limit = smu->power_limit;
   1890 
   1891 	return 0;
   1892 }
   1893 
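         /*
          * Clamp each PCIe link level in the pptable to the platform gen/width
          * caps and push the result to the SMU.  The message argument packs
          * (link level << 16) | (gen speed << 8) | lane count; clamped values
          * are also mirrored into the dpm_context PCIe table.
          */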
   1894 static int navi10_update_pcie_parameters(struct smu_context *smu,
   1895 				     uint32_t pcie_gen_cap,
   1896 				     uint32_t pcie_width_cap)
   1897 {
   1898 	PPTable_t *pptable = smu->smu_table.driver_pptable;
   1899 	int ret, i;
   1900 	uint32_t smu_pcie_arg;
   1901 
   1902 	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
   1903 	struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
   1904 
   1905 	for (i = 0; i < NUM_LINK_LEVELS; i++) {
   1906 		smu_pcie_arg = (i << 16) |
   1907 			((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) :
   1908 				(pcie_gen_cap << 8)) | ((pptable->PcieLaneCount[i] <= pcie_width_cap) ?
   1909 					pptable->PcieLaneCount[i] : pcie_width_cap);
   1910 		ret = smu_send_smc_msg_with_param(smu,
   1911 					  SMU_MSG_OverridePcieParameters,
   1912 					  smu_pcie_arg);
   1913 
   1914 		if (ret)
   1915 			return ret;
   1916 
   1917 		if (pptable->PcieGenSpeed[i] > pcie_gen_cap)
   1918 			dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap;
   1919 		if (pptable->PcieLaneCount[i] > pcie_width_cap)
   1920 			dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap;
   1921 	}
   1922 
   1923 	return 0;
   1924 }
   1925 
   1926 static inline void navi10_dump_od_table(OverDriveTable_t *od_table) {
   1927 	pr_debug("OD: Gfxclk: (%d, %d)\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
   1928 	pr_debug("OD: Gfx1: (%d, %d)\n", od_table->GfxclkFreq1, od_table->GfxclkVolt1);
   1929 	pr_debug("OD: Gfx2: (%d, %d)\n", od_table->GfxclkFreq2, od_table->GfxclkVolt2);
   1930 	pr_debug("OD: Gfx3: (%d, %d)\n", od_table->GfxclkFreq3, od_table->GfxclkVolt3);
   1931 	pr_debug("OD: UclkFmax: %d\n", od_table->UclkFmax);
   1932 	pr_debug("OD: OverDrivePct: %d\n", od_table->OverDrivePct);
   1933 }
   1934 
   1935 static int navi10_od_setting_check_range(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODSETTING_ID setting, uint32_t value)
   1936 {
   1937 	if (value < od_table->min[setting]) {
   1938 		pr_warn("OD setting (%d, %d) is less than the minimum allowed (%d)\n", setting, value, od_table->min[setting]);
   1939 		return -EINVAL;
   1940 	}
   1941 	if (value > od_table->max[setting]) {
   1942 		pr_warn("OD setting (%d, %d) is greater than the maximum allowed (%d)\n", setting, value, od_table->max[setting]);
   1943 		return -EINVAL;
   1944 	}
   1945 	return 0;
   1946 }
   1947 
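         /*
          * Query the SMU for the AVFS base voltage of a GFXCLK frequency
          * point.  The message argument packs PPCLK_GFXCLK in the upper 16
          * bits and the frequency in the lower 16 bits; the returned value is
          * truncated to uint16_t for the OD table.
          */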
   1948 static int navi10_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu,
   1949 						     uint16_t *voltage,
   1950 						     uint32_t freq)
   1951 {
   1952 	uint32_t param = (freq & 0xFFFF) | (PPCLK_GFXCLK << 16);
   1953 	uint32_t value = 0;
   1954 	int ret;
   1955 
   1956 	ret = smu_send_smc_msg_with_param(smu,
   1957 					  SMU_MSG_GetVoltageByDpm,
   1958 					  param);
   1959 	if (ret) {
   1960 		pr_err("[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!");
   1961 		return ret;
   1962 	}
   1963 
   1964 	smu_read_smc_arg(smu, &value);
   1965 	*voltage = (uint16_t)value;
   1966 
   1967 	return 0;
   1968 }
   1969 
   1970 static int navi10_setup_od_limits(struct smu_context *smu) {
   1971 	const struct smu_11_0_overdrive_table *overdrive_table = NULL;
   1972 	const struct smu_11_0_powerplay_table *powerplay_table = NULL;
   1973 
   1974 	if (!smu->smu_table.power_play_table) {
   1975 		pr_err("powerplay table uninitialized!\n");
   1976 		return -ENOENT;
   1977 	}
   1978 	powerplay_table = (const struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;
   1979 	overdrive_table = &powerplay_table->overdrive_table;
   1980 	if (!smu->od_settings) {
    1981 		smu->od_settings = kmemdup(overdrive_table, sizeof(struct smu_11_0_overdrive_table), GFP_KERNEL);
         		if (!smu->od_settings)
         			return -ENOMEM;
   1982 	} else {
   1983 		memcpy(smu->od_settings, overdrive_table, sizeof(struct smu_11_0_overdrive_table));
   1984 	}
   1985 	return 0;
   1986 }
   1987 
   1988 static int navi10_set_default_od_settings(struct smu_context *smu, bool initialize) {
   1989 	OverDriveTable_t *od_table, *boot_od_table;
   1990 	int ret = 0;
   1991 
   1992 	ret = smu_v11_0_set_default_od_settings(smu, initialize, sizeof(OverDriveTable_t));
   1993 	if (ret)
   1994 		return ret;
   1995 
   1996 	od_table = (OverDriveTable_t *)smu->smu_table.overdrive_table;
   1997 	boot_od_table = (OverDriveTable_t *)smu->smu_table.boot_overdrive_table;
   1998 	if (initialize) {
   1999 		ret = navi10_setup_od_limits(smu);
   2000 		if (ret) {
   2001 			pr_err("Failed to retrieve board OD limits\n");
   2002 			return ret;
   2003 		}
   2004 		if (od_table) {
   2005 			if (!od_table->GfxclkVolt1) {
   2006 				ret = navi10_overdrive_get_gfx_clk_base_voltage(smu,
   2007 										&od_table->GfxclkVolt1,
   2008 										od_table->GfxclkFreq1);
   2009 				if (ret)
   2010 					od_table->GfxclkVolt1 = 0;
   2011 				if (boot_od_table)
   2012 					boot_od_table->GfxclkVolt1 = od_table->GfxclkVolt1;
   2013 			}
   2014 
   2015 			if (!od_table->GfxclkVolt2) {
   2016 				ret = navi10_overdrive_get_gfx_clk_base_voltage(smu,
   2017 										&od_table->GfxclkVolt2,
   2018 										od_table->GfxclkFreq2);
   2019 				if (ret)
   2020 					od_table->GfxclkVolt2 = 0;
   2021 				if (boot_od_table)
   2022 					boot_od_table->GfxclkVolt2 = od_table->GfxclkVolt2;
   2023 			}
   2024 
   2025 			if (!od_table->GfxclkVolt3) {
   2026 				ret = navi10_overdrive_get_gfx_clk_base_voltage(smu,
   2027 										&od_table->GfxclkVolt3,
   2028 										od_table->GfxclkFreq3);
   2029 				if (ret)
   2030 					od_table->GfxclkVolt3 = 0;
   2031 				if (boot_od_table)
   2032 					boot_od_table->GfxclkVolt3 = od_table->GfxclkVolt3;
   2033 			}
   2034 		}
   2035 	}
   2036 
   2037 	if (od_table) {
   2038 		navi10_dump_od_table(od_table);
   2039 	}
   2040 
   2041 	return ret;
   2042 }
   2043 
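         /*
          * Backend for the OverDrive table edit commands.  Depending on the
          * command this edits the GFXCLK min/max, the UCLK max, or one of the
          * three GFXCLK voltage-curve points in the cached OverDriveTable_t,
          * restores the boot-time table, or commits the table to the SMU and
          * readjusts the power state.  Edited values are range-checked
          * against the board OD limits first.
          */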
   2044 static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, long input[], uint32_t size) {
   2045 	int i;
   2046 	int ret = 0;
   2047 	struct smu_table_context *table_context = &smu->smu_table;
   2048 	OverDriveTable_t *od_table;
   2049 	struct smu_11_0_overdrive_table *od_settings;
   2050 	enum SMU_11_0_ODSETTING_ID freq_setting, voltage_setting;
   2051 	uint16_t *freq_ptr, *voltage_ptr;
   2052 	od_table = (OverDriveTable_t *)table_context->overdrive_table;
   2053 
   2054 	if (!smu->od_enabled) {
   2055 		pr_warn("OverDrive is not enabled!\n");
   2056 		return -EINVAL;
   2057 	}
   2058 
   2059 	if (!smu->od_settings) {
   2060 		pr_err("OD board limits are not set!\n");
   2061 		return -ENOENT;
   2062 	}
   2063 
   2064 	od_settings = smu->od_settings;
   2065 
   2066 	switch (type) {
   2067 	case PP_OD_EDIT_SCLK_VDDC_TABLE:
   2068 		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
   2069 			pr_warn("GFXCLK_LIMITS not supported!\n");
   2070 			return -ENOTSUPP;
   2071 		}
   2072 		if (!table_context->overdrive_table) {
   2073 			pr_err("Overdrive is not initialized\n");
   2074 			return -EINVAL;
   2075 		}
   2076 		for (i = 0; i < size; i += 2) {
   2077 			if (i + 2 > size) {
   2078 				pr_info("invalid number of input parameters %d\n", size);
   2079 				return -EINVAL;
   2080 			}
   2081 			switch (input[i]) {
   2082 			case 0:
   2083 				freq_setting = SMU_11_0_ODSETTING_GFXCLKFMIN;
   2084 				freq_ptr = &od_table->GfxclkFmin;
   2085 				if (input[i + 1] > od_table->GfxclkFmax) {
   2086 					pr_info("GfxclkFmin (%ld) must be <= GfxclkFmax (%u)!\n",
   2087 						input[i + 1],
    2088 						od_table->GfxclkFmax);
   2089 					return -EINVAL;
   2090 				}
   2091 				break;
   2092 			case 1:
   2093 				freq_setting = SMU_11_0_ODSETTING_GFXCLKFMAX;
   2094 				freq_ptr = &od_table->GfxclkFmax;
   2095 				if (input[i + 1] < od_table->GfxclkFmin) {
   2096 					pr_info("GfxclkFmax (%ld) must be >= GfxclkFmin (%u)!\n",
   2097 						input[i + 1],
    2098 						od_table->GfxclkFmin);
   2099 					return -EINVAL;
   2100 				}
   2101 				break;
   2102 			default:
   2103 				pr_info("Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]);
   2104 				pr_info("Supported indices: [0:min,1:max]\n");
   2105 				return -EINVAL;
   2106 			}
   2107 			ret = navi10_od_setting_check_range(od_settings, freq_setting, input[i + 1]);
   2108 			if (ret)
   2109 				return ret;
   2110 			*freq_ptr = input[i + 1];
   2111 		}
   2112 		break;
   2113 	case PP_OD_EDIT_MCLK_VDDC_TABLE:
   2114 		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX)) {
   2115 			pr_warn("UCLK_MAX not supported!\n");
   2116 			return -ENOTSUPP;
   2117 		}
   2118 		if (size < 2) {
   2119 			pr_info("invalid number of parameters: %d\n", size);
   2120 			return -EINVAL;
   2121 		}
   2122 		if (input[0] != 1) {
   2123 			pr_info("Invalid MCLK_VDDC_TABLE index: %ld\n", input[0]);
   2124 			pr_info("Supported indices: [1:max]\n");
   2125 			return -EINVAL;
   2126 		}
   2127 		ret = navi10_od_setting_check_range(od_settings, SMU_11_0_ODSETTING_UCLKFMAX, input[1]);
   2128 		if (ret)
   2129 			return ret;
   2130 		od_table->UclkFmax = input[1];
   2131 		break;
   2132 	case PP_OD_RESTORE_DEFAULT_TABLE:
   2133 		if (!(table_context->overdrive_table && table_context->boot_overdrive_table)) {
   2134 			pr_err("Overdrive table was not initialized!\n");
   2135 			return -EINVAL;
   2136 		}
   2137 		memcpy(table_context->overdrive_table, table_context->boot_overdrive_table, sizeof(OverDriveTable_t));
   2138 		break;
   2139 	case PP_OD_COMMIT_DPM_TABLE:
   2140 		navi10_dump_od_table(od_table);
   2141 		ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, true);
   2142 		if (ret) {
   2143 			pr_err("Failed to import overdrive table!\n");
   2144 			return ret;
   2145 		}
   2146 		// no lock needed because smu_od_edit_dpm_table has it
   2147 		ret = smu_handle_task(smu, smu->smu_dpm.dpm_level,
   2148 			AMD_PP_TASK_READJUST_POWER_STATE,
   2149 			false);
   2150 		if (ret) {
   2151 			return ret;
   2152 		}
   2153 		break;
   2154 	case PP_OD_EDIT_VDDC_CURVE:
   2155 		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE)) {
   2156 			pr_warn("GFXCLK_CURVE not supported!\n");
   2157 			return -ENOTSUPP;
   2158 		}
   2159 		if (size < 3) {
   2160 			pr_info("invalid number of parameters: %d\n", size);
   2161 			return -EINVAL;
   2162 		}
   2163 		if (!od_table) {
   2164 			pr_info("Overdrive is not initialized\n");
   2165 			return -EINVAL;
   2166 		}
   2167 
   2168 		switch (input[0]) {
   2169 		case 0:
   2170 			freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1;
   2171 			voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1;
   2172 			freq_ptr = &od_table->GfxclkFreq1;
   2173 			voltage_ptr = &od_table->GfxclkVolt1;
   2174 			break;
   2175 		case 1:
   2176 			freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2;
   2177 			voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2;
   2178 			freq_ptr = &od_table->GfxclkFreq2;
   2179 			voltage_ptr = &od_table->GfxclkVolt2;
   2180 			break;
   2181 		case 2:
   2182 			freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3;
   2183 			voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3;
   2184 			freq_ptr = &od_table->GfxclkFreq3;
   2185 			voltage_ptr = &od_table->GfxclkVolt3;
   2186 			break;
   2187 		default:
   2188 			pr_info("Invalid VDDC_CURVE index: %ld\n", input[0]);
   2189 			pr_info("Supported indices: [0, 1, 2]\n");
   2190 			return -EINVAL;
   2191 		}
   2192 		ret = navi10_od_setting_check_range(od_settings, freq_setting, input[1]);
   2193 		if (ret)
   2194 			return ret;
   2195 		// Allow setting zero to disable the OverDrive VDDC curve
   2196 		if (input[2] != 0) {
   2197 			ret = navi10_od_setting_check_range(od_settings, voltage_setting, input[2]);
   2198 			if (ret)
   2199 				return ret;
   2200 			*freq_ptr = input[1];
   2201 			*voltage_ptr = ((uint16_t)input[2]) * NAVI10_VOLTAGE_SCALE;
   2202 			pr_debug("OD: set curve %ld: (%d, %d)\n", input[0], *freq_ptr, *voltage_ptr);
   2203 		} else {
   2204 			// If setting 0, disable all voltage curve settings
   2205 			od_table->GfxclkVolt1 = 0;
   2206 			od_table->GfxclkVolt2 = 0;
   2207 			od_table->GfxclkVolt3 = 0;
   2208 		}
   2209 		navi10_dump_od_table(od_table);
   2210 		break;
   2211 	default:
   2212 		return -ENOSYS;
   2213 	}
   2214 	return ret;
   2215 }
   2216 
   2217 static int navi10_run_btc(struct smu_context *smu)
   2218 {
   2219 	int ret = 0;
   2220 
   2221 	ret = smu_send_smc_msg(smu, SMU_MSG_RunBtc);
   2222 	if (ret)
   2223 		pr_err("RunBtc failed!\n");
   2224 
   2225 	return ret;
   2226 }
   2227 
   2228 static int navi10_dummy_pstate_control(struct smu_context *smu, bool enable)
   2229 {
   2230 	int result = 0;
   2231 
   2232 	if (!enable)
   2233 		result = smu_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE);
   2234 	else
   2235 		result = smu_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE);
   2236 
   2237 	return result;
   2238 }
   2239 
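         /*
          * Workaround for SMC firmware 42.50 and newer: clamp the UCLK hard
          * maximum down to the lowest DPM level and then restore it to the
          * highest one, forcing UCLK out of its top state, and re-enable the
          * dummy pstate that the SMU disabled while bringing up UCLK DPM.
          */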
   2240 static int navi10_disable_umc_cdr_12gbps_workaround(struct smu_context *smu)
   2241 {
   2242 	uint32_t uclk_count, uclk_min, uclk_max;
   2243 	uint32_t smu_version;
   2244 	int ret = 0;
   2245 
   2246 	ret = smu_get_smc_version(smu, NULL, &smu_version);
   2247 	if (ret)
   2248 		return ret;
   2249 
    2250 	/* This workaround is available only for 42.50 or later SMC firmware */
   2251 	if (smu_version < 0x2A3200)
   2252 		return 0;
   2253 
   2254 	ret = smu_get_dpm_level_count(smu, SMU_UCLK, &uclk_count);
   2255 	if (ret)
   2256 		return ret;
   2257 
   2258 	ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)0, &uclk_min);
   2259 	if (ret)
   2260 		return ret;
   2261 
   2262 	ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)(uclk_count - 1), &uclk_max);
   2263 	if (ret)
   2264 		return ret;
   2265 
   2266 	/* Force UCLK out of the highest DPM */
   2267 	ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, uclk_min);
   2268 	if (ret)
   2269 		return ret;
   2270 
   2271 	/* Revert the UCLK Hardmax */
   2272 	ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, uclk_max);
   2273 	if (ret)
   2274 		return ret;
   2275 
    2276 	/*
    2277 	 * In this case, the SMU already disabled the dummy pstate during
    2278 	 * enablement of UCLK DPM, so we have to re-enable it here.
    2279 	 */
   2280 	return navi10_dummy_pstate_control(smu, true);
   2281 }
   2282 
   2283 static const struct pptable_funcs navi10_ppt_funcs = {
   2284 	.tables_init = navi10_tables_init,
   2285 	.alloc_dpm_context = navi10_allocate_dpm_context,
   2286 	.store_powerplay_table = navi10_store_powerplay_table,
   2287 	.check_powerplay_table = navi10_check_powerplay_table,
   2288 	.append_powerplay_table = navi10_append_powerplay_table,
   2289 	.get_smu_msg_index = navi10_get_smu_msg_index,
   2290 	.get_smu_clk_index = navi10_get_smu_clk_index,
   2291 	.get_smu_feature_index = navi10_get_smu_feature_index,
   2292 	.get_smu_table_index = navi10_get_smu_table_index,
   2293 	.get_smu_power_index = navi10_get_pwr_src_index,
   2294 	.get_workload_type = navi10_get_workload_type,
   2295 	.get_allowed_feature_mask = navi10_get_allowed_feature_mask,
   2296 	.set_default_dpm_table = navi10_set_default_dpm_table,
   2297 	.dpm_set_uvd_enable = navi10_dpm_set_uvd_enable,
   2298 	.dpm_set_jpeg_enable = navi10_dpm_set_jpeg_enable,
   2299 	.get_current_clk_freq_by_table = navi10_get_current_clk_freq_by_table,
   2300 	.print_clk_levels = navi10_print_clk_levels,
   2301 	.force_clk_levels = navi10_force_clk_levels,
   2302 	.populate_umd_state_clk = navi10_populate_umd_state_clk,
   2303 	.get_clock_by_type_with_latency = navi10_get_clock_by_type_with_latency,
   2304 	.pre_display_config_changed = navi10_pre_display_config_changed,
   2305 	.display_config_changed = navi10_display_config_changed,
   2306 	.notify_smc_display_config = navi10_notify_smc_display_config,
   2307 	.force_dpm_limit_value = navi10_force_dpm_limit_value,
   2308 	.unforce_dpm_levels = navi10_unforce_dpm_levels,
   2309 	.is_dpm_running = navi10_is_dpm_running,
   2310 	.get_fan_speed_percent = navi10_get_fan_speed_percent,
   2311 	.get_fan_speed_rpm = navi10_get_fan_speed_rpm,
   2312 	.get_power_profile_mode = navi10_get_power_profile_mode,
   2313 	.set_power_profile_mode = navi10_set_power_profile_mode,
   2314 	.get_profiling_clk_mask = navi10_get_profiling_clk_mask,
   2315 	.set_watermarks_table = navi10_set_watermarks_table,
   2316 	.read_sensor = navi10_read_sensor,
   2317 	.get_uclk_dpm_states = navi10_get_uclk_dpm_states,
   2318 	.set_performance_level = navi10_set_performance_level,
   2319 	.get_thermal_temperature_range = navi10_get_thermal_temperature_range,
   2320 	.display_disable_memory_clock_switch = navi10_display_disable_memory_clock_switch,
   2321 	.get_power_limit = navi10_get_power_limit,
   2322 	.update_pcie_parameters = navi10_update_pcie_parameters,
   2323 	.init_microcode = smu_v11_0_init_microcode,
   2324 	.load_microcode = smu_v11_0_load_microcode,
   2325 	.init_smc_tables = smu_v11_0_init_smc_tables,
   2326 	.fini_smc_tables = smu_v11_0_fini_smc_tables,
   2327 	.init_power = smu_v11_0_init_power,
   2328 	.fini_power = smu_v11_0_fini_power,
   2329 	.check_fw_status = smu_v11_0_check_fw_status,
   2330 	.setup_pptable = smu_v11_0_setup_pptable,
   2331 	.get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
   2332 	.get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios,
   2333 	.check_pptable = smu_v11_0_check_pptable,
   2334 	.parse_pptable = smu_v11_0_parse_pptable,
   2335 	.populate_smc_tables = smu_v11_0_populate_smc_pptable,
   2336 	.check_fw_version = smu_v11_0_check_fw_version,
   2337 	.write_pptable = smu_v11_0_write_pptable,
   2338 	.set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
   2339 	.set_driver_table_location = smu_v11_0_set_driver_table_location,
   2340 	.set_tool_table_location = smu_v11_0_set_tool_table_location,
   2341 	.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
   2342 	.system_features_control = smu_v11_0_system_features_control,
   2343 	.send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
   2344 	.read_smc_arg = smu_v11_0_read_arg,
   2345 	.init_display_count = smu_v11_0_init_display_count,
   2346 	.set_allowed_mask = smu_v11_0_set_allowed_mask,
   2347 	.get_enabled_mask = smu_v11_0_get_enabled_mask,
   2348 	.notify_display_change = smu_v11_0_notify_display_change,
   2349 	.set_power_limit = smu_v11_0_set_power_limit,
   2350 	.get_current_clk_freq = smu_v11_0_get_current_clk_freq,
   2351 	.init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
   2352 	.start_thermal_control = smu_v11_0_start_thermal_control,
   2353 	.stop_thermal_control = smu_v11_0_stop_thermal_control,
   2354 	.set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
   2355 	.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
   2356 	.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
   2357 	.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
   2358 	.set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
   2359 	.set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
   2360 	.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
   2361 	.gfx_off_control = smu_v11_0_gfx_off_control,
   2362 	.register_irq_handler = smu_v11_0_register_irq_handler,
   2363 	.set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
   2364 	.get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
    2365 	.baco_is_support = smu_v11_0_baco_is_support,
   2366 	.baco_get_state = smu_v11_0_baco_get_state,
   2367 	.baco_set_state = smu_v11_0_baco_set_state,
   2368 	.baco_enter = smu_v11_0_baco_enter,
   2369 	.baco_exit = smu_v11_0_baco_exit,
   2370 	.get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
   2371 	.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
   2372 	.override_pcie_parameters = smu_v11_0_override_pcie_parameters,
   2373 	.set_default_od_settings = navi10_set_default_od_settings,
   2374 	.od_edit_dpm_table = navi10_od_edit_dpm_table,
   2375 	.get_pptable_power_limit = navi10_get_pptable_power_limit,
   2376 	.run_btc = navi10_run_btc,
   2377 	.disable_umc_cdr_12gbps_workaround = navi10_disable_umc_cdr_12gbps_workaround,
   2378 };
   2379 
   2380 void navi10_set_ppt_funcs(struct smu_context *smu)
   2381 {
   2382 	smu->ppt_funcs = &navi10_ppt_funcs;
   2383 }
   2384