1 1.6 riastrad /* $NetBSD: amdgpu_dpm.c,v 1.6 2021/12/18 23:44:58 riastradh Exp $ */ 2 1.1 riastrad 3 1.1 riastrad /* 4 1.1 riastrad * Copyright 2011 Advanced Micro Devices, Inc. 5 1.1 riastrad * 6 1.1 riastrad * Permission is hereby granted, free of charge, to any person obtaining a 7 1.1 riastrad * copy of this software and associated documentation files (the "Software"), 8 1.1 riastrad * to deal in the Software without restriction, including without limitation 9 1.1 riastrad * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 1.1 riastrad * and/or sell copies of the Software, and to permit persons to whom the 11 1.1 riastrad * Software is furnished to do so, subject to the following conditions: 12 1.1 riastrad * 13 1.1 riastrad * The above copyright notice and this permission notice shall be included in 14 1.1 riastrad * all copies or substantial portions of the Software. 15 1.1 riastrad * 16 1.1 riastrad * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 1.1 riastrad * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 1.1 riastrad * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 1.1 riastrad * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 1.1 riastrad * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 1.1 riastrad * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 1.1 riastrad * OTHER DEALINGS IN THE SOFTWARE. 
23 1.1 riastrad * 24 1.1 riastrad * Authors: Alex Deucher 25 1.1 riastrad */ 26 1.1 riastrad 27 1.1 riastrad #include <sys/cdefs.h> 28 1.6 riastrad __KERNEL_RCSID(0, "$NetBSD: amdgpu_dpm.c,v 1.6 2021/12/18 23:44:58 riastradh Exp $"); 29 1.1 riastrad 30 1.1 riastrad #include "amdgpu.h" 31 1.1 riastrad #include "amdgpu_atombios.h" 32 1.1 riastrad #include "amdgpu_i2c.h" 33 1.1 riastrad #include "amdgpu_dpm.h" 34 1.1 riastrad #include "atom.h" 35 1.6 riastrad #include "amd_pcie.h" 36 1.1 riastrad 37 1.1 riastrad void amdgpu_dpm_print_class_info(u32 class, u32 class2) 38 1.1 riastrad { 39 1.6 riastrad const char *s; 40 1.6 riastrad 41 1.1 riastrad switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) { 42 1.1 riastrad case ATOM_PPLIB_CLASSIFICATION_UI_NONE: 43 1.1 riastrad default: 44 1.6 riastrad s = "none"; 45 1.1 riastrad break; 46 1.1 riastrad case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY: 47 1.6 riastrad s = "battery"; 48 1.1 riastrad break; 49 1.1 riastrad case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED: 50 1.6 riastrad s = "balanced"; 51 1.1 riastrad break; 52 1.1 riastrad case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE: 53 1.6 riastrad s = "performance"; 54 1.1 riastrad break; 55 1.1 riastrad } 56 1.6 riastrad printk("\tui class: %s\n", s); 57 1.6 riastrad printk("\tinternal class:"); 58 1.1 riastrad if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) && 59 1.1 riastrad (class2 == 0)) 60 1.6 riastrad pr_cont(" none"); 61 1.1 riastrad else { 62 1.1 riastrad if (class & ATOM_PPLIB_CLASSIFICATION_BOOT) 63 1.6 riastrad pr_cont(" boot"); 64 1.1 riastrad if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL) 65 1.6 riastrad pr_cont(" thermal"); 66 1.1 riastrad if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE) 67 1.6 riastrad pr_cont(" limited_pwr"); 68 1.1 riastrad if (class & ATOM_PPLIB_CLASSIFICATION_REST) 69 1.6 riastrad pr_cont(" rest"); 70 1.1 riastrad if (class & ATOM_PPLIB_CLASSIFICATION_FORCED) 71 1.6 riastrad pr_cont(" forced"); 72 1.1 riastrad if (class & 
ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) 73 1.6 riastrad pr_cont(" 3d_perf"); 74 1.1 riastrad if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE) 75 1.6 riastrad pr_cont(" ovrdrv"); 76 1.1 riastrad if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) 77 1.6 riastrad pr_cont(" uvd"); 78 1.1 riastrad if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW) 79 1.6 riastrad pr_cont(" 3d_low"); 80 1.1 riastrad if (class & ATOM_PPLIB_CLASSIFICATION_ACPI) 81 1.6 riastrad pr_cont(" acpi"); 82 1.1 riastrad if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) 83 1.6 riastrad pr_cont(" uvd_hd2"); 84 1.1 riastrad if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) 85 1.6 riastrad pr_cont(" uvd_hd"); 86 1.1 riastrad if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) 87 1.6 riastrad pr_cont(" uvd_sd"); 88 1.1 riastrad if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2) 89 1.6 riastrad pr_cont(" limited_pwr2"); 90 1.1 riastrad if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) 91 1.6 riastrad pr_cont(" ulv"); 92 1.1 riastrad if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) 93 1.6 riastrad pr_cont(" uvd_mvc"); 94 1.1 riastrad } 95 1.6 riastrad pr_cont("\n"); 96 1.1 riastrad } 97 1.1 riastrad 98 1.1 riastrad void amdgpu_dpm_print_cap_info(u32 caps) 99 1.1 riastrad { 100 1.6 riastrad printk("\tcaps:"); 101 1.1 riastrad if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) 102 1.6 riastrad pr_cont(" single_disp"); 103 1.1 riastrad if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK) 104 1.6 riastrad pr_cont(" video"); 105 1.1 riastrad if (caps & ATOM_PPLIB_DISALLOW_ON_DC) 106 1.6 riastrad pr_cont(" no_dc"); 107 1.6 riastrad pr_cont("\n"); 108 1.1 riastrad } 109 1.1 riastrad 110 1.1 riastrad void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev, 111 1.1 riastrad struct amdgpu_ps *rps) 112 1.1 riastrad { 113 1.6 riastrad printk("\tstatus:"); 114 1.1 riastrad if (rps == adev->pm.dpm.current_ps) 115 1.6 riastrad pr_cont(" c"); 116 1.1 riastrad if (rps == adev->pm.dpm.requested_ps) 117 1.6 riastrad pr_cont(" r"); 118 1.1 
riastrad if (rps == adev->pm.dpm.boot_ps) 119 1.6 riastrad pr_cont(" b"); 120 1.6 riastrad pr_cont("\n"); 121 1.6 riastrad } 122 1.6 riastrad 123 1.6 riastrad void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev) 124 1.6 riastrad { 125 1.6 riastrad struct drm_device *ddev = adev->ddev; 126 1.6 riastrad struct drm_crtc *crtc; 127 1.6 riastrad struct amdgpu_crtc *amdgpu_crtc; 128 1.6 riastrad 129 1.6 riastrad adev->pm.dpm.new_active_crtcs = 0; 130 1.6 riastrad adev->pm.dpm.new_active_crtc_count = 0; 131 1.6 riastrad if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { 132 1.6 riastrad list_for_each_entry(crtc, 133 1.6 riastrad &ddev->mode_config.crtc_list, head) { 134 1.6 riastrad amdgpu_crtc = to_amdgpu_crtc(crtc); 135 1.6 riastrad if (amdgpu_crtc->enabled) { 136 1.6 riastrad adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id); 137 1.6 riastrad adev->pm.dpm.new_active_crtc_count++; 138 1.6 riastrad } 139 1.6 riastrad } 140 1.6 riastrad } 141 1.1 riastrad } 142 1.1 riastrad 143 1.1 riastrad 144 1.1 riastrad u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev) 145 1.1 riastrad { 146 1.1 riastrad struct drm_device *dev = adev->ddev; 147 1.1 riastrad struct drm_crtc *crtc; 148 1.1 riastrad struct amdgpu_crtc *amdgpu_crtc; 149 1.1 riastrad u32 vblank_in_pixels; 150 1.1 riastrad u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */ 151 1.1 riastrad 152 1.1 riastrad if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { 153 1.1 riastrad list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 154 1.1 riastrad amdgpu_crtc = to_amdgpu_crtc(crtc); 155 1.1 riastrad if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) { 156 1.1 riastrad vblank_in_pixels = 157 1.1 riastrad amdgpu_crtc->hw_mode.crtc_htotal * 158 1.1 riastrad (amdgpu_crtc->hw_mode.crtc_vblank_end - 159 1.1 riastrad amdgpu_crtc->hw_mode.crtc_vdisplay + 160 1.1 riastrad (amdgpu_crtc->v_border * 
2)); 161 1.1 riastrad 162 1.1 riastrad vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock; 163 1.1 riastrad break; 164 1.1 riastrad } 165 1.1 riastrad } 166 1.1 riastrad } 167 1.1 riastrad 168 1.1 riastrad return vblank_time_us; 169 1.1 riastrad } 170 1.1 riastrad 171 1.1 riastrad u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev) 172 1.1 riastrad { 173 1.1 riastrad struct drm_device *dev = adev->ddev; 174 1.1 riastrad struct drm_crtc *crtc; 175 1.1 riastrad struct amdgpu_crtc *amdgpu_crtc; 176 1.1 riastrad u32 vrefresh = 0; 177 1.1 riastrad 178 1.1 riastrad if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { 179 1.1 riastrad list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 180 1.1 riastrad amdgpu_crtc = to_amdgpu_crtc(crtc); 181 1.1 riastrad if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) { 182 1.1 riastrad vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode); 183 1.1 riastrad break; 184 1.1 riastrad } 185 1.1 riastrad } 186 1.1 riastrad } 187 1.1 riastrad 188 1.1 riastrad return vrefresh; 189 1.1 riastrad } 190 1.1 riastrad 191 1.1 riastrad bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor) 192 1.1 riastrad { 193 1.1 riastrad switch (sensor) { 194 1.1 riastrad case THERMAL_TYPE_RV6XX: 195 1.1 riastrad case THERMAL_TYPE_RV770: 196 1.1 riastrad case THERMAL_TYPE_EVERGREEN: 197 1.1 riastrad case THERMAL_TYPE_SUMO: 198 1.1 riastrad case THERMAL_TYPE_NI: 199 1.1 riastrad case THERMAL_TYPE_SI: 200 1.1 riastrad case THERMAL_TYPE_CI: 201 1.1 riastrad case THERMAL_TYPE_KV: 202 1.1 riastrad return true; 203 1.1 riastrad case THERMAL_TYPE_ADT7473_WITH_INTERNAL: 204 1.1 riastrad case THERMAL_TYPE_EMC2103_WITH_INTERNAL: 205 1.1 riastrad return false; /* need special handling */ 206 1.1 riastrad case THERMAL_TYPE_NONE: 207 1.1 riastrad case THERMAL_TYPE_EXTERNAL: 208 1.1 riastrad case THERMAL_TYPE_EXTERNAL_GPIO: 209 1.1 riastrad default: 210 1.1 riastrad return false; 
211 1.1 riastrad } 212 1.1 riastrad } 213 1.1 riastrad 214 1.1 riastrad union power_info { 215 1.1 riastrad struct _ATOM_POWERPLAY_INFO info; 216 1.1 riastrad struct _ATOM_POWERPLAY_INFO_V2 info_2; 217 1.1 riastrad struct _ATOM_POWERPLAY_INFO_V3 info_3; 218 1.1 riastrad struct _ATOM_PPLIB_POWERPLAYTABLE pplib; 219 1.1 riastrad struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; 220 1.1 riastrad struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; 221 1.1 riastrad struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4; 222 1.1 riastrad struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5; 223 1.1 riastrad }; 224 1.1 riastrad 225 1.1 riastrad union fan_info { 226 1.1 riastrad struct _ATOM_PPLIB_FANTABLE fan; 227 1.1 riastrad struct _ATOM_PPLIB_FANTABLE2 fan2; 228 1.1 riastrad struct _ATOM_PPLIB_FANTABLE3 fan3; 229 1.1 riastrad }; 230 1.1 riastrad 231 1.1 riastrad static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table, 232 1.1 riastrad ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table) 233 1.1 riastrad { 234 1.1 riastrad u32 size = atom_table->ucNumEntries * 235 1.1 riastrad sizeof(struct amdgpu_clock_voltage_dependency_entry); 236 1.1 riastrad int i; 237 1.1 riastrad ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry; 238 1.1 riastrad 239 1.1 riastrad amdgpu_table->entries = kzalloc(size, GFP_KERNEL); 240 1.1 riastrad if (!amdgpu_table->entries) 241 1.1 riastrad return -ENOMEM; 242 1.1 riastrad 243 1.1 riastrad entry = &atom_table->entries[0]; 244 1.1 riastrad for (i = 0; i < atom_table->ucNumEntries; i++) { 245 1.1 riastrad amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) | 246 1.1 riastrad (entry->ucClockHigh << 16); 247 1.1 riastrad amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage); 248 1.1 riastrad entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *) 249 1.1 riastrad ((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record)); 250 1.1 riastrad } 251 1.1 riastrad amdgpu_table->count = atom_table->ucNumEntries; 252 1.1 
riastrad 253 1.1 riastrad return 0; 254 1.1 riastrad } 255 1.1 riastrad 256 1.1 riastrad int amdgpu_get_platform_caps(struct amdgpu_device *adev) 257 1.1 riastrad { 258 1.1 riastrad struct amdgpu_mode_info *mode_info = &adev->mode_info; 259 1.1 riastrad union power_info *power_info; 260 1.1 riastrad int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); 261 1.1 riastrad u16 data_offset; 262 1.1 riastrad u8 frev, crev; 263 1.1 riastrad 264 1.1 riastrad if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, 265 1.1 riastrad &frev, &crev, &data_offset)) 266 1.1 riastrad return -EINVAL; 267 1.4 riastrad power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); 268 1.1 riastrad 269 1.1 riastrad adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps); 270 1.1 riastrad adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); 271 1.1 riastrad adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); 272 1.1 riastrad 273 1.1 riastrad return 0; 274 1.1 riastrad } 275 1.1 riastrad 276 1.1 riastrad /* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */ 277 1.1 riastrad #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12 278 1.1 riastrad #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14 279 1.1 riastrad #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16 280 1.1 riastrad #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18 281 1.1 riastrad #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20 282 1.1 riastrad #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22 283 1.1 riastrad #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24 284 1.1 riastrad #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26 285 1.1 riastrad 286 1.1 riastrad int amdgpu_parse_extended_power_table(struct amdgpu_device *adev) 287 1.1 riastrad { 288 1.1 riastrad struct amdgpu_mode_info *mode_info = &adev->mode_info; 289 1.1 riastrad union power_info *power_info; 290 1.1 riastrad union fan_info *fan_info; 291 1.1 riastrad 
ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table; 292 1.1 riastrad int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); 293 1.1 riastrad u16 data_offset; 294 1.1 riastrad u8 frev, crev; 295 1.1 riastrad int ret, i; 296 1.1 riastrad 297 1.1 riastrad if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, 298 1.1 riastrad &frev, &crev, &data_offset)) 299 1.1 riastrad return -EINVAL; 300 1.4 riastrad power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); 301 1.1 riastrad 302 1.1 riastrad /* fan table */ 303 1.1 riastrad if (le16_to_cpu(power_info->pplib.usTableSize) >= 304 1.1 riastrad sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) { 305 1.1 riastrad if (power_info->pplib3.usFanTableOffset) { 306 1.4 riastrad fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset + 307 1.1 riastrad le16_to_cpu(power_info->pplib3.usFanTableOffset)); 308 1.1 riastrad adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst; 309 1.1 riastrad adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin); 310 1.1 riastrad adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed); 311 1.1 riastrad adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh); 312 1.1 riastrad adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin); 313 1.1 riastrad adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed); 314 1.1 riastrad adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh); 315 1.1 riastrad if (fan_info->fan.ucFanTableFormat >= 2) 316 1.1 riastrad adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax); 317 1.1 riastrad else 318 1.1 riastrad adev->pm.dpm.fan.t_max = 10900; 319 1.1 riastrad adev->pm.dpm.fan.cycle_delay = 100000; 320 1.1 riastrad if (fan_info->fan.ucFanTableFormat >= 3) { 321 1.1 riastrad adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode; 322 1.1 riastrad adev->pm.dpm.fan.default_max_fan_pwm = 323 1.1 riastrad le16_to_cpu(fan_info->fan3.usFanPWMMax); 324 1.1 riastrad 
adev->pm.dpm.fan.default_fan_output_sensitivity = 4836; 325 1.1 riastrad adev->pm.dpm.fan.fan_output_sensitivity = 326 1.1 riastrad le16_to_cpu(fan_info->fan3.usFanOutputSensitivity); 327 1.1 riastrad } 328 1.1 riastrad adev->pm.dpm.fan.ucode_fan_control = true; 329 1.1 riastrad } 330 1.1 riastrad } 331 1.1 riastrad 332 1.1 riastrad /* clock dependancy tables, shedding tables */ 333 1.1 riastrad if (le16_to_cpu(power_info->pplib.usTableSize) >= 334 1.1 riastrad sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) { 335 1.1 riastrad if (power_info->pplib4.usVddcDependencyOnSCLKOffset) { 336 1.1 riastrad dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) 337 1.4 riastrad (mode_info->atom_context->bios + data_offset + 338 1.1 riastrad le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset)); 339 1.1 riastrad ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk, 340 1.1 riastrad dep_table); 341 1.1 riastrad if (ret) { 342 1.1 riastrad amdgpu_free_extended_power_table(adev); 343 1.1 riastrad return ret; 344 1.1 riastrad } 345 1.1 riastrad } 346 1.1 riastrad if (power_info->pplib4.usVddciDependencyOnMCLKOffset) { 347 1.1 riastrad dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) 348 1.4 riastrad (mode_info->atom_context->bios + data_offset + 349 1.1 riastrad le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset)); 350 1.1 riastrad ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk, 351 1.1 riastrad dep_table); 352 1.1 riastrad if (ret) { 353 1.1 riastrad amdgpu_free_extended_power_table(adev); 354 1.1 riastrad return ret; 355 1.1 riastrad } 356 1.1 riastrad } 357 1.1 riastrad if (power_info->pplib4.usVddcDependencyOnMCLKOffset) { 358 1.1 riastrad dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) 359 1.4 riastrad (mode_info->atom_context->bios + data_offset + 360 1.1 riastrad le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset)); 361 1.1 riastrad ret = 
amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk, 362 1.1 riastrad dep_table); 363 1.1 riastrad if (ret) { 364 1.1 riastrad amdgpu_free_extended_power_table(adev); 365 1.1 riastrad return ret; 366 1.1 riastrad } 367 1.1 riastrad } 368 1.1 riastrad if (power_info->pplib4.usMvddDependencyOnMCLKOffset) { 369 1.1 riastrad dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) 370 1.4 riastrad (mode_info->atom_context->bios + data_offset + 371 1.1 riastrad le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset)); 372 1.1 riastrad ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk, 373 1.1 riastrad dep_table); 374 1.1 riastrad if (ret) { 375 1.1 riastrad amdgpu_free_extended_power_table(adev); 376 1.1 riastrad return ret; 377 1.1 riastrad } 378 1.1 riastrad } 379 1.1 riastrad if (power_info->pplib4.usMaxClockVoltageOnDCOffset) { 380 1.1 riastrad ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v = 381 1.1 riastrad (ATOM_PPLIB_Clock_Voltage_Limit_Table *) 382 1.4 riastrad (mode_info->atom_context->bios + data_offset + 383 1.1 riastrad le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset)); 384 1.1 riastrad if (clk_v->ucNumEntries) { 385 1.1 riastrad adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk = 386 1.1 riastrad le16_to_cpu(clk_v->entries[0].usSclkLow) | 387 1.1 riastrad (clk_v->entries[0].ucSclkHigh << 16); 388 1.1 riastrad adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk = 389 1.1 riastrad le16_to_cpu(clk_v->entries[0].usMclkLow) | 390 1.1 riastrad (clk_v->entries[0].ucMclkHigh << 16); 391 1.1 riastrad adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc = 392 1.1 riastrad le16_to_cpu(clk_v->entries[0].usVddc); 393 1.1 riastrad adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci = 394 1.1 riastrad le16_to_cpu(clk_v->entries[0].usVddci); 395 1.1 riastrad } 396 1.1 riastrad } 397 1.1 riastrad if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) { 398 1.1 riastrad 
ATOM_PPLIB_PhaseSheddingLimits_Table *psl = 399 1.1 riastrad (ATOM_PPLIB_PhaseSheddingLimits_Table *) 400 1.4 riastrad (mode_info->atom_context->bios + data_offset + 401 1.1 riastrad le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset)); 402 1.1 riastrad ATOM_PPLIB_PhaseSheddingLimits_Record *entry; 403 1.1 riastrad 404 1.1 riastrad adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries = 405 1.6 riastrad kcalloc(psl->ucNumEntries, 406 1.1 riastrad sizeof(struct amdgpu_phase_shedding_limits_entry), 407 1.1 riastrad GFP_KERNEL); 408 1.1 riastrad if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) { 409 1.1 riastrad amdgpu_free_extended_power_table(adev); 410 1.1 riastrad return -ENOMEM; 411 1.1 riastrad } 412 1.1 riastrad 413 1.1 riastrad entry = &psl->entries[0]; 414 1.1 riastrad for (i = 0; i < psl->ucNumEntries; i++) { 415 1.1 riastrad adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk = 416 1.1 riastrad le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16); 417 1.1 riastrad adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk = 418 1.1 riastrad le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16); 419 1.1 riastrad adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage = 420 1.1 riastrad le16_to_cpu(entry->usVoltage); 421 1.1 riastrad entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *) 422 1.1 riastrad ((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record)); 423 1.1 riastrad } 424 1.1 riastrad adev->pm.dpm.dyn_state.phase_shedding_limits_table.count = 425 1.1 riastrad psl->ucNumEntries; 426 1.1 riastrad } 427 1.1 riastrad } 428 1.1 riastrad 429 1.1 riastrad /* cac data */ 430 1.1 riastrad if (le16_to_cpu(power_info->pplib.usTableSize) >= 431 1.1 riastrad sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) { 432 1.1 riastrad adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit); 433 1.1 riastrad adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit); 
434 1.1 riastrad adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit; 435 1.1 riastrad adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit); 436 1.1 riastrad if (adev->pm.dpm.tdp_od_limit) 437 1.1 riastrad adev->pm.dpm.power_control = true; 438 1.1 riastrad else 439 1.1 riastrad adev->pm.dpm.power_control = false; 440 1.1 riastrad adev->pm.dpm.tdp_adjustment = 0; 441 1.1 riastrad adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold); 442 1.1 riastrad adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage); 443 1.1 riastrad adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope); 444 1.1 riastrad if (power_info->pplib5.usCACLeakageTableOffset) { 445 1.1 riastrad ATOM_PPLIB_CAC_Leakage_Table *cac_table = 446 1.1 riastrad (ATOM_PPLIB_CAC_Leakage_Table *) 447 1.4 riastrad (mode_info->atom_context->bios + data_offset + 448 1.1 riastrad le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset)); 449 1.1 riastrad ATOM_PPLIB_CAC_Leakage_Record *entry; 450 1.1 riastrad u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table); 451 1.1 riastrad adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL); 452 1.1 riastrad if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) { 453 1.1 riastrad amdgpu_free_extended_power_table(adev); 454 1.1 riastrad return -ENOMEM; 455 1.1 riastrad } 456 1.1 riastrad entry = &cac_table->entries[0]; 457 1.1 riastrad for (i = 0; i < cac_table->ucNumEntries; i++) { 458 1.1 riastrad if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) { 459 1.1 riastrad adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 = 460 1.1 riastrad le16_to_cpu(entry->usVddc1); 461 1.1 riastrad adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 = 462 1.1 riastrad le16_to_cpu(entry->usVddc2); 463 1.1 riastrad adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 = 464 1.1 riastrad le16_to_cpu(entry->usVddc3); 
465 1.1 riastrad } else { 466 1.1 riastrad adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc = 467 1.1 riastrad le16_to_cpu(entry->usVddc); 468 1.1 riastrad adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage = 469 1.1 riastrad le32_to_cpu(entry->ulLeakageValue); 470 1.1 riastrad } 471 1.1 riastrad entry = (ATOM_PPLIB_CAC_Leakage_Record *) 472 1.1 riastrad ((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record)); 473 1.1 riastrad } 474 1.1 riastrad adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries; 475 1.1 riastrad } 476 1.1 riastrad } 477 1.1 riastrad 478 1.1 riastrad /* ext tables */ 479 1.1 riastrad if (le16_to_cpu(power_info->pplib.usTableSize) >= 480 1.1 riastrad sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) { 481 1.1 riastrad ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *) 482 1.4 riastrad (mode_info->atom_context->bios + data_offset + 483 1.1 riastrad le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset)); 484 1.1 riastrad if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) && 485 1.1 riastrad ext_hdr->usVCETableOffset) { 486 1.1 riastrad VCEClockInfoArray *array = (VCEClockInfoArray *) 487 1.4 riastrad (mode_info->atom_context->bios + data_offset + 488 1.1 riastrad le16_to_cpu(ext_hdr->usVCETableOffset) + 1); 489 1.1 riastrad ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits = 490 1.1 riastrad (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *) 491 1.4 riastrad (mode_info->atom_context->bios + data_offset + 492 1.1 riastrad le16_to_cpu(ext_hdr->usVCETableOffset) + 1 + 493 1.1 riastrad 1 + array->ucNumEntries * sizeof(VCEClockInfo)); 494 1.1 riastrad ATOM_PPLIB_VCE_State_Table *states = 495 1.1 riastrad (ATOM_PPLIB_VCE_State_Table *) 496 1.4 riastrad (mode_info->atom_context->bios + data_offset + 497 1.1 riastrad le16_to_cpu(ext_hdr->usVCETableOffset) + 1 + 498 1.1 riastrad 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) + 499 1.1 riastrad 1 + (limits->numEntries * 
sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record))); 500 1.1 riastrad ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry; 501 1.1 riastrad ATOM_PPLIB_VCE_State_Record *state_entry; 502 1.1 riastrad VCEClockInfo *vce_clk; 503 1.1 riastrad u32 size = limits->numEntries * 504 1.1 riastrad sizeof(struct amdgpu_vce_clock_voltage_dependency_entry); 505 1.1 riastrad adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries = 506 1.1 riastrad kzalloc(size, GFP_KERNEL); 507 1.1 riastrad if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) { 508 1.1 riastrad amdgpu_free_extended_power_table(adev); 509 1.1 riastrad return -ENOMEM; 510 1.1 riastrad } 511 1.1 riastrad adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count = 512 1.1 riastrad limits->numEntries; 513 1.1 riastrad entry = &limits->entries[0]; 514 1.1 riastrad state_entry = &states->entries[0]; 515 1.1 riastrad for (i = 0; i < limits->numEntries; i++) { 516 1.1 riastrad vce_clk = (VCEClockInfo *) 517 1.1 riastrad ((u8 *)&array->entries[0] + 518 1.1 riastrad (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo))); 519 1.1 riastrad adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk = 520 1.1 riastrad le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16); 521 1.1 riastrad adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk = 522 1.1 riastrad le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16); 523 1.1 riastrad adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v = 524 1.1 riastrad le16_to_cpu(entry->usVoltage); 525 1.1 riastrad entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *) 526 1.1 riastrad ((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)); 527 1.1 riastrad } 528 1.6 riastrad adev->pm.dpm.num_of_vce_states = 529 1.6 riastrad states->numEntries > AMD_MAX_VCE_LEVELS ? 
530 1.6 riastrad AMD_MAX_VCE_LEVELS : states->numEntries; 531 1.6 riastrad for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) { 532 1.1 riastrad vce_clk = (VCEClockInfo *) 533 1.1 riastrad ((u8 *)&array->entries[0] + 534 1.1 riastrad (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo))); 535 1.1 riastrad adev->pm.dpm.vce_states[i].evclk = 536 1.1 riastrad le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16); 537 1.1 riastrad adev->pm.dpm.vce_states[i].ecclk = 538 1.1 riastrad le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16); 539 1.1 riastrad adev->pm.dpm.vce_states[i].clk_idx = 540 1.1 riastrad state_entry->ucClockInfoIndex & 0x3f; 541 1.1 riastrad adev->pm.dpm.vce_states[i].pstate = 542 1.1 riastrad (state_entry->ucClockInfoIndex & 0xc0) >> 6; 543 1.1 riastrad state_entry = (ATOM_PPLIB_VCE_State_Record *) 544 1.1 riastrad ((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record)); 545 1.1 riastrad } 546 1.1 riastrad } 547 1.1 riastrad if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) && 548 1.1 riastrad ext_hdr->usUVDTableOffset) { 549 1.1 riastrad UVDClockInfoArray *array = (UVDClockInfoArray *) 550 1.4 riastrad (mode_info->atom_context->bios + data_offset + 551 1.1 riastrad le16_to_cpu(ext_hdr->usUVDTableOffset) + 1); 552 1.1 riastrad ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits = 553 1.1 riastrad (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *) 554 1.4 riastrad (mode_info->atom_context->bios + data_offset + 555 1.1 riastrad le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 + 556 1.1 riastrad 1 + (array->ucNumEntries * sizeof (UVDClockInfo))); 557 1.1 riastrad ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry; 558 1.1 riastrad u32 size = limits->numEntries * 559 1.1 riastrad sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry); 560 1.1 riastrad adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries = 561 1.1 riastrad kzalloc(size, GFP_KERNEL); 562 1.1 riastrad if 
/*
 * (interior of amdgpu_parse_extended_power_table, continued from above)
 * Parses the optional extended-header tables of the ATOM PowerPlay blob:
 * UVD, SAMU, PPM, ACP and PowerTune tables, each guarded by the extended
 * header size check (SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3..V8) and a
 * non-zero table offset.  On any allocation failure the already-parsed
 * tables are released via amdgpu_free_extended_power_table() and -ENOMEM
 * is returned.
 * NOTE(review): the "NNN 1.x riastrad" tokens interleaved below are CVS
 * annotate residue, not code; the code itself is left byte-identical.
 */
(!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) { 563 1.1 riastrad amdgpu_free_extended_power_table(adev); 564 1.1 riastrad return -ENOMEM; 565 1.1 riastrad } 566 1.1 riastrad adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count = 567 1.1 riastrad limits->numEntries; 568 1.1 riastrad entry = &limits->entries[0]; 569 1.1 riastrad for (i = 0; i < limits->numEntries; i++) { 570 1.1 riastrad UVDClockInfo *uvd_clk = (UVDClockInfo *) 571 1.1 riastrad ((u8 *)&array->entries[0] + 572 1.1 riastrad (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo))); 573 1.1 riastrad adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk = 574 1.1 riastrad le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16); 575 1.1 riastrad adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk = 576 1.1 riastrad le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16); 577 1.1 riastrad adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v = 578 1.1 riastrad le16_to_cpu(entry->usVoltage); 579 1.1 riastrad entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *) 580 1.1 riastrad ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record)); 581 1.1 riastrad } 582 1.1 riastrad } 583 1.1 riastrad if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) && 584 1.1 riastrad ext_hdr->usSAMUTableOffset) { 585 1.1 riastrad ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits = 586 1.1 riastrad (ATOM_PPLIB_SAMClk_Voltage_Limit_Table *) 587 1.4 riastrad (mode_info->atom_context->bios + data_offset + 588 1.1 riastrad le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1); 589 1.1 riastrad ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry; 590 1.1 riastrad u32 size = limits->numEntries * 591 1.1 riastrad sizeof(struct amdgpu_clock_voltage_dependency_entry); 592 1.1 riastrad adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries = 593 1.1 riastrad kzalloc(size, GFP_KERNEL); 594 1.1 riastrad if 
/* SAMU clock/voltage dependency table (extended header V4+): each record
 * packs the clock as low u16 + high u8, reassembled below. */
(!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) { 595 1.1 riastrad amdgpu_free_extended_power_table(adev); 596 1.1 riastrad return -ENOMEM; 597 1.1 riastrad } 598 1.1 riastrad adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count = 599 1.1 riastrad limits->numEntries; 600 1.1 riastrad entry = &limits->entries[0]; 601 1.1 riastrad for (i = 0; i < limits->numEntries; i++) { 602 1.1 riastrad adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk = 603 1.1 riastrad le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16); 604 1.1 riastrad adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v = 605 1.1 riastrad le16_to_cpu(entry->usVoltage); 606 1.1 riastrad entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *) 607 1.1 riastrad ((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record)); 608 1.1 riastrad } 609 1.1 riastrad } 610 1.1 riastrad if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) && 611 1.1 riastrad ext_hdr->usPPMTableOffset) { 612 1.1 riastrad ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *) 613 1.4 riastrad (mode_info->atom_context->bios + data_offset + 614 1.1 riastrad le16_to_cpu(ext_hdr->usPPMTableOffset)); 615 1.1 riastrad adev->pm.dpm.dyn_state.ppm_table = 616 1.1 riastrad kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL); 617 1.1 riastrad if (!adev->pm.dpm.dyn_state.ppm_table) { 618 1.1 riastrad amdgpu_free_extended_power_table(adev); 619 1.1 riastrad return -ENOMEM; 620 1.1 riastrad } 621 1.1 riastrad adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign; 622 1.1 riastrad adev->pm.dpm.dyn_state.ppm_table->cpu_core_number = 623 1.1 riastrad le16_to_cpu(ppm->usCpuCoreNumber); 624 1.1 riastrad adev->pm.dpm.dyn_state.ppm_table->platform_tdp = 625 1.1 riastrad le32_to_cpu(ppm->ulPlatformTDP); 626 1.1 riastrad adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp = 627 1.1 riastrad le32_to_cpu(ppm->ulSmallACPlatformTDP); 628 1.1 
/* PPM (platform power management) table (V5+) copy continues, then the
 * ACP clock/voltage dependency table (V6+) is parsed the same way as
 * the SAMU table above. */
riastrad adev->pm.dpm.dyn_state.ppm_table->platform_tdc = 629 1.1 riastrad le32_to_cpu(ppm->ulPlatformTDC); 630 1.1 riastrad adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc = 631 1.1 riastrad le32_to_cpu(ppm->ulSmallACPlatformTDC); 632 1.1 riastrad adev->pm.dpm.dyn_state.ppm_table->apu_tdp = 633 1.1 riastrad le32_to_cpu(ppm->ulApuTDP); 634 1.1 riastrad adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp = 635 1.1 riastrad le32_to_cpu(ppm->ulDGpuTDP); 636 1.1 riastrad adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power = 637 1.1 riastrad le32_to_cpu(ppm->ulDGpuUlvPower); 638 1.1 riastrad adev->pm.dpm.dyn_state.ppm_table->tj_max = 639 1.1 riastrad le32_to_cpu(ppm->ulTjmax); 640 1.1 riastrad } 641 1.1 riastrad if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) && 642 1.1 riastrad ext_hdr->usACPTableOffset) { 643 1.1 riastrad ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits = 644 1.1 riastrad (ATOM_PPLIB_ACPClk_Voltage_Limit_Table *) 645 1.4 riastrad (mode_info->atom_context->bios + data_offset + 646 1.1 riastrad le16_to_cpu(ext_hdr->usACPTableOffset) + 1); 647 1.1 riastrad ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry; 648 1.1 riastrad u32 size = limits->numEntries * 649 1.1 riastrad sizeof(struct amdgpu_clock_voltage_dependency_entry); 650 1.1 riastrad adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries = 651 1.1 riastrad kzalloc(size, GFP_KERNEL); 652 1.1 riastrad if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) { 653 1.1 riastrad amdgpu_free_extended_power_table(adev); 654 1.1 riastrad return -ENOMEM; 655 1.1 riastrad } 656 1.1 riastrad adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count = 657 1.1 riastrad limits->numEntries; 658 1.1 riastrad entry = &limits->entries[0]; 659 1.1 riastrad for (i = 0; i < limits->numEntries; i++) { 660 1.1 riastrad adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk = 661 1.1 riastrad le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 
/* PowerTune table (V7+): first byte at the offset is a revision; rev > 0
 * selects the V1 layout which carries an explicit
 * usMaximumPowerDeliveryLimit, otherwise a default of 255 is used.
 * NOTE(review): usMaximumPowerDeliveryLimit is stored without
 * le16_to_cpu() — this matches the code as written, but looks
 * endian-suspect on big-endian hosts; confirm against upstream. */
16); 662 1.1 riastrad adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v = 663 1.1 riastrad le16_to_cpu(entry->usVoltage); 664 1.1 riastrad entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *) 665 1.1 riastrad ((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record)); 666 1.1 riastrad } 667 1.1 riastrad } 668 1.1 riastrad if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) && 669 1.1 riastrad ext_hdr->usPowerTuneTableOffset) { 670 1.4 riastrad u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset + 671 1.1 riastrad le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); 672 1.1 riastrad ATOM_PowerTune_Table *pt; 673 1.1 riastrad adev->pm.dpm.dyn_state.cac_tdp_table = 674 1.1 riastrad kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL); 675 1.1 riastrad if (!adev->pm.dpm.dyn_state.cac_tdp_table) { 676 1.1 riastrad amdgpu_free_extended_power_table(adev); 677 1.1 riastrad return -ENOMEM; 678 1.1 riastrad } 679 1.1 riastrad if (rev > 0) { 680 1.1 riastrad ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *) 681 1.4 riastrad (mode_info->atom_context->bios + data_offset + 682 1.1 riastrad le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); 683 1.1 riastrad adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 684 1.1 riastrad ppt->usMaximumPowerDeliveryLimit; 685 1.1 riastrad pt = &ppt->power_tune_table; 686 1.1 riastrad } else { 687 1.1 riastrad ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *) 688 1.4 riastrad (mode_info->atom_context->bios + data_offset + 689 1.1 riastrad le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); 690 1.1 riastrad adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255; 691 1.1 riastrad pt = &ppt->power_tune_table; 692 1.1 riastrad } 693 1.1 riastrad adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP); 694 1.1 riastrad adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp = 695 1.1 riastrad 
/* Tail of amdgpu_parse_extended_power_table: remaining CAC/TDP fields
 * from the PowerTune table, the SclkVddgfx dependency table (V8+), and
 * the success return.  A failed amdgpu_parse_clk_voltage_dep_table()
 * frees only its own entries before propagating ret. */
le16_to_cpu(pt->usConfigurableTDP); 696 1.1 riastrad adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC); 697 1.1 riastrad adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit = 698 1.1 riastrad le16_to_cpu(pt->usBatteryPowerLimit); 699 1.1 riastrad adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit = 700 1.1 riastrad le16_to_cpu(pt->usSmallPowerLimit); 701 1.1 riastrad adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage = 702 1.1 riastrad le16_to_cpu(pt->usLowCACLeakage); 703 1.1 riastrad adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage = 704 1.1 riastrad le16_to_cpu(pt->usHighCACLeakage); 705 1.1 riastrad } 706 1.1 riastrad if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) && 707 1.1 riastrad ext_hdr->usSclkVddgfxTableOffset) { 708 1.1 riastrad dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) 709 1.4 riastrad (mode_info->atom_context->bios + data_offset + 710 1.1 riastrad le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset)); 711 1.1 riastrad ret = amdgpu_parse_clk_voltage_dep_table( 712 1.1 riastrad &adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk, 713 1.1 riastrad dep_table); 714 1.1 riastrad if (ret) { 715 1.1 riastrad kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries); 716 1.1 riastrad return ret; 717 1.1 riastrad } 718 1.1 riastrad } 719 1.1 riastrad } 720 1.1 riastrad 721 1.1 riastrad return 0; 722 1.1 riastrad } 723 1.1 riastrad 724 1.1 riastrad void amdgpu_free_extended_power_table(struct amdgpu_device *adev) 725 1.1 riastrad { 726 1.1 riastrad struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state; 727 1.1 riastrad 728 1.1 riastrad kfree(dyn_state->vddc_dependency_on_sclk.entries); 729 1.1 riastrad kfree(dyn_state->vddci_dependency_on_mclk.entries); 730 1.1 riastrad kfree(dyn_state->vddc_dependency_on_mclk.entries); 731 1.1 riastrad kfree(dyn_state->mvdd_dependency_on_mclk.entries); 732 1.1 riastrad kfree(dyn_state->cac_leakage_table.entries); 733 1.1 riastrad 
/* amdgpu_free_extended_power_table() frees every dynamically allocated
 * dyn_state table; kfree(NULL) is a no-op, so it is safe to call on a
 * partially parsed state (the parser above relies on this for its
 * -ENOMEM unwind).  Following it, pp_lib_thermal_controller_names[]
 * maps ATOM thermal-controller type ids (controller->ucType) to
 * printable names, and amdgpu_add_thermal_controller() begins. */
kfree(dyn_state->phase_shedding_limits_table.entries); 734 1.1 riastrad kfree(dyn_state->ppm_table); 735 1.1 riastrad kfree(dyn_state->cac_tdp_table); 736 1.1 riastrad kfree(dyn_state->vce_clock_voltage_dependency_table.entries); 737 1.1 riastrad kfree(dyn_state->uvd_clock_voltage_dependency_table.entries); 738 1.1 riastrad kfree(dyn_state->samu_clock_voltage_dependency_table.entries); 739 1.1 riastrad kfree(dyn_state->acp_clock_voltage_dependency_table.entries); 740 1.1 riastrad kfree(dyn_state->vddgfx_dependency_on_sclk.entries); 741 1.1 riastrad } 742 1.1 riastrad 743 1.1 riastrad static const char *pp_lib_thermal_controller_names[] = { 744 1.1 riastrad "NONE", 745 1.1 riastrad "lm63", 746 1.1 riastrad "adm1032", 747 1.1 riastrad "adm1030", 748 1.1 riastrad "max6649", 749 1.1 riastrad "lm64", 750 1.1 riastrad "f75375", 751 1.1 riastrad "RV6xx", 752 1.1 riastrad "RV770", 753 1.1 riastrad "adt7473", 754 1.1 riastrad "NONE", 755 1.1 riastrad "External GPIO", 756 1.1 riastrad "Evergreen", 757 1.1 riastrad "emc2103", 758 1.1 riastrad "Sumo", 759 1.1 riastrad "Northern Islands", 760 1.1 riastrad "Southern Islands", 761 1.1 riastrad "lm96163", 762 1.1 riastrad "Sea Islands", 763 1.1 riastrad "Kaveri/Kabini", 764 1.1 riastrad }; 765 1.1 riastrad 766 1.1 riastrad void amdgpu_add_thermal_controller(struct amdgpu_device *adev) 767 1.1 riastrad { 768 1.1 riastrad struct amdgpu_mode_info *mode_info = &adev->mode_info; 769 1.1 riastrad ATOM_PPLIB_POWERPLAYTABLE *power_table; 770 1.1 riastrad int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); 771 1.1 riastrad ATOM_PPLIB_THERMALCONTROLLER *controller; 772 1.1 riastrad struct amdgpu_i2c_bus_rec i2c_bus; 773 1.1 riastrad u16 data_offset; 774 1.1 riastrad u8 frev, crev; 775 1.1 riastrad 776 1.1 riastrad if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, 777 1.1 riastrad &frev, &crev, &data_offset)) 778 1.1 riastrad return; 779 1.1 riastrad power_table = (ATOM_PPLIB_POWERPLAYTABLE *) 780 1.4 riastrad 
/* Body of amdgpu_add_thermal_controller(): reads the PowerPlay table's
 * sThermalController record, records fan capabilities (no-fan flag,
 * tach pulses per revolution, min/max RPM), then classifies the
 * controller type into adev->pm.int_thermal_type via a long if/else
 * chain over controller->ucType. */
(mode_info->atom_context->bios + data_offset); 781 1.1 riastrad controller = &power_table->sThermalController; 782 1.1 riastrad 783 1.1 riastrad /* add the i2c bus for thermal/fan chip */ 784 1.1 riastrad if (controller->ucType > 0) { 785 1.1 riastrad if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) 786 1.1 riastrad adev->pm.no_fan = true; 787 1.1 riastrad adev->pm.fan_pulses_per_revolution = 788 1.1 riastrad controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK; 789 1.1 riastrad if (adev->pm.fan_pulses_per_revolution) { 790 1.1 riastrad adev->pm.fan_min_rpm = controller->ucFanMinRPM; 791 1.1 riastrad adev->pm.fan_max_rpm = controller->ucFanMaxRPM; 792 1.1 riastrad } 793 1.1 riastrad if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) { 794 1.1 riastrad DRM_INFO("Internal thermal controller %s fan control\n", 795 1.1 riastrad (controller->ucFanParameters & 796 1.1 riastrad ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); 797 1.1 riastrad adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX; 798 1.1 riastrad } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) { 799 1.1 riastrad DRM_INFO("Internal thermal controller %s fan control\n", 800 1.1 riastrad (controller->ucFanParameters & 801 1.1 riastrad ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); 802 1.1 riastrad adev->pm.int_thermal_type = THERMAL_TYPE_RV770; 803 1.1 riastrad } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) { 804 1.1 riastrad DRM_INFO("Internal thermal controller %s fan control\n", 805 1.1 riastrad (controller->ucFanParameters & 806 1.1 riastrad ATOM_PP_FANPARAMETERS_NOFAN) ? 
/* Per-ASIC internal controllers (Evergreen/Sumo/NI/SI/CI/KV): each arm
 * only differs in the THERMAL_TYPE_* constant it records. */
"without" : "with"); 807 1.1 riastrad adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN; 808 1.1 riastrad } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) { 809 1.1 riastrad DRM_INFO("Internal thermal controller %s fan control\n", 810 1.1 riastrad (controller->ucFanParameters & 811 1.1 riastrad ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); 812 1.1 riastrad adev->pm.int_thermal_type = THERMAL_TYPE_SUMO; 813 1.1 riastrad } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) { 814 1.1 riastrad DRM_INFO("Internal thermal controller %s fan control\n", 815 1.1 riastrad (controller->ucFanParameters & 816 1.1 riastrad ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); 817 1.1 riastrad adev->pm.int_thermal_type = THERMAL_TYPE_NI; 818 1.1 riastrad } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) { 819 1.1 riastrad DRM_INFO("Internal thermal controller %s fan control\n", 820 1.1 riastrad (controller->ucFanParameters & 821 1.1 riastrad ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); 822 1.1 riastrad adev->pm.int_thermal_type = THERMAL_TYPE_SI; 823 1.1 riastrad } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) { 824 1.1 riastrad DRM_INFO("Internal thermal controller %s fan control\n", 825 1.1 riastrad (controller->ucFanParameters & 826 1.1 riastrad ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); 827 1.1 riastrad adev->pm.int_thermal_type = THERMAL_TYPE_CI; 828 1.1 riastrad } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) { 829 1.1 riastrad DRM_INFO("Internal thermal controller %s fan control\n", 830 1.1 riastrad (controller->ucFanParameters & 831 1.1 riastrad ATOM_PP_FANPARAMETERS_NOFAN) ? 
/* External GPIO and hybrid (ADT7473/EMC2103 + internal) controllers. */
"without" : "with"); 832 1.1 riastrad adev->pm.int_thermal_type = THERMAL_TYPE_KV; 833 1.1 riastrad } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) { 834 1.1 riastrad DRM_INFO("External GPIO thermal controller %s fan control\n", 835 1.1 riastrad (controller->ucFanParameters & 836 1.1 riastrad ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); 837 1.1 riastrad adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO; 838 1.1 riastrad } else if (controller->ucType == 839 1.1 riastrad ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) { 840 1.1 riastrad DRM_INFO("ADT7473 with internal thermal controller %s fan control\n", 841 1.1 riastrad (controller->ucFanParameters & 842 1.1 riastrad ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); 843 1.1 riastrad adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL; 844 1.1 riastrad } else if (controller->ucType == 845 1.1 riastrad ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) { 846 1.1 riastrad DRM_INFO("EMC2103 with internal thermal controller %s fan control\n", 847 1.1 riastrad (controller->ucFanParameters & 848 1.1 riastrad ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); 849 1.1 riastrad adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL; 850 1.1 riastrad } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) { 851 1.1 riastrad DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n", 852 1.1 riastrad pp_lib_thermal_controller_names[controller->ucType], 853 1.1 riastrad controller->ucI2cAddress >> 1, 854 1.1 riastrad (controller->ucFanParameters & 855 1.1 riastrad ATOM_PP_FANPARAMETERS_NOFAN) ? 
/* External I2C-attached sensor chips: look up the GPIO-described I2C
 * bus and register an i2c client for the chip (ucI2cAddress is an
 * 8-bit address, hence the >> 1).  NOTE(review): i2c_new_device() is
 * the pre-5.2 kernel API — presumably fine for this baseline; confirm
 * against the kernel version this tree targets.  After the chain,
 * amdgpu_get_pcie_gen_support() begins: it clamps a requested PCIe gen
 * against the system support mask (CAIL_PCIE_LINK_SPEED_SUPPORT_*). */
"without" : "with"); 856 1.1 riastrad adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL; 857 1.1 riastrad i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine); 858 1.1 riastrad adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus); 859 1.1 riastrad if (adev->pm.i2c_bus) { 860 1.1 riastrad struct i2c_board_info info = { }; 861 1.1 riastrad const char *name = pp_lib_thermal_controller_names[controller->ucType]; 862 1.1 riastrad info.addr = controller->ucI2cAddress >> 1; 863 1.1 riastrad strlcpy(info.type, name, sizeof(info.type)); 864 1.1 riastrad i2c_new_device(&adev->pm.i2c_bus->adapter, &info); 865 1.1 riastrad } 866 1.1 riastrad } else { 867 1.1 riastrad DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n", 868 1.1 riastrad controller->ucType, 869 1.1 riastrad controller->ucI2cAddress >> 1, 870 1.1 riastrad (controller->ucFanParameters & 871 1.1 riastrad ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); 872 1.1 riastrad } 873 1.1 riastrad } 874 1.1 riastrad } 875 1.1 riastrad 876 1.1 riastrad enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev, 877 1.1 riastrad u32 sys_mask, 878 1.1 riastrad enum amdgpu_pcie_gen asic_gen, 879 1.1 riastrad enum amdgpu_pcie_gen default_gen) 880 1.1 riastrad { 881 1.1 riastrad switch (asic_gen) { 882 1.1 riastrad case AMDGPU_PCIE_GEN1: 883 1.1 riastrad return AMDGPU_PCIE_GEN1; 884 1.1 riastrad case AMDGPU_PCIE_GEN2: 885 1.1 riastrad return AMDGPU_PCIE_GEN2; 886 1.1 riastrad case AMDGPU_PCIE_GEN3: 887 1.1 riastrad return AMDGPU_PCIE_GEN3; 888 1.1 riastrad default: 889 1.6 riastrad if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) && 890 1.6 riastrad (default_gen == AMDGPU_PCIE_GEN3)) 891 1.1 riastrad return AMDGPU_PCIE_GEN3; 892 1.6 riastrad else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) && 893 1.6 riastrad (default_gen == AMDGPU_PCIE_GEN2)) 894 1.1 riastrad return AMDGPU_PCIE_GEN2; 895 1.1 riastrad else 896 1.1 riastrad return AMDGPU_PCIE_GEN1; 897 1.1 riastrad 
/* Tail of amdgpu_get_pcie_gen_support() (GEN1 fallback), then:
 * - amdgpu_get_vce_clock_state(): bounds-checked lookup into
 *   adev->pm.dpm.vce_states[]; returns NULL for an out-of-range idx.
 * - amdgpu_dpm_get_sclk()/amdgpu_dpm_get_mclk(): on the software-SMU
 *   path they query smu_get_dpm_freq_range() for GFXCLK/UCLK (the min
 *   bound when low, else the max) and scale by 100; otherwise they
 *   defer to the powerplay function table.  A failed SMU query yields
 *   0 rather than an error code. */
} 898 1.1 riastrad return AMDGPU_PCIE_GEN1; 899 1.1 riastrad } 900 1.1 riastrad 901 1.6 riastrad struct amd_vce_state* 902 1.6 riastrad amdgpu_get_vce_clock_state(void *handle, u32 idx) 903 1.1 riastrad { 904 1.6 riastrad struct amdgpu_device *adev = (struct amdgpu_device *)handle; 905 1.6 riastrad 906 1.6 riastrad if (idx < adev->pm.dpm.num_of_vce_states) 907 1.6 riastrad return &adev->pm.dpm.vce_states[idx]; 908 1.6 riastrad 909 1.6 riastrad return NULL; 910 1.6 riastrad } 911 1.6 riastrad 912 1.6 riastrad int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low) 913 1.6 riastrad { 914 1.6 riastrad uint32_t clk_freq; 915 1.6 riastrad int ret = 0; 916 1.6 riastrad if (is_support_sw_smu(adev)) { 917 1.6 riastrad ret = smu_get_dpm_freq_range(&adev->smu, SMU_GFXCLK, 918 1.6 riastrad low ? &clk_freq : NULL, 919 1.6 riastrad !low ? &clk_freq : NULL, 920 1.6 riastrad true); 921 1.6 riastrad if (ret) 922 1.6 riastrad return 0; 923 1.6 riastrad return clk_freq * 100; 924 1.6 riastrad 925 1.6 riastrad } else { 926 1.6 riastrad return (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (low)); 927 1.6 riastrad } 928 1.6 riastrad } 929 1.6 riastrad 930 1.6 riastrad int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low) 931 1.6 riastrad { 932 1.6 riastrad uint32_t clk_freq; 933 1.6 riastrad int ret = 0; 934 1.6 riastrad if (is_support_sw_smu(adev)) { 935 1.6 riastrad ret = smu_get_dpm_freq_range(&adev->smu, SMU_UCLK, 936 1.6 riastrad low ? &clk_freq : NULL, 937 1.6 riastrad !low ? 
/* amdgpu_dpm_set_powergating_by_smu(): routes a power-gate request for
 * an IP block either to the software SMU (smu_dpm_set_power_gate) or to
 * the powerplay set_powergating_by_smu callback.  Only the UVD/VCE arm
 * takes adev->pm.mutex — the long comment below preserves the original
 * deadlock trace that motivated that quick fix. */
&clk_freq : NULL, 938 1.6 riastrad true); 939 1.6 riastrad if (ret) 940 1.6 riastrad return 0; 941 1.6 riastrad return clk_freq * 100; 942 1.6 riastrad 943 1.6 riastrad } else { 944 1.6 riastrad return (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (low)); 945 1.6 riastrad } 946 1.6 riastrad } 947 1.6 riastrad 948 1.6 riastrad int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate) 949 1.6 riastrad { 950 1.6 riastrad int ret = 0; 951 1.6 riastrad bool swsmu = is_support_sw_smu(adev); 952 1.6 riastrad 953 1.6 riastrad switch (block_type) { 954 1.6 riastrad case AMD_IP_BLOCK_TYPE_UVD: 955 1.6 riastrad case AMD_IP_BLOCK_TYPE_VCE: 956 1.6 riastrad if (swsmu) { 957 1.6 riastrad ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate); 958 1.6 riastrad } else if (adev->powerplay.pp_funcs && 959 1.6 riastrad adev->powerplay.pp_funcs->set_powergating_by_smu) { 960 1.6 riastrad /* 961 1.6 riastrad * TODO: need a better lock mechanism 962 1.6 riastrad * 963 1.6 riastrad * Here adev->pm.mutex lock protection is enforced on 964 1.6 riastrad * UVD and VCE cases only. Since for other cases, there 965 1.6 riastrad * may be already lock protection in amdgpu_pm.c. 966 1.6 riastrad * This is a quick fix for the deadlock issue below. 967 1.6 riastrad * NFO: task ocltst:2028 blocked for more than 120 seconds. 968 1.6 riastrad * Tainted: G OE 5.0.0-37-generic #40~18.04.1-Ubuntu 969 1.6 riastrad * echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. 970 1.6 riastrad * cltst D 0 2028 2026 0x00000000 971 1.6 riastrad * all Trace: 972 1.6 riastrad * __schedule+0x2c0/0x870 973 1.6 riastrad * schedule+0x2c/0x70 974 1.6 riastrad * schedule_preempt_disabled+0xe/0x10 975 1.6 riastrad * __mutex_lock.isra.9+0x26d/0x4e0 976 1.6 riastrad * __mutex_lock_slowpath+0x13/0x20 977 1.6 riastrad * ? 
/* GFX/VCN/SDMA and JPEG arms route to the SMU without the mutex;
 * GMC/ACP only ever use the powerplay callback. */
__mutex_lock_slowpath+0x13/0x20 978 1.6 riastrad * mutex_lock+0x2f/0x40 979 1.6 riastrad * amdgpu_dpm_set_powergating_by_smu+0x64/0xe0 [amdgpu] 980 1.6 riastrad * gfx_v8_0_enable_gfx_static_mg_power_gating+0x3c/0x70 [amdgpu] 981 1.6 riastrad * gfx_v8_0_set_powergating_state+0x66/0x260 [amdgpu] 982 1.6 riastrad * amdgpu_device_ip_set_powergating_state+0x62/0xb0 [amdgpu] 983 1.6 riastrad * pp_dpm_force_performance_level+0xe7/0x100 [amdgpu] 984 1.6 riastrad * amdgpu_set_dpm_forced_performance_level+0x129/0x330 [amdgpu] 985 1.6 riastrad */ 986 1.6 riastrad mutex_lock(&adev->pm.mutex); 987 1.6 riastrad ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu( 988 1.6 riastrad (adev)->powerplay.pp_handle, block_type, gate)); 989 1.6 riastrad mutex_unlock(&adev->pm.mutex); 990 1.6 riastrad } 991 1.6 riastrad break; 992 1.6 riastrad case AMD_IP_BLOCK_TYPE_GFX: 993 1.6 riastrad case AMD_IP_BLOCK_TYPE_VCN: 994 1.6 riastrad case AMD_IP_BLOCK_TYPE_SDMA: 995 1.6 riastrad if (swsmu) 996 1.6 riastrad ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate); 997 1.6 riastrad else if (adev->powerplay.pp_funcs && 998 1.6 riastrad adev->powerplay.pp_funcs->set_powergating_by_smu) 999 1.6 riastrad ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu( 1000 1.6 riastrad (adev)->powerplay.pp_handle, block_type, gate)); 1001 1.6 riastrad break; 1002 1.6 riastrad case AMD_IP_BLOCK_TYPE_JPEG: 1003 1.6 riastrad if (swsmu) 1004 1.6 riastrad ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate); 1005 1.6 riastrad break; 1006 1.6 riastrad case AMD_IP_BLOCK_TYPE_GMC: 1007 1.6 riastrad case AMD_IP_BLOCK_TYPE_ACP: 1008 1.6 riastrad if (adev->powerplay.pp_funcs && 1009 1.6 riastrad adev->powerplay.pp_funcs->set_powergating_by_smu) 1010 1.6 riastrad ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu( 1011 1.6 riastrad (adev)->powerplay.pp_handle, block_type, gate)); 1012 1.6 riastrad break; 1013 1.1 riastrad default: 1014 1.6 riastrad break; 1015 1.1 riastrad } 1016 1.6 riastrad 
/* BACO (Bus Active, Chip Off) helpers: amdgpu_dpm_baco_enter()/_exit()
 * either call the software-SMU smu_baco_enter()/smu_baco_exit() or the
 * powerplay set_asic_baco_state callback (1 = enter, 0 = exit),
 * returning -ENOENT when no callback is available.
 * amdgpu_dpm_set_mp1_state() forwards the MP1 (SMU management
 * processor) state the same dual-path way. */
1017 1.6 riastrad return ret; 1018 1.6 riastrad } 1019 1.6 riastrad 1020 1.6 riastrad int amdgpu_dpm_baco_enter(struct amdgpu_device *adev) 1021 1.6 riastrad { 1022 1.6 riastrad const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1023 1.6 riastrad void *pp_handle = adev->powerplay.pp_handle; 1024 1.6 riastrad struct smu_context *smu = &adev->smu; 1025 1.6 riastrad int ret = 0; 1026 1.6 riastrad 1027 1.6 riastrad if (is_support_sw_smu(adev)) { 1028 1.6 riastrad ret = smu_baco_enter(smu); 1029 1.6 riastrad } else { 1030 1.6 riastrad if (!pp_funcs || !pp_funcs->set_asic_baco_state) 1031 1.6 riastrad return -ENOENT; 1032 1.6 riastrad 1033 1.6 riastrad /* enter BACO state */ 1034 1.6 riastrad ret = pp_funcs->set_asic_baco_state(pp_handle, 1); 1035 1.6 riastrad } 1036 1.6 riastrad 1037 1.6 riastrad return ret; 1038 1.6 riastrad } 1039 1.6 riastrad 1040 1.6 riastrad int amdgpu_dpm_baco_exit(struct amdgpu_device *adev) 1041 1.6 riastrad { 1042 1.6 riastrad const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1043 1.6 riastrad void *pp_handle = adev->powerplay.pp_handle; 1044 1.6 riastrad struct smu_context *smu = &adev->smu; 1045 1.6 riastrad int ret = 0; 1046 1.6 riastrad 1047 1.6 riastrad if (is_support_sw_smu(adev)) { 1048 1.6 riastrad ret = smu_baco_exit(smu); 1049 1.6 riastrad } else { 1050 1.6 riastrad if (!pp_funcs || !pp_funcs->set_asic_baco_state) 1051 1.6 riastrad return -ENOENT; 1052 1.6 riastrad 1053 1.6 riastrad /* exit BACO state */ 1054 1.6 riastrad ret = pp_funcs->set_asic_baco_state(pp_handle, 0); 1055 1.6 riastrad } 1056 1.6 riastrad 1057 1.6 riastrad return ret; 1058 1.6 riastrad } 1059 1.6 riastrad 1060 1.6 riastrad int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev, 1061 1.6 riastrad enum pp_mp1_state mp1_state) 1062 1.6 riastrad { 1063 1.6 riastrad int ret = 0; 1064 1.6 riastrad 1065 1.6 riastrad if (is_support_sw_smu(adev)) { 1066 1.6 riastrad ret = smu_set_mp1_state(&adev->smu, mp1_state); 1067 1.6 riastrad } else if 
/* amdgpu_dpm_is_baco_supported(): capability probe; any failure in the
 * powerplay query path is reported as "not supported" (false). */
(adev->powerplay.pp_funcs && 1068 1.6 riastrad adev->powerplay.pp_funcs->set_mp1_state) { 1069 1.6 riastrad ret = adev->powerplay.pp_funcs->set_mp1_state( 1070 1.6 riastrad adev->powerplay.pp_handle, 1071 1.6 riastrad mp1_state); 1072 1.6 riastrad } 1073 1.6 riastrad 1074 1.6 riastrad return ret; 1075 1.6 riastrad } 1076 1.6 riastrad 1077 1.6 riastrad bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev) 1078 1.6 riastrad { 1079 1.6 riastrad const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1080 1.6 riastrad void *pp_handle = adev->powerplay.pp_handle; 1081 1.6 riastrad struct smu_context *smu = &adev->smu; 1082 1.6 riastrad bool baco_cap; 1083 1.6 riastrad 1084 1.6 riastrad if (is_support_sw_smu(adev)) { 1085 1.6 riastrad return smu_baco_is_support(smu); 1086 1.6 riastrad } else { 1087 1.6 riastrad if (!pp_funcs || !pp_funcs->get_asic_baco_capability) 1088 1.6 riastrad return false; 1089 1.6 riastrad 1090 1.6 riastrad if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap)) 1091 1.6 riastrad return false; 1092 1.6 riastrad 1093 1.6 riastrad return baco_cap ? 
/* amdgpu_dpm_mode2_reset(): mode-2 (soft) GPU reset via SMU or the
 * powerplay asic_reset_mode_2 callback.  amdgpu_dpm_baco_reset():
 * full BACO power-cycle reset = enter followed by exit, aborting on
 * the first error. */
true : false; 1094 1.6 riastrad } 1095 1.6 riastrad } 1096 1.6 riastrad 1097 1.6 riastrad int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev) 1098 1.6 riastrad { 1099 1.6 riastrad const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1100 1.6 riastrad void *pp_handle = adev->powerplay.pp_handle; 1101 1.6 riastrad struct smu_context *smu = &adev->smu; 1102 1.6 riastrad 1103 1.6 riastrad if (is_support_sw_smu(adev)) { 1104 1.6 riastrad return smu_mode2_reset(smu); 1105 1.6 riastrad } else { 1106 1.6 riastrad if (!pp_funcs || !pp_funcs->asic_reset_mode_2) 1107 1.6 riastrad return -ENOENT; 1108 1.6 riastrad 1109 1.6 riastrad return pp_funcs->asic_reset_mode_2(pp_handle); 1110 1.6 riastrad } 1111 1.6 riastrad } 1112 1.6 riastrad 1113 1.6 riastrad int amdgpu_dpm_baco_reset(struct amdgpu_device *adev) 1114 1.6 riastrad { 1115 1.6 riastrad const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1116 1.6 riastrad void *pp_handle = adev->powerplay.pp_handle; 1117 1.6 riastrad struct smu_context *smu = &adev->smu; 1118 1.6 riastrad int ret = 0; 1119 1.6 riastrad 1120 1.6 riastrad dev_info(adev->dev, "GPU BACO reset\n"); 1121 1.6 riastrad 1122 1.6 riastrad if (is_support_sw_smu(adev)) { 1123 1.6 riastrad ret = smu_baco_enter(smu); 1124 1.6 riastrad if (ret) 1125 1.6 riastrad return ret; 1126 1.6 riastrad 1127 1.6 riastrad ret = smu_baco_exit(smu); 1128 1.6 riastrad if (ret) 1129 1.6 riastrad return ret; 1130 1.6 riastrad } else { 1131 1.6 riastrad if (!pp_funcs 1132 1.6 riastrad || !pp_funcs->set_asic_baco_state) 1133 1.6 riastrad return -ENOENT; 1134 1.6 riastrad 1135 1.6 riastrad /* enter BACO state */ 1136 1.6 riastrad ret = pp_funcs->set_asic_baco_state(pp_handle, 1); 1137 1.6 riastrad if (ret) 1138 1.6 riastrad return ret; 1139 1.6 riastrad 1140 1.6 riastrad /* exit BACO state */ 1141 1.6 riastrad ret = pp_funcs->set_asic_baco_state(pp_handle, 0); 1142 1.6 riastrad if (ret) 1143 1.6 riastrad return ret; 1144 1.6 riastrad } 1145 1.6 riastrad 1146 1.6 
/* amdgpu_dpm_switch_power_profile() and amdgpu_dpm_set_xgmi_pstate():
 * the same SMU-first / powerplay-fallback dispatch pattern; note the
 * XGMI helper gates on is_support_sw_smu_xgmi() rather than the plain
 * is_support_sw_smu() check used elsewhere. */
riastrad return 0; 1147 1.6 riastrad } 1148 1.6 riastrad 1149 1.6 riastrad int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev, 1150 1.6 riastrad enum PP_SMC_POWER_PROFILE type, 1151 1.6 riastrad bool en) 1152 1.6 riastrad { 1153 1.6 riastrad int ret = 0; 1154 1.6 riastrad 1155 1.6 riastrad if (is_support_sw_smu(adev)) 1156 1.6 riastrad ret = smu_switch_power_profile(&adev->smu, type, en); 1157 1.6 riastrad else if (adev->powerplay.pp_funcs && 1158 1.6 riastrad adev->powerplay.pp_funcs->switch_power_profile) 1159 1.6 riastrad ret = adev->powerplay.pp_funcs->switch_power_profile( 1160 1.6 riastrad adev->powerplay.pp_handle, type, en); 1161 1.6 riastrad 1162 1.6 riastrad return ret; 1163 1.1 riastrad } 1164 1.1 riastrad 1165 1.6 riastrad int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev, 1166 1.6 riastrad uint32_t pstate) 1167 1.1 riastrad { 1168 1.6 riastrad int ret = 0; 1169 1.1 riastrad 1170 1.6 riastrad if (is_support_sw_smu_xgmi(adev)) 1171 1.6 riastrad ret = smu_set_xgmi_pstate(&adev->smu, pstate); 1172 1.6 riastrad else if (adev->powerplay.pp_funcs && 1173 1.6 riastrad adev->powerplay.pp_funcs->set_xgmi_pstate) 1174 1.6 riastrad ret = adev->powerplay.pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle, 1175 1.6 riastrad pstate); 1176 1.1 riastrad 1177 1.6 riastrad return ret; 1178 1.1 riastrad } 1179