/*	$NetBSD: radeon_kv_dpm.c,v 1.2 2021/12/18 23:45:43 riastradh Exp $	*/

/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: radeon_kv_dpm.c,v 1.2 2021/12/18 23:45:43 riastradh Exp $");

#include <linux/pci.h>
#include <linux/seq_file.h>

#include "cikd.h"
#include "kv_dpm.h"
#include "r600_dpm.h"
#include "radeon.h"
#include "radeon_asic.h"

#define KV_MAX_DEEPSLEEP_DIVIDER_ID	5
#define KV_MINIMUM_ENGINE_CLOCK		800
#define SMC_RAM_END			0x40000

static int kv_enable_nb_dpm(struct radeon_device *rdev,
			    bool enable);
static void kv_init_graphics_levels(struct radeon_device *rdev);
static int kv_calculate_ds_divider(struct radeon_device *rdev);
static int kv_calculate_nbps_level_settings(struct radeon_device *rdev);
static int kv_calculate_dpm_settings(struct radeon_device *rdev);
static void kv_enable_new_levels(struct radeon_device *rdev);
static void kv_program_nbps_index_settings(struct radeon_device *rdev,
					   struct radeon_ps *new_rps);
static int kv_set_enabled_level(struct radeon_device *rdev, u32 level);
static int kv_set_enabled_levels(struct radeon_device *rdev);
static int kv_force_dpm_highest(struct radeon_device *rdev);
static int kv_force_dpm_lowest(struct radeon_device *rdev);
static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
					struct radeon_ps *new_rps,
					struct radeon_ps *old_rps);
static int kv_set_thermal_temperature_range(struct radeon_device *rdev,
					    int min_temp, int max_temp);
static int kv_init_fps_limits(struct radeon_device *rdev);

void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate);
static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate);
static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate);

extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
extern void cik_update_cg(struct radeon_device *rdev,
			  u32 block, bool enable);

static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 1, 4, 1 },
	{ 2, 5, 1 },
	{ 3, 4, 2 },
	{ 4, 1, 1 },
	{ 5, 5, 2 },
	{ 6, 6, 1 },
	{ 7, 9, 2 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 1, 4, 1 },
	{ 2, 5, 1 },
	{ 3, 4, 1 },
	{ 4, 1, 1 },
	{ 5, 5, 1 },
	{ 6, 6, 1 },
	{ 7, 9, 1 },
	{ 8, 4, 1 },
	{ 9, 2, 1 },
	{ 10, 3, 1 },
	{ 11, 6, 1 },
	{ 12, 8, 2 },
	{ 13, 1, 1 },
	{ 14, 2, 1 },
	{ 15, 3, 1 },
	{ 16, 1, 1 },
	{ 17, 4, 1 },
	{ 18, 3, 1 },
	{ 19, 1, 1 },
	{ 20, 8, 1 },
	{ 21, 5, 1 },
	{ 22, 1, 1 },
	{ 23, 1, 1 },
	{ 24, 4, 1 },
	{ 27, 6, 1 },
	{ 28, 1, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_reg sx0_cac_config_reg[] =
{
	{ 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc0_cac_config_reg[] =
{
	{ 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc1_cac_config_reg[] =
{
	{ 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc2_cac_config_reg[] =
{
	{ 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc3_cac_config_reg[] =
{
	{ 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg cpl_cac_config_reg[] =
{
	{ 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};
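
/*
 * DIDT (di/dt, current-slew throttling) register defaults, apparently
 * for the SQ, DB, TD and TCP blocks (cf. kv_do_enable_didt() below);
 * written through the DIDT indirect register space by
 * kv_program_pt_config_registers() when DIDT is enabled.
 */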
static const struct kv_pt_config_reg didt_config_kv[] =
{
	{ 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }
};

static struct kv_ps *kv_get_ps(struct radeon_ps *rps)
{
	struct kv_ps *ps = rps->ps_priv;

	return ps;
}

static struct kv_power_info *kv_get_pi(struct radeon_device *rdev)
{
	struct kv_power_info *pi = rdev->pm.dpm.priv;

	return pi;
}

#if 0
static void kv_program_local_cac_table(struct radeon_device *rdev,
				       const struct kv_lcac_config_values *local_cac_table,
				       const struct kv_lcac_config_reg *local_cac_reg)
{
	u32 i, count, data;
	const struct kv_lcac_config_values *values = local_cac_table;

	while (values->block_id != 0xffffffff) {
		count = values->signal_id;
		for (i = 0; i < count; i++) {
			data = ((values->block_id << local_cac_reg->block_shift) &
				local_cac_reg->block_mask);
			data |= ((i << local_cac_reg->signal_shift) &
				 local_cac_reg->signal_mask);
			data |= ((values->t << local_cac_reg->t_shift) &
				 local_cac_reg->t_mask);
			data |= ((1 << local_cac_reg->enable_shift) &
				 local_cac_reg->enable_mask);
			WREG32_SMC(local_cac_reg->cntl, data);
		}
		values++;
	}
}
#endif

static int kv_program_pt_config_registers(struct radeon_device *rdev,
					  const struct kv_pt_config_reg *cac_config_regs)
{
	const struct kv_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == KV_CONFIGREG_CACHE) {
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case KV_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case KV_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset << 2);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;
			cache = 0;

			switch (config_regs->type) {
			case KV_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case KV_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset << 2, data);
				break;
			}
		}
		config_regs++;
	}

	return 0;
}

static void kv_do_enable_didt(struct radeon_device *rdev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 data;

	if (pi->caps_sq_ramping) {
		data = RREG32_DIDT(DIDT_SQ_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_SQ_CTRL0, data);
	}

	if (pi->caps_db_ramping) {
		data = RREG32_DIDT(DIDT_DB_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_DB_CTRL0, data);
	}

	if (pi->caps_td_ramping) {
		data = RREG32_DIDT(DIDT_TD_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TD_CTRL0, data);
	}

	if (pi->caps_tcp_ramping) {
		data = RREG32_DIDT(DIDT_TCP_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TCP_CTRL0, data);
	}
}

static int kv_enable_didt(struct radeon_device *rdev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	if (pi->caps_sq_ramping ||
	    pi->caps_db_ramping ||
	    pi->caps_td_ramping ||
	    pi->caps_tcp_ramping) {
		cik_enter_rlc_safe_mode(rdev);

		if (enable) {
			ret = kv_program_pt_config_registers(rdev, didt_config_kv);
			if (ret) {
				cik_exit_rlc_safe_mode(rdev);
				return ret;
			}
		}

		kv_do_enable_didt(rdev, enable);

		cik_exit_rlc_safe_mode(rdev);
	}

	return 0;
}

#if 0
static void kv_initialize_hardware_cac_manager(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->caps_cac) {
		WREG32_SMC(LCAC_SX0_OVR_SEL, 0);
		WREG32_SMC(LCAC_SX0_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, sx_local_cac_cfg_kv, sx0_cac_config_reg);

		WREG32_SMC(LCAC_MC0_OVR_SEL, 0);
		WREG32_SMC(LCAC_MC0_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, mc0_local_cac_cfg_kv, mc0_cac_config_reg);

		WREG32_SMC(LCAC_MC1_OVR_SEL, 0);
		WREG32_SMC(LCAC_MC1_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, mc1_local_cac_cfg_kv, mc1_cac_config_reg);

		WREG32_SMC(LCAC_MC2_OVR_SEL, 0);
		WREG32_SMC(LCAC_MC2_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, mc2_local_cac_cfg_kv, mc2_cac_config_reg);

		WREG32_SMC(LCAC_MC3_OVR_SEL, 0);
		WREG32_SMC(LCAC_MC3_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, mc3_local_cac_cfg_kv, mc3_cac_config_reg);

		WREG32_SMC(LCAC_CPL_OVR_SEL, 0);
		WREG32_SMC(LCAC_CPL_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, cpl_local_cac_cfg_kv, cpl_cac_config_reg);
	}
}
#endif

static int kv_enable_smc_cac(struct radeon_device *rdev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret = 0;

	if (pi->caps_cac) {
		if (enable) {
			ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_EnableCac);
			if (ret)
				pi->cac_enabled = false;
			else
				pi->cac_enabled = true;
		} else if (pi->cac_enabled) {
			kv_notify_message_to_smu(rdev, PPSMC_MSG_DisableCac);
			pi->cac_enabled = false;
		}
	}

	return ret;
}

static int kv_process_firmware_header(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 tmp;
	int ret;

	ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, DpmTable),
				     &tmp, pi->sram_end);

	if (ret == 0)
		pi->dpm_table_start = tmp;

	ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, SoftRegisters),
				     &tmp, pi->sram_end);

	if (ret == 0)
		pi->soft_regs_start = tmp;

	return ret;
}

static int kv_enable_dpm_voltage_scaling(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	pi->graphics_voltage_change_enable = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable),
				   &pi->graphics_voltage_change_enable,
				   sizeof(u8), pi->sram_end);

	return ret;
}

static int kv_set_dpm_interval(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	pi->graphics_interval = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsInterval),
				   &pi->graphics_interval,
				   sizeof(u8), pi->sram_end);

	return ret;
}

static int kv_set_dpm_boot_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel),
				   &pi->graphics_boot_level,
				   sizeof(u8), pi->sram_end);

	return ret;
}

static void kv_program_vc(struct radeon_device *rdev)
{
	WREG32_SMC(CG_FTV_0, 0x3FFFC100);
}

static void kv_clear_vc(struct radeon_device *rdev)
{
	WREG32_SMC(CG_FTV_0, 0);
}
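
/*
 * Program the engine-clock post divider for graphics DPM level 'index'.
 * The SMU firmware expects big-endian values, hence the cpu_to_be32().
 */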
static int kv_set_divider_value(struct radeon_device *rdev,
				u32 index, u32 sclk)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct atom_clock_dividers dividers;
	int ret;

	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
					     sclk, false, &dividers);
	if (ret)
		return ret;

	pi->graphics_level[index].SclkDid = (u8)dividers.post_div;
	pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk);

	return 0;
}

static u32 kv_convert_vid2_to_vid7(struct radeon_device *rdev,
				   struct sumo_vid_mapping_table *vid_mapping_table,
				   u32 vid_2bit)
{
	struct radeon_clock_voltage_dependency_table *vddc_sclk_table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 i;

	if (vddc_sclk_table && vddc_sclk_table->count) {
		if (vid_2bit < vddc_sclk_table->count)
			return vddc_sclk_table->entries[vid_2bit].v;
		else
			return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v;
	} else {
		for (i = 0; i < vid_mapping_table->num_entries; i++) {
			if (vid_mapping_table->entries[i].vid_2bit == vid_2bit)
				return vid_mapping_table->entries[i].vid_7bit;
		}
		return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
	}
}

static u32 kv_convert_vid7_to_vid2(struct radeon_device *rdev,
				   struct sumo_vid_mapping_table *vid_mapping_table,
				   u32 vid_7bit)
{
	struct radeon_clock_voltage_dependency_table *vddc_sclk_table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 i;

	if (vddc_sclk_table && vddc_sclk_table->count) {
		for (i = 0; i < vddc_sclk_table->count; i++) {
			if (vddc_sclk_table->entries[i].v == vid_7bit)
				return i;
		}
		return vddc_sclk_table->count - 1;
	} else {
		for (i = 0; i < vid_mapping_table->num_entries; i++) {
			if (vid_mapping_table->entries[i].vid_7bit == vid_7bit)
				return vid_mapping_table->entries[i].vid_2bit;
		}

		return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
	}
}

static u16 kv_convert_8bit_index_to_voltage(struct radeon_device *rdev,
					    u16 voltage)
{
	return 6200 - (voltage * 25);
}

static u16 kv_convert_2bit_index_to_voltage(struct radeon_device *rdev,
					    u32 vid_2bit)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 vid_8bit = kv_convert_vid2_to_vid7(rdev,
					       &pi->sys_info.vid_mapping_table,
					       vid_2bit);

	return kv_convert_8bit_index_to_voltage(rdev, (u16)vid_8bit);
}


static int kv_set_vid(struct radeon_device *rdev, u32 index, u32 vid)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t;
	pi->graphics_level[index].MinVddNb =
		cpu_to_be32(kv_convert_2bit_index_to_voltage(rdev, vid));

	return 0;
}

static int kv_set_at(struct radeon_device *rdev, u32 index, u32 at)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->graphics_level[index].AT = cpu_to_be16((u16)at);

	return 0;
}

static void kv_dpm_power_level_enable(struct radeon_device *rdev,
				      u32 index, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0;
}

static void kv_start_dpm(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);

	tmp |= GLOBAL_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);

	kv_smc_dpm_enable(rdev, true);
}

static void kv_stop_dpm(struct radeon_device *rdev)
{
	kv_smc_dpm_enable(rdev, false);
}

static void kv_start_am(struct radeon_device *rdev)
{
	u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL);

	sclk_pwrmgt_cntl &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
	sclk_pwrmgt_cntl |= DYNAMIC_PM_EN;

	WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
}

static void kv_reset_am(struct radeon_device *rdev)
{
	u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL);

	sclk_pwrmgt_cntl |= (RESET_SCLK_CNT | RESET_BUSY_CNT);

	WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
}

static int kv_freeze_sclk_dpm(struct radeon_device *rdev, bool freeze)
{
	return kv_notify_message_to_smu(rdev, freeze ?
					PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel);
}

static int kv_force_lowest_valid(struct radeon_device *rdev)
{
	return kv_force_dpm_lowest(rdev);
}

static int kv_unforce_levels(struct radeon_device *rdev)
{
	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
		return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
	else
		return kv_set_enabled_levels(rdev);
}

static int kv_update_sclk_t(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 low_sclk_interrupt_t = 0;
	int ret = 0;

	if (pi->caps_sclk_throttle_low_notification) {
		low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT),
					   (u8 *)&low_sclk_interrupt_t,
					   sizeof(u32), pi->sram_end);
	}
	return ret;
}

static int kv_program_bootup_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].clk == pi->boot_pl.sclk)
				break;
		}

		pi->graphics_boot_level = (u8)i;
		kv_dpm_power_level_enable(rdev, i, true);
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		if (table->num_max_dpm_entries == 0)
			return -EINVAL;

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].sclk_frequency == pi->boot_pl.sclk)
				break;
		}

		pi->graphics_boot_level = (u8)i;
		kv_dpm_power_level_enable(rdev, i, true);
	}
	return 0;
}

static int kv_enable_auto_thermal_throttling(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	pi->graphics_therm_throttle_enable = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable),
				   &pi->graphics_therm_throttle_enable,
				   sizeof(u8), pi->sram_end);

	return ret;
}
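
/* Copy the whole graphics level array and its level count into SMU SRAM. */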
static int kv_upload_dpm_settings(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsLevel),
				   (u8 *)&pi->graphics_level,
				   sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS,
				   pi->sram_end);

	if (ret)
		return ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount),
				   &pi->graphics_dpm_level_count,
				   sizeof(u8), pi->sram_end);

	return ret;
}

static u32 kv_get_clock_difference(u32 a, u32 b)
{
	return (a >= b) ? a - b : b - a;
}

static u32 kv_get_clk_bypass(struct radeon_device *rdev, u32 clk)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 value;

	if (pi->caps_enable_dfs_bypass) {
		if (kv_get_clock_difference(clk, 40000) < 200)
			value = 3;
		else if (kv_get_clock_difference(clk, 30000) < 200)
			value = 2;
		else if (kv_get_clock_difference(clk, 20000) < 200)
			value = 7;
		else if (kv_get_clock_difference(clk, 15000) < 200)
			value = 6;
		else if (kv_get_clock_difference(clk, 10000) < 200)
			value = 8;
		else
			value = 0;
	} else {
		value = 0;
	}

	return value;
}

static int kv_populate_uvd_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_uvd_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->uvd_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    (pi->high_voltage_t < table->entries[i].v))
			break;

		pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk);
		pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk);
		pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v);

		pi->uvd_level[i].VClkBypassCntl =
			(u8)kv_get_clk_bypass(rdev, table->entries[i].vclk);
		pi->uvd_level[i].DClkBypassCntl =
			(u8)kv_get_clk_bypass(rdev, table->entries[i].dclk);

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
						     table->entries[i].vclk, false, &dividers);
		if (ret)
			return ret;
		pi->uvd_level[i].VclkDivider = (u8)dividers.post_div;

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
						     table->entries[i].dclk, false, &dividers);
		if (ret)
			return ret;
		pi->uvd_level[i].DclkDivider = (u8)dividers.post_div;

		pi->uvd_level_count++;
	}

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, UvdLevelCount),
				   (u8 *)&pi->uvd_level_count,
				   sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	pi->uvd_interval = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, UVDInterval),
				   &pi->uvd_interval,
				   sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, UvdLevel),
				   (u8 *)&pi->uvd_level,
				   sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD,
				   pi->sram_end);

	return ret;

}

static int kv_populate_vce_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;
	u32 i;
	struct radeon_vce_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;

	if (table == NULL || table->count == 0)
		return 0;

	pi->vce_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    pi->high_voltage_t < table->entries[i].v)
			break;

		pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk);
		pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		pi->vce_level[i].ClkBypassCntl =
			(u8)kv_get_clk_bypass(rdev, table->entries[i].evclk);

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
						     table->entries[i].evclk, false, &dividers);
		if (ret)
			return ret;
		pi->vce_level[i].Divider = (u8)dividers.post_div;

		pi->vce_level_count++;
	}

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, VceLevelCount),
				   (u8 *)&pi->vce_level_count,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	pi->vce_interval = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, VCEInterval),
				   (u8 *)&pi->vce_interval,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, VceLevel),
				   (u8 *)&pi->vce_level,
				   sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE,
				   pi->sram_end);

	return ret;
}

static int kv_populate_samu_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->samu_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    pi->high_voltage_t < table->entries[i].v)
			break;

		pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
		pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		pi->samu_level[i].ClkBypassCntl =
			(u8)kv_get_clk_bypass(rdev, table->entries[i].clk);

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
						     table->entries[i].clk, false, &dividers);
		if (ret)
			return ret;
		pi->samu_level[i].Divider = (u8)dividers.post_div;

		pi->samu_level_count++;
	}

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, SamuLevelCount),
				   (u8 *)&pi->samu_level_count,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	pi->samu_interval = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, SAMUInterval),
				   (u8 *)&pi->samu_interval,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, SamuLevel),
				   (u8 *)&pi->samu_level,
				   sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU,
				   pi->sram_end);
	if (ret)
		return ret;

	return ret;
}


static int kv_populate_acp_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;
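
	/* Build the ACP levels; unlike UVD/VCE/SAMU there is no high_voltage_t cutoff here. */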
	pi->acp_level_count = 0;
	for (i = 0; i < table->count; i++) {
		pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
		pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
						     table->entries[i].clk, false, &dividers);
		if (ret)
			return ret;
		pi->acp_level[i].Divider = (u8)dividers.post_div;

		pi->acp_level_count++;
	}

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, AcpLevelCount),
				   (u8 *)&pi->acp_level_count,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	pi->acp_interval = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, ACPInterval),
				   (u8 *)&pi->acp_interval,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, AcpLevel),
				   (u8 *)&pi->acp_level,
				   sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP,
				   pi->sram_end);
	if (ret)
		return ret;

	return ret;
}

static void kv_calculate_dfs_bypass_settings(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if (pi->caps_enable_dfs_bypass) {
				if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 3;
				else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 2;
				else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200)
					pi->graphics_level[i].ClkBypassCntl = 7;
				else if (kv_get_clock_difference(table->entries[i].clk, 20000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 6;
				else if (kv_get_clock_difference(table->entries[i].clk, 10000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 8;
				else
					pi->graphics_level[i].ClkBypassCntl = 0;
			} else {
				pi->graphics_level[i].ClkBypassCntl = 0;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if (pi->caps_enable_dfs_bypass) {
				if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 3;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 2;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200)
					pi->graphics_level[i].ClkBypassCntl = 7;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 6;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 8;
				else
					pi->graphics_level[i].ClkBypassCntl = 0;
			} else {
				pi->graphics_level[i].ClkBypassCntl = 0;
			}
		}
	}
}
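
/* ULV (ultra-low voltage) is toggled with a single SMU message. */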
static int kv_enable_ulv(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
					PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
}

static void kv_reset_acp_boot_level(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->acp_boot_level = 0xff;
}

static void kv_update_current_ps(struct radeon_device *rdev,
				 struct radeon_ps *rps)
{
	struct kv_ps *new_ps = kv_get_ps(rps);
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->current_rps = *rps;
	pi->current_ps = *new_ps;
	pi->current_rps.ps_priv = &pi->current_ps;
}

static void kv_update_requested_ps(struct radeon_device *rdev,
				   struct radeon_ps *rps)
{
	struct kv_ps *new_ps = kv_get_ps(rps);
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->requested_rps = *rps;
	pi->requested_ps = *new_ps;
	pi->requested_rps.ps_priv = &pi->requested_ps;
}

void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	if (pi->bapm_enable) {
		ret = kv_smc_bapm_enable(rdev, enable);
		if (ret)
			DRM_ERROR("kv_smc_bapm_enable failed\n");
	}
}

static void kv_enable_thermal_int(struct radeon_device *rdev, bool enable)
{
	u32 thermal_int;

	thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL);
	if (enable)
		thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK;
	else
		thermal_int &= ~(THERM_INTH_MASK | THERM_INTL_MASK);
	WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int);

}

int kv_dpm_enable(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	ret = kv_process_firmware_header(rdev);
	if (ret) {
		DRM_ERROR("kv_process_firmware_header failed\n");
		return ret;
	}
	kv_init_fps_limits(rdev);
	kv_init_graphics_levels(rdev);
	ret = kv_program_bootup_state(rdev);
	if (ret) {
		DRM_ERROR("kv_program_bootup_state failed\n");
		return ret;
	}
	kv_calculate_dfs_bypass_settings(rdev);
	ret = kv_upload_dpm_settings(rdev);
	if (ret) {
		DRM_ERROR("kv_upload_dpm_settings failed\n");
		return ret;
	}
	ret = kv_populate_uvd_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_uvd_table failed\n");
		return ret;
	}
	ret = kv_populate_vce_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_vce_table failed\n");
		return ret;
	}
	ret = kv_populate_samu_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_samu_table failed\n");
		return ret;
	}
	ret = kv_populate_acp_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_acp_table failed\n");
		return ret;
	}
	kv_program_vc(rdev);
#if 0
	kv_initialize_hardware_cac_manager(rdev);
#endif
	kv_start_am(rdev);
	if (pi->enable_auto_thermal_throttling) {
		ret = kv_enable_auto_thermal_throttling(rdev);
		if (ret) {
			DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
			return ret;
		}
	}
	ret = kv_enable_dpm_voltage_scaling(rdev);
	if (ret) {
		DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
		return ret;
	}
	ret = kv_set_dpm_interval(rdev);
	if (ret) {
		DRM_ERROR("kv_set_dpm_interval failed\n");
		return ret;
	}
	ret = kv_set_dpm_boot_state(rdev);
	if (ret) {
		DRM_ERROR("kv_set_dpm_boot_state failed\n");
		return ret;
	}
	ret = kv_enable_ulv(rdev, true);
	if (ret) {
		DRM_ERROR("kv_enable_ulv failed\n");
		return ret;
	}
	kv_start_dpm(rdev);
	ret = kv_enable_didt(rdev, true);
	if (ret) {
		DRM_ERROR("kv_enable_didt failed\n");
		return ret;
	}
	ret = kv_enable_smc_cac(rdev, true);
	if (ret) {
		DRM_ERROR("kv_enable_smc_cac failed\n");
		return ret;
	}

	kv_reset_acp_boot_level(rdev);

	ret = kv_smc_bapm_enable(rdev, false);
	if (ret) {
		DRM_ERROR("kv_smc_bapm_enable failed\n");
		return ret;
	}

	kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);

	return ret;
}

int kv_dpm_late_enable(struct radeon_device *rdev)
{
	int ret = 0;

	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
		if (ret) {
			DRM_ERROR("kv_set_thermal_temperature_range failed\n");
			return ret;
		}
		kv_enable_thermal_int(rdev, true);
	}

	/* powerdown unused blocks for now */
	kv_dpm_powergate_acp(rdev, true);
	kv_dpm_powergate_samu(rdev, true);
	kv_dpm_powergate_vce(rdev, true);
	kv_dpm_powergate_uvd(rdev, true);

	return ret;
}

void kv_dpm_disable(struct radeon_device *rdev)
{
	kv_smc_bapm_enable(rdev, false);

	if (rdev->family == CHIP_MULLINS)
		kv_enable_nb_dpm(rdev, false);

	/* powerup blocks */
	kv_dpm_powergate_acp(rdev, false);
	kv_dpm_powergate_samu(rdev, false);
	kv_dpm_powergate_vce(rdev, false);
	kv_dpm_powergate_uvd(rdev, false);

	kv_enable_smc_cac(rdev, false);
	kv_enable_didt(rdev, false);
	kv_clear_vc(rdev);
	kv_stop_dpm(rdev);
	kv_enable_ulv(rdev, false);
	kv_reset_am(rdev);
	kv_enable_thermal_int(rdev, false);

	kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
}

#if 0
static int kv_write_smc_soft_register(struct radeon_device *rdev,
				      u16 reg_offset, u32 value)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	return kv_copy_bytes_to_smc(rdev, pi->soft_regs_start + reg_offset,
				    (u8 *)&value, sizeof(u16), pi->sram_end);
}

static int kv_read_smc_soft_register(struct radeon_device *rdev,
				     u16 reg_offset, u32 *value)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	return kv_read_smc_sram_dword(rdev, pi->soft_regs_start + reg_offset,
				      value, pi->sram_end);
}
#endif

static void kv_init_sclk_t(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->low_sclk_interrupt_t = 0;
}

static int kv_init_fps_limits(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret = 0;

	if (pi->caps_fps) {
		u16 tmp;

		tmp = 45;
		pi->fps_high_t = cpu_to_be16(tmp);
		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, FpsHighT),
					   (u8 *)&pi->fps_high_t,
					   sizeof(u16), pi->sram_end);

		tmp = 30;
		pi->fps_low_t = cpu_to_be16(tmp);

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, FpsLowT),
					   (u8 *)&pi->fps_low_t,
					   sizeof(u16), pi->sram_end);

	}
	return ret;
}
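
/*
 * Assume every engine block starts un-gated; kv_dpm_late_enable()
 * powers the unused ones down afterwards.
 */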
static void kv_init_powergate_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->uvd_power_gated = false;
	pi->vce_power_gated = false;
	pi->samu_power_gated = false;
	pi->acp_power_gated = false;

}

static int kv_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
					PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable);
}

static int kv_enable_vce_dpm(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
					PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable);
}

static int kv_enable_samu_dpm(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
					PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable);
}

static int kv_enable_acp_dpm(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
					PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable);
}

static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_uvd_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	int ret;
	u32 mask;

	if (!gate) {
		if (table->count)
			pi->uvd_boot_level = table->count - 1;
		else
			pi->uvd_boot_level = 0;

		if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) {
			mask = 1 << pi->uvd_boot_level;
		} else {
			mask = 0x1f;
		}

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, UvdBootLevel),
					   (uint8_t *)&pi->uvd_boot_level,
					   sizeof(u8), pi->sram_end);
		if (ret)
			return ret;

		kv_send_msg_to_smc_with_parameter(rdev,
						  PPSMC_MSG_UVDDPM_SetEnabledMask,
						  mask);
	}

	return kv_enable_uvd_dpm(rdev, !gate);
}

static u8 kv_get_vce_boot_level(struct radeon_device *rdev, u32 evclk)
{
	u8 i;
	struct radeon_vce_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;

	for (i = 0; i < table->count; i++) {
		if (table->entries[i].evclk >= evclk)
			break;
	}

	return i;
}

static int kv_update_vce_dpm(struct radeon_device *rdev,
			     struct radeon_ps *radeon_new_state,
			     struct radeon_ps *radeon_current_state)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_vce_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	int ret;

	if (radeon_new_state->evclk > 0 && radeon_current_state->evclk == 0) {
		kv_dpm_powergate_vce(rdev, false);
		/* turn the clocks on when encoding */
		cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false);
		if (pi->caps_stable_p_state)
			pi->vce_boot_level = table->count - 1;
		else
			pi->vce_boot_level = kv_get_vce_boot_level(rdev, radeon_new_state->evclk);

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, VceBootLevel),
					   (u8 *)&pi->vce_boot_level,
					   sizeof(u8),
					   pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			kv_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_VCEDPM_SetEnabledMask,
							  (1 << pi->vce_boot_level));

		kv_enable_vce_dpm(rdev, true);
	} else if (radeon_new_state->evclk == 0 && radeon_current_state->evclk > 0) {
		kv_enable_vce_dpm(rdev, false);
		/* turn the clocks off when not encoding */
		cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true);
		kv_dpm_powergate_vce(rdev, true);
	}

	return 0;
}

static int kv_update_samu_dpm(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	int ret;

	if (!gate) {
		if (pi->caps_stable_p_state)
			pi->samu_boot_level = table->count - 1;
		else
			pi->samu_boot_level = 0;

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, SamuBootLevel),
					   (u8 *)&pi->samu_boot_level,
					   sizeof(u8),
					   pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			kv_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_SAMUDPM_SetEnabledMask,
							  (1 << pi->samu_boot_level));
	}

	return kv_enable_samu_dpm(rdev, !gate);
}

static u8 kv_get_acp_boot_level(struct radeon_device *rdev)
{
	u8 i;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;

	for (i = 0; i < table->count; i++) {
#if 0	/* XXX Upstream has changed this to make sense. */
		if (table->entries[i].clk >= 0) /* XXX */
			break;
#endif
	}

	if (i >= table->count)
		i = table->count - 1;

	return i;
}

static void kv_update_acp_boot_level(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u8 acp_boot_level;

	if (!pi->caps_stable_p_state) {
		acp_boot_level = kv_get_acp_boot_level(rdev);
		if (acp_boot_level != pi->acp_boot_level) {
			pi->acp_boot_level = acp_boot_level;
			kv_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_ACPDPM_SetEnabledMask,
							  (1 << pi->acp_boot_level));
		}
	}
}

static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
	int ret;

	if (!gate) {
		if (pi->caps_stable_p_state)
			pi->acp_boot_level = table->count - 1;
		else
			pi->acp_boot_level = kv_get_acp_boot_level(rdev);

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, AcpBootLevel),
					   (u8 *)&pi->acp_boot_level,
					   sizeof(u8),
					   pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			kv_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_ACPDPM_SetEnabledMask,
							  (1 << pi->acp_boot_level));
	}

	return kv_enable_acp_dpm(rdev, !gate);
}

void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->uvd_power_gated == gate)
		return;

	pi->uvd_power_gated = gate;

	if (gate) {
		if (pi->caps_uvd_pg) {
			uvd_v1_0_stop(rdev);
			cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, false);
		}
		kv_update_uvd_dpm(rdev, gate);
		if (pi->caps_uvd_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerOFF);
	} else {
		if (pi->caps_uvd_pg) {
			kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerON);
			uvd_v4_2_resume(rdev);
			uvd_v1_0_start(rdev);
			cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, true);
		}
		kv_update_uvd_dpm(rdev, gate);
	}
}
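
/*
 * VCE powergating: power the block off via the SMU when gating; power
 * it back on and restart VCE when ungating.
 */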
static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->vce_power_gated == gate)
		return;

	pi->vce_power_gated = gate;

	if (gate) {
		if (pi->caps_vce_pg) {
			/* XXX do we need a vce_v1_0_stop() ? */
			kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerOFF);
		}
	} else {
		if (pi->caps_vce_pg) {
			kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerON);
			vce_v2_0_resume(rdev);
			vce_v1_0_start(rdev);
		}
	}
}

static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->samu_power_gated == gate)
		return;

	pi->samu_power_gated = gate;

	if (gate) {
		kv_update_samu_dpm(rdev, true);
		if (pi->caps_samu_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerOFF);
	} else {
		if (pi->caps_samu_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerON);
		kv_update_samu_dpm(rdev, false);
	}
}

static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->acp_power_gated == gate)
		return;

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
		return;

	pi->acp_power_gated = gate;

	if (gate) {
		kv_update_acp_dpm(rdev, true);
		if (pi->caps_acp_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerOFF);
	} else {
		if (pi->caps_acp_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerON);
		kv_update_acp_dpm(rdev, false);
	}
}

static void kv_set_valid_clock_range(struct radeon_device *rdev,
				     struct radeon_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if ((table->entries[i].clk >= new_ps->levels[0].sclk) ||
			    (i == (pi->graphics_dpm_level_count - 1))) {
				pi->lowest_valid = i;
				break;
			}
		}

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk)
				break;
		}
		pi->highest_valid = i;

		if (pi->lowest_valid > pi->highest_valid) {
			if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
			    (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk))
				pi->highest_valid = pi->lowest_valid;
			else
				pi->lowest_valid = pi->highest_valid;
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) {
			if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk ||
			    i == (int)(pi->graphics_dpm_level_count - 1)) {
				pi->lowest_valid = i;
				break;
			}
		}

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].sclk_frequency <=
			    new_ps->levels[new_ps->num_levels - 1].sclk)
				break;
		}
		pi->highest_valid = i;

		if (pi->lowest_valid > pi->highest_valid) {
			if ((new_ps->levels[0].sclk -
			     table->entries[pi->highest_valid].sclk_frequency) >
			    (table->entries[pi->lowest_valid].sclk_frequency -
			     new_ps->levels[new_ps->num_levels - 1].sclk))
				pi->highest_valid = pi->lowest_valid;
			else
				pi->lowest_valid = pi->highest_valid;
		}
	}
}

static int kv_update_dfs_bypass_settings(struct radeon_device *rdev,
					 struct radeon_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret = 0;
	u8 clk_bypass_cntl;

	if (pi->caps_enable_dfs_bypass) {
		clk_bypass_cntl = new_ps->need_dfs_bypass ?
			pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0;
		ret = kv_copy_bytes_to_smc(rdev,
					   (pi->dpm_table_start +
					    offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) +
					    (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) +
					    offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)),
					   &clk_bypass_cntl,
					   sizeof(u8), pi->sram_end);
	}

	return ret;
}

static int kv_enable_nb_dpm(struct radeon_device *rdev,
			    bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret = 0;

	if (enable) {
		if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
			ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable);
			if (ret == 0)
				pi->nb_dpm_enabled = true;
		}
	} else {
		if (pi->enable_nb_dpm && pi->nb_dpm_enabled) {
			ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Disable);
			if (ret == 0)
				pi->nb_dpm_enabled = false;
		}
	}

	return ret;
}

int kv_dpm_force_performance_level(struct radeon_device *rdev,
				   enum radeon_dpm_forced_level level)
{
	int ret;

	if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
		ret = kv_force_dpm_highest(rdev);
		if (ret)
			return ret;
	} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
		ret = kv_force_dpm_lowest(rdev);
		if (ret)
			return ret;
	} else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
		ret = kv_unforce_levels(rdev);
		if (ret)
			return ret;
	}

	rdev->pm.dpm.forced_level = level;

	return 0;
}

int kv_dpm_pre_set_power_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
	struct radeon_ps *new_ps = &requested_ps;

	kv_update_requested_ps(rdev, new_ps);

	kv_apply_state_adjust_rules(rdev,
				    &pi->requested_rps,
				    &pi->current_rps);

	return 0;
}

int kv_dpm_set_power_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_ps *new_ps = &pi->requested_rps;
	struct radeon_ps *old_ps = &pi->current_rps;
	int ret;

	if (pi->bapm_enable) {
		ret = kv_smc_bapm_enable(rdev, rdev->pm.dpm.ac_power);
		if (ret) {
			DRM_ERROR("kv_smc_bapm_enable failed\n");
			return ret;
		}
	}

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
		if (pi->enable_dpm) {
			kv_set_valid_clock_range(rdev, new_ps);
			kv_update_dfs_bypass_settings(rdev, new_ps);
			ret = kv_calculate_ds_divider(rdev);
			if (ret) {
				DRM_ERROR("kv_calculate_ds_divider failed\n");
				return ret;
			}
			kv_calculate_nbps_level_settings(rdev);
			kv_calculate_dpm_settings(rdev);
			kv_force_lowest_valid(rdev);
			kv_enable_new_levels(rdev);
			kv_upload_dpm_settings(rdev);
			kv_program_nbps_index_settings(rdev, new_ps);
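			/* XXX force/unforce dance, apparently so the SMU latches the new level mask */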
			kv_unforce_levels(rdev);
			kv_set_enabled_levels(rdev);
			kv_force_lowest_valid(rdev);
			kv_unforce_levels(rdev);

			ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
			if (ret) {
				DRM_ERROR("kv_update_vce_dpm failed\n");
				return ret;
			}
			kv_update_sclk_t(rdev);
			if (rdev->family == CHIP_MULLINS)
				kv_enable_nb_dpm(rdev, true);
		}
	} else {
		if (pi->enable_dpm) {
			kv_set_valid_clock_range(rdev, new_ps);
			kv_update_dfs_bypass_settings(rdev, new_ps);
			ret = kv_calculate_ds_divider(rdev);
			if (ret) {
				DRM_ERROR("kv_calculate_ds_divider failed\n");
				return ret;
			}
			kv_calculate_nbps_level_settings(rdev);
			kv_calculate_dpm_settings(rdev);
			kv_freeze_sclk_dpm(rdev, true);
			kv_upload_dpm_settings(rdev);
			kv_program_nbps_index_settings(rdev, new_ps);
			kv_freeze_sclk_dpm(rdev, false);
			kv_set_enabled_levels(rdev);
			ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
			if (ret) {
				DRM_ERROR("kv_update_vce_dpm failed\n");
				return ret;
			}
			kv_update_acp_boot_level(rdev);
			kv_update_sclk_t(rdev);
			kv_enable_nb_dpm(rdev, true);
		}
	}

	return 0;
}

void kv_dpm_post_set_power_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_ps *new_ps = &pi->requested_rps;

	kv_update_current_ps(rdev, new_ps);
}

void kv_dpm_setup_asic(struct radeon_device *rdev)
{
	sumo_take_smu_control(rdev, true);
	kv_init_powergate_state(rdev);
	kv_init_sclk_t(rdev);
}

#if 0
void kv_dpm_reset_asic(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
		kv_force_lowest_valid(rdev);
		kv_init_graphics_levels(rdev);
		kv_program_bootup_state(rdev);
		kv_upload_dpm_settings(rdev);
		kv_force_lowest_valid(rdev);
		kv_unforce_levels(rdev);
	} else {
		kv_init_graphics_levels(rdev);
		kv_program_bootup_state(rdev);
		kv_freeze_sclk_dpm(rdev, true);
		kv_upload_dpm_settings(rdev);
		kv_freeze_sclk_dpm(rdev, false);
		kv_set_enabled_level(rdev, pi->graphics_boot_level);
	}
}
#endif

//XXX use sumo_dpm_display_configuration_changed

static void kv_construct_max_power_limits_table(struct radeon_device *rdev,
						struct radeon_clock_and_voltage_limits *table)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) {
		int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1;
		table->sclk =
			pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency;
		table->vddc =
			kv_convert_2bit_index_to_voltage(rdev,
							 pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit);
	}

	table->mclk = pi->sys_info.nbp_memory_clock[0];
}
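
/*
 * Rewrite the 8-bit VID codes in the BIOS dependency tables as real
 * voltage values so later threshold comparisons are meaningful.
 */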
static void kv_patch_voltage_values(struct radeon_device *rdev)
{
	int i;
	struct radeon_uvd_clock_voltage_dependency_table *uvd_table =
		&rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	struct radeon_vce_clock_voltage_dependency_table *vce_table =
		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	struct radeon_clock_voltage_dependency_table *samu_table =
		&rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	struct radeon_clock_voltage_dependency_table *acp_table =
		&rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;

	if (uvd_table->count) {
		for (i = 0; i < uvd_table->count; i++)
			uvd_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(rdev,
								 uvd_table->entries[i].v);
	}

	if (vce_table->count) {
		for (i = 0; i < vce_table->count; i++)
			vce_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(rdev,
								 vce_table->entries[i].v);
	}

	if (samu_table->count) {
		for (i = 0; i < samu_table->count; i++)
			samu_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(rdev,
								 samu_table->entries[i].v);
	}

	if (acp_table->count) {
		for (i = 0; i < acp_table->count; i++)
			acp_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(rdev,
								 acp_table->entries[i].v);
	}
}

static void kv_construct_boot_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
	pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
	pi->boot_pl.ds_divider_index = 0;
	pi->boot_pl.ss_divider_index = 0;
	pi->boot_pl.allow_gnb_slow = 1;
	pi->boot_pl.force_nbp_state = 0;
	pi->boot_pl.display_wm = 0;
	pi->boot_pl.vce_wm = 0;
}

static int kv_force_dpm_highest(struct radeon_device *rdev)
{
	int ret;
	u32 enable_mask, i;

	ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
	if (ret)
		return ret;

	for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) {
		if (enable_mask & (1 << i))
			break;
	}

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
		return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
	else
		return kv_set_enabled_level(rdev, i);
}

static int kv_force_dpm_lowest(struct radeon_device *rdev)
{
	int ret;
	u32 enable_mask, i;

	ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
	if (ret)
		return ret;

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
		if (enable_mask & (1 << i))
			break;
	}

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
		return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
	else
		return kv_set_enabled_level(rdev, i);
}
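
/*
 * Pick the deepest sleep divider that keeps the divided engine clock
 * at or above the self-refresh minimum (and never below
 * KV_MINIMUM_ENGINE_CLOCK).  Returns 0, i.e. no deep sleep, when sclk
 * is already too low or sclk deep sleep is unsupported.
 */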
static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
					     u32 sclk, u32 min_sclk_in_sr)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	u32 temp;
	u32 min = (min_sclk_in_sr > KV_MINIMUM_ENGINE_CLOCK) ?
		min_sclk_in_sr : KV_MINIMUM_ENGINE_CLOCK;

	if (sclk < min)
		return 0;

	if (!pi->caps_sclk_ds)
		return 0;

	for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
		temp = sclk / sumo_get_sleep_divider_from_id(i);
		if (temp >= min)
			break;
	}

	return (u8)i;
}

static int kv_get_high_voltage_limit(struct radeon_device *rdev, int *limit)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	int i;

	if (table && table->count) {
		for (i = table->count - 1; i >= 0; i--) {
			if (pi->high_voltage_t &&
			    (kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v) <=
			     pi->high_voltage_t)) {
				*limit = i;
				return 0;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = table->num_max_dpm_entries - 1; i >= 0; i--) {
			if (pi->high_voltage_t &&
			    (kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit) <=
			     pi->high_voltage_t)) {
				*limit = i;
				return 0;
			}
		}
	}

	*limit = 0;
	return 0;
}
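
/*
 * Clamp the requested state to what the platform can run: raise sclk
 * for VCE and stable-p-state, cap levels that exceed the high-voltage
 * threshold, and choose NB p-state hints from the memory clock,
 * display count and battery state.
 */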
static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
					struct radeon_ps *new_rps,
					struct radeon_ps *old_rps)
{
	struct kv_ps *ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 min_sclk = 10000; /* ??? */
	u32 sclk, mclk = 0;
	int i, limit;
	bool force_high;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 stable_p_state_sclk = 0;
	struct radeon_clock_and_voltage_limits *max_limits =
		&rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;

	if (new_rps->vce_active) {
		new_rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
		new_rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk;
	} else {
		new_rps->evclk = 0;
		new_rps->ecclk = 0;
	}

	mclk = max_limits->mclk;
	sclk = min_sclk;

	if (pi->caps_stable_p_state) {
		stable_p_state_sclk = (max_limits->sclk * 75) / 100;

		for (i = table->count - 1; i >= 0; i--) {
			if (stable_p_state_sclk >= table->entries[i].clk) {
				stable_p_state_sclk = table->entries[i].clk;
				break;
			}
		}

		if (i > 0)
			stable_p_state_sclk = table->entries[0].clk;

		sclk = stable_p_state_sclk;
	}

	if (new_rps->vce_active) {
		if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk)
			sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk;
	}

	ps->need_dfs_bypass = true;

	for (i = 0; i < ps->num_levels; i++) {
		if (ps->levels[i].sclk < sclk)
			ps->levels[i].sclk = sclk;
	}

	if (table && table->count) {
		for (i = 0; i < ps->num_levels; i++) {
			if (pi->high_voltage_t &&
			    (pi->high_voltage_t <
			     kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) {
				kv_get_high_voltage_limit(rdev, &limit);
				ps->levels[i].sclk = table->entries[limit].clk;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = 0; i < ps->num_levels; i++) {
			if (pi->high_voltage_t &&
			    (pi->high_voltage_t <
			     kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) {
				kv_get_high_voltage_limit(rdev, &limit);
				ps->levels[i].sclk = table->entries[limit].sclk_frequency;
			}
		}
	}

	if (pi->caps_stable_p_state) {
		for (i = 0; i < ps->num_levels; i++) {
			ps->levels[i].sclk = stable_p_state_sclk;
		}
	}

	pi->video_start = new_rps->dclk || new_rps->vclk ||
			  new_rps->evclk || new_rps->ecclk;

	if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
	    ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		pi->battery_state = true;
	else
		pi->battery_state = false;

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
		ps->dpm0_pg_nb_ps_lo = 0x1;
		ps->dpm0_pg_nb_ps_hi = 0x0;
		ps->dpmx_nb_ps_lo = 0x1;
		ps->dpmx_nb_ps_hi = 0x0;
	} else {
		ps->dpm0_pg_nb_ps_lo = 0x3;
		ps->dpm0_pg_nb_ps_hi = 0x0;
		ps->dpmx_nb_ps_lo = 0x3;
		ps->dpmx_nb_ps_hi = 0x0;

		if (pi->sys_info.nb_dpm_enable) {
			force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
				pi->video_start || (rdev->pm.dpm.new_active_crtc_count >= 3) ||
				pi->disable_nb_ps3_in_battery;
			ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3;
			ps->dpm0_pg_nb_ps_hi = 0x2;
			ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3;
			ps->dpmx_nb_ps_hi = 0x2;
		}
	}
}

static void kv_dpm_power_level_enabled_for_throttle(struct radeon_device *rdev,
						    u32 index, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0;
}

static int kv_calculate_ds_divider(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 sclk_in_sr = 10000; /* ??? */
	u32 i;

	if (pi->lowest_valid > pi->highest_valid)
		return -EINVAL;

	for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
		pi->graphics_level[i].DeepSleepDivId =
			kv_get_sleep_divider_id_from_clock(rdev,
							   be32_to_cpu(pi->graphics_level[i].SclkFrequency),
							   sclk_in_sr);
	}
	return 0;
}
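
/*
 * Fill in the GnbSlow/ForceNbPs1/UpH fields for each valid graphics
 * level.  On Kabini/Mullins a high memory clock, three or more active
 * crtcs, or video playback turns off the slow-GNB path on every level;
 * on the other KV parts only the lowest level is retuned, and only on
 * battery.
 */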
static int kv_calculate_nbps_level_settings(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	bool force_high;
	struct radeon_clock_and_voltage_limits *max_limits =
		&rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	u32 mclk = max_limits->mclk;

	if (pi->lowest_valid > pi->highest_valid)
		return -EINVAL;

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
		for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
			pi->graphics_level[i].GnbSlow = 1;
			pi->graphics_level[i].ForceNbPs1 = 0;
			pi->graphics_level[i].UpH = 0;
		}

		if (!pi->sys_info.nb_dpm_enable)
			return 0;

		force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) ||
			      (rdev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start);

		if (force_high) {
			for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
				pi->graphics_level[i].GnbSlow = 0;
		} else {
			if (pi->battery_state)
				pi->graphics_level[0].ForceNbPs1 = 1;

			pi->graphics_level[1].GnbSlow = 0;
			pi->graphics_level[2].GnbSlow = 0;
			pi->graphics_level[3].GnbSlow = 0;
			pi->graphics_level[4].GnbSlow = 0;
		}
	} else {
		for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
			pi->graphics_level[i].GnbSlow = 1;
			pi->graphics_level[i].ForceNbPs1 = 0;
			pi->graphics_level[i].UpH = 0;
		}

		if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
			pi->graphics_level[pi->lowest_valid].UpH = 0x28;
			pi->graphics_level[pi->lowest_valid].GnbSlow = 0;
			if (pi->lowest_valid != pi->highest_valid)
				pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1;
		}
	}
	return 0;
}

static int kv_calculate_dpm_settings(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;

	if (pi->lowest_valid > pi->highest_valid)
		return -EINVAL;

	for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
		pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0;

	return 0;
}

static void kv_init_graphics_levels(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		u32 vid_2bit;

		pi->graphics_dpm_level_count = 0;
		for (i = 0; i < table->count; i++) {
			if (pi->high_voltage_t &&
			    (pi->high_voltage_t <
			     kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v)))
				break;

			kv_set_divider_value(rdev, i, table->entries[i].clk);
			vid_2bit = kv_convert_vid7_to_vid2(rdev,
							   &pi->sys_info.vid_mapping_table,
							   table->entries[i].v);
			kv_set_vid(rdev, i, vid_2bit);
			kv_set_at(rdev, i, pi->at[i]);
			kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
			pi->graphics_dpm_level_count++;
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		pi->graphics_dpm_level_count = 0;
		for (i = 0; i < table->num_max_dpm_entries; i++) {
			if (pi->high_voltage_t &&
			    pi->high_voltage_t <
			    kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit))
				break;

			kv_set_divider_value(rdev, i, table->entries[i].sclk_frequency);
			kv_set_vid(rdev, i, table->entries[i].vid_2bit);
			kv_set_at(rdev, i, pi->at[i]);
			kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
			pi->graphics_dpm_level_count++;
		}
	}

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
		kv_dpm_power_level_enable(rdev, i, false);
}

static void kv_enable_new_levels(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
		if (i >= pi->lowest_valid && i <= pi->highest_valid)
			kv_dpm_power_level_enable(rdev, i, true);
	}
}

static int kv_set_enabled_level(struct radeon_device *rdev, u32 level)
{
	u32 new_mask = (1 << level);

	return kv_send_msg_to_smc_with_parameter(rdev,
						 PPSMC_MSG_SCLKDPM_SetEnabledMask,
						 new_mask);
}

static int kv_set_enabled_levels(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i, new_mask = 0;

	for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
		new_mask |= (1 << i);

	return kv_send_msg_to_smc_with_parameter(rdev,
						 PPSMC_MSG_SCLKDPM_SetEnabledMask,
						 new_mask);
}
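
/*
 * Write the NB p-state indices chosen by kv_apply_state_adjust_rules
 * into NB_DPM_CONFIG_1.  Kabini and Mullins return early, so the
 * register is only touched on the other KV parts.
 */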
static void kv_program_nbps_index_settings(struct radeon_device *rdev,
					   struct radeon_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 nbdpmconfig1;

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
		return;

	if (pi->sys_info.nb_dpm_enable) {
		nbdpmconfig1 = RREG32_SMC(NB_DPM_CONFIG_1);
		nbdpmconfig1 &= ~(Dpm0PgNbPsLo_MASK | Dpm0PgNbPsHi_MASK |
				  DpmXNbPsLo_MASK | DpmXNbPsHi_MASK);
		nbdpmconfig1 |= (Dpm0PgNbPsLo(new_ps->dpm0_pg_nb_ps_lo) |
				 Dpm0PgNbPsHi(new_ps->dpm0_pg_nb_ps_hi) |
				 DpmXNbPsLo(new_ps->dpmx_nb_ps_lo) |
				 DpmXNbPsHi(new_ps->dpmx_nb_ps_hi));
		WREG32_SMC(NB_DPM_CONFIG_1, nbdpmconfig1);
	}
}

static int kv_set_thermal_temperature_range(struct radeon_device *rdev,
					    int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;
	u32 tmp;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	tmp = RREG32_SMC(CG_THERMAL_INT_CTRL);
	tmp &= ~(DIG_THERM_INTH_MASK | DIG_THERM_INTL_MASK);
	tmp |= (DIG_THERM_INTH(49 + (high_temp / 1000)) |
		DIG_THERM_INTL(49 + (low_temp / 1000)));
	WREG32_SMC(CG_THERMAL_INT_CTRL, tmp);

	rdev->pm.dpm.thermal.min_temp = low_temp;
	rdev->pm.dpm.thermal.max_temp = high_temp;

	return 0;
}

union igp_info {
	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
};
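
/*
 * Pull the bootup clocks, thermal limits, NB p-state clocks and the
 * sclk/voltage mapping tables out of the IntegratedSystemInfo atom
 * table; only table revision 8 is accepted.
 */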
static int kv_parse_sys_info_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
	union igp_info *igp_info;
	u8 frev, crev;
	u16 data_offset;
	int i;

	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		igp_info = (union igp_info *)(mode_info->atom_context->bios +
					      data_offset);

		if (crev != 8) {
			DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
			return -EINVAL;
		}
		pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock);
		pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock);
		pi->sys_info.bootup_nb_voltage_index =
			le16_to_cpu(igp_info->info_8.usBootUpNBVoltage);
		if (igp_info->info_8.ucHtcTmpLmt == 0)
			pi->sys_info.htc_tmp_lmt = 203;
		else
			pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt;
		if (igp_info->info_8.ucHtcHystLmt == 0)
			pi->sys_info.htc_hyst_lmt = 5;
		else
			pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt;
		if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
			DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
		}

		if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3))
			pi->sys_info.nb_dpm_enable = true;
		else
			pi->sys_info.nb_dpm_enable = false;

		for (i = 0; i < KV_NUM_NBPSTATES; i++) {
			pi->sys_info.nbp_memory_clock[i] =
				le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]);
			pi->sys_info.nbp_n_clock[i] =
				le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]);
		}
		if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) &
		    SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
			pi->caps_enable_dfs_bypass = true;

		sumo_construct_sclk_voltage_mapping_table(rdev,
							  &pi->sys_info.sclk_voltage_mapping_table,
							  igp_info->info_8.sAvail_SCLK);

		sumo_construct_vid_mapping_table(rdev,
						 &pi->sys_info.vid_mapping_table,
						 igp_info->info_8.sAvail_SCLK);

		kv_construct_max_power_limits_table(rdev,
						    &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
	}
	return 0;
}

union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};

union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
};

union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};

static void kv_patch_boot_state(struct radeon_device *rdev,
				struct kv_ps *ps)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	ps->num_levels = 1;
	ps->levels[0] = pi->boot_pl;
}

static void kv_parse_pplib_non_clock_info(struct radeon_device *rdev,
					  struct radeon_ps *rps,
					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
					  u8 table_rev)
{
	struct kv_ps *ps = kv_get_ps(rps);

	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
	rps->class = le16_to_cpu(non_clock_info->usClassification);
	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
	} else {
		rps->vclk = 0;
		rps->dclk = 0;
	}

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		rdev->pm.dpm.boot_ps = rps;
		kv_patch_boot_state(rdev, ps);
	}
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		rdev->pm.dpm.uvd_ps = rps;
}

static void kv_parse_pplib_clock_info(struct radeon_device *rdev,
				      struct radeon_ps *rps, int index,
				      union pplib_clock_info *clock_info)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct kv_ps *ps = kv_get_ps(rps);
	struct kv_pl *pl = &ps->levels[index];
	u32 sclk;

	sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
	sclk |= clock_info->sumo.ucEngineClockHigh << 16;
	pl->sclk = sclk;
	pl->vddc_index = clock_info->sumo.vddcIndex;

	ps->num_levels = index + 1;

	if (pi->caps_sclk_ds) {
		pl->ds_divider_index = 5;
		pl->ss_divider_index = 5;
	}
}
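
/*
 * Walk the PPLib state array from the vbios and build rdev->pm.dpm.ps:
 * one kv_ps per state and one power level per valid clock-info index,
 * then resolve the sclk of each VCE state from its clock-info entry.
 */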
static int kv_parse_power_table(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
	union pplib_power_state *power_state;
	int i, j, k, non_clock_array_index, clock_array_index;
	union pplib_clock_info *clock_info;
	struct _StateArray *state_array;
	struct _ClockInfoArray *clock_info_array;
	struct _NonClockInfoArray *non_clock_info_array;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	u8 *power_state_offset;
	struct kv_ps *ps;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				    &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	state_array = (struct _StateArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
	clock_info_array = (struct _ClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
	non_clock_info_array = (struct _NonClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));

	rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
				  sizeof(struct radeon_ps),
				  GFP_KERNEL);
	if (!rdev->pm.dpm.ps)
		return -ENOMEM;
	power_state_offset = (u8 *)state_array->states;
	for (i = 0; i < state_array->ucNumEntries; i++) {
		u8 *idx;
		power_state = (union pplib_power_state *)power_state_offset;
		non_clock_array_index = power_state->v2.nonClockInfoIndex;
		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
			&non_clock_info_array->nonClockInfo[non_clock_array_index];
		if (!rdev->pm.power_state[i].clock_info)
			return -EINVAL;
		ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
		if (ps == NULL) {
			kfree(rdev->pm.dpm.ps);
			return -ENOMEM;
		}
		rdev->pm.dpm.ps[i].ps_priv = ps;
		k = 0;
		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
			clock_array_index = idx[j];
			if (clock_array_index >= clock_info_array->ucNumEntries)
				continue;
			if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
				break;
			clock_info = (union pplib_clock_info *)
				((u8 *)&clock_info_array->clockInfo[0] +
				 (clock_array_index * clock_info_array->ucEntrySize));
			kv_parse_pplib_clock_info(rdev,
						  &rdev->pm.dpm.ps[i], k,
						  clock_info);
			k++;
		}
		kv_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
					      non_clock_info,
					      non_clock_info_array->ucEntrySize);
		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
	}
	rdev->pm.dpm.num_ps = state_array->ucNumEntries;

	/* fill in the vce power states */
	for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
		u32 sclk;
		clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx;
		clock_info = (union pplib_clock_info *)
			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
		sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
		sclk |= clock_info->sumo.ucEngineClockHigh << 16;
		rdev->pm.dpm.vce_states[i].sclk = sclk;
		rdev->pm.dpm.vce_states[i].mclk = 0;
	}

	return 0;
}
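
/*
 * One-time DPM setup: allocate the private kv_power_info, parse the
 * platform caps and power tables, and pick the default feature set
 * (BAPM only on Kabini/Mullins unless the radeon_bapm module parameter
 * overrides it; NB DPM disabled on ASRock systems; DIDT off).
 */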
int kv_dpm_init(struct radeon_device *rdev)
{
	struct kv_power_info *pi;
	int ret, i;

	pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL);
	if (pi == NULL)
		return -ENOMEM;
	rdev->pm.dpm.priv = pi;

	ret = r600_get_platform_caps(rdev);
	if (ret)
		return ret;

	ret = r600_parse_extended_power_table(rdev);
	if (ret)
		return ret;

	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
		pi->at[i] = TRINITY_AT_DFLT;

	pi->sram_end = SMC_RAM_END;

	/* Enabling nb dpm on an asrock system prevents dpm from working */
	if (rdev->pdev->subsystem_vendor == 0x1849)
		pi->enable_nb_dpm = false;
	else
		pi->enable_nb_dpm = true;

	pi->caps_power_containment = true;
	pi->caps_cac = true;
	pi->enable_didt = false;
	if (pi->enable_didt) {
		pi->caps_sq_ramping = true;
		pi->caps_db_ramping = true;
		pi->caps_td_ramping = true;
		pi->caps_tcp_ramping = true;
	}

	pi->caps_sclk_ds = true;
	pi->enable_auto_thermal_throttling = true;
	pi->disable_nb_ps3_in_battery = false;
	if (radeon_bapm == -1) {
		/* only enable bapm on KB, ML by default */
		if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
			pi->bapm_enable = true;
		else
			pi->bapm_enable = false;
	} else if (radeon_bapm == 0) {
		pi->bapm_enable = false;
	} else {
		pi->bapm_enable = true;
	}
	pi->voltage_drop_t = 0;
	pi->caps_sclk_throttle_low_notification = false;
	pi->caps_fps = false; /* true? */
	pi->caps_uvd_pg = true;
	pi->caps_uvd_dpm = true;
	pi->caps_vce_pg = false; /* XXX true */
	pi->caps_samu_pg = false;
	pi->caps_acp_pg = false;
	pi->caps_stable_p_state = false;

	ret = kv_parse_sys_info_table(rdev);
	if (ret)
		return ret;

	kv_patch_voltage_values(rdev);
	kv_construct_boot_state(rdev);

	ret = kv_parse_power_table(rdev);
	if (ret)
		return ret;

	pi->enable_dpm = true;

	return 0;
}

#ifdef CONFIG_DEBUG_FS
void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
						    struct seq_file *m)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 current_index =
		(RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) >>
		CURR_SCLK_INDEX_SHIFT;
	u32 sclk, tmp;
	u16 vddc;

	if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
		seq_printf(m, "invalid dpm profile %d\n", current_index);
	} else {
		sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
		tmp = (RREG32_SMC(SMU_VOLTAGE_STATUS) & SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
			SMU_VOLTAGE_CURRENT_LEVEL_SHIFT;
		vddc = kv_convert_8bit_index_to_voltage(rdev, (u16)tmp);
		seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
		seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en");
		seq_printf(m, "power level %d sclk: %u vddc: %u\n",
			   current_index, sclk, vddc);
	}
}
#endif /* CONFIG_DEBUG_FS */

u32 kv_dpm_get_current_sclk(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 current_index =
		(RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) >>
		CURR_SCLK_INDEX_SHIFT;
	u32 sclk;

	if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
		return 0;
	} else {
		sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
		return sclk;
	}
}

u32 kv_dpm_get_current_mclk(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	return pi->sys_info.bootup_uma_clk;
}

void kv_dpm_print_power_state(struct radeon_device *rdev,
			      struct radeon_ps *rps)
{
	int i;
	struct kv_ps *ps = kv_get_ps(rps);

	r600_dpm_print_class_info(rps->class, rps->class2);
	r600_dpm_print_cap_info(rps->caps);
	printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
	for (i = 0; i < ps->num_levels; i++) {
		struct kv_pl *pl = &ps->levels[i];
		printk("\t\tpower level %d sclk: %u vddc: %u\n",
		       i, pl->sclk,
		       kv_convert_8bit_index_to_voltage(rdev, pl->vddc_index));
	}
	r600_dpm_print_ps_status(rdev, rps);
}

void kv_dpm_fini(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
		kfree(rdev->pm.dpm.ps[i].ps_priv);
	}
	kfree(rdev->pm.dpm.ps);
	kfree(rdev->pm.dpm.priv);
	r600_free_extended_power_table(rdev);
}

void kv_dpm_display_configuration_changed(struct radeon_device *rdev)
{

}

u32 kv_dpm_get_sclk(struct radeon_device *rdev, bool low)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps);

	if (low)
		return requested_state->levels[0].sclk;
	else
		return requested_state->levels[requested_state->num_levels - 1].sclk;
}

u32 kv_dpm_get_mclk(struct radeon_device *rdev, bool low)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	return pi->sys_info.bootup_uma_clk;
}