/*	$NetBSD: radeon_kv_dpm.c,v 1.1.2.2 2018/09/06 06:56:32 pgoyette Exp $	*/

/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: radeon_kv_dpm.c,v 1.1.2.2 2018/09/06 06:56:32 pgoyette Exp $");

#include "drmP.h"
#include "radeon.h"
#include "cikd.h"
#include "r600_dpm.h"
#include "kv_dpm.h"
#include "radeon_asic.h"
#include <linux/seq_file.h>

#define KV_MAX_DEEPSLEEP_DIVIDER_ID	5
#define KV_MINIMUM_ENGINE_CLOCK		800
#define SMC_RAM_END			0x40000

static int kv_enable_nb_dpm(struct radeon_device *rdev,
    bool enable);
static void kv_init_graphics_levels(struct radeon_device *rdev);
static int kv_calculate_ds_divider(struct radeon_device *rdev);
static int kv_calculate_nbps_level_settings(struct radeon_device *rdev);
static int kv_calculate_dpm_settings(struct radeon_device *rdev);
static void kv_enable_new_levels(struct radeon_device *rdev);
static void kv_program_nbps_index_settings(struct radeon_device *rdev,
    struct radeon_ps *new_rps);
static int kv_set_enabled_level(struct radeon_device *rdev, u32 level);
static int kv_set_enabled_levels(struct radeon_device *rdev);
static int kv_force_dpm_highest(struct radeon_device *rdev);
static int kv_force_dpm_lowest(struct radeon_device *rdev);
static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
    struct radeon_ps *new_rps,
    struct radeon_ps *old_rps);
static int kv_set_thermal_temperature_range(struct radeon_device *rdev,
    int min_temp, int max_temp);
static int kv_init_fps_limits(struct radeon_device *rdev);

void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate);
static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate);
static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate);

extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
extern void cik_update_cg(struct radeon_device *rdev,
    u32 block, bool enable);

static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 1, 4, 1 },
	{ 2, 5, 1 },
	{ 3, 4, 2 },
	{ 4, 1, 1 },
	{ 5, 5, 2 },
	{ 6, 6, 1 },
	{ 7, 9, 2 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 1, 4, 1 },
	{ 2, 5, 1 },
	{ 3, 4, 1 },
	{ 4, 1, 1 },
	{ 5, 5, 1 },
	{ 6, 6, 1 },
	{ 7, 9, 1 },
	{ 8, 4, 1 },
	{ 9, 2, 1 },
	{ 10, 3, 1 },
	{ 11, 6, 1 },
	{ 12, 8, 2 },
	{ 13, 1, 1 },
	{ 14, 2, 1 },
	{ 15, 3, 1 },
	{ 16, 1, 1 },
	{ 17, 4, 1 },
	{ 18, 3, 1 },
	{ 19, 1, 1 },
	{ 20, 8, 1 },
	{ 21, 5, 1 },
	{ 22, 1, 1 },
	{ 23, 1, 1 },
	{ 24, 4, 1 },
	{ 27, 6, 1 },
	{ 28, 1, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_reg sx0_cac_config_reg[] =
{
	{ 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc0_cac_config_reg[] =
{
	{ 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc1_cac_config_reg[] =
{
	{ 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc2_cac_config_reg[] =
{
	{ 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc3_cac_config_reg[] =
{
	{ 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg cpl_cac_config_reg[] =
{
	{ 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_pt_config_reg didt_config_kv[] =
{
	{ 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }
};

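/*
 * Accessors for the KV-specific data hung off the generic radeon
 * power-state (radeon_ps) and power-manager (radeon_device) structures.
 */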
static struct kv_ps *kv_get_ps(struct radeon_ps *rps)
{
	struct kv_ps *ps = rps->ps_priv;

	return ps;
}

static struct kv_power_info *kv_get_pi(struct radeon_device *rdev)
{
	struct kv_power_info *pi = rdev->pm.dpm.priv;

	return pi;
}

#if 0
static void kv_program_local_cac_table(struct radeon_device *rdev,
    const struct kv_lcac_config_values *local_cac_table,
    const struct kv_lcac_config_reg *local_cac_reg)
{
	u32 i, count, data;
	const struct kv_lcac_config_values *values = local_cac_table;

	while (values->block_id != 0xffffffff) {
		count = values->signal_id;
		for (i = 0; i < count; i++) {
			data = ((values->block_id << local_cac_reg->block_shift) &
			    local_cac_reg->block_mask);
			data |= ((i << local_cac_reg->signal_shift) &
			    local_cac_reg->signal_mask);
			data |= ((values->t << local_cac_reg->t_shift) &
			    local_cac_reg->t_mask);
			data |= ((1 << local_cac_reg->enable_shift) &
			    local_cac_reg->enable_mask);
			WREG32_SMC(local_cac_reg->cntl, data);
		}
		values++;
	}
}
#endif

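/*
 * Walk a 0xFFFFFFFF-terminated table of register fields and program them.
 * KV_CONFIGREG_CACHE entries are accumulated into a single value that is
 * flushed with the next non-cache entry; SMC and DIDT entries go through
 * their indirect register spaces, everything else through plain MMIO.
 */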
static int kv_program_pt_config_registers(struct radeon_device *rdev,
    const struct kv_pt_config_reg *cac_config_regs)
{
	const struct kv_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == KV_CONFIGREG_CACHE) {
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case KV_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case KV_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset << 2);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;
			cache = 0;

			switch (config_regs->type) {
			case KV_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case KV_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset << 2, data);
				break;
			}
		}
		config_regs++;
	}

	return 0;
}

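/*
 * Toggle the DIDT (di/dt current-transient limiting) enable bit for each
 * shader block (SQ/DB/TD/TCP) whose ramping capability is set.
 */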
static void kv_do_enable_didt(struct radeon_device *rdev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 data;

	if (pi->caps_sq_ramping) {
		data = RREG32_DIDT(DIDT_SQ_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_SQ_CTRL0, data);
	}

	if (pi->caps_db_ramping) {
		data = RREG32_DIDT(DIDT_DB_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_DB_CTRL0, data);
	}

	if (pi->caps_td_ramping) {
		data = RREG32_DIDT(DIDT_TD_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TD_CTRL0, data);
	}

	if (pi->caps_tcp_ramping) {
		data = RREG32_DIDT(DIDT_TCP_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TCP_CTRL0, data);
	}
}

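/*
 * Program the DIDT configuration (on enable) and flip the per-block enable
 * bits, bracketed by RLC safe mode so the graphics core is quiescent while
 * the registers change.
 */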
static int kv_enable_didt(struct radeon_device *rdev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	if (pi->caps_sq_ramping ||
	    pi->caps_db_ramping ||
	    pi->caps_td_ramping ||
	    pi->caps_tcp_ramping) {
		cik_enter_rlc_safe_mode(rdev);

		if (enable) {
			ret = kv_program_pt_config_registers(rdev, didt_config_kv);
			if (ret) {
				cik_exit_rlc_safe_mode(rdev);
				return ret;
			}
		}

		kv_do_enable_didt(rdev, enable);

		cik_exit_rlc_safe_mode(rdev);
	}

	return 0;
}

#if 0
static void kv_initialize_hardware_cac_manager(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->caps_cac) {
		WREG32_SMC(LCAC_SX0_OVR_SEL, 0);
		WREG32_SMC(LCAC_SX0_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, sx_local_cac_cfg_kv, sx0_cac_config_reg);

		WREG32_SMC(LCAC_MC0_OVR_SEL, 0);
		WREG32_SMC(LCAC_MC0_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, mc0_local_cac_cfg_kv, mc0_cac_config_reg);

		WREG32_SMC(LCAC_MC1_OVR_SEL, 0);
		WREG32_SMC(LCAC_MC1_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, mc1_local_cac_cfg_kv, mc1_cac_config_reg);

		WREG32_SMC(LCAC_MC2_OVR_SEL, 0);
		WREG32_SMC(LCAC_MC2_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, mc2_local_cac_cfg_kv, mc2_cac_config_reg);

		WREG32_SMC(LCAC_MC3_OVR_SEL, 0);
		WREG32_SMC(LCAC_MC3_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, mc3_local_cac_cfg_kv, mc3_cac_config_reg);

		WREG32_SMC(LCAC_CPL_OVR_SEL, 0);
		WREG32_SMC(LCAC_CPL_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, cpl_local_cac_cfg_kv, cpl_cac_config_reg);
	}
}
#endif

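/*
 * Ask the SMC to start or stop CAC power accounting, tracking whether it
 * is currently enabled so a failed enable is not "disabled" twice.
 */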
static int kv_enable_smc_cac(struct radeon_device *rdev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret = 0;

	if (pi->caps_cac) {
		if (enable) {
			ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_EnableCac);
			if (ret)
				pi->cac_enabled = false;
			else
				pi->cac_enabled = true;
		} else if (pi->cac_enabled) {
			kv_notify_message_to_smu(rdev, PPSMC_MSG_DisableCac);
			pi->cac_enabled = false;
		}
	}

	return ret;
}

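/*
 * Read the locations of the DPM table and the soft-register area out of
 * the SMU firmware header so later uploads know where to copy data.
 */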
static int kv_process_firmware_header(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 tmp;
	int ret;

	ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION +
	    offsetof(SMU7_Firmware_Header, DpmTable),
	    &tmp, pi->sram_end);

	if (ret == 0)
		pi->dpm_table_start = tmp;

	ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION +
	    offsetof(SMU7_Firmware_Header, SoftRegisters),
	    &tmp, pi->sram_end);

	if (ret == 0)
		pi->soft_regs_start = tmp;

	return ret;
}

static int kv_enable_dpm_voltage_scaling(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	pi->graphics_voltage_change_enable = 1;

	ret = kv_copy_bytes_to_smc(rdev,
	    pi->dpm_table_start +
	    offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable),
	    &pi->graphics_voltage_change_enable,
	    sizeof(u8), pi->sram_end);

	return ret;
}

static int kv_set_dpm_interval(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	pi->graphics_interval = 1;

	ret = kv_copy_bytes_to_smc(rdev,
	    pi->dpm_table_start +
	    offsetof(SMU7_Fusion_DpmTable, GraphicsInterval),
	    &pi->graphics_interval,
	    sizeof(u8), pi->sram_end);

	return ret;
}

static int kv_set_dpm_boot_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	ret = kv_copy_bytes_to_smc(rdev,
	    pi->dpm_table_start +
	    offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel),
	    &pi->graphics_boot_level,
	    sizeof(u8), pi->sram_end);

	return ret;
}

static void kv_program_vc(struct radeon_device *rdev)
{
	WREG32_SMC(CG_FTV_0, 0x3FFFC100);
}

static void kv_clear_vc(struct radeon_device *rdev)
{
	WREG32_SMC(CG_FTV_0, 0);
}

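/*
 * Compute the engine clock divider for a graphics level via the ATOM
 * tables and store it, along with the target sclk, in SMC byte order.
 */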
static int kv_set_divider_value(struct radeon_device *rdev,
    u32 index, u32 sclk)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct atom_clock_dividers dividers;
	int ret;

	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
	    sclk, false, &dividers);
	if (ret)
		return ret;

	pi->graphics_level[index].SclkDid = (u8)dividers.post_div;
	pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk);

	return 0;
}

static u32 kv_convert_vid2_to_vid7(struct radeon_device *rdev,
    struct sumo_vid_mapping_table *vid_mapping_table,
    u32 vid_2bit)
{
	struct radeon_clock_voltage_dependency_table *vddc_sclk_table =
	    &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 i;

	if (vddc_sclk_table && vddc_sclk_table->count) {
		if (vid_2bit < vddc_sclk_table->count)
			return vddc_sclk_table->entries[vid_2bit].v;
		else
			return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v;
	} else {
		for (i = 0; i < vid_mapping_table->num_entries; i++) {
			if (vid_mapping_table->entries[i].vid_2bit == vid_2bit)
				return vid_mapping_table->entries[i].vid_7bit;
		}
		return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
	}
}

static u32 kv_convert_vid7_to_vid2(struct radeon_device *rdev,
    struct sumo_vid_mapping_table *vid_mapping_table,
    u32 vid_7bit)
{
	struct radeon_clock_voltage_dependency_table *vddc_sclk_table =
	    &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 i;

	if (vddc_sclk_table && vddc_sclk_table->count) {
		for (i = 0; i < vddc_sclk_table->count; i++) {
			if (vddc_sclk_table->entries[i].v == vid_7bit)
				return i;
		}
		return vddc_sclk_table->count - 1;
	} else {
		for (i = 0; i < vid_mapping_table->num_entries; i++) {
			if (vid_mapping_table->entries[i].vid_7bit == vid_7bit)
				return vid_mapping_table->entries[i].vid_2bit;
		}

		return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
	}
}

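/*
 * Convert an 8-bit VID code to a voltage value; higher codes map to
 * lower voltages.
 */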
static u16 kv_convert_8bit_index_to_voltage(struct radeon_device *rdev,
    u16 voltage)
{
	return 6200 - (voltage * 25);
}

static u16 kv_convert_2bit_index_to_voltage(struct radeon_device *rdev,
    u32 vid_2bit)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 vid_8bit = kv_convert_vid2_to_vid7(rdev,
	    &pi->sys_info.vid_mapping_table,
	    vid_2bit);

	return kv_convert_8bit_index_to_voltage(rdev, (u16)vid_8bit);
}

static int kv_set_vid(struct radeon_device *rdev, u32 index, u32 vid)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t;
	pi->graphics_level[index].MinVddNb =
	    cpu_to_be32(kv_convert_2bit_index_to_voltage(rdev, vid));

	return 0;
}

static int kv_set_at(struct radeon_device *rdev, u32 index, u32 at)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->graphics_level[index].AT = cpu_to_be16((u16)at);

	return 0;
}

static void kv_dpm_power_level_enable(struct radeon_device *rdev,
    u32 index, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0;
}

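/* Enable global power management and tell the SMC to start DPM. */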
static void kv_start_dpm(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);

	tmp |= GLOBAL_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);

	kv_smc_dpm_enable(rdev, true);
}

static void kv_stop_dpm(struct radeon_device *rdev)
{
	kv_smc_dpm_enable(rdev, false);
}

static void kv_start_am(struct radeon_device *rdev)
{
	u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL);

	sclk_pwrmgt_cntl &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
	sclk_pwrmgt_cntl |= DYNAMIC_PM_EN;

	WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
}

static void kv_reset_am(struct radeon_device *rdev)
{
	u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL);

	sclk_pwrmgt_cntl |= (RESET_SCLK_CNT | RESET_BUSY_CNT);

	WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
}

static int kv_freeze_sclk_dpm(struct radeon_device *rdev, bool freeze)
{
	return kv_notify_message_to_smu(rdev, freeze ?
	    PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel);
}

static int kv_force_lowest_valid(struct radeon_device *rdev)
{
	return kv_force_dpm_lowest(rdev);
}

static int kv_unforce_levels(struct radeon_device *rdev)
{
	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
		return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
	else
		return kv_set_enabled_levels(rdev);
}

static int kv_update_sclk_t(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 low_sclk_interrupt_t = 0;
	int ret = 0;

	if (pi->caps_sclk_throttle_low_notification) {
		low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);

		ret = kv_copy_bytes_to_smc(rdev,
		    pi->dpm_table_start +
		    offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT),
		    (u8 *)&low_sclk_interrupt_t,
		    sizeof(u32), pi->sram_end);
	}
	return ret;
}

static int kv_program_bootup_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	struct radeon_clock_voltage_dependency_table *table =
	    &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].clk == pi->boot_pl.sclk)
				break;
		}

		pi->graphics_boot_level = (u8)i;
		kv_dpm_power_level_enable(rdev, i, true);
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
		    &pi->sys_info.sclk_voltage_mapping_table;

		if (table->num_max_dpm_entries == 0)
			return -EINVAL;

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].sclk_frequency == pi->boot_pl.sclk)
				break;
		}

		pi->graphics_boot_level = (u8)i;
		kv_dpm_power_level_enable(rdev, i, true);
	}
	return 0;
}

static int kv_enable_auto_thermal_throttling(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	pi->graphics_therm_throttle_enable = 1;

	ret = kv_copy_bytes_to_smc(rdev,
	    pi->dpm_table_start +
	    offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable),
	    &pi->graphics_therm_throttle_enable,
	    sizeof(u8), pi->sram_end);

	return ret;
}

static int kv_upload_dpm_settings(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	ret = kv_copy_bytes_to_smc(rdev,
	    pi->dpm_table_start +
	    offsetof(SMU7_Fusion_DpmTable, GraphicsLevel),
	    (u8 *)&pi->graphics_level,
	    sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS,
	    pi->sram_end);

	if (ret)
		return ret;

	ret = kv_copy_bytes_to_smc(rdev,
	    pi->dpm_table_start +
	    offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount),
	    &pi->graphics_dpm_level_count,
	    sizeof(u8), pi->sram_end);

	return ret;
}

static u32 kv_get_clock_difference(u32 a, u32 b)
{
	return (a >= b) ? a - b : b - a;
}

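/*
 * Pick a DFS bypass divider value for clocks close to a handful of known
 * frequencies; 0 leaves bypass disabled.
 */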
static u32 kv_get_clk_bypass(struct radeon_device *rdev, u32 clk)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 value;

	if (pi->caps_enable_dfs_bypass) {
		if (kv_get_clock_difference(clk, 40000) < 200)
			value = 3;
		else if (kv_get_clock_difference(clk, 30000) < 200)
			value = 2;
		else if (kv_get_clock_difference(clk, 20000) < 200)
			value = 7;
		else if (kv_get_clock_difference(clk, 15000) < 200)
			value = 6;
		else if (kv_get_clock_difference(clk, 10000) < 200)
			value = 8;
		else
			value = 0;
	} else {
		value = 0;
	}

	return value;
}

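/*
 * Build the UVD clock-level table from the clock/voltage dependency table,
 * stopping at the high-voltage cutoff if one is set, then upload the level
 * count, sampling interval and level array to the SMC.
 */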
static int kv_populate_uvd_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_uvd_clock_voltage_dependency_table *table =
	    &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->uvd_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    (pi->high_voltage_t < table->entries[i].v))
			break;

		pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk);
		pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk);
		pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v);

		pi->uvd_level[i].VClkBypassCntl =
		    (u8)kv_get_clk_bypass(rdev, table->entries[i].vclk);
		pi->uvd_level[i].DClkBypassCntl =
		    (u8)kv_get_clk_bypass(rdev, table->entries[i].dclk);

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
		    table->entries[i].vclk, false, &dividers);
		if (ret)
			return ret;
		pi->uvd_level[i].VclkDivider = (u8)dividers.post_div;

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
		    table->entries[i].dclk, false, &dividers);
		if (ret)
			return ret;
		pi->uvd_level[i].DclkDivider = (u8)dividers.post_div;

		pi->uvd_level_count++;
	}

	ret = kv_copy_bytes_to_smc(rdev,
	    pi->dpm_table_start +
	    offsetof(SMU7_Fusion_DpmTable, UvdLevelCount),
	    (u8 *)&pi->uvd_level_count,
	    sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	pi->uvd_interval = 1;

	ret = kv_copy_bytes_to_smc(rdev,
	    pi->dpm_table_start +
	    offsetof(SMU7_Fusion_DpmTable, UVDInterval),
	    &pi->uvd_interval,
	    sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	ret = kv_copy_bytes_to_smc(rdev,
	    pi->dpm_table_start +
	    offsetof(SMU7_Fusion_DpmTable, UvdLevel),
	    (u8 *)&pi->uvd_level,
	    sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD,
	    pi->sram_end);

	return ret;
}

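/*
 * The VCE, SAMU and ACP tables below follow the same pattern: build the
 * level array, then upload the count, the interval and the levels.
 */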
static int kv_populate_vce_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;
	u32 i;
	struct radeon_vce_clock_voltage_dependency_table *table =
	    &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;

	if (table == NULL || table->count == 0)
		return 0;

	pi->vce_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    pi->high_voltage_t < table->entries[i].v)
			break;

		pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk);
		pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		pi->vce_level[i].ClkBypassCntl =
		    (u8)kv_get_clk_bypass(rdev, table->entries[i].evclk);

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
		    table->entries[i].evclk, false, &dividers);
		if (ret)
			return ret;
		pi->vce_level[i].Divider = (u8)dividers.post_div;

		pi->vce_level_count++;
	}

	ret = kv_copy_bytes_to_smc(rdev,
	    pi->dpm_table_start +
	    offsetof(SMU7_Fusion_DpmTable, VceLevelCount),
	    (u8 *)&pi->vce_level_count,
	    sizeof(u8),
	    pi->sram_end);
	if (ret)
		return ret;

	pi->vce_interval = 1;

	ret = kv_copy_bytes_to_smc(rdev,
	    pi->dpm_table_start +
	    offsetof(SMU7_Fusion_DpmTable, VCEInterval),
	    (u8 *)&pi->vce_interval,
	    sizeof(u8),
	    pi->sram_end);
	if (ret)
		return ret;

	ret = kv_copy_bytes_to_smc(rdev,
	    pi->dpm_table_start +
	    offsetof(SMU7_Fusion_DpmTable, VceLevel),
	    (u8 *)&pi->vce_level,
	    sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE,
	    pi->sram_end);

	return ret;
}

static int kv_populate_samu_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
	    &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->samu_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    pi->high_voltage_t < table->entries[i].v)
			break;

		pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
		pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		pi->samu_level[i].ClkBypassCntl =
		    (u8)kv_get_clk_bypass(rdev, table->entries[i].clk);

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
		    table->entries[i].clk, false, &dividers);
		if (ret)
			return ret;
		pi->samu_level[i].Divider = (u8)dividers.post_div;

		pi->samu_level_count++;
	}

	ret = kv_copy_bytes_to_smc(rdev,
	    pi->dpm_table_start +
	    offsetof(SMU7_Fusion_DpmTable, SamuLevelCount),
	    (u8 *)&pi->samu_level_count,
	    sizeof(u8),
	    pi->sram_end);
	if (ret)
		return ret;

	pi->samu_interval = 1;

	ret = kv_copy_bytes_to_smc(rdev,
	    pi->dpm_table_start +
	    offsetof(SMU7_Fusion_DpmTable, SAMUInterval),
	    (u8 *)&pi->samu_interval,
	    sizeof(u8),
	    pi->sram_end);
	if (ret)
		return ret;

	ret = kv_copy_bytes_to_smc(rdev,
	    pi->dpm_table_start +
	    offsetof(SMU7_Fusion_DpmTable, SamuLevel),
	    (u8 *)&pi->samu_level,
	    sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU,
	    pi->sram_end);
	if (ret)
		return ret;

	return ret;
}

static int kv_populate_acp_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
	    &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->acp_level_count = 0;
	for (i = 0; i < table->count; i++) {
		pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
		pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
		    table->entries[i].clk, false, &dividers);
		if (ret)
			return ret;
		pi->acp_level[i].Divider = (u8)dividers.post_div;

		pi->acp_level_count++;
	}

	ret = kv_copy_bytes_to_smc(rdev,
	    pi->dpm_table_start +
	    offsetof(SMU7_Fusion_DpmTable, AcpLevelCount),
	    (u8 *)&pi->acp_level_count,
	    sizeof(u8),
	    pi->sram_end);
	if (ret)
		return ret;

	pi->acp_interval = 1;

	ret = kv_copy_bytes_to_smc(rdev,
	    pi->dpm_table_start +
	    offsetof(SMU7_Fusion_DpmTable, ACPInterval),
	    (u8 *)&pi->acp_interval,
	    sizeof(u8),
	    pi->sram_end);
	if (ret)
		return ret;

	ret = kv_copy_bytes_to_smc(rdev,
	    pi->dpm_table_start +
	    offsetof(SMU7_Fusion_DpmTable, AcpLevel),
	    (u8 *)&pi->acp_level,
	    sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP,
	    pi->sram_end);
	if (ret)
		return ret;

	return ret;
}

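/*
 * Fill in the per-level DFS bypass dividers for the graphics levels,
 * using the same clock-proximity rules as kv_get_clk_bypass().
 */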
static void kv_calculate_dfs_bypass_settings(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	struct radeon_clock_voltage_dependency_table *table =
	    &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if (pi->caps_enable_dfs_bypass) {
				if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 3;
				else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 2;
				else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200)
					pi->graphics_level[i].ClkBypassCntl = 7;
				else if (kv_get_clock_difference(table->entries[i].clk, 20000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 6;
				else if (kv_get_clock_difference(table->entries[i].clk, 10000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 8;
				else
					pi->graphics_level[i].ClkBypassCntl = 0;
			} else {
				pi->graphics_level[i].ClkBypassCntl = 0;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
		    &pi->sys_info.sclk_voltage_mapping_table;
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if (pi->caps_enable_dfs_bypass) {
				if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 3;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 2;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200)
					pi->graphics_level[i].ClkBypassCntl = 7;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 6;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 8;
				else
					pi->graphics_level[i].ClkBypassCntl = 0;
			} else {
				pi->graphics_level[i].ClkBypassCntl = 0;
			}
		}
	}
}

static int kv_enable_ulv(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
	    PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
}

static void kv_reset_acp_boot_level(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->acp_boot_level = 0xff;
}

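/*
 * Cache local copies of the current and requested power states so the
 * ps_priv pointers stay valid across state switches.
 */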
static void kv_update_current_ps(struct radeon_device *rdev,
    struct radeon_ps *rps)
{
	struct kv_ps *new_ps = kv_get_ps(rps);
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->current_rps = *rps;
	pi->current_ps = *new_ps;
	pi->current_rps.ps_priv = &pi->current_ps;
}

static void kv_update_requested_ps(struct radeon_device *rdev,
    struct radeon_ps *rps)
{
	struct kv_ps *new_ps = kv_get_ps(rps);
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->requested_rps = *rps;
	pi->requested_ps = *new_ps;
	pi->requested_rps.ps_priv = &pi->requested_ps;
}

void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	if (pi->bapm_enable) {
		ret = kv_smc_bapm_enable(rdev, enable);
		if (ret)
			DRM_ERROR("kv_smc_bapm_enable failed\n");
	}
}

static void kv_enable_thermal_int(struct radeon_device *rdev, bool enable)
{
	u32 thermal_int;

	thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL);
	if (enable)
		thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK;
	else
		thermal_int &= ~(THERM_INTH_MASK | THERM_INTL_MASK);
	WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int);
}

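/*
 * Bring up DPM: parse the firmware header, build and upload the graphics,
 * UVD, VCE, SAMU and ACP level tables, then enable the individual power
 * features (thermal throttling, voltage scaling, ULV, DIDT, CAC).
 */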
int kv_dpm_enable(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	ret = kv_process_firmware_header(rdev);
	if (ret) {
		DRM_ERROR("kv_process_firmware_header failed\n");
		return ret;
	}
	kv_init_fps_limits(rdev);
	kv_init_graphics_levels(rdev);
	ret = kv_program_bootup_state(rdev);
	if (ret) {
		DRM_ERROR("kv_program_bootup_state failed\n");
		return ret;
	}
	kv_calculate_dfs_bypass_settings(rdev);
	ret = kv_upload_dpm_settings(rdev);
	if (ret) {
		DRM_ERROR("kv_upload_dpm_settings failed\n");
		return ret;
	}
	ret = kv_populate_uvd_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_uvd_table failed\n");
		return ret;
	}
	ret = kv_populate_vce_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_vce_table failed\n");
		return ret;
	}
	ret = kv_populate_samu_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_samu_table failed\n");
		return ret;
	}
	ret = kv_populate_acp_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_acp_table failed\n");
		return ret;
	}
	kv_program_vc(rdev);
#if 0
	kv_initialize_hardware_cac_manager(rdev);
#endif
	kv_start_am(rdev);
	if (pi->enable_auto_thermal_throttling) {
		ret = kv_enable_auto_thermal_throttling(rdev);
		if (ret) {
			DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
			return ret;
		}
	}
	ret = kv_enable_dpm_voltage_scaling(rdev);
	if (ret) {
		DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
		return ret;
	}
	ret = kv_set_dpm_interval(rdev);
	if (ret) {
		DRM_ERROR("kv_set_dpm_interval failed\n");
		return ret;
	}
	ret = kv_set_dpm_boot_state(rdev);
	if (ret) {
		DRM_ERROR("kv_set_dpm_boot_state failed\n");
		return ret;
	}
	ret = kv_enable_ulv(rdev, true);
	if (ret) {
		DRM_ERROR("kv_enable_ulv failed\n");
		return ret;
	}
	kv_start_dpm(rdev);
	ret = kv_enable_didt(rdev, true);
	if (ret) {
		DRM_ERROR("kv_enable_didt failed\n");
		return ret;
	}
	ret = kv_enable_smc_cac(rdev, true);
	if (ret) {
		DRM_ERROR("kv_enable_smc_cac failed\n");
		return ret;
	}

	kv_reset_acp_boot_level(rdev);

	ret = kv_smc_bapm_enable(rdev, false);
	if (ret) {
		DRM_ERROR("kv_smc_bapm_enable failed\n");
		return ret;
	}

	kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);

	return ret;
}

int kv_dpm_late_enable(struct radeon_device *rdev)
{
	int ret = 0;

	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
		if (ret) {
			DRM_ERROR("kv_set_thermal_temperature_range failed\n");
			return ret;
		}
		kv_enable_thermal_int(rdev, true);
	}

	/* powerdown unused blocks for now */
	kv_dpm_powergate_acp(rdev, true);
	kv_dpm_powergate_samu(rdev, true);
	kv_dpm_powergate_vce(rdev, true);
	kv_dpm_powergate_uvd(rdev, true);

	return ret;
}

void kv_dpm_disable(struct radeon_device *rdev)
{
	kv_smc_bapm_enable(rdev, false);

	if (rdev->family == CHIP_MULLINS)
		kv_enable_nb_dpm(rdev, false);

	/* powerup blocks */
	kv_dpm_powergate_acp(rdev, false);
	kv_dpm_powergate_samu(rdev, false);
	kv_dpm_powergate_vce(rdev, false);
	kv_dpm_powergate_uvd(rdev, false);

	kv_enable_smc_cac(rdev, false);
	kv_enable_didt(rdev, false);
	kv_clear_vc(rdev);
	kv_stop_dpm(rdev);
	kv_enable_ulv(rdev, false);
	kv_reset_am(rdev);
	kv_enable_thermal_int(rdev, false);

	kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
}

#if 0
static int kv_write_smc_soft_register(struct radeon_device *rdev,
    u16 reg_offset, u32 value)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	return kv_copy_bytes_to_smc(rdev, pi->soft_regs_start + reg_offset,
	    (u8 *)&value, sizeof(u16), pi->sram_end);
}

static int kv_read_smc_soft_register(struct radeon_device *rdev,
    u16 reg_offset, u32 *value)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	return kv_read_smc_sram_dword(rdev, pi->soft_regs_start + reg_offset,
	    value, pi->sram_end);
}
#endif

static void kv_init_sclk_t(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->low_sclk_interrupt_t = 0;
}

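/*
 * If FPS limits are supported, program the high (45) and low (30) FPS
 * thresholds the SMC watches.
 */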
static int kv_init_fps_limits(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret = 0;

	if (pi->caps_fps) {
		u16 tmp;

		tmp = 45;
		pi->fps_high_t = cpu_to_be16(tmp);
		ret = kv_copy_bytes_to_smc(rdev,
		    pi->dpm_table_start +
		    offsetof(SMU7_Fusion_DpmTable, FpsHighT),
		    (u8 *)&pi->fps_high_t,
		    sizeof(u16), pi->sram_end);

		tmp = 30;
		pi->fps_low_t = cpu_to_be16(tmp);

		ret = kv_copy_bytes_to_smc(rdev,
		    pi->dpm_table_start +
		    offsetof(SMU7_Fusion_DpmTable, FpsLowT),
		    (u8 *)&pi->fps_low_t,
		    sizeof(u16), pi->sram_end);
	}
	return ret;
}

static void kv_init_powergate_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->uvd_power_gated = false;
	pi->vce_power_gated = false;
	pi->samu_power_gated = false;
	pi->acp_power_gated = false;
}

static int kv_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
	    PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable);
}

static int kv_enable_vce_dpm(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
	    PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable);
}

static int kv_enable_samu_dpm(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
	    PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable);
}

static int kv_enable_acp_dpm(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
	    PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable);
}

static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_uvd_clock_voltage_dependency_table *table =
	    &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	int ret;
	u32 mask;

	if (!gate) {
		if (table->count)
			pi->uvd_boot_level = table->count - 1;
		else
			pi->uvd_boot_level = 0;

		if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) {
			mask = 1 << pi->uvd_boot_level;
		} else {
			mask = 0x1f;
		}

		ret = kv_copy_bytes_to_smc(rdev,
		    pi->dpm_table_start +
		    offsetof(SMU7_Fusion_DpmTable, UvdBootLevel),
		    (uint8_t *)&pi->uvd_boot_level,
		    sizeof(u8), pi->sram_end);
		if (ret)
			return ret;

		kv_send_msg_to_smc_with_parameter(rdev,
		    PPSMC_MSG_UVDDPM_SetEnabledMask,
		    mask);
	}

	return kv_enable_uvd_dpm(rdev, !gate);
}

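/* Find the lowest VCE level whose evclk satisfies the requested one. */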
static u8 kv_get_vce_boot_level(struct radeon_device *rdev, u32 evclk)
{
	u8 i;
	struct radeon_vce_clock_voltage_dependency_table *table =
	    &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;

	for (i = 0; i < table->count; i++) {
		if (table->entries[i].evclk >= evclk)
			break;
	}

	return i;
}

static int kv_update_vce_dpm(struct radeon_device *rdev,
    struct radeon_ps *radeon_new_state,
    struct radeon_ps *radeon_current_state)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_vce_clock_voltage_dependency_table *table =
	    &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	int ret;

	if (radeon_new_state->evclk > 0 && radeon_current_state->evclk == 0) {
		kv_dpm_powergate_vce(rdev, false);
		/* turn the clocks on when encoding */
		cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false);
		if (pi->caps_stable_p_state)
			pi->vce_boot_level = table->count - 1;
		else
			pi->vce_boot_level = kv_get_vce_boot_level(rdev, radeon_new_state->evclk);

		ret = kv_copy_bytes_to_smc(rdev,
		    pi->dpm_table_start +
		    offsetof(SMU7_Fusion_DpmTable, VceBootLevel),
		    (u8 *)&pi->vce_boot_level,
		    sizeof(u8),
		    pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			kv_send_msg_to_smc_with_parameter(rdev,
			    PPSMC_MSG_VCEDPM_SetEnabledMask,
			    (1 << pi->vce_boot_level));

		kv_enable_vce_dpm(rdev, true);
	} else if (radeon_new_state->evclk == 0 && radeon_current_state->evclk > 0) {
		kv_enable_vce_dpm(rdev, false);
		/* turn the clocks off when not encoding */
		cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true);
		kv_dpm_powergate_vce(rdev, true);
	}

	return 0;
}

static int kv_update_samu_dpm(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
	    &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	int ret;

	if (!gate) {
		if (pi->caps_stable_p_state)
			pi->samu_boot_level = table->count - 1;
		else
			pi->samu_boot_level = 0;

		ret = kv_copy_bytes_to_smc(rdev,
		    pi->dpm_table_start +
		    offsetof(SMU7_Fusion_DpmTable, SamuBootLevel),
		    (u8 *)&pi->samu_boot_level,
		    sizeof(u8),
		    pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			kv_send_msg_to_smc_with_parameter(rdev,
			    PPSMC_MSG_SAMUDPM_SetEnabledMask,
			    (1 << pi->samu_boot_level));
	}

	return kv_enable_samu_dpm(rdev, !gate);
}

static u8 kv_get_acp_boot_level(struct radeon_device *rdev)
{
	u8 i;
	struct radeon_clock_voltage_dependency_table *table =
	    &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;

	for (i = 0; i < table->count; i++) {
#if 0	/* XXX Upstream has changed this to make sense. */
		if (table->entries[i].clk >= 0) /* XXX */
			break;
#endif
	}

	if (i >= table->count)
		i = table->count - 1;

	return i;
}

static void kv_update_acp_boot_level(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u8 acp_boot_level;

	if (!pi->caps_stable_p_state) {
		acp_boot_level = kv_get_acp_boot_level(rdev);
		if (acp_boot_level != pi->acp_boot_level) {
			pi->acp_boot_level = acp_boot_level;
			kv_send_msg_to_smc_with_parameter(rdev,
			    PPSMC_MSG_ACPDPM_SetEnabledMask,
			    (1 << pi->acp_boot_level));
		}
	}
}

static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
	    &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
	int ret;

	if (!gate) {
		if (pi->caps_stable_p_state)
			pi->acp_boot_level = table->count - 1;
		else
			pi->acp_boot_level = kv_get_acp_boot_level(rdev);

		ret = kv_copy_bytes_to_smc(rdev,
		    pi->dpm_table_start +
		    offsetof(SMU7_Fusion_DpmTable, AcpBootLevel),
		    (u8 *)&pi->acp_boot_level,
		    sizeof(u8),
		    pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			kv_send_msg_to_smc_with_parameter(rdev,
			    PPSMC_MSG_ACPDPM_SetEnabledMask,
			    (1 << pi->acp_boot_level));
	}

	return kv_enable_acp_dpm(rdev, !gate);
}

void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->uvd_power_gated == gate)
		return;

	pi->uvd_power_gated = gate;

	if (gate) {
		if (pi->caps_uvd_pg) {
			uvd_v1_0_stop(rdev);
			cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, false);
		}
		kv_update_uvd_dpm(rdev, gate);
		if (pi->caps_uvd_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerOFF);
	} else {
		if (pi->caps_uvd_pg) {
			kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerON);
			uvd_v4_2_resume(rdev);
			uvd_v1_0_start(rdev);
			cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, true);
		}
		kv_update_uvd_dpm(rdev, gate);
	}
}

static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->vce_power_gated == gate)
		return;

	pi->vce_power_gated = gate;

	if (gate) {
		if (pi->caps_vce_pg) {
			/* XXX do we need a vce_v1_0_stop() ? */
			kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerOFF);
		}
	} else {
		if (pi->caps_vce_pg) {
			kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerON);
			vce_v2_0_resume(rdev);
			vce_v1_0_start(rdev);
		}
	}
}

static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->samu_power_gated == gate)
		return;

	pi->samu_power_gated = gate;

	if (gate) {
		kv_update_samu_dpm(rdev, true);
		if (pi->caps_samu_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerOFF);
	} else {
		if (pi->caps_samu_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerON);
		kv_update_samu_dpm(rdev, false);
	}
}

static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->acp_power_gated == gate)
		return;

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
		return;

	pi->acp_power_gated = gate;

	if (gate) {
		kv_update_acp_dpm(rdev, true);
		if (pi->caps_acp_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerOFF);
	} else {
		if (pi->caps_acp_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerON);
		kv_update_acp_dpm(rdev, false);
	}
}

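/*
 * Clamp the enabled DPM level range to the sclk span of the new power
 * state, falling back to the closer bound if the range comes out empty.
 */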
static void kv_set_valid_clock_range(struct radeon_device *rdev,
    struct radeon_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	struct radeon_clock_voltage_dependency_table *table =
	    &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if ((table->entries[i].clk >= new_ps->levels[0].sclk) ||
			    (i == (pi->graphics_dpm_level_count - 1))) {
				pi->lowest_valid = i;
				break;
			}
		}

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk)
				break;
		}
		pi->highest_valid = i;

		if (pi->lowest_valid > pi->highest_valid) {
			if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
			    (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk))
				pi->highest_valid = pi->lowest_valid;
			else
				pi->lowest_valid = pi->highest_valid;
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
		    &pi->sys_info.sclk_voltage_mapping_table;

		for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) {
			if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk ||
			    i == (int)(pi->graphics_dpm_level_count - 1)) {
				pi->lowest_valid = i;
				break;
			}
		}

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].sclk_frequency <=
			    new_ps->levels[new_ps->num_levels - 1].sclk)
				break;
		}
		pi->highest_valid = i;

		if (pi->lowest_valid > pi->highest_valid) {
			if ((new_ps->levels[0].sclk -
			    table->entries[pi->highest_valid].sclk_frequency) >
			    (table->entries[pi->lowest_valid].sclk_frequency -
			    new_ps->levels[new_ps->num_levels - 1].sclk))
				pi->highest_valid = pi->lowest_valid;
			else
				pi->lowest_valid = pi->highest_valid;
		}
	}
}

static int kv_update_dfs_bypass_settings(struct radeon_device *rdev,
    struct radeon_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret = 0;
	u8 clk_bypass_cntl;

	if (pi->caps_enable_dfs_bypass) {
		clk_bypass_cntl = new_ps->need_dfs_bypass ?
		    pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0;
		ret = kv_copy_bytes_to_smc(rdev,
		    (pi->dpm_table_start +
		    offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) +
		    (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) +
		    offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)),
		    &clk_bypass_cntl,
		    sizeof(u8), pi->sram_end);
	}

	return ret;
}

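/* Enable or disable northbridge DPM via the SMC, tracking the result. */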
static int kv_enable_nb_dpm(struct radeon_device *rdev,
    bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret = 0;

	if (enable) {
		if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
			ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable);
			if (ret == 0)
				pi->nb_dpm_enabled = true;
		}
	} else {
		if (pi->enable_nb_dpm && pi->nb_dpm_enabled) {
			ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Disable);
			if (ret == 0)
				pi->nb_dpm_enabled = false;
		}
	}

	return ret;
}

int kv_dpm_force_performance_level(struct radeon_device *rdev,
    enum radeon_dpm_forced_level level)
{
	int ret;

	if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
		ret = kv_force_dpm_highest(rdev);
		if (ret)
			return ret;
	} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
		ret = kv_force_dpm_lowest(rdev);
		if (ret)
			return ret;
	} else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
		ret = kv_unforce_levels(rdev);
		if (ret)
			return ret;
	}

	rdev->pm.dpm.forced_level = level;

	return 0;
}

int kv_dpm_pre_set_power_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
	struct radeon_ps *new_ps = &requested_ps;

	kv_update_requested_ps(rdev, new_ps);

	kv_apply_state_adjust_rules(rdev,
	    &pi->requested_rps,
	    &pi->current_rps);

	return 0;
}

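/*
 * Apply the requested power state. Kabini/Mullins reprogram the levels by
 * forcing the lowest valid level around the table upload; other parts
 * freeze and unfreeze sclk DPM instead.
 */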
int kv_dpm_set_power_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_ps *new_ps = &pi->requested_rps;
	struct radeon_ps *old_ps = &pi->current_rps;
	int ret;

	if (pi->bapm_enable) {
		ret = kv_smc_bapm_enable(rdev, rdev->pm.dpm.ac_power);
		if (ret) {
			DRM_ERROR("kv_smc_bapm_enable failed\n");
			return ret;
		}
	}

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
		if (pi->enable_dpm) {
			kv_set_valid_clock_range(rdev, new_ps);
			kv_update_dfs_bypass_settings(rdev, new_ps);
			ret = kv_calculate_ds_divider(rdev);
			if (ret) {
				DRM_ERROR("kv_calculate_ds_divider failed\n");
				return ret;
			}
			kv_calculate_nbps_level_settings(rdev);
			kv_calculate_dpm_settings(rdev);
			kv_force_lowest_valid(rdev);
			kv_enable_new_levels(rdev);
			kv_upload_dpm_settings(rdev);
			kv_program_nbps_index_settings(rdev, new_ps);
			kv_unforce_levels(rdev);
			kv_set_enabled_levels(rdev);
			kv_force_lowest_valid(rdev);
			kv_unforce_levels(rdev);

			ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
			if (ret) {
				DRM_ERROR("kv_update_vce_dpm failed\n");
				return ret;
			}
			kv_update_sclk_t(rdev);
			if (rdev->family == CHIP_MULLINS)
				kv_enable_nb_dpm(rdev, true);
		}
	} else {
		if (pi->enable_dpm) {
			kv_set_valid_clock_range(rdev, new_ps);
			kv_update_dfs_bypass_settings(rdev, new_ps);
			ret = kv_calculate_ds_divider(rdev);
			if (ret) {
				DRM_ERROR("kv_calculate_ds_divider failed\n");
				return ret;
			}
			kv_calculate_nbps_level_settings(rdev);
			kv_calculate_dpm_settings(rdev);
			kv_freeze_sclk_dpm(rdev, true);
			kv_upload_dpm_settings(rdev);
			kv_program_nbps_index_settings(rdev, new_ps);
			kv_freeze_sclk_dpm(rdev, false);
			kv_set_enabled_levels(rdev);
			ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
			if (ret) {
				DRM_ERROR("kv_update_vce_dpm failed\n");
				return ret;
			}
			kv_update_acp_boot_level(rdev);
			kv_update_sclk_t(rdev);
			kv_enable_nb_dpm(rdev, true);
		}
	}

	return 0;
}

void kv_dpm_post_set_power_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_ps *new_ps = &pi->requested_rps;

	kv_update_current_ps(rdev, new_ps);
}

void kv_dpm_setup_asic(struct radeon_device *rdev)
{
	sumo_take_smu_control(rdev, true);
	kv_init_powergate_state(rdev);
	kv_init_sclk_t(rdev);
}

#if 0
void kv_dpm_reset_asic(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
		kv_force_lowest_valid(rdev);
		kv_init_graphics_levels(rdev);
		kv_program_bootup_state(rdev);
		kv_upload_dpm_settings(rdev);
		kv_force_lowest_valid(rdev);
		kv_unforce_levels(rdev);
	} else {
		kv_init_graphics_levels(rdev);
		kv_program_bootup_state(rdev);
		kv_freeze_sclk_dpm(rdev, true);
		kv_upload_dpm_settings(rdev);
		kv_freeze_sclk_dpm(rdev, false);
		kv_set_enabled_level(rdev, pi->graphics_boot_level);
	}
}
#endif

//XXX use sumo_dpm_display_configuration_changed

1973 static void kv_construct_max_power_limits_table(struct radeon_device *rdev,
1974 struct radeon_clock_and_voltage_limits *table)
1975 {
1976 struct kv_power_info *pi = kv_get_pi(rdev);
1977
1978 if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) {
1979 int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1;
1980 table->sclk =
1981 pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency;
1982 table->vddc =
1983 kv_convert_2bit_index_to_voltage(rdev,
1984 pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit);
1985 }
1986
1987 table->mclk = pi->sys_info.nbp_memory_clock[0];
1988 }
1989
1990 static void kv_patch_voltage_values(struct radeon_device *rdev)
1991 {
1992 int i;
1993 struct radeon_uvd_clock_voltage_dependency_table *uvd_table =
1994 &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
1995 struct radeon_vce_clock_voltage_dependency_table *vce_table =
1996 &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
1997 struct radeon_clock_voltage_dependency_table *samu_table =
1998 &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
1999 struct radeon_clock_voltage_dependency_table *acp_table =
2000 &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
2001
2002 if (uvd_table->count) {
2003 for (i = 0; i < uvd_table->count; i++)
2004 uvd_table->entries[i].v =
2005 kv_convert_8bit_index_to_voltage(rdev,
2006 uvd_table->entries[i].v);
2007 }
2008
2009 if (vce_table->count) {
2010 for (i = 0; i < vce_table->count; i++)
2011 vce_table->entries[i].v =
2012 kv_convert_8bit_index_to_voltage(rdev,
2013 vce_table->entries[i].v);
2014 }
2015
2016 if (samu_table->count) {
2017 for (i = 0; i < samu_table->count; i++)
2018 samu_table->entries[i].v =
2019 kv_convert_8bit_index_to_voltage(rdev,
2020 samu_table->entries[i].v);
2021 }
2022
2023 if (acp_table->count) {
2024 for (i = 0; i < acp_table->count; i++)
2025 acp_table->entries[i].v =
2026 kv_convert_8bit_index_to_voltage(rdev,
2027 acp_table->entries[i].v);
2028 }
2029
2030 }
2031
2032 static void kv_construct_boot_state(struct radeon_device *rdev)
2033 {
2034 struct kv_power_info *pi = kv_get_pi(rdev);
2035
2036 pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
2037 pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
2038 pi->boot_pl.ds_divider_index = 0;
2039 pi->boot_pl.ss_divider_index = 0;
2040 pi->boot_pl.allow_gnb_slow = 1;
2041 pi->boot_pl.force_nbp_state = 0;
2042 pi->boot_pl.display_wm = 0;
2043 pi->boot_pl.vce_wm = 0;
2044 }
2045
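/*
 * Force the top enabled level: scan the SMU enable mask from bit
 * SMU7_MAX_LEVELS_GRAPHICS - 1 downward for the first set bit (the
 * _lowest variant below scans upward).  E.g. an enable_mask of 0x1c
 * forces level 4 here and level 2 below.  Kabini/Mullins use the
 * ForceState message; other parts shrink the enabled mask instead.
 */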
2046 static int kv_force_dpm_highest(struct radeon_device *rdev)
2047 {
2048 int ret;
2049 u32 enable_mask, i;
2050
2051 ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
2052 if (ret)
2053 return ret;
2054
2055 for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) {
2056 if (enable_mask & (1 << i))
2057 break;
2058 }
2059
2060 if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
2061 return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
2062 else
2063 return kv_set_enabled_level(rdev, i);
2064 }
2065
2066 static int kv_force_dpm_lowest(struct radeon_device *rdev)
2067 {
2068 int ret;
2069 u32 enable_mask, i;
2070
2071 ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
2072 if (ret)
2073 return ret;
2074
2075 for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
2076 if (enable_mask & (1 << i))
2077 break;
2078 }
2079
2080 if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
2081 return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
2082 else
2083 return kv_set_enabled_level(rdev, i);
2084 }
2085
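/*
 * Pick the largest deep-sleep divider ID that keeps the divided sclk
 * at or above both min_sclk_in_sr and KV_MINIMUM_ENGINE_CLOCK; returns
 * 0 (no deep sleep) when sclk is already below that floor or SCLK deep
 * sleep is unsupported.
 */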
2086 static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
2087 u32 sclk, u32 min_sclk_in_sr)
2088 {
2089 struct kv_power_info *pi = kv_get_pi(rdev);
2090 u32 i;
2091 u32 temp;
2092 u32 min = (min_sclk_in_sr > KV_MINIMUM_ENGINE_CLOCK) ?
2093 min_sclk_in_sr : KV_MINIMUM_ENGINE_CLOCK;
2094
2095 if (sclk < min)
2096 return 0;
2097
2098 if (!pi->caps_sclk_ds)
2099 return 0;
2100
2101 for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
2102 temp = sclk / sumo_get_sleep_divider_from_id(i);
2103 if (temp >= min)
2104 break;
2105 }
2106
2107 return (u8)i;
2108 }
2109
2110 static int kv_get_high_voltage_limit(struct radeon_device *rdev, int *limit)
2111 {
2112 struct kv_power_info *pi = kv_get_pi(rdev);
2113 struct radeon_clock_voltage_dependency_table *table =
2114 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
2115 int i;
2116
2117 if (table && table->count) {
2118 for (i = table->count - 1; i >= 0; i--) {
2119 if (pi->high_voltage_t &&
2120 (kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v) <=
2121 pi->high_voltage_t)) {
2122 *limit = i;
2123 return 0;
2124 }
2125 }
2126 } else {
2127 struct sumo_sclk_voltage_mapping_table *table =
2128 &pi->sys_info.sclk_voltage_mapping_table;
2129
2130 for (i = table->num_max_dpm_entries - 1; i >= 0; i--) {
2131 if (pi->high_voltage_t &&
2132 (kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit) <=
2133 pi->high_voltage_t)) {
2134 *limit = i;
2135 return 0;
2136 }
2137 }
2138 }
2139
2140 *limit = 0;
2141 return 0;
2142 }
2143
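/*
 * Adjust the requested state to the platform: apply the minimum sclk
 * (raised for stable-p-state and active VCE), clamp levels whose
 * voltage exceeds high_voltage_t back to the limit found by
 * kv_get_high_voltage_limit(), and pick per-family NB P-state hints.
 */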
2144 static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
2145 struct radeon_ps *new_rps,
2146 struct radeon_ps *old_rps)
2147 {
2148 struct kv_ps *ps = kv_get_ps(new_rps);
2149 struct kv_power_info *pi = kv_get_pi(rdev);
2150 u32 min_sclk = 10000; /* ??? */
2151 u32 sclk, mclk = 0;
2152 int i, limit;
2153 bool force_high;
2154 struct radeon_clock_voltage_dependency_table *table =
2155 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
2156 u32 stable_p_state_sclk = 0;
2157 struct radeon_clock_and_voltage_limits *max_limits =
2158 &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
2159
2160 if (new_rps->vce_active) {
2161 new_rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
2162 new_rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk;
2163 } else {
2164 new_rps->evclk = 0;
2165 new_rps->ecclk = 0;
2166 }
2167
2168 mclk = max_limits->mclk;
2169 sclk = min_sclk;
2170
2171 if (pi->caps_stable_p_state) {
2172 stable_p_state_sclk = (max_limits->sclk * 75) / 100;
2173
 2174 		for (i = table->count - 1; i >= 0; i--) {
2175 if (stable_p_state_sclk >= table->entries[i].clk) {
2176 stable_p_state_sclk = table->entries[i].clk;
2177 break;
2178 }
2179 }
2180
2181 if (i > 0)
2182 stable_p_state_sclk = table->entries[0].clk;
2183
2184 sclk = stable_p_state_sclk;
2185 }
2186
2187 if (new_rps->vce_active) {
2188 if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk)
2189 sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk;
2190 }
2191
2192 ps->need_dfs_bypass = true;
2193
2194 for (i = 0; i < ps->num_levels; i++) {
2195 if (ps->levels[i].sclk < sclk)
2196 ps->levels[i].sclk = sclk;
2197 }
2198
2199 if (table && table->count) {
2200 for (i = 0; i < ps->num_levels; i++) {
2201 if (pi->high_voltage_t &&
2202 (pi->high_voltage_t <
2203 kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) {
2204 kv_get_high_voltage_limit(rdev, &limit);
2205 ps->levels[i].sclk = table->entries[limit].clk;
2206 }
2207 }
2208 } else {
2209 struct sumo_sclk_voltage_mapping_table *table =
2210 &pi->sys_info.sclk_voltage_mapping_table;
2211
2212 for (i = 0; i < ps->num_levels; i++) {
2213 if (pi->high_voltage_t &&
2214 (pi->high_voltage_t <
2215 kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) {
2216 kv_get_high_voltage_limit(rdev, &limit);
2217 ps->levels[i].sclk = table->entries[limit].sclk_frequency;
2218 }
2219 }
2220 }
2221
2222 if (pi->caps_stable_p_state) {
2223 for (i = 0; i < ps->num_levels; i++) {
2224 ps->levels[i].sclk = stable_p_state_sclk;
2225 }
2226 }
2227
2228 pi->video_start = new_rps->dclk || new_rps->vclk ||
2229 new_rps->evclk || new_rps->ecclk;
2230
2231 if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
2232 ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
2233 pi->battery_state = true;
2234 else
2235 pi->battery_state = false;
2236
2237 if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
2238 ps->dpm0_pg_nb_ps_lo = 0x1;
2239 ps->dpm0_pg_nb_ps_hi = 0x0;
2240 ps->dpmx_nb_ps_lo = 0x1;
2241 ps->dpmx_nb_ps_hi = 0x0;
2242 } else {
2243 ps->dpm0_pg_nb_ps_lo = 0x3;
2244 ps->dpm0_pg_nb_ps_hi = 0x0;
2245 ps->dpmx_nb_ps_lo = 0x3;
2246 ps->dpmx_nb_ps_hi = 0x0;
2247
2248 if (pi->sys_info.nb_dpm_enable) {
2249 force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
2250 pi->video_start || (rdev->pm.dpm.new_active_crtc_count >= 3) ||
2251 pi->disable_nb_ps3_in_battery;
2252 ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3;
2253 ps->dpm0_pg_nb_ps_hi = 0x2;
2254 ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3;
2255 ps->dpmx_nb_ps_hi = 0x2;
2256 }
2257 }
2258 }
2259
2260 static void kv_dpm_power_level_enabled_for_throttle(struct radeon_device *rdev,
2261 u32 index, bool enable)
2262 {
2263 struct kv_power_info *pi = kv_get_pi(rdev);
2264
2265 pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0;
2266 }
2267
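/*
 * SclkFrequency lives big-endian in the graphics level table as
 * uploaded to the SMU, hence the be32_to_cpu() before deriving each
 * level's deep-sleep divider ID.
 */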
2268 static int kv_calculate_ds_divider(struct radeon_device *rdev)
2269 {
2270 struct kv_power_info *pi = kv_get_pi(rdev);
2271 u32 sclk_in_sr = 10000; /* ??? */
2272 u32 i;
2273
2274 if (pi->lowest_valid > pi->highest_valid)
2275 return -EINVAL;
2276
2277 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
2278 pi->graphics_level[i].DeepSleepDivId =
2279 kv_get_sleep_divider_id_from_clock(rdev,
2280 be32_to_cpu(pi->graphics_level[i].SclkFrequency),
2281 sclk_in_sr);
2282 }
2283 return 0;
2284 }
2285
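/*
 * Per-level NB hints: GnbSlow allows the GNB to run slow and
 * ForceNbPs1 pins NB P-state 1.  A high memory clock, three or more
 * active CRTCs, or video playback forces the fast path on
 * Kabini/Mullins; other parts only special-case the lowest level on
 * battery.
 */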
2286 static int kv_calculate_nbps_level_settings(struct radeon_device *rdev)
2287 {
2288 struct kv_power_info *pi = kv_get_pi(rdev);
2289 u32 i;
2290 bool force_high;
2291 struct radeon_clock_and_voltage_limits *max_limits =
2292 &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
2293 u32 mclk = max_limits->mclk;
2294
2295 if (pi->lowest_valid > pi->highest_valid)
2296 return -EINVAL;
2297
2298 if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
2299 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
2300 pi->graphics_level[i].GnbSlow = 1;
2301 pi->graphics_level[i].ForceNbPs1 = 0;
2302 pi->graphics_level[i].UpH = 0;
2303 }
2304
2305 if (!pi->sys_info.nb_dpm_enable)
2306 return 0;
2307
2308 force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) ||
2309 (rdev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start);
2310
2311 if (force_high) {
2312 for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
2313 pi->graphics_level[i].GnbSlow = 0;
2314 } else {
2315 if (pi->battery_state)
2316 pi->graphics_level[0].ForceNbPs1 = 1;
2317
2318 pi->graphics_level[1].GnbSlow = 0;
2319 pi->graphics_level[2].GnbSlow = 0;
2320 pi->graphics_level[3].GnbSlow = 0;
2321 pi->graphics_level[4].GnbSlow = 0;
2322 }
2323 } else {
2324 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
2325 pi->graphics_level[i].GnbSlow = 1;
2326 pi->graphics_level[i].ForceNbPs1 = 0;
2327 pi->graphics_level[i].UpH = 0;
2328 }
2329
2330 if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
2331 pi->graphics_level[pi->lowest_valid].UpH = 0x28;
2332 pi->graphics_level[pi->lowest_valid].GnbSlow = 0;
2333 if (pi->lowest_valid != pi->highest_valid)
2334 pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1;
2335 }
2336 }
2337 return 0;
2338 }
2339
2340 static int kv_calculate_dpm_settings(struct radeon_device *rdev)
2341 {
2342 struct kv_power_info *pi = kv_get_pi(rdev);
2343 u32 i;
2344
2345 if (pi->lowest_valid > pi->highest_valid)
2346 return -EINVAL;
2347
2348 for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
2349 pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0;
2350
2351 return 0;
2352 }
2353
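/*
 * Build the graphics level table from the vddc-vs-sclk dependency
 * table or, failing that, the BIOS sclk/voltage mapping table,
 * skipping entries above high_voltage_t.  Every level starts disabled;
 * kv_enable_new_levels() re-enables the valid range later.
 */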
2354 static void kv_init_graphics_levels(struct radeon_device *rdev)
2355 {
2356 struct kv_power_info *pi = kv_get_pi(rdev);
2357 u32 i;
2358 struct radeon_clock_voltage_dependency_table *table =
2359 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
2360
2361 if (table && table->count) {
2362 u32 vid_2bit;
2363
2364 pi->graphics_dpm_level_count = 0;
2365 for (i = 0; i < table->count; i++) {
2366 if (pi->high_voltage_t &&
2367 (pi->high_voltage_t <
2368 kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v)))
2369 break;
2370
2371 kv_set_divider_value(rdev, i, table->entries[i].clk);
2372 vid_2bit = kv_convert_vid7_to_vid2(rdev,
2373 &pi->sys_info.vid_mapping_table,
2374 table->entries[i].v);
2375 kv_set_vid(rdev, i, vid_2bit);
2376 kv_set_at(rdev, i, pi->at[i]);
2377 kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
2378 pi->graphics_dpm_level_count++;
2379 }
2380 } else {
2381 struct sumo_sclk_voltage_mapping_table *table =
2382 &pi->sys_info.sclk_voltage_mapping_table;
2383
2384 pi->graphics_dpm_level_count = 0;
2385 for (i = 0; i < table->num_max_dpm_entries; i++) {
2386 if (pi->high_voltage_t &&
2387 pi->high_voltage_t <
2388 kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit))
2389 break;
2390
2391 kv_set_divider_value(rdev, i, table->entries[i].sclk_frequency);
2392 kv_set_vid(rdev, i, table->entries[i].vid_2bit);
2393 kv_set_at(rdev, i, pi->at[i]);
2394 kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
2395 pi->graphics_dpm_level_count++;
2396 }
2397 }
2398
2399 for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
2400 kv_dpm_power_level_enable(rdev, i, false);
2401 }
2402
2403 static void kv_enable_new_levels(struct radeon_device *rdev)
2404 {
2405 struct kv_power_info *pi = kv_get_pi(rdev);
2406 u32 i;
2407
2408 for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
2409 if (i >= pi->lowest_valid && i <= pi->highest_valid)
2410 kv_dpm_power_level_enable(rdev, i, true);
2411 }
2412 }
2413
2414 static int kv_set_enabled_level(struct radeon_device *rdev, u32 level)
2415 {
2416 u32 new_mask = (1 << level);
2417
2418 return kv_send_msg_to_smc_with_parameter(rdev,
2419 PPSMC_MSG_SCLKDPM_SetEnabledMask,
2420 new_mask);
2421 }
2422
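/*
 * Unlike kv_set_enabled_level() above, which enables exactly one
 * level, this builds a contiguous mask covering
 * [lowest_valid, highest_valid]; e.g. lowest_valid = 1 and
 * highest_valid = 3 yield a mask of 0xe.
 */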
2423 static int kv_set_enabled_levels(struct radeon_device *rdev)
2424 {
2425 struct kv_power_info *pi = kv_get_pi(rdev);
2426 u32 i, new_mask = 0;
2427
2428 for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
2429 new_mask |= (1 << i);
2430
2431 return kv_send_msg_to_smc_with_parameter(rdev,
2432 PPSMC_MSG_SCLKDPM_SetEnabledMask,
2433 new_mask);
2434 }
2435
2436 static void kv_program_nbps_index_settings(struct radeon_device *rdev,
2437 struct radeon_ps *new_rps)
2438 {
2439 struct kv_ps *new_ps = kv_get_ps(new_rps);
2440 struct kv_power_info *pi = kv_get_pi(rdev);
2441 u32 nbdpmconfig1;
2442
2443 if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
2444 return;
2445
2446 if (pi->sys_info.nb_dpm_enable) {
2447 nbdpmconfig1 = RREG32_SMC(NB_DPM_CONFIG_1);
2448 nbdpmconfig1 &= ~(Dpm0PgNbPsLo_MASK | Dpm0PgNbPsHi_MASK |
2449 DpmXNbPsLo_MASK | DpmXNbPsHi_MASK);
2450 nbdpmconfig1 |= (Dpm0PgNbPsLo(new_ps->dpm0_pg_nb_ps_lo) |
2451 Dpm0PgNbPsHi(new_ps->dpm0_pg_nb_ps_hi) |
2452 DpmXNbPsLo(new_ps->dpmx_nb_ps_lo) |
2453 DpmXNbPsHi(new_ps->dpmx_nb_ps_hi));
2454 WREG32_SMC(NB_DPM_CONFIG_1, nbdpmconfig1);
2455 }
2456 }
2457
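/*
 * Program the thermal interrupt window.  Temperatures arrive in
 * millidegrees C and are clamped into 0..255000; the DIG_THERM_INT
 * fields appear to be biased by 49 degrees, so e.g. max_temp = 120000
 * programs DIG_THERM_INTH to 169.
 */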
2458 static int kv_set_thermal_temperature_range(struct radeon_device *rdev,
2459 int min_temp, int max_temp)
2460 {
2461 int low_temp = 0 * 1000;
2462 int high_temp = 255 * 1000;
2463 u32 tmp;
2464
2465 if (low_temp < min_temp)
2466 low_temp = min_temp;
2467 if (high_temp > max_temp)
2468 high_temp = max_temp;
2469 if (high_temp < low_temp) {
2470 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
2471 return -EINVAL;
2472 }
2473
2474 tmp = RREG32_SMC(CG_THERMAL_INT_CTRL);
2475 tmp &= ~(DIG_THERM_INTH_MASK | DIG_THERM_INTL_MASK);
2476 tmp |= (DIG_THERM_INTH(49 + (high_temp / 1000)) |
2477 DIG_THERM_INTL(49 + (low_temp / 1000)));
2478 WREG32_SMC(CG_THERMAL_INT_CTRL, tmp);
2479
2480 rdev->pm.dpm.thermal.min_temp = low_temp;
2481 rdev->pm.dpm.thermal.max_temp = high_temp;
2482
2483 return 0;
2484 }
2485
2486 union igp_info {
2487 struct _ATOM_INTEGRATED_SYSTEM_INFO info;
2488 struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
2489 struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
2490 struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
2491 struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
2492 struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
2493 };
2494
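/*
 * Extract clocks, thermal limits, NB P-state tables and the
 * sclk/voltage mappings from the ATOM IntegratedSystemInfo table; only
 * table revision 8 (the info_8 layout above) is accepted.
 */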
2495 static int kv_parse_sys_info_table(struct radeon_device *rdev)
2496 {
2497 struct kv_power_info *pi = kv_get_pi(rdev);
2498 struct radeon_mode_info *mode_info = &rdev->mode_info;
2499 int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
2500 union igp_info *igp_info;
2501 u8 frev, crev;
2502 u16 data_offset;
2503 int i;
2504
2505 if (atom_parse_data_header(mode_info->atom_context, index, NULL,
2506 &frev, &crev, &data_offset)) {
2507 igp_info = (union igp_info *)(mode_info->atom_context->bios +
2508 data_offset);
2509
2510 if (crev != 8) {
2511 DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
2512 return -EINVAL;
2513 }
2514 pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock);
2515 pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock);
2516 pi->sys_info.bootup_nb_voltage_index =
2517 le16_to_cpu(igp_info->info_8.usBootUpNBVoltage);
2518 if (igp_info->info_8.ucHtcTmpLmt == 0)
2519 pi->sys_info.htc_tmp_lmt = 203;
2520 else
2521 pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt;
2522 if (igp_info->info_8.ucHtcHystLmt == 0)
2523 pi->sys_info.htc_hyst_lmt = 5;
2524 else
2525 pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt;
2526 if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
2527 DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
2528 }
2529
2530 if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3))
2531 pi->sys_info.nb_dpm_enable = true;
2532 else
2533 pi->sys_info.nb_dpm_enable = false;
2534
2535 for (i = 0; i < KV_NUM_NBPSTATES; i++) {
2536 pi->sys_info.nbp_memory_clock[i] =
2537 le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]);
2538 pi->sys_info.nbp_n_clock[i] =
2539 le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]);
2540 }
2541 if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) &
2542 SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
2543 pi->caps_enable_dfs_bypass = true;
2544
2545 sumo_construct_sclk_voltage_mapping_table(rdev,
2546 &pi->sys_info.sclk_voltage_mapping_table,
2547 igp_info->info_8.sAvail_SCLK);
2548
2549 sumo_construct_vid_mapping_table(rdev,
2550 &pi->sys_info.vid_mapping_table,
2551 igp_info->info_8.sAvail_SCLK);
2552
2553 kv_construct_max_power_limits_table(rdev,
2554 &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
2555 }
2556 return 0;
2557 }
2558
2559 union power_info {
2560 struct _ATOM_POWERPLAY_INFO info;
2561 struct _ATOM_POWERPLAY_INFO_V2 info_2;
2562 struct _ATOM_POWERPLAY_INFO_V3 info_3;
2563 struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
2564 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
2565 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
2566 };
2567
2568 union pplib_clock_info {
2569 struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
2570 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
2571 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
2572 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
2573 };
2574
2575 union pplib_power_state {
2576 struct _ATOM_PPLIB_STATE v1;
2577 struct _ATOM_PPLIB_STATE_V2 v2;
2578 };
2579
2580 static void kv_patch_boot_state(struct radeon_device *rdev,
2581 struct kv_ps *ps)
2582 {
2583 struct kv_power_info *pi = kv_get_pi(rdev);
2584
2585 ps->num_levels = 1;
2586 ps->levels[0] = pi->boot_pl;
2587 }
2588
2589 static void kv_parse_pplib_non_clock_info(struct radeon_device *rdev,
2590 struct radeon_ps *rps,
2591 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
2592 u8 table_rev)
2593 {
2594 struct kv_ps *ps = kv_get_ps(rps);
2595
2596 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
2597 rps->class = le16_to_cpu(non_clock_info->usClassification);
2598 rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
2599
2600 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
2601 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
2602 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
2603 } else {
2604 rps->vclk = 0;
2605 rps->dclk = 0;
2606 }
2607
2608 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
2609 rdev->pm.dpm.boot_ps = rps;
2610 kv_patch_boot_state(rdev, ps);
2611 }
2612 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
2613 rdev->pm.dpm.uvd_ps = rps;
2614 }
2615
2616 static void kv_parse_pplib_clock_info(struct radeon_device *rdev,
2617 struct radeon_ps *rps, int index,
2618 union pplib_clock_info *clock_info)
2619 {
2620 struct kv_power_info *pi = kv_get_pi(rdev);
2621 struct kv_ps *ps = kv_get_ps(rps);
2622 struct kv_pl *pl = &ps->levels[index];
2623 u32 sclk;
2624
2625 sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
2626 sclk |= clock_info->sumo.ucEngineClockHigh << 16;
2627 pl->sclk = sclk;
2628 pl->vddc_index = clock_info->sumo.vddcIndex;
2629
2630 ps->num_levels = index + 1;
2631
2632 if (pi->caps_sclk_ds) {
2633 pl->ds_divider_index = 5;
2634 pl->ss_divider_index = 5;
2635 }
2636 }
2637
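/*
 * Walk the PPLib state array: each state indexes into the shared
 * non-clock and clock info arrays, and up to
 * SUMO_MAX_HARDWARE_POWERLEVELS clock entries become kv_pl levels; the
 * VCE state clocks are filled in from the same clock info array
 * afterwards.
 */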
2638 static int kv_parse_power_table(struct radeon_device *rdev)
2639 {
2640 struct radeon_mode_info *mode_info = &rdev->mode_info;
2641 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
2642 union pplib_power_state *power_state;
2643 int i, j, k, non_clock_array_index, clock_array_index;
2644 union pplib_clock_info *clock_info;
2645 struct _StateArray *state_array;
2646 struct _ClockInfoArray *clock_info_array;
2647 struct _NonClockInfoArray *non_clock_info_array;
2648 union power_info *power_info;
2649 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
2650 u16 data_offset;
2651 u8 frev, crev;
2652 u8 *power_state_offset;
2653 struct kv_ps *ps;
2654
2655 if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
2656 &frev, &crev, &data_offset))
2657 return -EINVAL;
2658 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
2659
2660 state_array = (struct _StateArray *)
2661 (mode_info->atom_context->bios + data_offset +
2662 le16_to_cpu(power_info->pplib.usStateArrayOffset));
2663 clock_info_array = (struct _ClockInfoArray *)
2664 (mode_info->atom_context->bios + data_offset +
2665 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
2666 non_clock_info_array = (struct _NonClockInfoArray *)
2667 (mode_info->atom_context->bios + data_offset +
2668 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
2669
2670 rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
2671 state_array->ucNumEntries, GFP_KERNEL);
2672 if (!rdev->pm.dpm.ps)
2673 return -ENOMEM;
2674 power_state_offset = (u8 *)state_array->states;
2675 for (i = 0; i < state_array->ucNumEntries; i++) {
2676 u8 *idx;
2677 power_state = (union pplib_power_state *)power_state_offset;
2678 non_clock_array_index = power_state->v2.nonClockInfoIndex;
2679 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
2680 &non_clock_info_array->nonClockInfo[non_clock_array_index];
2681 if (!rdev->pm.power_state[i].clock_info)
2682 return -EINVAL;
2683 ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
2684 if (ps == NULL) {
2685 kfree(rdev->pm.dpm.ps);
2686 return -ENOMEM;
2687 }
2688 rdev->pm.dpm.ps[i].ps_priv = ps;
2689 k = 0;
2690 idx = (u8 *)&power_state->v2.clockInfoIndex[0];
2691 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
2692 clock_array_index = idx[j];
2693 if (clock_array_index >= clock_info_array->ucNumEntries)
2694 continue;
2695 if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
2696 break;
2697 clock_info = (union pplib_clock_info *)
2698 ((u8 *)&clock_info_array->clockInfo[0] +
2699 (clock_array_index * clock_info_array->ucEntrySize));
2700 kv_parse_pplib_clock_info(rdev,
2701 &rdev->pm.dpm.ps[i], k,
2702 clock_info);
2703 k++;
2704 }
2705 kv_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
2706 non_clock_info,
2707 non_clock_info_array->ucEntrySize);
2708 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
2709 }
2710 rdev->pm.dpm.num_ps = state_array->ucNumEntries;
2711
2712 /* fill in the vce power states */
2713 for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
2714 u32 sclk;
2715 clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx;
2716 clock_info = (union pplib_clock_info *)
2717 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
2718 sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
2719 sclk |= clock_info->sumo.ucEngineClockHigh << 16;
2720 rdev->pm.dpm.vce_states[i].sclk = sclk;
2721 rdev->pm.dpm.vce_states[i].mclk = 0;
2722 }
2723
2724 return 0;
2725 }
2726
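/*
 * One-time DPM initialization: allocate kv_power_info, parse the
 * platform and power tables, and pick feature defaults.  BAPM follows
 * the radeon_bapm module parameter, defaulting to enabled only on
 * Kabini/Mullins.
 */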
2727 int kv_dpm_init(struct radeon_device *rdev)
2728 {
2729 struct kv_power_info *pi;
2730 int ret, i;
2731
2732 pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL);
2733 if (pi == NULL)
2734 return -ENOMEM;
2735 rdev->pm.dpm.priv = pi;
2736
2737 ret = r600_get_platform_caps(rdev);
2738 if (ret)
2739 return ret;
2740
2741 ret = r600_parse_extended_power_table(rdev);
2742 if (ret)
2743 return ret;
2744
2745 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
2746 pi->at[i] = TRINITY_AT_DFLT;
2747
2748 pi->sram_end = SMC_RAM_END;
2749
2750 /* Enabling nb dpm on an asrock system prevents dpm from working */
2751 if (rdev->pdev->subsystem_vendor == 0x1849)
2752 pi->enable_nb_dpm = false;
2753 else
2754 pi->enable_nb_dpm = true;
2755
2756 pi->caps_power_containment = true;
2757 pi->caps_cac = true;
2758 pi->enable_didt = false;
2759 if (pi->enable_didt) {
2760 pi->caps_sq_ramping = true;
2761 pi->caps_db_ramping = true;
2762 pi->caps_td_ramping = true;
2763 pi->caps_tcp_ramping = true;
2764 }
2765
2766 pi->caps_sclk_ds = true;
2767 pi->enable_auto_thermal_throttling = true;
2768 pi->disable_nb_ps3_in_battery = false;
2769 if (radeon_bapm == -1) {
2770 /* only enable bapm on KB, ML by default */
2771 if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
2772 pi->bapm_enable = true;
2773 else
2774 pi->bapm_enable = false;
2775 } else if (radeon_bapm == 0) {
2776 pi->bapm_enable = false;
2777 } else {
2778 pi->bapm_enable = true;
2779 }
2780 pi->voltage_drop_t = 0;
2781 pi->caps_sclk_throttle_low_notification = false;
2782 pi->caps_fps = false; /* true? */
2783 pi->caps_uvd_pg = true;
2784 pi->caps_uvd_dpm = true;
2785 pi->caps_vce_pg = false; /* XXX true */
2786 pi->caps_samu_pg = false;
2787 pi->caps_acp_pg = false;
2788 pi->caps_stable_p_state = false;
2789
2790 ret = kv_parse_sys_info_table(rdev);
2791 if (ret)
2792 return ret;
2793
2794 kv_patch_voltage_values(rdev);
2795 kv_construct_boot_state(rdev);
2796
2797 ret = kv_parse_power_table(rdev);
2798 if (ret)
2799 return ret;
2800
2801 pi->enable_dpm = true;
2802
2803 return 0;
2804 }
2805
2806 #ifdef CONFIG_DEBUG_FS
2807 void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
2808 struct seq_file *m)
2809 {
2810 struct kv_power_info *pi = kv_get_pi(rdev);
2811 u32 current_index =
2812 (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) >>
2813 CURR_SCLK_INDEX_SHIFT;
2814 u32 sclk, tmp;
2815 u16 vddc;
2816
2817 if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
2818 seq_printf(m, "invalid dpm profile %d\n", current_index);
2819 } else {
2820 sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
2821 tmp = (RREG32_SMC(SMU_VOLTAGE_STATUS) & SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
2822 SMU_VOLTAGE_CURRENT_LEVEL_SHIFT;
2823 vddc = kv_convert_8bit_index_to_voltage(rdev, (u16)tmp);
2824 seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
2825 seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en");
2826 seq_printf(m, "power level %d sclk: %u vddc: %u\n",
2827 current_index, sclk, vddc);
2828 }
2829 }
2830 #endif /* CONFIG_DEBUG_FS */
2831
2832 u32 kv_dpm_get_current_sclk(struct radeon_device *rdev)
2833 {
2834 struct kv_power_info *pi = kv_get_pi(rdev);
2835 u32 current_index =
2836 (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) >>
2837 CURR_SCLK_INDEX_SHIFT;
2838 u32 sclk;
2839
2840 if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
2841 return 0;
2842 } else {
2843 sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
2844 return sclk;
2845 }
2846 }
2847
2848 u32 kv_dpm_get_current_mclk(struct radeon_device *rdev)
2849 {
2850 struct kv_power_info *pi = kv_get_pi(rdev);
2851
2852 return pi->sys_info.bootup_uma_clk;
2853 }
2854
2855 void kv_dpm_print_power_state(struct radeon_device *rdev,
2856 struct radeon_ps *rps)
2857 {
2858 int i;
2859 struct kv_ps *ps = kv_get_ps(rps);
2860
2861 r600_dpm_print_class_info(rps->class, rps->class2);
2862 r600_dpm_print_cap_info(rps->caps);
2863 printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
2864 for (i = 0; i < ps->num_levels; i++) {
2865 struct kv_pl *pl = &ps->levels[i];
2866 printk("\t\tpower level %d sclk: %u vddc: %u\n",
2867 i, pl->sclk,
2868 kv_convert_8bit_index_to_voltage(rdev, pl->vddc_index));
2869 }
2870 r600_dpm_print_ps_status(rdev, rps);
2871 }
2872
2873 void kv_dpm_fini(struct radeon_device *rdev)
2874 {
2875 int i;
2876
2877 for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
2878 kfree(rdev->pm.dpm.ps[i].ps_priv);
2879 }
2880 kfree(rdev->pm.dpm.ps);
2881 kfree(rdev->pm.dpm.priv);
2882 r600_free_extended_power_table(rdev);
2883 }
2884
2885 void kv_dpm_display_configuration_changed(struct radeon_device *rdev)
2886 {
2887
2888 }
2889
2890 u32 kv_dpm_get_sclk(struct radeon_device *rdev, bool low)
2891 {
2892 struct kv_power_info *pi = kv_get_pi(rdev);
2893 struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps);
2894
2895 if (low)
2896 return requested_state->levels[0].sclk;
2897 else
2898 return requested_state->levels[requested_state->num_levels - 1].sclk;
2899 }
2900
2901 u32 kv_dpm_get_mclk(struct radeon_device *rdev, bool low)
2902 {
2903 struct kv_power_info *pi = kv_get_pi(rdev);
2904
2905 return pi->sys_info.bootup_uma_clk;
2906 }
2907
2908