/*	$NetBSD: amdgpu_smu.c,v 1.2 2021/12/18 23:45:26 riastradh Exp $	*/
2
3 /*
4 * Copyright 2019 Advanced Micro Devices, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25 #include <sys/cdefs.h>
26 __KERNEL_RCSID(0, "$NetBSD: amdgpu_smu.c,v 1.2 2021/12/18 23:45:26 riastradh Exp $");
27
28 #include <linux/firmware.h>
29 #include <linux/pci.h>
30
31 #include "pp_debug.h"
32 #include "amdgpu.h"
33 #include "amdgpu_smu.h"
34 #include "smu_internal.h"
35 #include "soc15_common.h"
36 #include "smu_v11_0.h"
37 #include "smu_v12_0.h"
38 #include "atom.h"
39 #include "amd_pcie.h"
40 #include "vega20_ppt.h"
41 #include "arcturus_ppt.h"
42 #include "navi10_ppt.h"
43 #include "renoir_ppt.h"
44
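/*
 * SMU_MESSAGE_TYPES and SMU_FEATURE_MASKS are X-macro lists (defined in the
 * SMU headers) whose entries are wrapped in __SMU_DUMMY_MAP().  Redefining
 * __SMU_DUMMY_MAP() to stringify its argument expands each list into a table
 * of names indexed by the corresponding enum value, which the lookup helpers
 * below use for debug output.
 */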
45 #undef __SMU_DUMMY_MAP
46 #define __SMU_DUMMY_MAP(type) #type
47 static const char* __smu_message_names[] = {
48 SMU_MESSAGE_TYPES
49 };
50
51 const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type)
52 {
53 if (type < 0 || type >= SMU_MSG_MAX_COUNT)
54 return "unknown smu message";
55 return __smu_message_names[type];
56 }
57
58 #undef __SMU_DUMMY_MAP
59 #define __SMU_DUMMY_MAP(fea) #fea
60 static const char* __smu_feature_names[] = {
61 SMU_FEATURE_MASKS
62 };
63
64 const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature)
65 {
66 if (feature < 0 || feature >= SMU_FEATURE_COUNT)
67 return "unknown smu feature";
68 return __smu_feature_names[feature];
69 }
70
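/*
 * Fill @buf with a human readable dump of the currently enabled SMU
 * features.  The output looks roughly like (values illustrative only):
 *
 *	features high: 0x00000000 low: 0x00000000
 *	00. <feature name>       ( 0) : enabled
 *	01. <feature name>       ( 1) : disabled
 *
 * The per-feature lines are ordered by the ASIC specific feature index
 * rather than by the generic SMU_FEATURE_* enum value.
 */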
71 size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
72 {
73 size_t size = 0;
74 int ret = 0, i = 0;
75 uint32_t feature_mask[2] = { 0 };
76 int32_t feature_index = 0;
77 uint32_t count = 0;
78 uint32_t sort_feature[SMU_FEATURE_COUNT];
79 uint64_t hw_feature_count = 0;
80
81 mutex_lock(&smu->mutex);
82
83 ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
84 if (ret)
85 goto failed;
86
87 size = sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
88 feature_mask[1], feature_mask[0]);
89
90 for (i = 0; i < SMU_FEATURE_COUNT; i++) {
91 feature_index = smu_feature_get_index(smu, i);
92 if (feature_index < 0)
93 continue;
94 sort_feature[feature_index] = i;
95 hw_feature_count++;
96 }
97
98 for (i = 0; i < hw_feature_count; i++) {
99 size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
100 count++,
101 smu_get_feature_name(smu, sort_feature[i]),
102 i,
103 !!smu_feature_is_enabled(smu, sort_feature[i]) ?
104 "enabled" : "disabled");
105 }
106
107 failed:
108 mutex_unlock(&smu->mutex);
109
110 return size;
111 }
112
113 static int smu_feature_update_enable_state(struct smu_context *smu,
114 uint64_t feature_mask,
115 bool enabled)
116 {
117 struct smu_feature *feature = &smu->smu_feature;
118 uint32_t feature_low = 0, feature_high = 0;
119 int ret = 0;
120
121 if (!smu->pm_enabled)
122 return ret;
123
124 feature_low = (feature_mask >> 0 ) & 0xffffffff;
125 feature_high = (feature_mask >> 32) & 0xffffffff;
126
127 if (enabled) {
128 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
129 feature_low);
130 if (ret)
131 return ret;
132 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
133 feature_high);
134 if (ret)
135 return ret;
136 } else {
137 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
138 feature_low);
139 if (ret)
140 return ret;
141 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
142 feature_high);
143 if (ret)
144 return ret;
145 }
146
147 mutex_lock(&feature->mutex);
148 if (enabled)
149 bitmap_or(feature->enabled, feature->enabled,
150 (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
151 else
152 bitmap_andnot(feature->enabled, feature->enabled,
153 (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
154 mutex_unlock(&feature->mutex);
155
156 return ret;
157 }
158
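/*
 * Apply @new_mask by diffing it against the currently enabled features:
 * bits set in @new_mask but clear in the current mask get enabled, bits
 * clear in @new_mask but set in the current mask get disabled.  For example
 * (values purely illustrative), with a current mask of 0x6 and a new_mask
 * of 0x3, feature_2_enabled is 0x1 and feature_2_disabled is 0x4.
 */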
159 int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
160 {
161 int ret = 0;
162 uint32_t feature_mask[2] = { 0 };
163 uint64_t feature_2_enabled = 0;
164 uint64_t feature_2_disabled = 0;
165 uint64_t feature_enables = 0;
166
167 mutex_lock(&smu->mutex);
168
169 ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
170 if (ret)
171 goto out;
172
173 feature_enables = ((uint64_t)feature_mask[1] << 32 | (uint64_t)feature_mask[0]);
174
175 feature_2_enabled = ~feature_enables & new_mask;
176 feature_2_disabled = feature_enables & ~new_mask;
177
178 if (feature_2_enabled) {
179 ret = smu_feature_update_enable_state(smu, feature_2_enabled, true);
180 if (ret)
181 goto out;
182 }
183 if (feature_2_disabled) {
184 ret = smu_feature_update_enable_state(smu, feature_2_disabled, false);
185 if (ret)
186 goto out;
187 }
188
189 out:
190 mutex_unlock(&smu->mutex);
191
192 return ret;
193 }
194
195 int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
196 {
197 int ret = 0;
198
199 if (!if_version && !smu_version)
200 return -EINVAL;
201
202 if (if_version) {
203 ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
204 if (ret)
205 return ret;
206
207 ret = smu_read_smc_arg(smu, if_version);
208 if (ret)
209 return ret;
210 }
211
212 if (smu_version) {
213 ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion);
214 if (ret)
215 return ret;
216
217 ret = smu_read_smc_arg(smu, smu_version);
218 if (ret)
219 return ret;
220 }
221
222 return ret;
223 }
224
225 int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
226 uint32_t min, uint32_t max)
227 {
228 int ret = 0;
229
230 if (min <= 0 && max <= 0)
231 return -EINVAL;
232
233 if (!smu_clk_dpm_is_enabled(smu, clk_type))
234 return 0;
235
236 ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max);
237 return ret;
238 }
239
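/*
 * The SetHardMaxByFreq/SetHardMinByFreq message parameter packs the clock
 * id into the upper 16 bits and the requested frequency (truncated to
 * 16 bits) into the lower 16 bits: param = (clk_id << 16) | (freq & 0xffff).
 */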
240 int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
241 uint32_t min, uint32_t max)
242 {
243 int ret = 0, clk_id = 0;
244 uint32_t param;
245
246 if (min <= 0 && max <= 0)
247 return -EINVAL;
248
249 if (!smu_clk_dpm_is_enabled(smu, clk_type))
250 return 0;
251
252 clk_id = smu_clk_get_index(smu, clk_type);
253 if (clk_id < 0)
254 return clk_id;
255
256 if (max > 0) {
257 param = (uint32_t)((clk_id << 16) | (max & 0xffff));
258 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
259 param);
260 if (ret)
261 return ret;
262 }
263
264 if (min > 0) {
265 param = (uint32_t)((clk_id << 16) | (min & 0xffff));
266 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
267 param);
268 if (ret)
269 return ret;
270 }
271
272
273 return ret;
274 }
275
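/*
 * For example (illustrative only), a caller that wants the supported
 * GFXCLK range might do
 *
 *	uint32_t min_mhz, max_mhz;
 *	int err = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min_mhz, &max_mhz,
 *	    true);
 *
 * passing lock_needed = true when smu->mutex is not already held.  When DPM
 * is disabled for the requested clock, both values fall back to the vbios
 * bootup clock.
 */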
276 int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
277 uint32_t *min, uint32_t *max, bool lock_needed)
278 {
279 uint32_t clock_limit;
280 int ret = 0;
281
282 if (!min && !max)
283 return -EINVAL;
284
285 if (lock_needed)
286 mutex_lock(&smu->mutex);
287
288 if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
289 switch (clk_type) {
290 case SMU_MCLK:
291 case SMU_UCLK:
292 clock_limit = smu->smu_table.boot_values.uclk;
293 break;
294 case SMU_GFXCLK:
295 case SMU_SCLK:
296 clock_limit = smu->smu_table.boot_values.gfxclk;
297 break;
298 case SMU_SOCCLK:
299 clock_limit = smu->smu_table.boot_values.socclk;
300 break;
301 default:
302 clock_limit = 0;
303 break;
304 }
305
		/* clock in MHz units */
307 if (min)
308 *min = clock_limit / 100;
309 if (max)
310 *max = clock_limit / 100;
311 } else {
		/*
		 * TODO: Have each asic (ASIC_ppt funcs) control the callbacks
		 * exposed to the core driver, and then add helpers for the
		 * functionality that is common (SMU_v11_x | SMU_v12_x funcs).
		 */
316 ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max);
317 }
318
319 if (lock_needed)
320 mutex_unlock(&smu->mutex);
321
322 return ret;
323 }
324
325 int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
326 uint16_t level, uint32_t *value)
327 {
328 int ret = 0, clk_id = 0;
329 uint32_t param;
330
331 if (!value)
332 return -EINVAL;
333
334 if (!smu_clk_dpm_is_enabled(smu, clk_type))
335 return 0;
336
337 clk_id = smu_clk_get_index(smu, clk_type);
338 if (clk_id < 0)
339 return clk_id;
340
341 param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
342
343 ret = smu_send_smc_msg_with_param(smu,SMU_MSG_GetDpmFreqByIndex,
344 param);
345 if (ret)
346 return ret;
347
	ret = smu_read_smc_arg(smu, &param);
349 if (ret)
350 return ret;
351
	/* BIT31: 0 - fine grained DPM, 1 - discrete DPM;
	 * discrete DPM is not supported for now, so mask the bit off */
354 *value = param & 0x7fffffff;
355
356 return ret;
357 }
358
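/*
 * smu_get_dpm_level_count() below queries the number of DPM levels by
 * passing the special level index 0xff to smu_get_dpm_freq_by_index(), in
 * which case the value read back is the level count rather than a
 * frequency.
 */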
359 int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
360 uint32_t *value)
361 {
362 return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
363 }
364
365 int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type,
366 uint32_t *min_value, uint32_t *max_value)
367 {
368 int ret = 0;
369 uint32_t level_count = 0;
370
371 if (!min_value && !max_value)
372 return -EINVAL;
373
374 if (min_value) {
375 /* by default, level 0 clock value as min value */
376 ret = smu_get_dpm_freq_by_index(smu, clk_type, 0, min_value);
377 if (ret)
378 return ret;
379 }
380
381 if (max_value) {
382 ret = smu_get_dpm_level_count(smu, clk_type, &level_count);
383 if (ret)
384 return ret;
385
386 ret = smu_get_dpm_freq_by_index(smu, clk_type, level_count - 1, max_value);
387 if (ret)
388 return ret;
389 }
390
391 return ret;
392 }
393
394 bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
395 {
396 enum smu_feature_mask feature_id = 0;
397
398 switch (clk_type) {
399 case SMU_MCLK:
400 case SMU_UCLK:
401 feature_id = SMU_FEATURE_DPM_UCLK_BIT;
402 break;
403 case SMU_GFXCLK:
404 case SMU_SCLK:
405 feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
406 break;
407 case SMU_SOCCLK:
408 feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
409 break;
410 default:
411 return true;
412 }
413
414 if(!smu_feature_is_enabled(smu, feature_id)) {
415 return false;
416 }
417
418 return true;
419 }
420
421 /**
422 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
423 *
424 * @smu: smu_context pointer
425 * @block_type: the IP block to power gate/ungate
426 * @gate: to power gate if true, ungate otherwise
427 *
 * This API takes no smu->mutex lock protection because:
 * 1. It is called by another IP block (gfx/sdma/vcn/uvd/vce), and the
 *    caller guarantees the call is free of race conditions.
 * 2. Or it is called on a user request to change
 *    power_dpm_force_performance_level, in which case the smu->mutex lock
 *    is already held by the parent API smu_force_performance_level in the
 *    call path.
434 */
435 int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
436 bool gate)
437 {
438 int ret = 0;
439
440 switch (block_type) {
441 case AMD_IP_BLOCK_TYPE_UVD:
442 ret = smu_dpm_set_uvd_enable(smu, !gate);
443 break;
444 case AMD_IP_BLOCK_TYPE_VCE:
445 ret = smu_dpm_set_vce_enable(smu, !gate);
446 break;
447 case AMD_IP_BLOCK_TYPE_GFX:
448 ret = smu_gfx_off_control(smu, gate);
449 break;
450 case AMD_IP_BLOCK_TYPE_SDMA:
451 ret = smu_powergate_sdma(smu, gate);
452 break;
453 case AMD_IP_BLOCK_TYPE_JPEG:
454 ret = smu_dpm_set_jpeg_enable(smu, !gate);
455 break;
456 default:
457 break;
458 }
459
460 return ret;
461 }
462
463 int smu_get_power_num_states(struct smu_context *smu,
464 struct pp_states_info *state_info)
465 {
466 if (!state_info)
467 return -EINVAL;
468
	/* power states are not supported */
470 memset(state_info, 0, sizeof(struct pp_states_info));
471 state_info->nums = 1;
472 state_info->states[0] = POWER_STATE_TYPE_DEFAULT;
473
474 return 0;
475 }
476
477 int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
478 void *data, uint32_t *size)
479 {
480 struct smu_power_context *smu_power = &smu->smu_power;
481 struct smu_power_gate *power_gate = &smu_power->power_gate;
482 int ret = 0;
483
484 if(!data || !size)
485 return -EINVAL;
486
487 switch (sensor) {
488 case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
489 *((uint32_t *)data) = smu->pstate_sclk;
490 *size = 4;
491 break;
492 case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
493 *((uint32_t *)data) = smu->pstate_mclk;
494 *size = 4;
495 break;
496 case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
497 ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
498 *size = 8;
499 break;
500 case AMDGPU_PP_SENSOR_UVD_POWER:
501 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
502 *size = 4;
503 break;
504 case AMDGPU_PP_SENSOR_VCE_POWER:
505 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
506 *size = 4;
507 break;
508 case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
509 *(uint32_t *)data = power_gate->vcn_gated ? 0 : 1;
510 *size = 4;
511 break;
512 default:
513 ret = -EINVAL;
514 break;
515 }
516
517 if (ret)
518 *size = 0;
519
520 return ret;
521 }
522
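/*
 * Copy an SMU table between driver memory and the SMC.  With drv2smu true,
 * table_data is copied into the shared driver table BO and pushed with
 * TransferTableDram2Smu; otherwise the table is pulled with
 * TransferTableSmu2Dram and copied back out.  For example (see
 * smu_write_watermarks_table() below):
 *
 *	smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, watermarks_table, true);
 */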
523 int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
524 void *table_data, bool drv2smu)
525 {
526 struct smu_table_context *smu_table = &smu->smu_table;
527 struct amdgpu_device *adev = smu->adev;
528 struct smu_table *table = &smu_table->driver_table;
529 int table_id = smu_table_get_index(smu, table_index);
530 uint32_t table_size;
531 int ret = 0;
532
533 if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
534 return -EINVAL;
535
536 table_size = smu_table->tables[table_index].size;
537
538 if (drv2smu) {
539 memcpy(table->cpu_addr, table_data, table_size);
		/*
		 * Flush the hdp cache to ensure the content seen by the GPU
		 * is consistent with the CPU.
		 */
544 amdgpu_asic_flush_hdp(adev, NULL);
545 }
546
547 ret = smu_send_smc_msg_with_param(smu, drv2smu ?
548 SMU_MSG_TransferTableDram2Smu :
549 SMU_MSG_TransferTableSmu2Dram,
550 table_id | ((argument & 0xFFFF) << 16));
551 if (ret)
552 return ret;
553
554 if (!drv2smu) {
555 amdgpu_asic_flush_hdp(adev, NULL);
556 memcpy(table_data, table->cpu_addr, table_size);
557 }
558
559 return ret;
560 }
561
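/*
 * Whether the software SMU (this code) drives the ASIC: Vega20 only when
 * amdgpu_dpm == 2, and ASICs from CHIP_ARCTURUS onwards in the
 * amd_asic_type ordering, except for SR-IOV virtual functions that are not
 * in the "pp one VF" mode.
 */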
562 bool is_support_sw_smu(struct amdgpu_device *adev)
563 {
564 if (adev->asic_type == CHIP_VEGA20)
565 return (amdgpu_dpm == 2) ? true : false;
566 else if (adev->asic_type >= CHIP_ARCTURUS) {
567 if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev))
568 return false;
569 else
570 return true;
571 } else
572 return false;
573 }
574
575 bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
576 {
577 if (!is_support_sw_smu(adev))
578 return false;
579
580 if (adev->asic_type == CHIP_VEGA20)
581 return true;
582
583 return false;
584 }
585
586 int smu_sys_get_pp_table(struct smu_context *smu, void **table)
587 {
588 struct smu_table_context *smu_table = &smu->smu_table;
589 uint32_t powerplay_table_size;
590
591 if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
592 return -EINVAL;
593
594 mutex_lock(&smu->mutex);
595
596 if (smu_table->hardcode_pptable)
597 *table = smu_table->hardcode_pptable;
598 else
599 *table = smu_table->power_play_table;
600
601 powerplay_table_size = smu_table->power_play_table_size;
602
603 mutex_unlock(&smu->mutex);
604
605 return powerplay_table_size;
606 }
607
608 int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
609 {
610 struct smu_table_context *smu_table = &smu->smu_table;
611 ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
612 int ret = 0;
613
614 if (!smu->pm_enabled)
615 return -EINVAL;
616 if (header->usStructureSize != size) {
617 pr_err("pp table size not matched !\n");
618 return -EIO;
619 }
620
621 mutex_lock(&smu->mutex);
622 if (!smu_table->hardcode_pptable)
623 smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
624 if (!smu_table->hardcode_pptable) {
625 ret = -ENOMEM;
626 goto failed;
627 }
628
629 memcpy(smu_table->hardcode_pptable, buf, size);
630 smu_table->power_play_table = smu_table->hardcode_pptable;
631 smu_table->power_play_table_size = size;
632
633 /*
634 * Special hw_fini action(for Navi1x, the DPMs disablement will be
635 * skipped) may be needed for custom pptable uploading.
636 */
637 smu->uploading_custom_pp_table = true;
638
639 ret = smu_reset(smu);
640 if (ret)
641 pr_info("smu reset failed, ret = %d\n", ret);
642
643 smu->uploading_custom_pp_table = false;
644
645 failed:
646 mutex_unlock(&smu->mutex);
647 return ret;
648 }
649
650 int smu_feature_init_dpm(struct smu_context *smu)
651 {
652 struct smu_feature *feature = &smu->smu_feature;
653 int ret = 0;
654 uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
655
656 if (!smu->pm_enabled)
657 return ret;
658 mutex_lock(&feature->mutex);
659 bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
660 mutex_unlock(&feature->mutex);
661
662 ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
663 SMU_FEATURE_MAX/32);
664 if (ret)
665 return ret;
666
667 mutex_lock(&feature->mutex);
668 bitmap_or(feature->allowed, feature->allowed,
669 (unsigned long *)allowed_feature_mask,
670 feature->feature_num);
671 mutex_unlock(&feature->mutex);
672
673 return ret;
674 }
675
676
677 int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
678 {
679 struct smu_feature *feature = &smu->smu_feature;
680 int feature_id;
681 int ret = 0;
682
683 if (smu->is_apu)
684 return 1;
685
686 feature_id = smu_feature_get_index(smu, mask);
687 if (feature_id < 0)
688 return 0;
689
690 WARN_ON(feature_id > feature->feature_num);
691
692 mutex_lock(&feature->mutex);
693 ret = test_bit(feature_id, feature->enabled);
694 mutex_unlock(&feature->mutex);
695
696 return ret;
697 }
698
699 int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
700 bool enable)
701 {
702 struct smu_feature *feature = &smu->smu_feature;
703 int feature_id;
704
705 feature_id = smu_feature_get_index(smu, mask);
706 if (feature_id < 0)
707 return -EINVAL;
708
709 WARN_ON(feature_id > feature->feature_num);
710
711 return smu_feature_update_enable_state(smu,
712 1ULL << feature_id,
713 enable);
714 }
715
716 int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
717 {
718 struct smu_feature *feature = &smu->smu_feature;
719 int feature_id;
720 int ret = 0;
721
722 feature_id = smu_feature_get_index(smu, mask);
723 if (feature_id < 0)
724 return 0;
725
726 WARN_ON(feature_id > feature->feature_num);
727
728 mutex_lock(&feature->mutex);
729 ret = test_bit(feature_id, feature->supported);
730 mutex_unlock(&feature->mutex);
731
732 return ret;
733 }
734
735 int smu_feature_set_supported(struct smu_context *smu,
736 enum smu_feature_mask mask,
737 bool enable)
738 {
739 struct smu_feature *feature = &smu->smu_feature;
740 int feature_id;
741 int ret = 0;
742
743 feature_id = smu_feature_get_index(smu, mask);
744 if (feature_id < 0)
745 return -EINVAL;
746
747 WARN_ON(feature_id > feature->feature_num);
748
749 mutex_lock(&feature->mutex);
750 if (enable)
751 test_and_set_bit(feature_id, feature->supported);
752 else
753 test_and_clear_bit(feature_id, feature->supported);
754 mutex_unlock(&feature->mutex);
755
756 return ret;
757 }
758
759 static int smu_set_funcs(struct amdgpu_device *adev)
760 {
761 struct smu_context *smu = &adev->smu;
762
763 if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
764 smu->od_enabled = true;
765
766 switch (adev->asic_type) {
767 case CHIP_VEGA20:
768 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
769 vega20_set_ppt_funcs(smu);
770 break;
771 case CHIP_NAVI10:
772 case CHIP_NAVI14:
773 case CHIP_NAVI12:
774 navi10_set_ppt_funcs(smu);
775 break;
776 case CHIP_ARCTURUS:
777 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
778 arcturus_set_ppt_funcs(smu);
		/* OD is not supported on Arcturus */
		smu->od_enabled = false;
781 break;
782 case CHIP_RENOIR:
783 renoir_set_ppt_funcs(smu);
784 break;
785 default:
786 return -EINVAL;
787 }
788
789 return 0;
790 }
791
792 static int smu_early_init(void *handle)
793 {
794 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
795 struct smu_context *smu = &adev->smu;
796
797 smu->adev = adev;
798 smu->pm_enabled = !!amdgpu_dpm;
799 smu->is_apu = false;
800 mutex_init(&smu->mutex);
801
802 return smu_set_funcs(adev);
803 }
804
805 static int smu_late_init(void *handle)
806 {
807 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
808 struct smu_context *smu = &adev->smu;
809
810 if (!smu->pm_enabled)
811 return 0;
812
813 smu_handle_task(&adev->smu,
814 smu->smu_dpm.dpm_level,
815 AMD_PP_TASK_COMPLETE_INIT,
816 false);
817
818 return 0;
819 }
820
821 int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
822 uint16_t *size, uint8_t *frev, uint8_t *crev,
823 uint8_t **addr)
824 {
825 struct amdgpu_device *adev = smu->adev;
826 uint16_t data_start;
827
828 if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
829 size, frev, crev, &data_start))
830 return -EINVAL;
831
832 *addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;
833
834 return 0;
835 }
836
837 static int smu_initialize_pptable(struct smu_context *smu)
838 {
839 /* TODO */
840 return 0;
841 }
842
843 static int smu_smc_table_sw_init(struct smu_context *smu)
844 {
845 int ret;
846
847 ret = smu_initialize_pptable(smu);
848 if (ret) {
849 pr_err("Failed to init smu_initialize_pptable!\n");
850 return ret;
851 }
852
	/**
	 * Create the smu_table structure and init the smc tables such as
	 * TABLE_PPTABLE, TABLE_WATERMARKS and TABLE_SMU_METRICS.
	 */
857 ret = smu_init_smc_tables(smu);
858 if (ret) {
859 pr_err("Failed to init smc tables!\n");
860 return ret;
861 }
862
	/**
	 * Create the smu_power_context structure, and allocate the
	 * smu_dpm_context and context size needed to fill the
	 * smu_power_context data.
	 */
867 ret = smu_init_power(smu);
868 if (ret) {
869 pr_err("Failed to init smu_init_power!\n");
870 return ret;
871 }
872
873 return 0;
874 }
875
876 static int smu_smc_table_sw_fini(struct smu_context *smu)
877 {
878 int ret;
879
880 ret = smu_fini_smc_tables(smu);
881 if (ret) {
882 pr_err("Failed to smu_fini_smc_tables!\n");
883 return ret;
884 }
885
886 return 0;
887 }
888
889 static int smu_sw_init(void *handle)
890 {
891 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
892 struct smu_context *smu = &adev->smu;
893 int ret;
894
895 smu->pool_size = adev->pm.smu_prv_buffer_size;
896 smu->smu_feature.feature_num = SMU_FEATURE_MAX;
897 mutex_init(&smu->smu_feature.mutex);
898 bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
899 bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
900 bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
901
902 mutex_init(&smu->smu_baco.mutex);
903 smu->smu_baco.state = SMU_BACO_STATE_EXIT;
904 smu->smu_baco.platform_support = false;
905
906 mutex_init(&smu->sensor_lock);
907 mutex_init(&smu->metrics_lock);
908
909 smu->watermarks_bitmap = 0;
910 smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
911 smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
912
913 smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
914 smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
915 smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
916 smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
917 smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
918 smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
919 smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
920 smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
921
922 smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
923 smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
924 smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
925 smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
926 smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
927 smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
928 smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
929 smu->display_config = &adev->pm.pm_display_cfg;
930
931 smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
932 smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
933 ret = smu_init_microcode(smu);
934 if (ret) {
935 pr_err("Failed to load smu firmware!\n");
936 return ret;
937 }
938
939 ret = smu_smc_table_sw_init(smu);
940 if (ret) {
941 pr_err("Failed to sw init smc table!\n");
942 return ret;
943 }
944
945 ret = smu_register_irq_handler(smu);
946 if (ret) {
947 pr_err("Failed to register smc irq handler!\n");
948 return ret;
949 }
950
951 return 0;
952 }
953
954 static int smu_sw_fini(void *handle)
955 {
956 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
957 struct smu_context *smu = &adev->smu;
958 int ret;
959
960 kfree(smu->irq_source);
961 smu->irq_source = NULL;
962
963 ret = smu_smc_table_sw_fini(smu);
964 if (ret) {
965 pr_err("Failed to sw fini smc table!\n");
966 return ret;
967 }
968
969 ret = smu_fini_power(smu);
970 if (ret) {
971 pr_err("Failed to init smu_fini_power!\n");
972 return ret;
973 }
974
975 return 0;
976 }
977
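/*
 * Allocate the VRAM backing for the SMU tables: the PMSTATUSLOG (tool)
 * table gets its own BO, while all other tables share a single driver
 * table BO sized to the largest of them.
 */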
978 static int smu_init_fb_allocations(struct smu_context *smu)
979 {
980 struct amdgpu_device *adev = smu->adev;
981 struct smu_table_context *smu_table = &smu->smu_table;
982 struct smu_table *tables = smu_table->tables;
983 struct smu_table *driver_table = &(smu_table->driver_table);
984 uint32_t max_table_size = 0;
985 int ret, i;
986
987 /* VRAM allocation for tool table */
988 if (tables[SMU_TABLE_PMSTATUSLOG].size) {
989 ret = amdgpu_bo_create_kernel(adev,
990 tables[SMU_TABLE_PMSTATUSLOG].size,
991 tables[SMU_TABLE_PMSTATUSLOG].align,
992 tables[SMU_TABLE_PMSTATUSLOG].domain,
993 &tables[SMU_TABLE_PMSTATUSLOG].bo,
994 &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
995 &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
996 if (ret) {
997 pr_err("VRAM allocation for tool table failed!\n");
998 return ret;
999 }
1000 }
1001
1002 /* VRAM allocation for driver table */
1003 for (i = 0; i < SMU_TABLE_COUNT; i++) {
1004 if (tables[i].size == 0)
1005 continue;
1006
1007 if (i == SMU_TABLE_PMSTATUSLOG)
1008 continue;
1009
1010 if (max_table_size < tables[i].size)
1011 max_table_size = tables[i].size;
1012 }
1013
1014 driver_table->size = max_table_size;
1015 driver_table->align = PAGE_SIZE;
1016 driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
1017
1018 ret = amdgpu_bo_create_kernel(adev,
1019 driver_table->size,
1020 driver_table->align,
1021 driver_table->domain,
1022 &driver_table->bo,
1023 &driver_table->mc_address,
1024 &driver_table->cpu_addr);
1025 if (ret) {
1026 pr_err("VRAM allocation for driver table failed!\n");
1027 if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
1028 amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
1029 &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
1030 &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
1031 }
1032
1033 return ret;
1034 }
1035
1036 static int smu_fini_fb_allocations(struct smu_context *smu)
1037 {
1038 struct smu_table_context *smu_table = &smu->smu_table;
1039 struct smu_table *tables = smu_table->tables;
1040 struct smu_table *driver_table = &(smu_table->driver_table);
1041
1042 if (!tables)
1043 return 0;
1044
1045 if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
1046 amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
1047 &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
1048 &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
1049
1050 amdgpu_bo_free_kernel(&driver_table->bo,
1051 &driver_table->mc_address,
1052 &driver_table->cpu_addr);
1053
1054 return 0;
1055 }
1056
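/*
 * Bring the SMC tables up on the hardware.  With initialize == true (first
 * init) the vbios bootup values and pptable are parsed and the table BOs
 * are allocated; on resume (initialize == false) those parsing and
 * allocation steps are skipped.
 */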
1057 static int smu_smc_table_hw_init(struct smu_context *smu,
1058 bool initialize)
1059 {
1060 struct amdgpu_device *adev = smu->adev;
1061 int ret;
1062
1063 if (smu_is_dpm_running(smu) && adev->in_suspend) {
1064 pr_info("dpm has been enabled\n");
1065 return 0;
1066 }
1067
1068 if (adev->asic_type != CHIP_ARCTURUS) {
1069 ret = smu_init_display_count(smu, 0);
1070 if (ret)
1071 return ret;
1072 }
1073
1074 if (initialize) {
		/* get boot_values from vbios to set revision, gfxclk, etc. */
1076 ret = smu_get_vbios_bootup_values(smu);
1077 if (ret)
1078 return ret;
1079
1080 ret = smu_setup_pptable(smu);
1081 if (ret)
1082 return ret;
1083
1084 ret = smu_get_clk_info_from_vbios(smu);
1085 if (ret)
1086 return ret;
1087
		/*
		 * Check that the format_revision in the vbios is compatible
		 * with the pptable header version, and that the structure
		 * size is not 0.
		 */
1092 ret = smu_check_pptable(smu);
1093 if (ret)
1094 return ret;
1095
1096 /*
1097 * allocate vram bos to store smc table contents.
1098 */
1099 ret = smu_init_fb_allocations(smu);
1100 if (ret)
1101 return ret;
1102
		/*
		 * Parse the pptable format and fill the PPTable_t smc_pptable
		 * in the smu_table_context structure.  Then read the
		 * smc_dpm_table from the vbios and fill it into smc_pptable.
		 */
1108 ret = smu_parse_pptable(smu);
1109 if (ret)
1110 return ret;
1111
		/*
		 * Send the GetDriverIfVersion message and check that the
		 * returned value matches the DRIVER_IF_VERSION in the smc
		 * header.
		 */
1116 ret = smu_check_fw_version(smu);
1117 if (ret)
1118 return ret;
1119 }
1120
1121 /* smu_dump_pptable(smu); */
1122 if (!amdgpu_sriov_vf(adev)) {
1123 ret = smu_set_driver_table_location(smu);
1124 if (ret)
1125 return ret;
1126
1127 /*
1128 * Copy pptable bo in the vram to smc with SMU MSGs such as
1129 * SetDriverDramAddr and TransferTableDram2Smu.
1130 */
1131 ret = smu_write_pptable(smu);
1132 if (ret)
1133 return ret;
1134
1135 /* issue Run*Btc msg */
1136 ret = smu_run_btc(smu);
1137 if (ret)
1138 return ret;
1139 ret = smu_feature_set_allowed_mask(smu);
1140 if (ret)
1141 return ret;
1142
1143 ret = smu_system_features_control(smu, true);
1144 if (ret)
1145 return ret;
1146
1147 if (adev->asic_type == CHIP_NAVI10) {
1148 if ((adev->pdev->device == 0x731f && (adev->pdev->revision == 0xc2 ||
1149 adev->pdev->revision == 0xc3 ||
1150 adev->pdev->revision == 0xca ||
1151 adev->pdev->revision == 0xcb)) ||
1152 (adev->pdev->device == 0x66af && (adev->pdev->revision == 0xf3 ||
1153 adev->pdev->revision == 0xf4 ||
1154 adev->pdev->revision == 0xf5 ||
1155 adev->pdev->revision == 0xf6))) {
1156 ret = smu_disable_umc_cdr_12gbps_workaround(smu);
1157 if (ret) {
1158 pr_err("Workaround failed to disable UMC CDR feature on 12Gbps SKU!\n");
1159 return ret;
1160 }
1161 }
1162 }
1163 }
1164 if (adev->asic_type != CHIP_ARCTURUS) {
1165 ret = smu_notify_display_change(smu);
1166 if (ret)
1167 return ret;
1168
		/*
		 * Set the min deep sleep dcefclk to the bootup value from the
		 * vbios via the SetMinDeepSleepDcefclk message.
		 */
1173 ret = smu_set_min_dcef_deep_sleep(smu);
1174 if (ret)
1175 return ret;
1176 }
1177
	/*
	 * Set initial values (taken from the vbios) in the dpm tables
	 * context, such as gfxclk, memclk and dcefclk, and enable the DPM
	 * feature for each type of clock.
	 */
1183 if (initialize) {
1184 ret = smu_populate_smc_tables(smu);
1185 if (ret)
1186 return ret;
1187
1188 ret = smu_init_max_sustainable_clocks(smu);
1189 if (ret)
1190 return ret;
1191 }
1192
1193 if (adev->asic_type != CHIP_ARCTURUS) {
1194 ret = smu_override_pcie_parameters(smu);
1195 if (ret)
1196 return ret;
1197 }
1198
1199 ret = smu_set_default_od_settings(smu, initialize);
1200 if (ret)
1201 return ret;
1202
1203 if (initialize) {
1204 ret = smu_populate_umd_state_clk(smu);
1205 if (ret)
1206 return ret;
1207
1208 ret = smu_get_power_limit(smu, &smu->default_power_limit, false, false);
1209 if (ret)
1210 return ret;
1211 }
1212
1213 /*
1214 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
1215 */
1216 if (!amdgpu_sriov_vf(adev)) {
1217 ret = smu_set_tool_table_location(smu);
1218 }
1219 if (!smu_is_dpm_running(smu))
1220 pr_info("dpm has been disabled\n");
1221
1222 return ret;
1223 }
1224
1225 /**
1226 * smu_alloc_memory_pool - allocate memory pool in the system memory
1227 *
 * @smu: smu_context pointer
 *
 * This memory pool is for SMC use; the SetSystemVirtualDramAddr and
 * DramLogSetDramAddr messages are used to notify the SMC when it changes.
1232 *
1233 * Returns 0 on success, error on failure.
1234 */
1235 static int smu_alloc_memory_pool(struct smu_context *smu)
1236 {
1237 struct amdgpu_device *adev = smu->adev;
1238 struct smu_table_context *smu_table = &smu->smu_table;
1239 struct smu_table *memory_pool = &smu_table->memory_pool;
1240 uint64_t pool_size = smu->pool_size;
1241 int ret = 0;
1242
1243 if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
1244 return ret;
1245
1246 memory_pool->size = pool_size;
1247 memory_pool->align = PAGE_SIZE;
1248 memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;
1249
1250 switch (pool_size) {
1251 case SMU_MEMORY_POOL_SIZE_256_MB:
1252 case SMU_MEMORY_POOL_SIZE_512_MB:
1253 case SMU_MEMORY_POOL_SIZE_1_GB:
1254 case SMU_MEMORY_POOL_SIZE_2_GB:
1255 ret = amdgpu_bo_create_kernel(adev,
1256 memory_pool->size,
1257 memory_pool->align,
1258 memory_pool->domain,
1259 &memory_pool->bo,
1260 &memory_pool->mc_address,
1261 &memory_pool->cpu_addr);
1262 break;
1263 default:
1264 break;
1265 }
1266
1267 return ret;
1268 }
1269
1270 static int smu_free_memory_pool(struct smu_context *smu)
1271 {
1272 struct smu_table_context *smu_table = &smu->smu_table;
1273 struct smu_table *memory_pool = &smu_table->memory_pool;
1274
1275 if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
1276 return 0;
1277
1278 amdgpu_bo_free_kernel(&memory_pool->bo,
1279 &memory_pool->mc_address,
1280 &memory_pool->cpu_addr);
1281
1282 memset(memory_pool, 0, sizeof(struct smu_table));
1283
1284 return 0;
1285 }
1286
1287 static int smu_start_smc_engine(struct smu_context *smu)
1288 {
1289 struct amdgpu_device *adev = smu->adev;
1290 int ret = 0;
1291
1292 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1293 if (adev->asic_type < CHIP_NAVI10) {
1294 if (smu->ppt_funcs->load_microcode) {
1295 ret = smu->ppt_funcs->load_microcode(smu);
1296 if (ret)
1297 return ret;
1298 }
1299 }
1300 }
1301
1302 if (smu->ppt_funcs->check_fw_status) {
1303 ret = smu->ppt_funcs->check_fw_status(smu);
1304 if (ret)
1305 pr_err("SMC is not ready\n");
1306 }
1307
1308 return ret;
1309 }
1310
1311 static int smu_hw_init(void *handle)
1312 {
1313 int ret;
1314 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1315 struct smu_context *smu = &adev->smu;
1316
1317 ret = smu_start_smc_engine(smu);
1318 if (ret) {
1319 pr_err("SMU is not ready yet!\n");
1320 return ret;
1321 }
1322
1323 if (smu->is_apu) {
1324 smu_powergate_sdma(&adev->smu, false);
1325 smu_powergate_vcn(&adev->smu, false);
1326 smu_powergate_jpeg(&adev->smu, false);
1327 smu_set_gfx_cgpg(&adev->smu, true);
1328 }
1329
1330 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1331 return 0;
1332
1333 if (!smu->pm_enabled)
1334 return 0;
1335
1336 ret = smu_feature_init_dpm(smu);
1337 if (ret)
1338 goto failed;
1339
1340 ret = smu_smc_table_hw_init(smu, true);
1341 if (ret)
1342 goto failed;
1343
1344 ret = smu_alloc_memory_pool(smu);
1345 if (ret)
1346 goto failed;
1347
	/*
	 * Use the SetSystemVirtualDramAddr and DramLogSetDramAddr messages
	 * to notify the SMC of the pool location.
	 */
1352 ret = smu_notify_memory_pool_location(smu);
1353 if (ret)
1354 goto failed;
1355
1356 ret = smu_start_thermal_control(smu);
1357 if (ret)
1358 goto failed;
1359
1360 if (!smu->pm_enabled)
1361 adev->pm.dpm_enabled = false;
1362 else
		adev->pm.dpm_enabled = true; /* TODO: only set the dpm_enabled flag once VCN and DAL DPM are workable */
1364
1365 pr_info("SMU is initialized successfully!\n");
1366
1367 return 0;
1368
1369 failed:
1370 return ret;
1371 }
1372
1373 static int smu_stop_dpms(struct smu_context *smu)
1374 {
1375 return smu_system_features_control(smu, false);
1376 }
1377
1378 static int smu_hw_fini(void *handle)
1379 {
1380 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1381 struct smu_context *smu = &adev->smu;
1382 struct smu_table_context *table_context = &smu->smu_table;
1383 int ret = 0;
1384
1385 if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev))
1386 return 0;
1387
1388 if (smu->is_apu) {
1389 smu_powergate_sdma(&adev->smu, true);
1390 smu_powergate_vcn(&adev->smu, true);
1391 smu_powergate_jpeg(&adev->smu, true);
1392 }
1393
1394 if (!smu->pm_enabled)
1395 return 0;
1396
1397 if (!amdgpu_sriov_vf(adev)){
1398 ret = smu_stop_thermal_control(smu);
1399 if (ret) {
1400 pr_warn("Fail to stop thermal control!\n");
1401 return ret;
1402 }
1403
		/*
		 * For custom pptable uploading, skip the DPM features
		 * disable process on Navi1x ASICs.
		 *   - The gfx related features are under the control of the
		 *     RLC on those ASICs, so reenabling them would require an
		 *     RLC reinitialization, which costs much more effort.
		 *
		 *   - The SMU firmware can handle the DPM reenablement
		 *     properly.
		 */
1415 if (!smu->uploading_custom_pp_table ||
1416 !((adev->asic_type >= CHIP_NAVI10) &&
1417 (adev->asic_type <= CHIP_NAVI12))) {
1418 ret = smu_stop_dpms(smu);
1419 if (ret) {
1420 pr_warn("Fail to stop Dpms!\n");
1421 return ret;
1422 }
1423 }
1424 }
1425
1426 kfree(table_context->driver_pptable);
1427 table_context->driver_pptable = NULL;
1428
1429 kfree(table_context->max_sustainable_clocks);
1430 table_context->max_sustainable_clocks = NULL;
1431
1432 kfree(table_context->overdrive_table);
1433 table_context->overdrive_table = NULL;
1434
1435 ret = smu_fini_fb_allocations(smu);
1436 if (ret)
1437 return ret;
1438
1439 ret = smu_free_memory_pool(smu);
1440 if (ret)
1441 return ret;
1442
1443 return 0;
1444 }
1445
1446 int smu_reset(struct smu_context *smu)
1447 {
1448 struct amdgpu_device *adev = smu->adev;
1449 int ret = 0;
1450
1451 ret = smu_hw_fini(adev);
1452 if (ret)
1453 return ret;
1454
1455 ret = smu_hw_init(adev);
1456 if (ret)
1457 return ret;
1458
1459 return ret;
1460 }
1461
1462 static int smu_suspend(void *handle)
1463 {
1464 int ret;
1465 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1466 struct smu_context *smu = &adev->smu;
1467 bool baco_feature_is_enabled = false;
1468
1469 if (!smu->pm_enabled)
1470 return 0;
1471
1472 if(!smu->is_apu)
1473 baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);
1474
1475 ret = smu_system_features_control(smu, false);
1476 if (ret)
1477 return ret;
1478
1479 if (baco_feature_is_enabled) {
1480 ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
1481 if (ret) {
1482 pr_warn("set BACO feature enabled failed, return %d\n", ret);
1483 return ret;
1484 }
1485 }
1486
1487 smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
1488
1489 if (adev->asic_type >= CHIP_NAVI10 &&
1490 adev->gfx.rlc.funcs->stop)
1491 adev->gfx.rlc.funcs->stop(adev);
1492 if (smu->is_apu)
1493 smu_set_gfx_cgpg(&adev->smu, false);
1494
1495 return 0;
1496 }
1497
1498 static int smu_resume(void *handle)
1499 {
1500 int ret;
1501 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1502 struct smu_context *smu = &adev->smu;
1503
1504 if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev))
1505 return 0;
1506
1507 if (!smu->pm_enabled)
1508 return 0;
1509
1510 pr_info("SMU is resuming...\n");
1511
1512 ret = smu_start_smc_engine(smu);
1513 if (ret) {
1514 pr_err("SMU is not ready yet!\n");
1515 goto failed;
1516 }
1517
1518 ret = smu_smc_table_hw_init(smu, false);
1519 if (ret)
1520 goto failed;
1521
1522 ret = smu_start_thermal_control(smu);
1523 if (ret)
1524 goto failed;
1525
1526 if (smu->is_apu)
1527 smu_set_gfx_cgpg(&adev->smu, true);
1528
1529 smu->disable_uclk_switch = 0;
1530
1531 pr_info("SMU is resumed successfully!\n");
1532
1533 return 0;
1534
1535 failed:
1536 return ret;
1537 }
1538
1539 int smu_display_configuration_change(struct smu_context *smu,
1540 const struct amd_pp_display_configuration *display_config)
1541 {
1542 int index = 0;
1543 int num_of_active_display = 0;
1544
1545 if (!smu->pm_enabled || !is_support_sw_smu(smu->adev))
1546 return -EINVAL;
1547
1548 if (!display_config)
1549 return -EINVAL;
1550
1551 mutex_lock(&smu->mutex);
1552
1553 if (smu->ppt_funcs->set_deep_sleep_dcefclk)
1554 smu->ppt_funcs->set_deep_sleep_dcefclk(smu,
1555 display_config->min_dcef_deep_sleep_set_clk / 100);
1556
1557 for (index = 0; index < display_config->num_path_including_non_display; index++) {
1558 if (display_config->displays[index].controller_id != 0)
1559 num_of_active_display++;
1560 }
1561
1562 smu_set_active_display_count(smu, num_of_active_display);
1563
1564 smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
1565 display_config->cpu_cc6_disable,
1566 display_config->cpu_pstate_disable,
1567 display_config->nb_pstate_switch_disable);
1568
1569 mutex_unlock(&smu->mutex);
1570
1571 return 0;
1572 }
1573
1574 static int smu_get_clock_info(struct smu_context *smu,
1575 struct smu_clock_info *clk_info,
1576 enum smu_perf_level_designation designation)
1577 {
1578 int ret;
1579 struct smu_performance_level level = {0};
1580
1581 if (!clk_info)
1582 return -EINVAL;
1583
1584 ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
1585 if (ret)
1586 return -EINVAL;
1587
1588 clk_info->min_mem_clk = level.memory_clock;
1589 clk_info->min_eng_clk = level.core_clock;
1590 clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
1591
1592 ret = smu_get_perf_level(smu, designation, &level);
1593 if (ret)
1594 return -EINVAL;
1595
1596 clk_info->min_mem_clk = level.memory_clock;
1597 clk_info->min_eng_clk = level.core_clock;
1598 clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
1599
1600 return 0;
1601 }
1602
1603 int smu_get_current_clocks(struct smu_context *smu,
1604 struct amd_pp_clock_info *clocks)
1605 {
1606 struct amd_pp_simple_clock_info simple_clocks = {0};
1607 struct smu_clock_info hw_clocks;
1608 int ret = 0;
1609
1610 if (!is_support_sw_smu(smu->adev))
1611 return -EINVAL;
1612
1613 mutex_lock(&smu->mutex);
1614
1615 smu_get_dal_power_level(smu, &simple_clocks);
1616
1617 if (smu->support_power_containment)
1618 ret = smu_get_clock_info(smu, &hw_clocks,
1619 PERF_LEVEL_POWER_CONTAINMENT);
1620 else
1621 ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);
1622
1623 if (ret) {
1624 pr_err("Error in smu_get_clock_info\n");
1625 goto failed;
1626 }
1627
1628 clocks->min_engine_clock = hw_clocks.min_eng_clk;
1629 clocks->max_engine_clock = hw_clocks.max_eng_clk;
1630 clocks->min_memory_clock = hw_clocks.min_mem_clk;
1631 clocks->max_memory_clock = hw_clocks.max_mem_clk;
1632 clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
1633 clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
1634 clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1635 clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1636
1637 if (simple_clocks.level == 0)
1638 clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
1639 else
1640 clocks->max_clocks_state = simple_clocks.level;
1641
1642 if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
1643 clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1644 clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1645 }
1646
1647 failed:
1648 mutex_unlock(&smu->mutex);
1649 return ret;
1650 }
1651
1652 static int smu_set_clockgating_state(void *handle,
1653 enum amd_clockgating_state state)
1654 {
1655 return 0;
1656 }
1657
1658 static int smu_set_powergating_state(void *handle,
1659 enum amd_powergating_state state)
1660 {
1661 return 0;
1662 }
1663
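/*
 * Track entry to and exit from the UMD pstate (profiling) levels: on entry
 * the current DPM level is saved and GFX clockgating/powergating are
 * ungated, and on exit the saved level is restored and gating is enabled
 * again.
 */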
1664 static int smu_enable_umd_pstate(void *handle,
1665 enum amd_dpm_forced_level *level)
1666 {
1667 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
1668 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
1669 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
1670 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
1671
1672 struct smu_context *smu = (struct smu_context*)(handle);
1673 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1674
1675 if (!smu->is_apu && (!smu->pm_enabled || !smu_dpm_ctx->dpm_context))
1676 return -EINVAL;
1677
1678 if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
1679 /* enter umd pstate, save current level, disable gfx cg*/
1680 if (*level & profile_mode_mask) {
1681 smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
1682 smu_dpm_ctx->enable_umd_pstate = true;
1683 amdgpu_device_ip_set_clockgating_state(smu->adev,
1684 AMD_IP_BLOCK_TYPE_GFX,
1685 AMD_CG_STATE_UNGATE);
1686 amdgpu_device_ip_set_powergating_state(smu->adev,
1687 AMD_IP_BLOCK_TYPE_GFX,
1688 AMD_PG_STATE_UNGATE);
1689 }
1690 } else {
1691 /* exit umd pstate, restore level, enable gfx cg*/
1692 if (!(*level & profile_mode_mask)) {
1693 if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
1694 *level = smu_dpm_ctx->saved_dpm_level;
1695 smu_dpm_ctx->enable_umd_pstate = false;
1696 amdgpu_device_ip_set_clockgating_state(smu->adev,
1697 AMD_IP_BLOCK_TYPE_GFX,
1698 AMD_CG_STATE_GATE);
1699 amdgpu_device_ip_set_powergating_state(smu->adev,
1700 AMD_IP_BLOCK_TYPE_GFX,
1701 AMD_PG_STATE_GATE);
1702 }
1703 }
1704
1705 return 0;
1706 }
1707
1708 int smu_adjust_power_state_dynamic(struct smu_context *smu,
1709 enum amd_dpm_forced_level level,
1710 bool skip_display_settings)
1711 {
1712 int ret = 0;
1713 int index = 0;
1714 long workload;
1715 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1716
1717 if (!smu->pm_enabled)
1718 return -EINVAL;
1719
1720 if (!skip_display_settings) {
1721 ret = smu_display_config_changed(smu);
1722 if (ret) {
1723 pr_err("Failed to change display config!");
1724 return ret;
1725 }
1726 }
1727
1728 ret = smu_apply_clocks_adjust_rules(smu);
1729 if (ret) {
1730 pr_err("Failed to apply clocks adjust rules!");
1731 return ret;
1732 }
1733
1734 if (!skip_display_settings) {
1735 ret = smu_notify_smc_display_config(smu);
1736 if (ret) {
1737 pr_err("Failed to notify smc display config!");
1738 return ret;
1739 }
1740 }
1741
1742 if (smu_dpm_ctx->dpm_level != level) {
1743 ret = smu_asic_set_performance_level(smu, level);
1744 if (ret) {
1745 pr_err("Failed to set performance level!");
1746 return ret;
1747 }
1748
1749 /* update the saved copy */
1750 smu_dpm_ctx->dpm_level = level;
1751 }
1752
1753 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1754 index = fls(smu->workload_mask);
1755 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1756 workload = smu->workload_setting[index];
1757
1758 if (smu->power_profile_mode != workload)
1759 smu_set_power_profile_mode(smu, &workload, 0, false);
1760 }
1761
1762 return ret;
1763 }
1764
1765 int smu_handle_task(struct smu_context *smu,
1766 enum amd_dpm_forced_level level,
1767 enum amd_pp_task task_id,
1768 bool lock_needed)
1769 {
1770 int ret = 0;
1771
1772 if (lock_needed)
1773 mutex_lock(&smu->mutex);
1774
1775 switch (task_id) {
1776 case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
1777 ret = smu_pre_display_config_changed(smu);
1778 if (ret)
1779 goto out;
1780 ret = smu_set_cpu_power_state(smu);
1781 if (ret)
1782 goto out;
1783 ret = smu_adjust_power_state_dynamic(smu, level, false);
1784 break;
1785 case AMD_PP_TASK_COMPLETE_INIT:
1786 case AMD_PP_TASK_READJUST_POWER_STATE:
1787 ret = smu_adjust_power_state_dynamic(smu, level, true);
1788 break;
1789 default:
1790 break;
1791 }
1792
1793 out:
1794 if (lock_needed)
1795 mutex_unlock(&smu->mutex);
1796
1797 return ret;
1798 }
1799
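/*
 * smu->workload_mask is a bitmask of active power profiles indexed by
 * their priority (smu->workload_prority[]); fls() on the mask therefore
 * picks the highest-priority active profile, and workload_setting[]
 * translates that priority index back into a PP_SMC_POWER_PROFILE_* value.
 */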
1800 int smu_switch_power_profile(struct smu_context *smu,
1801 enum PP_SMC_POWER_PROFILE type,
1802 bool en)
1803 {
1804 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1805 long workload;
1806 uint32_t index;
1807
1808 if (!smu->pm_enabled)
1809 return -EINVAL;
1810
1811 if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
1812 return -EINVAL;
1813
1814 mutex_lock(&smu->mutex);
1815
1816 if (!en) {
1817 smu->workload_mask &= ~(1 << smu->workload_prority[type]);
1818 index = fls(smu->workload_mask);
1819 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1820 workload = smu->workload_setting[index];
1821 } else {
1822 smu->workload_mask |= (1 << smu->workload_prority[type]);
1823 index = fls(smu->workload_mask);
1824 index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1825 workload = smu->workload_setting[index];
1826 }
1827
1828 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
1829 smu_set_power_profile_mode(smu, &workload, 0, false);
1830
1831 mutex_unlock(&smu->mutex);
1832
1833 return 0;
1834 }
1835
1836 enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
1837 {
1838 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1839 enum amd_dpm_forced_level level;
1840
1841 if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1842 return -EINVAL;
1843
1844 mutex_lock(&(smu->mutex));
1845 level = smu_dpm_ctx->dpm_level;
1846 mutex_unlock(&(smu->mutex));
1847
1848 return level;
1849 }
1850
1851 int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
1852 {
1853 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1854 int ret = 0;
1855
1856 if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1857 return -EINVAL;
1858
1859 mutex_lock(&smu->mutex);
1860
1861 ret = smu_enable_umd_pstate(smu, &level);
1862 if (ret) {
1863 mutex_unlock(&smu->mutex);
1864 return ret;
1865 }
1866
1867 ret = smu_handle_task(smu, level,
1868 AMD_PP_TASK_READJUST_POWER_STATE,
1869 false);
1870
1871 mutex_unlock(&smu->mutex);
1872
1873 return ret;
1874 }
1875
1876 int smu_set_display_count(struct smu_context *smu, uint32_t count)
1877 {
1878 int ret = 0;
1879
1880 mutex_lock(&smu->mutex);
1881 ret = smu_init_display_count(smu, count);
1882 mutex_unlock(&smu->mutex);
1883
1884 return ret;
1885 }
1886
1887 int smu_force_clk_levels(struct smu_context *smu,
1888 enum smu_clk_type clk_type,
1889 uint32_t mask,
1890 bool lock_needed)
1891 {
1892 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1893 int ret = 0;
1894
1895 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1896 pr_debug("force clock level is for dpm manual mode only.\n");
1897 return -EINVAL;
1898 }
1899
1900 if (lock_needed)
1901 mutex_lock(&smu->mutex);
1902
1903 if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
1904 ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
1905
1906 if (lock_needed)
1907 mutex_unlock(&smu->mutex);
1908
1909 return ret;
1910 }
1911
1912 int smu_set_mp1_state(struct smu_context *smu,
1913 enum pp_mp1_state mp1_state)
1914 {
1915 uint16_t msg;
1916 int ret;
1917
1918 /*
1919 * The SMC is not fully ready. That may be
1920 * expected as the IP may be masked.
1921 * So, just return without error.
1922 */
1923 if (!smu->pm_enabled)
1924 return 0;
1925
1926 mutex_lock(&smu->mutex);
1927
1928 switch (mp1_state) {
1929 case PP_MP1_STATE_SHUTDOWN:
1930 msg = SMU_MSG_PrepareMp1ForShutdown;
1931 break;
1932 case PP_MP1_STATE_UNLOAD:
1933 msg = SMU_MSG_PrepareMp1ForUnload;
1934 break;
1935 case PP_MP1_STATE_RESET:
1936 msg = SMU_MSG_PrepareMp1ForReset;
1937 break;
1938 case PP_MP1_STATE_NONE:
1939 default:
1940 mutex_unlock(&smu->mutex);
1941 return 0;
1942 }
1943
1944 /* some asics may not support those messages */
1945 if (smu_msg_get_index(smu, msg) < 0) {
1946 mutex_unlock(&smu->mutex);
1947 return 0;
1948 }
1949
1950 ret = smu_send_smc_msg(smu, msg);
1951 if (ret)
1952 pr_err("[PrepareMp1] Failed!\n");
1953
1954 mutex_unlock(&smu->mutex);
1955
1956 return ret;
1957 }
1958
1959 int smu_set_df_cstate(struct smu_context *smu,
1960 enum pp_df_cstate state)
1961 {
1962 int ret = 0;
1963
1964 /*
1965 * The SMC is not fully ready. That may be
1966 * expected as the IP may be masked.
1967 * So, just return without error.
1968 */
1969 if (!smu->pm_enabled)
1970 return 0;
1971
1972 if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
1973 return 0;
1974
1975 mutex_lock(&smu->mutex);
1976
1977 ret = smu->ppt_funcs->set_df_cstate(smu, state);
1978 if (ret)
1979 pr_err("[SetDfCstate] failed!\n");
1980
1981 mutex_unlock(&smu->mutex);
1982
1983 return ret;
1984 }
1985
1986 int smu_write_watermarks_table(struct smu_context *smu)
1987 {
1988 void *watermarks_table = smu->smu_table.watermarks_table;
1989
1990 if (!watermarks_table)
1991 return -EINVAL;
1992
1993 return smu_update_table(smu,
1994 SMU_TABLE_WATERMARKS,
1995 0,
1996 watermarks_table,
1997 true);
1998 }
1999
2000 int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
2001 struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
2002 {
2003 void *table = smu->smu_table.watermarks_table;
2004
2005 if (!table)
2006 return -EINVAL;
2007
2008 mutex_lock(&smu->mutex);
2009
2010 if (!smu->disable_watermark &&
2011 smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
2012 smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
2013 smu_set_watermarks_table(smu, table, clock_ranges);
2014 smu->watermarks_bitmap |= WATERMARKS_EXIST;
2015 smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
2016 }
2017
2018 mutex_unlock(&smu->mutex);
2019
2020 return 0;
2021 }
2022
2023 const struct amd_ip_funcs smu_ip_funcs = {
2024 .name = "smu",
2025 .early_init = smu_early_init,
2026 .late_init = smu_late_init,
2027 .sw_init = smu_sw_init,
2028 .sw_fini = smu_sw_fini,
2029 .hw_init = smu_hw_init,
2030 .hw_fini = smu_hw_fini,
2031 .suspend = smu_suspend,
2032 .resume = smu_resume,
2033 .is_idle = NULL,
2034 .check_soft_reset = NULL,
2035 .wait_for_idle = NULL,
2036 .soft_reset = NULL,
2037 .set_clockgating_state = smu_set_clockgating_state,
2038 .set_powergating_state = smu_set_powergating_state,
2039 .enable_umd_pstate = smu_enable_umd_pstate,
2040 };
2041
2042 const struct amdgpu_ip_block_version smu_v11_0_ip_block =
2043 {
2044 .type = AMD_IP_BLOCK_TYPE_SMC,
2045 .major = 11,
2046 .minor = 0,
2047 .rev = 0,
2048 .funcs = &smu_ip_funcs,
2049 };
2050
2051 const struct amdgpu_ip_block_version smu_v12_0_ip_block =
2052 {
2053 .type = AMD_IP_BLOCK_TYPE_SMC,
2054 .major = 12,
2055 .minor = 0,
2056 .rev = 0,
2057 .funcs = &smu_ip_funcs,
2058 };
2059
2060 int smu_load_microcode(struct smu_context *smu)
2061 {
2062 int ret = 0;
2063
2064 mutex_lock(&smu->mutex);
2065
2066 if (smu->ppt_funcs->load_microcode)
2067 ret = smu->ppt_funcs->load_microcode(smu);
2068
2069 mutex_unlock(&smu->mutex);
2070
2071 return ret;
2072 }
2073
2074 int smu_check_fw_status(struct smu_context *smu)
2075 {
2076 int ret = 0;
2077
2078 mutex_lock(&smu->mutex);
2079
2080 if (smu->ppt_funcs->check_fw_status)
2081 ret = smu->ppt_funcs->check_fw_status(smu);
2082
2083 mutex_unlock(&smu->mutex);
2084
2085 return ret;
2086 }
2087
2088 int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
2089 {
2090 int ret = 0;
2091
2092 mutex_lock(&smu->mutex);
2093
2094 if (smu->ppt_funcs->set_gfx_cgpg)
2095 ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
2096
2097 mutex_unlock(&smu->mutex);
2098
2099 return ret;
2100 }
2101
2102 int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
2103 {
2104 int ret = 0;
2105
2106 mutex_lock(&smu->mutex);
2107
2108 if (smu->ppt_funcs->set_fan_speed_rpm)
2109 ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
2110
2111 mutex_unlock(&smu->mutex);
2112
2113 return ret;
2114 }
2115
2116 int smu_get_power_limit(struct smu_context *smu,
2117 uint32_t *limit,
2118 bool def,
2119 bool lock_needed)
2120 {
2121 int ret = 0;
2122
2123 if (lock_needed)
2124 mutex_lock(&smu->mutex);
2125
2126 if (smu->ppt_funcs->get_power_limit)
2127 ret = smu->ppt_funcs->get_power_limit(smu, limit, def);
2128
2129 if (lock_needed)
2130 mutex_unlock(&smu->mutex);
2131
2132 return ret;
2133 }
2134
2135 int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
2136 {
2137 int ret = 0;
2138
2139 mutex_lock(&smu->mutex);
2140
2141 if (smu->ppt_funcs->set_power_limit)
2142 ret = smu->ppt_funcs->set_power_limit(smu, limit);
2143
2144 mutex_unlock(&smu->mutex);
2145
2146 return ret;
2147 }
2148
2149 int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
2150 {
2151 int ret = 0;
2152
2153 mutex_lock(&smu->mutex);
2154
2155 if (smu->ppt_funcs->print_clk_levels)
2156 ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
2157
2158 mutex_unlock(&smu->mutex);
2159
2160 return ret;
2161 }
2162
2163 int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
2164 {
2165 int ret = 0;
2166
2167 mutex_lock(&smu->mutex);
2168
2169 if (smu->ppt_funcs->get_od_percentage)
2170 ret = smu->ppt_funcs->get_od_percentage(smu, type);
2171
2172 mutex_unlock(&smu->mutex);
2173
2174 return ret;
2175 }
2176
2177 int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
2178 {
2179 int ret = 0;
2180
2181 mutex_lock(&smu->mutex);
2182
2183 if (smu->ppt_funcs->set_od_percentage)
2184 ret = smu->ppt_funcs->set_od_percentage(smu, type, value);
2185
2186 mutex_unlock(&smu->mutex);
2187
2188 return ret;
2189 }
2190
2191 int smu_od_edit_dpm_table(struct smu_context *smu,
2192 enum PP_OD_DPM_TABLE_COMMAND type,
2193 long *input, uint32_t size)
2194 {
2195 int ret = 0;
2196
2197 mutex_lock(&smu->mutex);
2198
2199 if (smu->ppt_funcs->od_edit_dpm_table)
2200 ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
2201
2202 mutex_unlock(&smu->mutex);
2203
2204 return ret;
2205 }
2206
2207 int smu_read_sensor(struct smu_context *smu,
2208 enum amd_pp_sensors sensor,
2209 void *data, uint32_t *size)
2210 {
2211 int ret = 0;
2212
2213 mutex_lock(&smu->mutex);
2214
2215 if (smu->ppt_funcs->read_sensor)
2216 ret = smu->ppt_funcs->read_sensor(smu, sensor, data, size);
2217
2218 mutex_unlock(&smu->mutex);
2219
2220 return ret;
2221 }
2222
2223 int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
2224 {
2225 int ret = 0;
2226
2227 mutex_lock(&smu->mutex);
2228
2229 if (smu->ppt_funcs->get_power_profile_mode)
2230 ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);
2231
2232 mutex_unlock(&smu->mutex);
2233
2234 return ret;
2235 }
2236
2237 int smu_set_power_profile_mode(struct smu_context *smu,
2238 long *param,
2239 uint32_t param_size,
2240 bool lock_needed)
2241 {
2242 int ret = 0;
2243
2244 if (lock_needed)
2245 mutex_lock(&smu->mutex);
2246
2247 if (smu->ppt_funcs->set_power_profile_mode)
2248 ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
2249
2250 if (lock_needed)
2251 mutex_unlock(&smu->mutex);
2252
2253 return ret;
2254 }
2255
2256
2257 int smu_get_fan_control_mode(struct smu_context *smu)
2258 {
2259 int ret = 0;
2260
2261 mutex_lock(&smu->mutex);
2262
2263 if (smu->ppt_funcs->get_fan_control_mode)
2264 ret = smu->ppt_funcs->get_fan_control_mode(smu);
2265
2266 mutex_unlock(&smu->mutex);
2267
2268 return ret;
2269 }
2270
2271 int smu_set_fan_control_mode(struct smu_context *smu, int value)
2272 {
2273 int ret = 0;
2274
2275 mutex_lock(&smu->mutex);
2276
2277 if (smu->ppt_funcs->set_fan_control_mode)
2278 ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
2279
2280 mutex_unlock(&smu->mutex);
2281
2282 return ret;
2283 }
2284
2285 int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
2286 {
2287 int ret = 0;
2288
2289 mutex_lock(&smu->mutex);
2290
2291 if (smu->ppt_funcs->get_fan_speed_percent)
2292 ret = smu->ppt_funcs->get_fan_speed_percent(smu, speed);
2293
2294 mutex_unlock(&smu->mutex);
2295
2296 return ret;
2297 }
2298
2299 int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
2300 {
2301 int ret = 0;
2302
2303 mutex_lock(&smu->mutex);
2304
2305 if (smu->ppt_funcs->set_fan_speed_percent)
2306 ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);
2307
2308 mutex_unlock(&smu->mutex);
2309
2310 return ret;
2311 }
2312
2313 int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
2314 {
2315 int ret = 0;
2316
2317 mutex_lock(&smu->mutex);
2318
2319 if (smu->ppt_funcs->get_fan_speed_rpm)
2320 ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
2321
2322 mutex_unlock(&smu->mutex);
2323
2324 return ret;
2325 }
2326
2327 int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
2328 {
2329 int ret = 0;
2330
2331 mutex_lock(&smu->mutex);
2332
2333 if (smu->ppt_funcs->set_deep_sleep_dcefclk)
2334 ret = smu->ppt_funcs->set_deep_sleep_dcefclk(smu, clk);
2335
2336 mutex_unlock(&smu->mutex);
2337
2338 return ret;
2339 }
2340
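/* Note: unlike its siblings, this helper does not take smu->mutex. */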
2341 int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
2342 {
2343 int ret = 0;
2344
2345 if (smu->ppt_funcs->set_active_display_count)
2346 ret = smu->ppt_funcs->set_active_display_count(smu, count);
2347
2348 return ret;
2349 }
2350
2351 int smu_get_clock_by_type(struct smu_context *smu,
2352 enum amd_pp_clock_type type,
2353 struct amd_pp_clocks *clocks)
2354 {
2355 int ret = 0;
2356
2357 mutex_lock(&smu->mutex);
2358
2359 if (smu->ppt_funcs->get_clock_by_type)
2360 ret = smu->ppt_funcs->get_clock_by_type(smu, type, clocks);
2361
2362 mutex_unlock(&smu->mutex);
2363
2364 return ret;
2365 }
2366
2367 int smu_get_max_high_clocks(struct smu_context *smu,
2368 struct amd_pp_simple_clock_info *clocks)
2369 {
2370 int ret = 0;
2371
2372 mutex_lock(&smu->mutex);
2373
2374 if (smu->ppt_funcs->get_max_high_clocks)
2375 ret = smu->ppt_funcs->get_max_high_clocks(smu, clocks);
2376
2377 mutex_unlock(&smu->mutex);
2378
2379 return ret;
2380 }
2381
2382 int smu_get_clock_by_type_with_latency(struct smu_context *smu,
2383 enum smu_clk_type clk_type,
2384 struct pp_clock_levels_with_latency *clocks)
2385 {
2386 int ret = 0;
2387
2388 mutex_lock(&smu->mutex);
2389
2390 if (smu->ppt_funcs->get_clock_by_type_with_latency)
2391 ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
2392
2393 mutex_unlock(&smu->mutex);
2394
2395 return ret;
2396 }
2397
2398 int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
2399 enum amd_pp_clock_type type,
2400 struct pp_clock_levels_with_voltage *clocks)
2401 {
2402 int ret = 0;
2403
2404 mutex_lock(&smu->mutex);
2405
2406 if (smu->ppt_funcs->get_clock_by_type_with_voltage)
2407 ret = smu->ppt_funcs->get_clock_by_type_with_voltage(smu, type, clocks);
2408
2409 mutex_unlock(&smu->mutex);
2410
2411 return ret;
2412 }
2413
2414
2415 int smu_display_clock_voltage_request(struct smu_context *smu,
2416 struct pp_display_clock_request *clock_req)
2417 {
2418 int ret = 0;
2419
2420 mutex_lock(&smu->mutex);
2421
2422 if (smu->ppt_funcs->display_clock_voltage_request)
2423 ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
2424
2425 mutex_unlock(&smu->mutex);
2426
2427 return ret;
2428 }
2429
2430
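/*
 * Note: this wrapper defaults to -EINVAL, so the absence of the
 * callback is reported to the caller rather than treated as success.
 */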
2431 int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
2432 {
2433 int ret = -EINVAL;
2434
2435 mutex_lock(&smu->mutex);
2436
2437 if (smu->ppt_funcs->display_disable_memory_clock_switch)
2438 ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
2439
2440 mutex_unlock(&smu->mutex);
2441
2442 return ret;
2443 }
2444
2445 int smu_notify_smu_enable_pwe(struct smu_context *smu)
2446 {
2447 int ret = 0;
2448
2449 mutex_lock(&smu->mutex);
2450
2451 if (smu->ppt_funcs->notify_smu_enable_pwe)
2452 ret = smu->ppt_funcs->notify_smu_enable_pwe(smu);
2453
2454 mutex_unlock(&smu->mutex);
2455
2456 return ret;
2457 }
2458
2459 int smu_set_xgmi_pstate(struct smu_context *smu,
2460 uint32_t pstate)
2461 {
2462 int ret = 0;
2463
2464 mutex_lock(&smu->mutex);
2465
2466 if (smu->ppt_funcs->set_xgmi_pstate)
2467 ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
2468
2469 mutex_unlock(&smu->mutex);
2470
2471 return ret;
2472 }
2473
2474 int smu_set_azalia_d3_pme(struct smu_context *smu)
2475 {
2476 int ret = 0;
2477
2478 mutex_lock(&smu->mutex);
2479
2480 if (smu->ppt_funcs->set_azalia_d3_pme)
2481 ret = smu->ppt_funcs->set_azalia_d3_pme(smu);
2482
2483 mutex_unlock(&smu->mutex);
2484
2485 return ret;
2486 }
2487
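/*
 * BACO ("Bus Active, Chip Off") helpers: query support and current
 * state, and enter or leave the BACO low-power state through the
 * ASIC-specific callbacks.
 */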
2488 bool smu_baco_is_support(struct smu_context *smu)
2489 {
2490 bool ret = false;
2491
2492 mutex_lock(&smu->mutex);
2493
2494 if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
2495 ret = smu->ppt_funcs->baco_is_support(smu);
2496
2497 mutex_unlock(&smu->mutex);
2498
2499 return ret;
2500 }
2501
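/* Returns -EINVAL when the ASIC does not implement baco_get_state. */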
2502 int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
2503 {
2504 	if (!smu->ppt_funcs->baco_get_state)
2505 		return -EINVAL;
2506
2507 mutex_lock(&smu->mutex);
2508 *state = smu->ppt_funcs->baco_get_state(smu);
2509 mutex_unlock(&smu->mutex);
2510
2511 return 0;
2512 }
2513
2514 int smu_baco_enter(struct smu_context *smu)
2515 {
2516 int ret = 0;
2517
2518 mutex_lock(&smu->mutex);
2519
2520 if (smu->ppt_funcs->baco_enter)
2521 ret = smu->ppt_funcs->baco_enter(smu);
2522
2523 mutex_unlock(&smu->mutex);
2524
2525 return ret;
2526 }
2527
2528 int smu_baco_exit(struct smu_context *smu)
2529 {
2530 int ret = 0;
2531
2532 mutex_lock(&smu->mutex);
2533
2534 if (smu->ppt_funcs->baco_exit)
2535 ret = smu->ppt_funcs->baco_exit(smu);
2536
2537 mutex_unlock(&smu->mutex);
2538
2539 return ret;
2540 }
2541
2542 int smu_mode2_reset(struct smu_context *smu)
2543 {
2544 int ret = 0;
2545
2546 mutex_lock(&smu->mutex);
2547
2548 if (smu->ppt_funcs->mode2_reset)
2549 ret = smu->ppt_funcs->mode2_reset(smu);
2550
2551 mutex_unlock(&smu->mutex);
2552
2553 return ret;
2554 }
2555
2556 int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
2557 struct pp_smu_nv_clock_table *max_clocks)
2558 {
2559 int ret = 0;
2560
2561 mutex_lock(&smu->mutex);
2562
2563 if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
2564 ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
2565
2566 mutex_unlock(&smu->mutex);
2567
2568 return ret;
2569 }
2570
2571 int smu_get_uclk_dpm_states(struct smu_context *smu,
2572 unsigned int *clock_values_in_khz,
2573 unsigned int *num_states)
2574 {
2575 int ret = 0;
2576
2577 mutex_lock(&smu->mutex);
2578
2579 if (smu->ppt_funcs->get_uclk_dpm_states)
2580 ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
2581
2582 mutex_unlock(&smu->mutex);
2583
2584 return ret;
2585 }
2586
2587 enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
2588 {
2589 enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
2590
2591 mutex_lock(&smu->mutex);
2592
2593 if (smu->ppt_funcs->get_current_power_state)
2594 pm_state = smu->ppt_funcs->get_current_power_state(smu);
2595
2596 mutex_unlock(&smu->mutex);
2597
2598 return pm_state;
2599 }
2600
2601 int smu_get_dpm_clock_table(struct smu_context *smu,
2602 struct dpm_clocks *clock_table)
2603 {
2604 int ret = 0;
2605
2606 mutex_lock(&smu->mutex);
2607
2608 if (smu->ppt_funcs->get_dpm_clock_table)
2609 ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
2610
2611 mutex_unlock(&smu->mutex);
2612
2613 return ret;
2614 }
2615
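/* Reads the pptable power limit without taking smu->mutex. */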
2616 uint32_t smu_get_pptable_power_limit(struct smu_context *smu)
2617 {
2618 uint32_t ret = 0;
2619
2620 if (smu->ppt_funcs->get_pptable_power_limit)
2621 ret = smu->ppt_funcs->get_pptable_power_limit(smu);
2622
2623 return ret;
2624 }
2625
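/*
 * Convenience wrapper for messages that carry no payload: forwards the
 * message with a zero parameter.
 */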
2626 int smu_send_smc_msg(struct smu_context *smu,
2627 enum smu_message_type msg)
2628 {
2629 int ret;
2630
2631 ret = smu_send_smc_msg_with_param(smu, msg, 0);
2632 return ret;
2633 }
2634