/*	$NetBSD: amdgpu_smu.c,v 1.3 2021/12/19 12:21:29 riastradh Exp $	*/

/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_smu.c,v 1.3 2021/12/19 12:21:29 riastradh Exp $");

#include <linux/firmware.h>
#include <linux/pci.h>

#include "pp_debug.h"
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "soc15_common.h"
#include "smu_v11_0.h"
#include "smu_v12_0.h"
#include "atom.h"
#include "amd_pcie.h"
#include "vega20_ppt.h"
#include "arcturus_ppt.h"
#include "navi10_ppt.h"
#include "renoir_ppt.h"

#include <linux/nbsd-namespace.h>

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type)	#type
static const char* __smu_message_names[] = {
	SMU_MESSAGE_TYPES
};
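
/*
 * Illustrative note (not part of the original source): SMU_MESSAGE_TYPES
 * is an X-macro list, so redefining __SMU_DUMMY_MAP as a stringifier turns
 * each entry into its own name.  For instance, if the list were
 *
 *	#define SMU_MESSAGE_TYPES		\
 *		__SMU_DUMMY_MAP(TestMessage),	\
 *		__SMU_DUMMY_MAP(GetSmuVersion),
 *
 * the array above would expand to { "TestMessage", "GetSmuVersion", },
 * indexable by enum smu_message_type.
 */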

const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type)
{
	if (type < 0 || type >= SMU_MSG_MAX_COUNT)
		return "unknown smu message";
	return __smu_message_names[type];
}

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(fea)	#fea
static const char* __smu_feature_names[] = {
	SMU_FEATURE_MASKS
};

const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature)
{
	if (feature < 0 || feature >= SMU_FEATURE_COUNT)
		return "unknown smu feature";
	return __smu_feature_names[feature];
}

size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
{
	size_t size = 0;
	int ret = 0, i = 0;
	uint32_t feature_mask[2] = { 0 };
	int32_t feature_index = 0;
	uint32_t count = 0;
	uint32_t sort_feature[SMU_FEATURE_COUNT];
	uint64_t hw_feature_count = 0;

	mutex_lock(&smu->mutex);

	ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
	if (ret)
		goto failed;

	size = snprintf(buf + size, SIZE_MAX/*XXX*/, "features high: 0x%08x low: 0x%08x\n",
			feature_mask[1], feature_mask[0]);

	for (i = 0; i < SMU_FEATURE_COUNT; i++) {
		feature_index = smu_feature_get_index(smu, i);
		if (feature_index < 0)
			continue;
		sort_feature[feature_index] = i;
		hw_feature_count++;
	}

	for (i = 0; i < hw_feature_count; i++) {
		size += snprintf(buf + size, SIZE_MAX/*XXX*/, "%02d. %-20s (%2d) : %s\n",
				 count++,
				 smu_get_feature_name(smu, sort_feature[i]),
				 i,
				 !!smu_feature_is_enabled(smu, sort_feature[i]) ?
				 "enabled" : "disabled");
	}

failed:
	mutex_unlock(&smu->mutex);

	return size;
}

static int smu_feature_update_enable_state(struct smu_context *smu,
					   uint64_t feature_mask,
					   bool enabled)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t feature_low = 0, feature_high = 0;
	int ret = 0;

	if (!smu->pm_enabled)
		return ret;

	feature_low = (feature_mask >> 0) & 0xffffffff;
	feature_high = (feature_mask >> 32) & 0xffffffff;

	if (enabled) {
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
						  feature_low);
		if (ret)
			return ret;
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
						  feature_high);
		if (ret)
			return ret;
	} else {
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
						  feature_low);
		if (ret)
			return ret;
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
						  feature_high);
		if (ret)
			return ret;
	}

	mutex_lock(&feature->mutex);
	if (enabled)
		bitmap_or(feature->enabled, feature->enabled,
			  (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
	else
		bitmap_andnot(feature->enabled, feature->enabled,
			      (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
	mutex_unlock(&feature->mutex);

	return ret;
}
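
/*
 * Worked example (illustrative): the SMC takes the 64-bit feature mask as
 * two 32-bit message parameters.  For feature_mask = 0x0000000300000001ULL,
 * smu_feature_update_enable_state() above sends
 *
 *	feature_low  = 0x00000001	(bit 0)
 *	feature_high = 0x00000003	(bits 32 and 33)
 *
 * through the {Enable,Disable}SmuFeatures{Low,High} message pairs.
 */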

int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
{
	int ret = 0;
	uint32_t feature_mask[2] = { 0 };
	uint64_t feature_2_enabled = 0;
	uint64_t feature_2_disabled = 0;
	uint64_t feature_enables = 0;

	mutex_lock(&smu->mutex);

	ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
	if (ret)
		goto out;

	feature_enables = ((uint64_t)feature_mask[1] << 32 | (uint64_t)feature_mask[0]);

	feature_2_enabled = ~feature_enables & new_mask;
	feature_2_disabled = feature_enables & ~new_mask;

	if (feature_2_enabled) {
		ret = smu_feature_update_enable_state(smu, feature_2_enabled, true);
		if (ret)
			goto out;
	}
	if (feature_2_disabled) {
		ret = smu_feature_update_enable_state(smu, feature_2_disabled, false);
		if (ret)
			goto out;
	}

out:
	mutex_unlock(&smu->mutex);

	return ret;
}
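
/*
 * Worked example (illustrative): if the currently enabled features are
 * feature_enables = 0b1100 and userspace requests new_mask = 0b1010, then
 *
 *	feature_2_enabled  = ~0b1100 &  0b1010 = 0b0010	(bits to turn on)
 *	feature_2_disabled =  0b1100 & ~0b1010 = 0b0100	(bits to turn off)
 *
 * so only the bits that actually change state are sent to the SMC.
 */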

int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
{
	int ret = 0;

	if (!if_version && !smu_version)
		return -EINVAL;

	if (if_version) {
		ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
		if (ret)
			return ret;

		ret = smu_read_smc_arg(smu, if_version);
		if (ret)
			return ret;
	}

	if (smu_version) {
		ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion);
		if (ret)
			return ret;

		ret = smu_read_smc_arg(smu, smu_version);
		if (ret)
			return ret;
	}

	return ret;
}

int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t min, uint32_t max)
{
	int ret = 0;

	if (min <= 0 && max <= 0)
		return -EINVAL;

	if (!smu_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max);
	return ret;
}

int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t min, uint32_t max)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (min <= 0 && max <= 0)
		return -EINVAL;

	if (!smu_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_clk_get_index(smu, clk_type);
	if (clk_id < 0)
		return clk_id;

	if (max > 0) {
		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
						  param);
		if (ret)
			return ret;
	}

	if (min > 0) {
		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
						  param);
		if (ret)
			return ret;
	}

	return ret;
}
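
/*
 * Illustrative note: the hard min/max messages pack the clock id and the
 * frequency (in MHz) into a single 32-bit parameter,
 *
 *	param = (clk_id << 16) | (freq & 0xffff);
 *
 * e.g. a hypothetical clk_id of 2 with max = 1000 MHz yields
 * param = 0x000203e8 for SMU_MSG_SetHardMaxByFreq.
 */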

int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
			   uint32_t *min, uint32_t *max, bool lock_needed)
{
	uint32_t clock_limit;
	int ret = 0;

	if (!min && !max)
		return -EINVAL;

	if (lock_needed)
		mutex_lock(&smu->mutex);

	if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
		switch (clk_type) {
		case SMU_MCLK:
		case SMU_UCLK:
			clock_limit = smu->smu_table.boot_values.uclk;
			break;
		case SMU_GFXCLK:
		case SMU_SCLK:
			clock_limit = smu->smu_table.boot_values.gfxclk;
			break;
		case SMU_SOCCLK:
			clock_limit = smu->smu_table.boot_values.socclk;
			break;
		default:
			clock_limit = 0;
			break;
		}

		/* clock in MHz unit */
		if (min)
			*min = clock_limit / 100;
		if (max)
			*max = clock_limit / 100;
	} else {
		/*
		 * Todo: have each ASIC (ASIC_ppt funcs) control the callbacks
		 * exposed to the core driver, with helpers for the
		 * functionality that is common (SMU_v11_x | SMU_v12_x funcs).
		 */
		ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max);
	}

	if (lock_needed)
		mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
			      uint16_t level, uint32_t *value)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (!value)
		return -EINVAL;

	if (!smu_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_clk_get_index(smu, clk_type);
	if (clk_id < 0)
		return clk_id;

	param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
					  param);
	if (ret)
		return ret;

	ret = smu_read_smc_arg(smu, &param);
	if (ret)
		return ret;

	/*
	 * BIT31: 0 - fine grained DPM, 1 - discrete DPM.
	 * We don't support it yet, so mask it off.
	 */
	*value = param & 0x7fffffff;

	return ret;
}
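
/*
 * Illustrative note: the GetDpmFreqByIndex reply packs the frequency in
 * bits 0..30 and the DPM type in bit 31; a returned param of 0x800002ee
 * would decode as 750 MHz with discrete DPM.  Masking with 0x7fffffff
 * above keeps only the frequency.
 */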

int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t *value)
{
	return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
}

int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t *min_value, uint32_t *max_value)
{
	int ret = 0;
	uint32_t level_count = 0;

	if (!min_value && !max_value)
		return -EINVAL;

	if (min_value) {
		/* by default, use the level 0 clock value as the min value */
		ret = smu_get_dpm_freq_by_index(smu, clk_type, 0, min_value);
		if (ret)
			return ret;
	}

	if (max_value) {
		ret = smu_get_dpm_level_count(smu, clk_type, &level_count);
		if (ret)
			return ret;

		ret = smu_get_dpm_freq_by_index(smu, clk_type, level_count - 1, max_value);
		if (ret)
			return ret;
	}

	return ret;
}

bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
{
	enum smu_feature_mask feature_id = 0;

	switch (clk_type) {
	case SMU_MCLK:
	case SMU_UCLK:
		feature_id = SMU_FEATURE_DPM_UCLK_BIT;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
		break;
	case SMU_SOCCLK:
		feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
		break;
	default:
		return true;
	}

	if (!smu_feature_is_enabled(smu, feature_id)) {
		return false;
	}

	return true;
}

/**
 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
 *
 * @smu: smu_context pointer
 * @block_type: the IP block to power gate/ungate
 * @gate: to power gate if true, ungate otherwise
 *
 * This API uses no smu->mutex lock protection due to:
 * 1. It is either called by other IP blocks (gfx/sdma/vcn/uvd/vce),
 *    in which case the caller guarantees it is race condition free.
 * 2. Or it gets called on a user setting request of
 *    power_dpm_force_performance_level, in which case the smu->mutex lock
 *    protection is already enforced on the parent API,
 *    smu_force_performance_level, of the call path.
 */
int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
			   bool gate)
{
	int ret = 0;

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
		ret = smu_dpm_set_uvd_enable(smu, !gate);
		break;
	case AMD_IP_BLOCK_TYPE_VCE:
		ret = smu_dpm_set_vce_enable(smu, !gate);
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
		ret = smu_gfx_off_control(smu, gate);
		break;
	case AMD_IP_BLOCK_TYPE_SDMA:
		ret = smu_powergate_sdma(smu, gate);
		break;
	case AMD_IP_BLOCK_TYPE_JPEG:
		ret = smu_dpm_set_jpeg_enable(smu, !gate);
		break;
	default:
		break;
	}

	return ret;
}

int smu_get_power_num_states(struct smu_context *smu,
			     struct pp_states_info *state_info)
{
	if (!state_info)
		return -EINVAL;

	/* power states are not supported */
	memset(state_info, 0, sizeof(struct pp_states_info));
	state_info->nums = 1;
	state_info->states[0] = POWER_STATE_TYPE_DEFAULT;

	return 0;
}

int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!data || !size)
		return -EINVAL;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)data) = smu->pstate_sclk;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)data) = smu->pstate_mclk;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
		ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
		*size = 8;
		break;
	case AMDGPU_PP_SENSOR_UVD_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
		*(uint32_t *)data = power_gate->vcn_gated ? 0 : 1;
		*size = 4;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		*size = 0;

	return ret;
}

int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
		     void *table_data, bool drv2smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct amdgpu_device *adev = smu->adev;
	struct smu_table *table = &smu_table->driver_table;
	int table_id = smu_table_get_index(smu, table_index);
	uint32_t table_size;
	int ret = 0;

	if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
		return -EINVAL;

	table_size = smu_table->tables[table_index].size;

	if (drv2smu) {
		memcpy(table->cpu_addr, table_data, table_size);
		/*
		 * Flush the hdp cache: to make sure the content the GPU sees
		 * is consistent with the CPU's view.
		 */
		amdgpu_asic_flush_hdp(adev, NULL);
	}

	ret = smu_send_smc_msg_with_param(smu, drv2smu ?
					  SMU_MSG_TransferTableDram2Smu :
					  SMU_MSG_TransferTableSmu2Dram,
					  table_id | ((argument & 0xFFFF) << 16));
	if (ret)
		return ret;

	if (!drv2smu) {
		amdgpu_asic_flush_hdp(adev, NULL);
		memcpy(table_data, table->cpu_addr, table_size);
	}

	return ret;
}
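
/*
 * Usage sketch (illustrative): smu_update_table() is the generic
 * driver<->SMC table shuttle.  Writing the watermarks table, as
 * smu_write_watermarks_table() does further below, boils down to
 *
 *	ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0,
 *			       watermarks_table, true);
 *
 * with the message parameter carrying the table id in its low 16 bits and
 * the argument in its high 16 bits.
 */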

bool is_support_sw_smu(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_VEGA20)
		return (amdgpu_dpm == 2) ? true : false;
	else if (adev->asic_type >= CHIP_ARCTURUS) {
		if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
			return false;
		else
			return true;
	} else
		return false;
}

bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
{
	if (!is_support_sw_smu(adev))
		return false;

	if (adev->asic_type == CHIP_VEGA20)
		return true;

	return false;
}

int smu_sys_get_pp_table(struct smu_context *smu, const void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	uint32_t powerplay_table_size;

	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (smu_table->hardcode_pptable)
		*table = smu_table->hardcode_pptable;
	else
		*table = smu_table->power_play_table;

	powerplay_table_size = smu_table->power_play_table_size;

	mutex_unlock(&smu->mutex);

	return powerplay_table_size;
}

int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EINVAL;
	if (header->usStructureSize != size) {
		pr_err("pp table size not matched !\n");
		return -EIO;
	}

	mutex_lock(&smu->mutex);
	if (!smu_table->hardcode_pptable)
		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
	if (!smu_table->hardcode_pptable) {
		ret = -ENOMEM;
		goto failed;
	}

	memcpy(smu_table->hardcode_pptable, buf, size);
	smu_table->power_play_table = smu_table->hardcode_pptable;
	smu_table->power_play_table_size = size;

	/*
	 * A special hw_fini action (for Navi1x, the DPM disablement will be
	 * skipped) may be needed for custom pptable uploading.
	 */
	smu->uploading_custom_pp_table = true;

	ret = smu_reset(smu);
	if (ret)
		pr_info("smu reset failed, ret = %d\n", ret);

	smu->uploading_custom_pp_table = false;

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}

int smu_feature_init_dpm(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];

	if (!smu->pm_enabled)
		return ret;
	mutex_lock(&feature->mutex);
	bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
	mutex_unlock(&feature->mutex);

	ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
					   SMU_FEATURE_MAX/32);
	if (ret)
		return ret;

	mutex_lock(&feature->mutex);
	bitmap_or(feature->allowed, feature->allowed,
		  (unsigned long *)allowed_feature_mask,
		  feature->feature_num);
	mutex_unlock(&feature->mutex);

	return ret;
}


int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;
	int ret = 0;

	if (smu->is_apu)
		return 1;

	feature_id = smu_feature_get_index(smu, mask);
	if (feature_id < 0)
		return 0;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->enabled);
	mutex_unlock(&feature->mutex);

	return ret;
}

int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
			    bool enable)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;

	feature_id = smu_feature_get_index(smu, mask);
	if (feature_id < 0)
		return -EINVAL;

	WARN_ON(feature_id > feature->feature_num);

	return smu_feature_update_enable_state(smu,
					       1ULL << feature_id,
					       enable);
}

int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;
	int ret = 0;

	feature_id = smu_feature_get_index(smu, mask);
	if (feature_id < 0)
		return 0;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->supported);
	mutex_unlock(&feature->mutex);

	return ret;
}

int smu_feature_set_supported(struct smu_context *smu,
			      enum smu_feature_mask mask,
			      bool enable)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;
	int ret = 0;

	feature_id = smu_feature_get_index(smu, mask);
	if (feature_id < 0)
		return -EINVAL;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	if (enable)
		test_and_set_bit(feature_id, feature->supported);
	else
		test_and_clear_bit(feature_id, feature->supported);
	mutex_unlock(&feature->mutex);

	return ret;
}

static int smu_set_funcs(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
		smu->od_enabled = true;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		vega20_set_ppt_funcs(smu);
		break;
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		navi10_set_ppt_funcs(smu);
		break;
	case CHIP_ARCTURUS:
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		arcturus_set_ppt_funcs(smu);
		/* OD is not supported on Arcturus */
		smu->od_enabled = false;
		break;
	case CHIP_RENOIR:
		renoir_set_ppt_funcs(smu);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int smu_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	smu->adev = adev;
	smu->pm_enabled = !!amdgpu_dpm;
	smu->is_apu = false;
	mutex_init(&smu->mutex);

	return smu_set_funcs(adev);
}

static int smu_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (!smu->pm_enabled)
		return 0;

	smu_handle_task(&adev->smu,
			smu->smu_dpm.dpm_level,
			AMD_PP_TASK_COMPLETE_INIT,
			false);

	return 0;
}

int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
			    uint16_t *size, uint8_t *frev, uint8_t *crev,
			    uint8_t **addr)
{
	struct amdgpu_device *adev = smu->adev;
	uint16_t data_start;

	if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
					   size, frev, crev, &data_start))
		return -EINVAL;

	*addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;

	return 0;
}

static int smu_initialize_pptable(struct smu_context *smu)
{
	/* TODO */
	return 0;
}

static int smu_smc_table_sw_init(struct smu_context *smu)
{
	int ret;

	ret = smu_initialize_pptable(smu);
	if (ret) {
		pr_err("Failed to init smu_initialize_pptable!\n");
		return ret;
	}

	/**
	 * Create the smu_table structure, and init smc tables such as
	 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
	 */
	ret = smu_init_smc_tables(smu);
	if (ret) {
		pr_err("Failed to init smc tables!\n");
		return ret;
	}

	/**
	 * Create the smu_power_context structure, and allocate the
	 * smu_dpm_context and context size to fill the smu_power_context
	 * data.
	 */
	ret = smu_init_power(smu);
	if (ret) {
		pr_err("Failed to init smu_init_power!\n");
		return ret;
	}

	return 0;
}

static int smu_smc_table_sw_fini(struct smu_context *smu)
{
	int ret;

	ret = smu_fini_smc_tables(smu);
	if (ret) {
		pr_err("Failed to smu_fini_smc_tables!\n");
		return ret;
	}

	return 0;
}

static int smu_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	smu->pool_size = adev->pm.smu_prv_buffer_size;
	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
	mutex_init(&smu->smu_feature.mutex);
	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);

	mutex_init(&smu->smu_baco.mutex);
	smu->smu_baco.state = SMU_BACO_STATE_EXIT;
	smu->smu_baco.platform_support = false;

	mutex_init(&smu->sensor_lock);
	mutex_init(&smu->metrics_lock);

	smu->watermarks_bitmap = 0;
	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
	smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;

	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
	smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
	smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
	smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
	smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
	smu->display_config = &adev->pm.pm_display_cfg;

	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	ret = smu_init_microcode(smu);
	if (ret) {
		pr_err("Failed to load smu firmware!\n");
		return ret;
	}

	ret = smu_smc_table_sw_init(smu);
	if (ret) {
		pr_err("Failed to sw init smc table!\n");
		return ret;
	}

	ret = smu_register_irq_handler(smu);
	if (ret) {
		pr_err("Failed to register smc irq handler!\n");
		return ret;
	}

	return 0;
}

static int smu_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	kfree(smu->irq_source);
	smu->irq_source = NULL;

	ret = smu_smc_table_sw_fini(smu);
	if (ret) {
		pr_err("Failed to sw fini smc table!\n");
		return ret;
	}

	ret = smu_fini_power(smu);
	if (ret) {
		pr_err("Failed to init smu_fini_power!\n");
		return ret;
	}

	return 0;
}

static int smu_init_fb_allocations(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);
	uint32_t max_table_size = 0;
	int ret, i;

	/* VRAM allocation for the tool table */
	if (tables[SMU_TABLE_PMSTATUSLOG].size) {
		ret = amdgpu_bo_create_kernel(adev,
					      tables[SMU_TABLE_PMSTATUSLOG].size,
					      tables[SMU_TABLE_PMSTATUSLOG].align,
					      tables[SMU_TABLE_PMSTATUSLOG].domain,
					      &tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
		if (ret) {
			pr_err("VRAM allocation for tool table failed!\n");
			return ret;
		}
	}

	/* VRAM allocation for the driver table */
	for (i = 0; i < SMU_TABLE_COUNT; i++) {
		if (tables[i].size == 0)
			continue;

		if (i == SMU_TABLE_PMSTATUSLOG)
			continue;

		if (max_table_size < tables[i].size)
			max_table_size = tables[i].size;
	}

	driver_table->size = max_table_size;
	driver_table->align = PAGE_SIZE;
	driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;

	ret = amdgpu_bo_create_kernel(adev,
				      driver_table->size,
				      driver_table->align,
				      driver_table->domain,
				      &driver_table->bo,
				      &driver_table->mc_address,
				      &driver_table->cpu_addr);
	if (ret) {
		pr_err("VRAM allocation for driver table failed!\n");
		if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
			amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
	}

	return ret;
}

static int smu_fini_fb_allocations(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);

	if (!tables)
		return 0;

	if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
		amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
				      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
				      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);

	amdgpu_bo_free_kernel(&driver_table->bo,
			      &driver_table->mc_address,
			      &driver_table->cpu_addr);

	return 0;
}

static int smu_smc_table_hw_init(struct smu_context *smu,
				 bool initialize)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;

	if (smu_is_dpm_running(smu) && adev->in_suspend) {
		pr_info("dpm has been enabled\n");
		return 0;
	}

	if (adev->asic_type != CHIP_ARCTURUS) {
		ret = smu_init_display_count(smu, 0);
		if (ret)
			return ret;
	}

	if (initialize) {
		/* get boot_values from vbios to set revision, gfxclk, etc. */
		ret = smu_get_vbios_bootup_values(smu);
		if (ret)
			return ret;

		ret = smu_setup_pptable(smu);
		if (ret)
			return ret;

		ret = smu_get_clk_info_from_vbios(smu);
		if (ret)
			return ret;

		/*
		 * Check that the format_revision in vbios matches the pptable
		 * header version, and that the structure size is not 0.
		 */
		ret = smu_check_pptable(smu);
		if (ret)
			return ret;

		/*
		 * Allocate vram bos to store smc table contents.
		 */
		ret = smu_init_fb_allocations(smu);
		if (ret)
			return ret;

		/*
		 * Parse the pptable format and fill the PPTable_t smc_pptable
		 * in the smu_table_context structure.  Then read the
		 * smc_dpm_table from vbios and fill it into smc_pptable.
		 */
		ret = smu_parse_pptable(smu);
		if (ret)
			return ret;

		/*
		 * Send the GetDriverIfVersion msg to check that the return
		 * value equals the DRIVER_IF_VERSION in the smc header.
		 */
		ret = smu_check_fw_version(smu);
		if (ret)
			return ret;
	}

	/* smu_dump_pptable(smu); */
	if (!amdgpu_sriov_vf(adev)) {
		ret = smu_set_driver_table_location(smu);
		if (ret)
			return ret;

		/*
		 * Copy the pptable bo in vram to the smc with SMU msgs such
		 * as SetDriverDramAddr and TransferTableDram2Smu.
		 */
		ret = smu_write_pptable(smu);
		if (ret)
			return ret;

		/* issue Run*Btc msg */
		ret = smu_run_btc(smu);
		if (ret)
			return ret;
		ret = smu_feature_set_allowed_mask(smu);
		if (ret)
			return ret;

		ret = smu_system_features_control(smu, true);
		if (ret)
			return ret;

		if (adev->asic_type == CHIP_NAVI10) {
			if ((adev->pdev->device == 0x731f && (adev->pdev->revision == 0xc2 ||
							      adev->pdev->revision == 0xc3 ||
							      adev->pdev->revision == 0xca ||
							      adev->pdev->revision == 0xcb)) ||
			    (adev->pdev->device == 0x66af && (adev->pdev->revision == 0xf3 ||
							      adev->pdev->revision == 0xf4 ||
							      adev->pdev->revision == 0xf5 ||
							      adev->pdev->revision == 0xf6))) {
				ret = smu_disable_umc_cdr_12gbps_workaround(smu);
				if (ret) {
					pr_err("Workaround failed to disable UMC CDR feature on 12Gbps SKU!\n");
					return ret;
				}
			}
		}
	}
	if (adev->asic_type != CHIP_ARCTURUS) {
		ret = smu_notify_display_change(smu);
		if (ret)
			return ret;

		/*
		 * Set the min deep sleep dce fclk with the bootup value from
		 * vbios via the SetMinDeepSleepDcefclk msg.
		 */
		ret = smu_set_min_dcef_deep_sleep(smu);
		if (ret)
			return ret;
	}

	/*
	 * Set initialized values (from vbios) to the dpm tables context such
	 * as gfxclk, memclk, dcefclk, etc., and enable the DPM feature for
	 * each type of clock.
	 */
	if (initialize) {
		ret = smu_populate_smc_tables(smu);
		if (ret)
			return ret;

		ret = smu_init_max_sustainable_clocks(smu);
		if (ret)
			return ret;
	}

	if (adev->asic_type != CHIP_ARCTURUS) {
		ret = smu_override_pcie_parameters(smu);
		if (ret)
			return ret;
	}

	ret = smu_set_default_od_settings(smu, initialize);
	if (ret)
		return ret;

	if (initialize) {
		ret = smu_populate_umd_state_clk(smu);
		if (ret)
			return ret;

		ret = smu_get_power_limit(smu, &smu->default_power_limit, false, false);
		if (ret)
			return ret;
	}

	/*
	 * Set the PMSTATUSLOG table bo address with the SetToolsDramAddr msg
	 * for tools.
	 */
	if (!amdgpu_sriov_vf(adev)) {
		ret = smu_set_tool_table_location(smu);
	}
	if (!smu_is_dpm_running(smu))
		pr_info("dpm has been disabled\n");

	return ret;
}

/**
 * smu_alloc_memory_pool - allocate memory pool in the system memory
 *
 * @smu: smu_context pointer
 *
 * This memory pool is used by the SMC; the SetSystemVirtualDramAddr and
 * DramLogSetDramAddr msgs can notify the SMC of its location.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	uint64_t pool_size = smu->pool_size;
	int ret = 0;

	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	memory_pool->size = pool_size;
	memory_pool->align = PAGE_SIZE;
	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

	switch (pool_size) {
	case SMU_MEMORY_POOL_SIZE_256_MB:
	case SMU_MEMORY_POOL_SIZE_512_MB:
	case SMU_MEMORY_POOL_SIZE_1_GB:
	case SMU_MEMORY_POOL_SIZE_2_GB:
		ret = amdgpu_bo_create_kernel(adev,
					      memory_pool->size,
					      memory_pool->align,
					      memory_pool->domain,
					      &memory_pool->bo,
					      &memory_pool->mc_address,
					      &memory_pool->cpu_addr);
		break;
	default:
		break;
	}

	return ret;
}

static int smu_free_memory_pool(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;

	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
		return 0;

	amdgpu_bo_free_kernel(&memory_pool->bo,
			      &memory_pool->mc_address,
			      &memory_pool->cpu_addr);

	memset(memory_pool, 0, sizeof(struct smu_table));

	return 0;
}

static int smu_start_smc_engine(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		if (adev->asic_type < CHIP_NAVI10) {
			if (smu->ppt_funcs->load_microcode) {
				ret = smu->ppt_funcs->load_microcode(smu);
				if (ret)
					return ret;
			}
		}
	}

	if (smu->ppt_funcs->check_fw_status) {
		ret = smu->ppt_funcs->check_fw_status(smu);
		if (ret)
			pr_err("SMC is not ready\n");
	}

	return ret;
}

static int smu_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	ret = smu_start_smc_engine(smu);
	if (ret) {
		pr_err("SMU is not ready yet!\n");
		return ret;
	}

	if (smu->is_apu) {
		smu_powergate_sdma(&adev->smu, false);
		smu_powergate_vcn(&adev->smu, false);
		smu_powergate_jpeg(&adev->smu, false);
		smu_set_gfx_cgpg(&adev->smu, true);
	}

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (!smu->pm_enabled)
		return 0;

	ret = smu_feature_init_dpm(smu);
	if (ret)
		goto failed;

	ret = smu_smc_table_hw_init(smu, true);
	if (ret)
		goto failed;

	ret = smu_alloc_memory_pool(smu);
	if (ret)
		goto failed;

	/*
	 * Use the SetSystemVirtualDramAddr and DramLogSetDramAddr msgs to
	 * notify the SMC of the pool location.
	 */
	ret = smu_notify_memory_pool_location(smu);
	if (ret)
		goto failed;

	ret = smu_start_thermal_control(smu);
	if (ret)
		goto failed;

	if (!smu->pm_enabled)
		adev->pm.dpm_enabled = false;
	else
		adev->pm.dpm_enabled = true;	/* TODO: will set dpm_enabled flag once VCN and DAL DPM are workable */

	pr_info("SMU is initialized successfully!\n");

	return 0;

failed:
	return ret;
}

static int smu_stop_dpms(struct smu_context *smu)
{
	return smu_system_features_control(smu, false);
}

static int smu_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	struct smu_table_context *table_context = &smu->smu_table;
	int ret = 0;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (smu->is_apu) {
		smu_powergate_sdma(&adev->smu, true);
		smu_powergate_vcn(&adev->smu, true);
		smu_powergate_jpeg(&adev->smu, true);
	}

	if (!smu->pm_enabled)
		return 0;

	if (!amdgpu_sriov_vf(adev)) {
		ret = smu_stop_thermal_control(smu);
		if (ret) {
			pr_warn("Fail to stop thermal control!\n");
			return ret;
		}

		/*
		 * For custom pptable uploading, skip the DPM features
		 * disable process on Navi1x ASICs.
		 *   - The gfx related features are under the control of
		 *     RLC on those ASICs; RLC reinitialization would be
		 *     needed to reenable them, which costs much more
		 *     effort.
		 *
		 *   - The SMU firmware can handle the DPM reenablement
		 *     properly.
		 */
		if (!smu->uploading_custom_pp_table ||
		    !((adev->asic_type >= CHIP_NAVI10) &&
		      (adev->asic_type <= CHIP_NAVI12))) {
			ret = smu_stop_dpms(smu);
			if (ret) {
				pr_warn("Fail to stop Dpms!\n");
				return ret;
			}
		}
	}

	kfree(table_context->driver_pptable);
	table_context->driver_pptable = NULL;

	kfree(table_context->max_sustainable_clocks);
	table_context->max_sustainable_clocks = NULL;

	kfree(table_context->overdrive_table);
	table_context->overdrive_table = NULL;

	ret = smu_fini_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_free_memory_pool(smu);
	if (ret)
		return ret;

	return 0;
}

int smu_reset(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	ret = smu_hw_fini(adev);
	if (ret)
		return ret;

	ret = smu_hw_init(adev);
	if (ret)
		return ret;

	return ret;
}

static int smu_suspend(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	bool baco_feature_is_enabled = false;

	if (!smu->pm_enabled)
		return 0;

	if (!smu->is_apu)
		baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);

	ret = smu_system_features_control(smu, false);
	if (ret)
		return ret;

	if (baco_feature_is_enabled) {
		ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
		if (ret) {
			pr_warn("set BACO feature enabled failed, return %d\n", ret);
			return ret;
		}
	}

	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);

	if (adev->asic_type >= CHIP_NAVI10 &&
	    adev->gfx.rlc.funcs->stop)
		adev->gfx.rlc.funcs->stop(adev);
	if (smu->is_apu)
		smu_set_gfx_cgpg(&adev->smu, false);

	return 0;
}

static int smu_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (!smu->pm_enabled)
		return 0;

	pr_info("SMU is resuming...\n");

	ret = smu_start_smc_engine(smu);
	if (ret) {
		pr_err("SMU is not ready yet!\n");
		goto failed;
	}

	ret = smu_smc_table_hw_init(smu, false);
	if (ret)
		goto failed;

	ret = smu_start_thermal_control(smu);
	if (ret)
		goto failed;

	if (smu->is_apu)
		smu_set_gfx_cgpg(&adev->smu, true);

	smu->disable_uclk_switch = 0;

	pr_info("SMU is resumed successfully!\n");

	return 0;

failed:
	return ret;
}

int smu_display_configuration_change(struct smu_context *smu,
				     const struct amd_pp_display_configuration *display_config)
{
	int index = 0;
	int num_of_active_display = 0;

	if (!smu->pm_enabled || !is_support_sw_smu(smu->adev))
		return -EINVAL;

	if (!display_config)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_deep_sleep_dcefclk)
		smu->ppt_funcs->set_deep_sleep_dcefclk(smu,
				display_config->min_dcef_deep_sleep_set_clk / 100);

	for (index = 0; index < display_config->num_path_including_non_display; index++) {
		if (display_config->displays[index].controller_id != 0)
			num_of_active_display++;
	}

	smu_set_active_display_count(smu, num_of_active_display);

	smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
			   display_config->cpu_cc6_disable,
			   display_config->cpu_pstate_disable,
			   display_config->nb_pstate_switch_disable);

	mutex_unlock(&smu->mutex);

	return 0;
}

static int smu_get_clock_info(struct smu_context *smu,
			      struct smu_clock_info *clk_info,
			      enum smu_perf_level_designation designation)
{
	int ret;
	struct smu_performance_level level = {0};

	if (!clk_info)
		return -EINVAL;

	ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
	if (ret)
		return -EINVAL;

	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

	ret = smu_get_perf_level(smu, designation, &level);
	if (ret)
		return -EINVAL;

	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

	return 0;
}

int smu_get_current_clocks(struct smu_context *smu,
			   struct amd_pp_clock_info *clocks)
{
	struct amd_pp_simple_clock_info simple_clocks = {0};
	struct smu_clock_info hw_clocks;
	int ret = 0;

	if (!is_support_sw_smu(smu->adev))
		return -EINVAL;

	mutex_lock(&smu->mutex);

	smu_get_dal_power_level(smu, &simple_clocks);

	if (smu->support_power_containment)
		ret = smu_get_clock_info(smu, &hw_clocks,
					 PERF_LEVEL_POWER_CONTAINMENT);
	else
		ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);

	if (ret) {
		pr_err("Error in smu_get_clock_info\n");
		goto failed;
	}

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	if (simple_clocks.level == 0)
		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
	else
		clocks->max_clocks_state = simple_clocks.level;

	if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}

static int smu_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int smu_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}

static int smu_enable_umd_pstate(void *handle,
				 enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	struct smu_context *smu = (struct smu_context*)(handle);
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->is_apu && (!smu->pm_enabled || !smu_dpm_ctx->dpm_context))
		return -EINVAL;

	if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg */
		if (*level & profile_mode_mask) {
			smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
			smu_dpm_ctx->enable_umd_pstate = true;
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_UNGATE);
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg */
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = smu_dpm_ctx->saved_dpm_level;
			smu_dpm_ctx->enable_umd_pstate = false;
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_GATE);
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_GATE);
		}
	}

	return 0;
}
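
/*
 * Worked example (illustrative, not from the original source): with the
 * current level AMD_DPM_FORCED_LEVEL_AUTO (not in profile_mode_mask) and a
 * requested *level of AMD_DPM_FORCED_LEVEL_PROFILE_PEAK (in the mask),
 * smu_enable_umd_pstate() saves AUTO in saved_dpm_level and ungates gfx
 * clock/power gating so that profiling runs at stable clocks.  A later
 * request of AMD_DPM_FORCED_LEVEL_PROFILE_EXIT takes the other branch,
 * restores the saved AUTO level, and re-enables gfx gating.
 */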

int smu_adjust_power_state_dynamic(struct smu_context *smu,
				   enum amd_dpm_forced_level level,
				   bool skip_display_settings)
{
	int ret = 0;
	int index = 0;
	long workload;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->pm_enabled)
		return -EINVAL;

	if (!skip_display_settings) {
		ret = smu_display_config_changed(smu);
		if (ret) {
			pr_err("Failed to change display config!");
			return ret;
		}
	}

	ret = smu_apply_clocks_adjust_rules(smu);
	if (ret) {
		pr_err("Failed to apply clocks adjust rules!");
		return ret;
	}

	if (!skip_display_settings) {
		ret = smu_notify_smc_display_config(smu);
		if (ret) {
			pr_err("Failed to notify smc display config!");
			return ret;
		}
	}

	if (smu_dpm_ctx->dpm_level != level) {
		ret = smu_asic_set_performance_level(smu, level);
		if (ret) {
			pr_err("Failed to set performance level!");
			return ret;
		}

		/* update the saved copy */
		smu_dpm_ctx->dpm_level = level;
	}

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];

		if (smu->power_profile_mode != workload)
			smu_set_power_profile_mode(smu, &workload, 0, false);
	}

	return ret;
}
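
/*
 * Worked example (illustrative): the active workload is the highest set
 * priority bit in workload_mask.  With workload_mask = 0b1001
 * (BOOTUP_DEFAULT and VIDEO requested), fls() returns 4, so index = 3 and
 * workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO wins.
 */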
1766
1767 int smu_handle_task(struct smu_context *smu,
1768 enum amd_dpm_forced_level level,
1769 enum amd_pp_task task_id,
1770 bool lock_needed)
1771 {
1772 int ret = 0;
1773
1774 if (lock_needed)
1775 mutex_lock(&smu->mutex);
1776
1777 switch (task_id) {
1778 case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
1779 ret = smu_pre_display_config_changed(smu);
1780 if (ret)
1781 goto out;
1782 ret = smu_set_cpu_power_state(smu);
1783 if (ret)
1784 goto out;
1785 ret = smu_adjust_power_state_dynamic(smu, level, false);
1786 break;
1787 case AMD_PP_TASK_COMPLETE_INIT:
1788 case AMD_PP_TASK_READJUST_POWER_STATE:
1789 ret = smu_adjust_power_state_dynamic(smu, level, true);
1790 break;
1791 default:
1792 break;
1793 }
1794
1795 out:
1796 if (lock_needed)
1797 mutex_unlock(&smu->mutex);
1798
1799 return ret;
1800 }
1801
1802 int smu_switch_power_profile(struct smu_context *smu,
1803 enum PP_SMC_POWER_PROFILE type,
1804 bool en)
1805 {
1806 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1807 long workload;
1808 uint32_t index;
1809
1810 if (!smu->pm_enabled)
1811 return -EINVAL;
1812
1813 if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
1814 return -EINVAL;
1815
1816 mutex_lock(&smu->mutex);
1817
1818 if (!en) {
1819 smu->workload_mask &= ~(1 << smu->workload_prority[type]);
1820 index = fls(smu->workload_mask);
1821 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1822 workload = smu->workload_setting[index];
1823 } else {
1824 smu->workload_mask |= (1 << smu->workload_prority[type]);
1825 index = fls(smu->workload_mask);
1826 index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
1827 workload = smu->workload_setting[index];
1828 }
1829
1830 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
1831 smu_set_power_profile_mode(smu, &workload, 0, false);
1832
1833 mutex_unlock(&smu->mutex);
1834
1835 return 0;
1836 }
1837
1838 enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
1839 {
1840 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1841 enum amd_dpm_forced_level level;
1842
1843 if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1844 return -EINVAL;
1845
1846 mutex_lock(&(smu->mutex));
1847 level = smu_dpm_ctx->dpm_level;
1848 mutex_unlock(&(smu->mutex));
1849
1850 return level;
1851 }
1852
1853 int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
1854 {
1855 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1856 int ret = 0;
1857
1858 if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1859 return -EINVAL;
1860
1861 mutex_lock(&smu->mutex);
1862
1863 ret = smu_enable_umd_pstate(smu, &level);
1864 if (ret) {
1865 mutex_unlock(&smu->mutex);
1866 return ret;
1867 }
1868
1869 ret = smu_handle_task(smu, level,
1870 AMD_PP_TASK_READJUST_POWER_STATE,
1871 false);
1872
1873 mutex_unlock(&smu->mutex);
1874
1875 return ret;
1876 }
1877
1878 int smu_set_display_count(struct smu_context *smu, uint32_t count)
1879 {
1880 int ret = 0;
1881
1882 mutex_lock(&smu->mutex);
1883 ret = smu_init_display_count(smu, count);
1884 mutex_unlock(&smu->mutex);
1885
1886 return ret;
1887 }
1888
1889 int smu_force_clk_levels(struct smu_context *smu,
1890 enum smu_clk_type clk_type,
1891 uint32_t mask,
1892 bool lock_needed)
1893 {
1894 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1895 int ret = 0;
1896
1897 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
1898 pr_debug("force clock level is for dpm manual mode only.\n");
1899 return -EINVAL;
1900 }
1901
1902 if (lock_needed)
1903 mutex_lock(&smu->mutex);
1904
1905 if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
1906 ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
1907
1908 if (lock_needed)
1909 mutex_unlock(&smu->mutex);
1910
1911 return ret;
1912 }
1913
1914 int smu_set_mp1_state(struct smu_context *smu,
1915 enum pp_mp1_state mp1_state)
1916 {
1917 uint16_t msg;
1918 int ret;
1919
1920 /*
1921 * The SMC is not fully ready. That may be
1922 * expected as the IP may be masked.
1923 * So, just return without error.
1924 */
1925 if (!smu->pm_enabled)
1926 return 0;
1927
1928 mutex_lock(&smu->mutex);
1929
1930 switch (mp1_state) {
1931 case PP_MP1_STATE_SHUTDOWN:
1932 msg = SMU_MSG_PrepareMp1ForShutdown;
1933 break;
1934 case PP_MP1_STATE_UNLOAD:
1935 msg = SMU_MSG_PrepareMp1ForUnload;
1936 break;
1937 case PP_MP1_STATE_RESET:
1938 msg = SMU_MSG_PrepareMp1ForReset;
1939 break;
1940 case PP_MP1_STATE_NONE:
1941 default:
1942 mutex_unlock(&smu->mutex);
1943 return 0;
1944 }
1945
1946 /* some asics may not support those messages */
1947 if (smu_msg_get_index(smu, msg) < 0) {
1948 mutex_unlock(&smu->mutex);
1949 return 0;
1950 }
1951
1952 ret = smu_send_smc_msg(smu, msg);
1953 if (ret)
1954 pr_err("[PrepareMp1] Failed!\n");
1955
1956 mutex_unlock(&smu->mutex);
1957
1958 return ret;
1959 }
1960
1961 int smu_set_df_cstate(struct smu_context *smu,
1962 enum pp_df_cstate state)
1963 {
1964 int ret = 0;
1965
1966 /*
1967 * The SMC is not fully ready. That may be
1968 * expected as the IP may be masked.
1969 * So, just return without error.
1970 */
1971 if (!smu->pm_enabled)
1972 return 0;
1973
1974 if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
1975 return 0;
1976
1977 mutex_lock(&smu->mutex);
1978
1979 ret = smu->ppt_funcs->set_df_cstate(smu, state);
1980 if (ret)
1981 pr_err("[SetDfCstate] failed!\n");
1982
1983 mutex_unlock(&smu->mutex);
1984
1985 return ret;
1986 }
1987
1988 int smu_write_watermarks_table(struct smu_context *smu)
1989 {
1990 void *watermarks_table = smu->smu_table.watermarks_table;
1991
1992 if (!watermarks_table)
1993 return -EINVAL;
1994
1995 return smu_update_table(smu,
1996 SMU_TABLE_WATERMARKS,
1997 0,
1998 watermarks_table,
1999 true);
2000 }
2001
2002 int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
2003 struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
2004 {
2005 void *table = smu->smu_table.watermarks_table;
2006
2007 if (!table)
2008 return -EINVAL;
2009
2010 mutex_lock(&smu->mutex);
2011
2012 if (!smu->disable_watermark &&
2013 smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
2014 smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
2015 smu_set_watermarks_table(smu, table, clock_ranges);
2016 smu->watermarks_bitmap |= WATERMARKS_EXIST;
2017 smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
2018 }
2019
2020 mutex_unlock(&smu->mutex);
2021
2022 return 0;
2023 }
2024
2025 const struct amd_ip_funcs smu_ip_funcs = {
2026 .name = "smu",
2027 .early_init = smu_early_init,
2028 .late_init = smu_late_init,
2029 .sw_init = smu_sw_init,
2030 .sw_fini = smu_sw_fini,
2031 .hw_init = smu_hw_init,
2032 .hw_fini = smu_hw_fini,
2033 .suspend = smu_suspend,
2034 .resume = smu_resume,
2035 .is_idle = NULL,
2036 .check_soft_reset = NULL,
2037 .wait_for_idle = NULL,
2038 .soft_reset = NULL,
2039 .set_clockgating_state = smu_set_clockgating_state,
2040 .set_powergating_state = smu_set_powergating_state,
2041 .enable_umd_pstate = smu_enable_umd_pstate,
2042 };
2043
2044 const struct amdgpu_ip_block_version smu_v11_0_ip_block =
2045 {
2046 .type = AMD_IP_BLOCK_TYPE_SMC,
2047 .major = 11,
2048 .minor = 0,
2049 .rev = 0,
2050 .funcs = &smu_ip_funcs,
2051 };
2052
2053 const struct amdgpu_ip_block_version smu_v12_0_ip_block =
2054 {
2055 .type = AMD_IP_BLOCK_TYPE_SMC,
2056 .major = 12,
2057 .minor = 0,
2058 .rev = 0,
2059 .funcs = &smu_ip_funcs,
2060 };
2061
2062 int smu_load_microcode(struct smu_context *smu)
2063 {
2064 int ret = 0;
2065
2066 mutex_lock(&smu->mutex);
2067
2068 if (smu->ppt_funcs->load_microcode)
2069 ret = smu->ppt_funcs->load_microcode(smu);
2070
2071 mutex_unlock(&smu->mutex);
2072
2073 return ret;
2074 }
2075
2076 int smu_check_fw_status(struct smu_context *smu)
2077 {
2078 int ret = 0;
2079
2080 mutex_lock(&smu->mutex);
2081
2082 if (smu->ppt_funcs->check_fw_status)
2083 ret = smu->ppt_funcs->check_fw_status(smu);
2084
2085 mutex_unlock(&smu->mutex);
2086
2087 return ret;
2088 }
2089
2090 int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
2091 {
2092 int ret = 0;
2093
2094 mutex_lock(&smu->mutex);
2095
2096 if (smu->ppt_funcs->set_gfx_cgpg)
2097 ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
2098
2099 mutex_unlock(&smu->mutex);
2100
2101 return ret;
2102 }
2103
2104 int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
2105 {
2106 int ret = 0;
2107
2108 mutex_lock(&smu->mutex);
2109
2110 if (smu->ppt_funcs->set_fan_speed_rpm)
2111 ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
2112
2113 mutex_unlock(&smu->mutex);
2114
2115 return ret;
2116 }
2117
2118 int smu_get_power_limit(struct smu_context *smu,
2119 uint32_t *limit,
2120 bool def,
2121 bool lock_needed)
2122 {
2123 int ret = 0;
2124
2125 if (lock_needed)
2126 mutex_lock(&smu->mutex);
2127
2128 if (smu->ppt_funcs->get_power_limit)
2129 ret = smu->ppt_funcs->get_power_limit(smu, limit, def);
2130
2131 if (lock_needed)
2132 mutex_unlock(&smu->mutex);
2133
2134 return ret;
2135 }
2136
2137 int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
2138 {
2139 int ret = 0;
2140
2141 mutex_lock(&smu->mutex);
2142
2143 if (smu->ppt_funcs->set_power_limit)
2144 ret = smu->ppt_funcs->set_power_limit(smu, limit);
2145
2146 mutex_unlock(&smu->mutex);
2147
2148 return ret;
2149 }
2150
2151 int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
2152 {
2153 int ret = 0;
2154
2155 mutex_lock(&smu->mutex);
2156
2157 if (smu->ppt_funcs->print_clk_levels)
2158 ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
2159
2160 mutex_unlock(&smu->mutex);
2161
2162 return ret;
2163 }
2164
2165 int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
2166 {
2167 int ret = 0;
2168
2169 mutex_lock(&smu->mutex);
2170
2171 if (smu->ppt_funcs->get_od_percentage)
2172 ret = smu->ppt_funcs->get_od_percentage(smu, type);
2173
2174 mutex_unlock(&smu->mutex);
2175
2176 return ret;
2177 }
2178
2179 int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
2180 {
2181 int ret = 0;
2182
2183 mutex_lock(&smu->mutex);
2184
2185 if (smu->ppt_funcs->set_od_percentage)
2186 ret = smu->ppt_funcs->set_od_percentage(smu, type, value);
2187
2188 mutex_unlock(&smu->mutex);
2189
2190 return ret;
2191 }
2192
2193 int smu_od_edit_dpm_table(struct smu_context *smu,
2194 enum PP_OD_DPM_TABLE_COMMAND type,
2195 long *input, uint32_t size)
2196 {
2197 int ret = 0;
2198
2199 mutex_lock(&smu->mutex);
2200
2201 if (smu->ppt_funcs->od_edit_dpm_table)
2202 ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
2203
2204 mutex_unlock(&smu->mutex);
2205
2206 return ret;
2207 }
2208
2209 int smu_read_sensor(struct smu_context *smu,
2210 enum amd_pp_sensors sensor,
2211 void *data, uint32_t *size)
2212 {
2213 int ret = 0;
2214
2215 mutex_lock(&smu->mutex);
2216
2217 if (smu->ppt_funcs->read_sensor)
2218 ret = smu->ppt_funcs->read_sensor(smu, sensor, data, size);
2219
2220 mutex_unlock(&smu->mutex);
2221
2222 return ret;
2223 }
2224
2225 int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
2226 {
2227 int ret = 0;
2228
2229 mutex_lock(&smu->mutex);
2230
2231 if (smu->ppt_funcs->get_power_profile_mode)
2232 ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);
2233
2234 mutex_unlock(&smu->mutex);
2235
2236 return ret;
2237 }
2238
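/*
 * As with smu_get_power_limit(), `lock_needed' allows callers that
 * already hold smu->mutex to reuse this entry point without taking
 * the lock recursively; external callers should pass true.
 */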
2239 int smu_set_power_profile_mode(struct smu_context *smu,
2240 long *param,
2241 uint32_t param_size,
2242 bool lock_needed)
2243 {
2244 int ret = 0;
2245
2246 if (lock_needed)
2247 mutex_lock(&smu->mutex);
2248
2249 if (smu->ppt_funcs->set_power_profile_mode)
2250 ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
2251
2252 if (lock_needed)
2253 mutex_unlock(&smu->mutex);
2254
2255 return ret;
2256 }
2257
2258
2259 int smu_get_fan_control_mode(struct smu_context *smu)
2260 {
2261 int ret = 0;
2262
2263 mutex_lock(&smu->mutex);
2264
2265 if (smu->ppt_funcs->get_fan_control_mode)
2266 ret = smu->ppt_funcs->get_fan_control_mode(smu);
2267
2268 mutex_unlock(&smu->mutex);
2269
2270 return ret;
2271 }
2272
2273 int smu_set_fan_control_mode(struct smu_context *smu, int value)
2274 {
2275 int ret = 0;
2276
2277 mutex_lock(&smu->mutex);
2278
2279 if (smu->ppt_funcs->set_fan_control_mode)
2280 ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
2281
2282 mutex_unlock(&smu->mutex);
2283
2284 return ret;
2285 }
2286
2287 int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
2288 {
2289 int ret = 0;
2290
2291 mutex_lock(&smu->mutex);
2292
2293 if (smu->ppt_funcs->get_fan_speed_percent)
2294 ret = smu->ppt_funcs->get_fan_speed_percent(smu, speed);
2295
2296 mutex_unlock(&smu->mutex);
2297
2298 return ret;
2299 }
2300
2301 int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
2302 {
2303 int ret = 0;
2304
2305 mutex_lock(&smu->mutex);
2306
2307 if (smu->ppt_funcs->set_fan_speed_percent)
2308 ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);
2309
2310 mutex_unlock(&smu->mutex);
2311
2312 return ret;
2313 }
2314
2315 int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
2316 {
2317 int ret = 0;
2318
2319 mutex_lock(&smu->mutex);
2320
2321 if (smu->ppt_funcs->get_fan_speed_rpm)
2322 ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
2323
2324 mutex_unlock(&smu->mutex);
2325
2326 return ret;
2327 }
2328
2329 int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
2330 {
2331 int ret = 0;
2332
2333 mutex_lock(&smu->mutex);
2334
2335 if (smu->ppt_funcs->set_deep_sleep_dcefclk)
2336 ret = smu->ppt_funcs->set_deep_sleep_dcefclk(smu, clk);
2337
2338 mutex_unlock(&smu->mutex);
2339
2340 return ret;
2341 }
2342
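/*
 * Note: unlike most wrappers in this file, this one does not take
 * smu->mutex; presumably its display-code callers serialize access
 * themselves.  (An observation about the code as written, not a
 * documented guarantee.)
 */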
2343 int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
2344 {
2345 int ret = 0;
2346
2347 if (smu->ppt_funcs->set_active_display_count)
2348 ret = smu->ppt_funcs->set_active_display_count(smu, count);
2349
2350 return ret;
2351 }
2352
2353 int smu_get_clock_by_type(struct smu_context *smu,
2354 enum amd_pp_clock_type type,
2355 struct amd_pp_clocks *clocks)
2356 {
2357 int ret = 0;
2358
2359 mutex_lock(&smu->mutex);
2360
2361 if (smu->ppt_funcs->get_clock_by_type)
2362 ret = smu->ppt_funcs->get_clock_by_type(smu, type, clocks);
2363
2364 mutex_unlock(&smu->mutex);
2365
2366 return ret;
2367 }
2368
2369 int smu_get_max_high_clocks(struct smu_context *smu,
2370 struct amd_pp_simple_clock_info *clocks)
2371 {
2372 int ret = 0;
2373
2374 mutex_lock(&smu->mutex);
2375
2376 if (smu->ppt_funcs->get_max_high_clocks)
2377 ret = smu->ppt_funcs->get_max_high_clocks(smu, clocks);
2378
2379 mutex_unlock(&smu->mutex);
2380
2381 return ret;
2382 }
2383
2384 int smu_get_clock_by_type_with_latency(struct smu_context *smu,
2385 enum smu_clk_type clk_type,
2386 struct pp_clock_levels_with_latency *clocks)
2387 {
2388 int ret = 0;
2389
2390 mutex_lock(&smu->mutex);
2391
2392 if (smu->ppt_funcs->get_clock_by_type_with_latency)
2393 ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
2394
2395 mutex_unlock(&smu->mutex);
2396
2397 return ret;
2398 }
2399
2400 int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
2401 enum amd_pp_clock_type type,
2402 struct pp_clock_levels_with_voltage *clocks)
2403 {
2404 int ret = 0;
2405
2406 mutex_lock(&smu->mutex);
2407
2408 if (smu->ppt_funcs->get_clock_by_type_with_voltage)
2409 ret = smu->ppt_funcs->get_clock_by_type_with_voltage(smu, type, clocks);
2410
2411 mutex_unlock(&smu->mutex);
2412
2413 return ret;
2414 }
2415
2416
2417 int smu_display_clock_voltage_request(struct smu_context *smu,
2418 struct pp_display_clock_request *clock_req)
2419 {
2420 int ret = 0;
2421
2422 mutex_lock(&smu->mutex);
2423
2424 if (smu->ppt_funcs->display_clock_voltage_request)
2425 ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
2426
2427 mutex_unlock(&smu->mutex);
2428
2429 return ret;
2430 }
2431
2432
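/*
 * This wrapper defaults to -EINVAL rather than 0 so that callers
 * can distinguish "ASIC does not implement the hook" from success.
 */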
2433 int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
2434 {
2435 int ret = -EINVAL;
2436
2437 mutex_lock(&smu->mutex);
2438
2439 if (smu->ppt_funcs->display_disable_memory_clock_switch)
2440 ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
2441
2442 mutex_unlock(&smu->mutex);
2443
2444 return ret;
2445 }
2446
2447 int smu_notify_smu_enable_pwe(struct smu_context *smu)
2448 {
2449 int ret = 0;
2450
2451 mutex_lock(&smu->mutex);
2452
2453 if (smu->ppt_funcs->notify_smu_enable_pwe)
2454 ret = smu->ppt_funcs->notify_smu_enable_pwe(smu);
2455
2456 mutex_unlock(&smu->mutex);
2457
2458 return ret;
2459 }
2460
2461 int smu_set_xgmi_pstate(struct smu_context *smu,
2462 uint32_t pstate)
2463 {
2464 int ret = 0;
2465
2466 mutex_lock(&smu->mutex);
2467
2468 if (smu->ppt_funcs->set_xgmi_pstate)
2469 ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
2470
2471 mutex_unlock(&smu->mutex);
2472
2473 return ret;
2474 }
2475
2476 int smu_set_azalia_d3_pme(struct smu_context *smu)
2477 {
2478 int ret = 0;
2479
2480 mutex_lock(&smu->mutex);
2481
2482 if (smu->ppt_funcs->set_azalia_d3_pme)
2483 ret = smu->ppt_funcs->set_azalia_d3_pme(smu);
2484
2485 mutex_unlock(&smu->mutex);
2486
2487 return ret;
2488 }
2489
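/*
 * BACO ("Bus Active, Chip Off") support.  Unlike most wrappers this
 * one also NULL-checks ppt_funcs itself, presumably because it may
 * be reached before the per-ASIC table is wired up.
 */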
2490 bool smu_baco_is_support(struct smu_context *smu)
2491 {
2492 bool ret = false;
2493
2494 mutex_lock(&smu->mutex);
2495
2496 if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
2497 ret = smu->ppt_funcs->baco_is_support(smu);
2498
2499 mutex_unlock(&smu->mutex);
2500
2501 return ret;
2502 }
2503
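/*
 * Report the current BACO state.  Returns -EINVAL when the ASIC
 * provides no baco_get_state hook; *state is written only on
 * success.
 */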
2504 int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
2505 {
2506 	if (!smu->ppt_funcs->baco_get_state)
2507 		return -EINVAL;
2508
2509 mutex_lock(&smu->mutex);
2510 *state = smu->ppt_funcs->baco_get_state(smu);
2511 mutex_unlock(&smu->mutex);
2512
2513 return 0;
2514 }
2515
2516 int smu_baco_enter(struct smu_context *smu)
2517 {
2518 int ret = 0;
2519
2520 mutex_lock(&smu->mutex);
2521
2522 if (smu->ppt_funcs->baco_enter)
2523 ret = smu->ppt_funcs->baco_enter(smu);
2524
2525 mutex_unlock(&smu->mutex);
2526
2527 return ret;
2528 }
2529
2530 int smu_baco_exit(struct smu_context *smu)
2531 {
2532 int ret = 0;
2533
2534 mutex_lock(&smu->mutex);
2535
2536 if (smu->ppt_funcs->baco_exit)
2537 ret = smu->ppt_funcs->baco_exit(smu);
2538
2539 mutex_unlock(&smu->mutex);
2540
2541 return ret;
2542 }
2543
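/*
 * Mode-2 reset: a lighter-weight, SMU-assisted reset of the GPU's
 * engines, as opposed to a full mode-1/adapter reset.
 */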
2544 int smu_mode2_reset(struct smu_context *smu)
2545 {
2546 int ret = 0;
2547
2548 mutex_lock(&smu->mutex);
2549
2550 if (smu->ppt_funcs->mode2_reset)
2551 ret = smu->ppt_funcs->mode2_reset(smu);
2552
2553 mutex_unlock(&smu->mutex);
2554
2555 return ret;
2556 }
2557
2558 int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
2559 struct pp_smu_nv_clock_table *max_clocks)
2560 {
2561 int ret = 0;
2562
2563 mutex_lock(&smu->mutex);
2564
2565 if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
2566 ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
2567
2568 mutex_unlock(&smu->mutex);
2569
2570 return ret;
2571 }
2572
2573 int smu_get_uclk_dpm_states(struct smu_context *smu,
2574 unsigned int *clock_values_in_khz,
2575 unsigned int *num_states)
2576 {
2577 int ret = 0;
2578
2579 mutex_lock(&smu->mutex);
2580
2581 if (smu->ppt_funcs->get_uclk_dpm_states)
2582 ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
2583
2584 mutex_unlock(&smu->mutex);
2585
2586 return ret;
2587 }
2588
2589 enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
2590 {
2591 enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
2592
2593 mutex_lock(&smu->mutex);
2594
2595 if (smu->ppt_funcs->get_current_power_state)
2596 pm_state = smu->ppt_funcs->get_current_power_state(smu);
2597
2598 mutex_unlock(&smu->mutex);
2599
2600 return pm_state;
2601 }
2602
2603 int smu_get_dpm_clock_table(struct smu_context *smu,
2604 struct dpm_clocks *clock_table)
2605 {
2606 int ret = 0;
2607
2608 mutex_lock(&smu->mutex);
2609
2610 if (smu->ppt_funcs->get_dpm_clock_table)
2611 ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
2612
2613 mutex_unlock(&smu->mutex);
2614
2615 return ret;
2616 }
2617
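/*
 * Read the board's default power limit from the powerplay table.
 * No lock is taken here; the pptable value does not change at
 * runtime, and a missing hook simply yields 0.
 */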
2618 uint32_t smu_get_pptable_power_limit(struct smu_context *smu)
2619 {
2620 uint32_t ret = 0;
2621
2622 if (smu->ppt_funcs->get_pptable_power_limit)
2623 ret = smu->ppt_funcs->get_pptable_power_limit(smu);
2624
2625 return ret;
2626 }
2627
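/*
 * Convenience wrapper for parameterless SMU messages: it forwards
 * to smu_send_smc_msg_with_param() with a parameter of 0.
 *
 * Illustrative use (a sketch; the message name is one example from
 * the SMU_MESSAGE_TYPES list):
 *
 *	ret = smu_send_smc_msg(smu, SMU_MSG_EnableAllSmuFeatures);
 */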
2628 int smu_send_smc_msg(struct smu_context *smu,
2629 enum smu_message_type msg)
2630 {
2631 	return smu_send_smc_msg_with_param(smu, msg, 0);
2632 }
2636