/*	$NetBSD: amdgpu_vega20_smumgr.c,v 1.2 2021/12/18 23:45:27 riastradh Exp $	*/

/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_vega20_smumgr.c,v 1.2 2021/12/18 23:45:27 riastradh Exp $");

#include "smumgr.h"
#include "vega20_inc.h"
#include "soc15_common.h"
#include "vega20_smumgr.h"
#include "vega20_ppsmc.h"
#include "smu11_driver_if.h"
#include "ppatomctrl.h"
#include "pp_debug.h"
#include "smu_ucode_xfer_vi.h"
#include "smu7_smumgr.h"
#include "vega20_hwmgr.h"

/* MP Apertures */
#define MP0_Public			0x03800000
#define MP0_SRAM			0x03900000
#define MP1_Public			0x03b00000
#define MP1_SRAM			0x03c00004

/* SMN register addresses, accessed relative to the MP apertures above */
#define smnMP1_FIRMWARE_FLAGS		0x3010024
#define smnMP0_FW_INTF			0x30101c0
#define smnMP1_PUB_CTRL			0x3010b14

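/*
 * Check whether the MP1 (SMC) firmware is up.  The firmware sets the
 * interrupts-enabled bit in its FIRMWARE_FLAGS register once it is
 * running; we read that register through the PCIe indirect (SMN)
 * interface.
 */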
bool vega20_is_smc_ram_running(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t mp1_fw_flags;

	mp1_fw_flags = RREG32_PCIE(MP1_Public |
				   (smnMP1_FIRMWARE_FLAGS & 0xffffffff));

	if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
	    MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
		return true;

	return false;
}

/*
 * Check if the SMC has responded to the previous message.
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   the response value read back from MP1_SMN_C2PMSG_90;
 *           PPSMC_Result_OK indicates success.
 */
static uint32_t vega20_wait_for_response(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t reg;

	reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);

	/* Wait until the SMC writes a non-zero response code. */
	phm_wait_for_register_unequal(hwmgr, reg,
			0, MP1_C2PMSG_90__CONTENT_MASK);

	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
}

/*
 * Send a message to the SMC without waiting for its response.
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @param    msg    the message to send.
 * @return   always 0.
 */
static int vega20_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
		uint16_t msg)
{
	struct amdgpu_device *adev = hwmgr->adev;

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);

	return 0;
}

/*
 * Send a message to the SMC and wait for its response.
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @param    msg    the message to send.
 * @return   0 on success, -EIO if the SMC reports failure.
 */
static int vega20_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
	struct amdgpu_device *adev = hwmgr->adev;
	int ret = 0;

	/* Drain any outstanding response, then clear the response register. */
	vega20_wait_for_response(hwmgr);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);

	vega20_send_msg_to_smc_without_waiting(hwmgr, msg);

	ret = vega20_wait_for_response(hwmgr);
	if (ret != PPSMC_Result_OK)
		pr_err("Failed to send message 0x%x, response 0x%x\n", msg, ret);

	return (ret == PPSMC_Result_OK) ? 0 : -EIO;
}

/*
 * Send a message to the SMC with a parameter and wait for its response.
 *
 * @param    hwmgr:     the address of the powerplay hardware manager.
 * @param    msg:       the message to send.
 * @param    parameter: the parameter to send.
 * @return   0 on success, -EIO if the SMC reports failure.
 */
static int vega20_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
		uint16_t msg, uint32_t parameter)
{
	struct amdgpu_device *adev = hwmgr->adev;
	int ret = 0;

	vega20_wait_for_response(hwmgr);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);

	/* The 32-bit argument goes into C2PMSG_82 before the message is sent. */
	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);

	vega20_send_msg_to_smc_without_waiting(hwmgr, msg);

	ret = vega20_wait_for_response(hwmgr);
	if (ret != PPSMC_Result_OK)
		pr_err("Failed to send message 0x%x, response 0x%x\n", msg, ret);

	return (ret == PPSMC_Result_OK) ? 0 : -EIO;
}

/* Read back the SMC's 32-bit reply argument from C2PMSG_82. */
static uint32_t vega20_get_argument(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;

	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
}

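/*
 * A full query round-trip through the mailbox thus looks like the
 * following sketch (PPSMC_MSG_GetCurrentRpm is one example message from
 * vega20_ppsmc.h; error handling elided):
 *
 *	uint32_t rpm = 0;
 *	if (vega20_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentRpm) == 0)
 *		rpm = vega20_get_argument(hwmgr);
 */
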
/*
 * Copy a table from the SMC into the driver FB.
 *
 * @param   hwmgr       the address of the HW manager
 * @param   table       the buffer to copy the table into
 * @param   table_id    the ID of the table to copy from
 */
static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
				      uint8_t *table, int16_t table_id)
{
	struct vega20_smumgr *priv =
			(struct vega20_smumgr *)(hwmgr->smu_backend);
	struct amdgpu_device *adev = hwmgr->adev;
	int ret = 0;

	PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT,
			"Invalid SMU Table ID!", return -EINVAL);
	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
			"Invalid SMU Table version!", return -EINVAL);
	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
			"Invalid SMU Table Length!", return -EINVAL);

	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrHigh,
			upper_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
			"[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrLow,
			lower_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
			"[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_TransferTableSmu2Dram, table_id)) == 0,
			"[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
			return ret);

	/* flush hdp cache */
	amdgpu_asic_flush_hdp(adev, NULL);

	memcpy(table, priv->smu_tables.entry[table_id].table,
			priv->smu_tables.entry[table_id].size);

	return 0;
}

/*
 * Copy a table from the driver FB into the SMC.
 *
 * @param   hwmgr       the address of the HW manager
 * @param   table       the buffer to copy the table from
 * @param   table_id    the ID of the table to copy to
 */
static int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr,
				    uint8_t *table, int16_t table_id)
{
	struct vega20_smumgr *priv =
			(struct vega20_smumgr *)(hwmgr->smu_backend);
	struct amdgpu_device *adev = hwmgr->adev;
	int ret = 0;

	PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT,
			"Invalid SMU Table ID!", return -EINVAL);
	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
			"Invalid SMU Table version!", return -EINVAL);
	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
			"Invalid SMU Table Length!", return -EINVAL);

	memcpy(priv->smu_tables.entry[table_id].table, table,
			priv->smu_tables.entry[table_id].size);

	amdgpu_asic_flush_hdp(adev, NULL);

	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrHigh,
			upper_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
			"[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrLow,
			lower_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
			"[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_TransferTableDram2Smu, table_id)) == 0,
			"[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
			return ret);

	return 0;
}

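/*
 * Push one workload's activity-monitor coefficients to the SMC.  The
 * 32-bit transfer parameter packs the table ID in the low 16 bits and
 * the workload type in the high 16 bits.
 */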
int vega20_set_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
		uint8_t *table, uint16_t workload_type)
{
	struct vega20_smumgr *priv =
			(struct vega20_smumgr *)(hwmgr->smu_backend);
	struct amdgpu_device *adev = hwmgr->adev;
	int ret = 0;

	memcpy(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table, table,
			priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);

	amdgpu_asic_flush_hdp(adev, NULL);

	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrHigh,
			upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
			"[SetActivityMonitor] Attempt to Set Dram Addr High Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrLow,
			lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
			"[SetActivityMonitor] Attempt to Set Dram Addr Low Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_TransferTableDram2Smu, TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16))) == 0,
			"[SetActivityMonitor] Attempt to Transfer Table To SMU Failed!",
			return ret);

	return 0;
}

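/*
 * Fetch one workload's activity-monitor coefficients from the SMC,
 * using the same table-ID/workload-type packing as above.
 */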
int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
		uint8_t *table, uint16_t workload_type)
{
	struct vega20_smumgr *priv =
			(struct vega20_smumgr *)(hwmgr->smu_backend);
	struct amdgpu_device *adev = hwmgr->adev;
	int ret = 0;

	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrHigh,
			upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
			"[GetActivityMonitor] Attempt to Set Dram Addr High Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrLow,
			lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
			"[GetActivityMonitor] Attempt to Set Dram Addr Low Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_TransferTableSmu2Dram,
			TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16))) == 0,
			"[GetActivityMonitor] Attempt to Transfer Table From SMU Failed!",
			return ret);

	/* flush hdp cache */
	amdgpu_asic_flush_hdp(adev, NULL);

	memcpy(table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table,
			priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);

	return 0;
}

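/*
 * Enable or disable the features named by a 64-bit mask.  The mailbox
 * argument is only 32 bits wide, so the mask is split into low and high
 * halves and sent as two messages.  A caller toggles a single feature
 * along these lines (sketch; FEATURE_DPM_UCLK_BIT is one example bit
 * from smu11_driver_if.h):
 *
 *	vega20_enable_smc_features(hwmgr, false,
 *			1ULL << FEATURE_DPM_UCLK_BIT);
 */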
int vega20_enable_smc_features(struct pp_hwmgr *hwmgr,
		bool enable, uint64_t feature_mask)
{
	uint32_t smu_features_low, smu_features_high;
	int ret = 0;

	smu_features_low = (uint32_t)((feature_mask & SMU_FEATURES_LOW_MASK) >> SMU_FEATURES_LOW_SHIFT);
	smu_features_high = (uint32_t)((feature_mask & SMU_FEATURES_HIGH_MASK) >> SMU_FEATURES_HIGH_SHIFT);

	if (enable) {
		PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low)) == 0,
				"[EnableDisableSMCFeatures] Attempt to enable SMU features Low failed!",
				return ret);
		PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high)) == 0,
				"[EnableDisableSMCFeatures] Attempt to enable SMU features High failed!",
				return ret);
	} else {
		PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low)) == 0,
				"[EnableDisableSMCFeatures] Attempt to disable SMU features Low failed!",
				return ret);
		PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high)) == 0,
				"[EnableDisableSMCFeatures] Attempt to disable SMU features High failed!",
				return ret);
	}

	return 0;
}

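/*
 * Read back the 64-bit enabled-feature mask, reassembled from the two
 * 32-bit halves the SMC reports.
 */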
int vega20_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
		uint64_t *features_enabled)
{
	uint32_t smc_features_low, smc_features_high;
	int ret = 0;

	if (features_enabled == NULL)
		return -EINVAL;

	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr,
			PPSMC_MSG_GetEnabledSmuFeaturesLow)) == 0,
			"[GetEnabledSMCFeatures] Attempt to get SMU features Low failed!",
			return ret);
	smc_features_low = vega20_get_argument(hwmgr);
	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr,
			PPSMC_MSG_GetEnabledSmuFeaturesHigh)) == 0,
			"[GetEnabledSMCFeatures] Attempt to get SMU features High failed!",
			return ret);
	smc_features_high = vega20_get_argument(hwmgr);

	*features_enabled = ((((uint64_t)smc_features_low << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) |
			(((uint64_t)smc_features_high << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK));

	return 0;
}

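/*
 * Tell the SMC where the PMSTATUSLOG (tools) buffer lives in VRAM.
 * Skipped silently if the buffer was never allocated.
 */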
static int vega20_set_tools_address(struct pp_hwmgr *hwmgr)
{
	struct vega20_smumgr *priv =
			(struct vega20_smumgr *)(hwmgr->smu_backend);
	int ret = 0;

	if (priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr) {
		ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetToolsDramAddrHigh,
				upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr));
		if (!ret)
			ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetToolsDramAddrLow,
					lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr));
	}

	return ret;
}

int vega20_set_pptable_driver_address(struct pp_hwmgr *hwmgr)
{
	struct vega20_smumgr *priv =
			(struct vega20_smumgr *)(hwmgr->smu_backend);
	int ret = 0;

	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrHigh,
			upper_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr))) == 0,
			"[SetPPtableDriverAddress] Attempt to Set Dram Addr High Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrLow,
			lower_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr))) == 0,
			"[SetPPtableDriverAddress] Attempt to Set Dram Addr Low Failed!",
			return ret);

	return ret;
}

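/*
 * Back-end init: verify the SMU firmware is available, then allocate
 * one VRAM buffer object per SMU table (pptable, watermarks,
 * pmstatuslog, overdrive, metrics, activity-monitor coefficients) and
 * record its GPU address, CPU mapping, and size.
 */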
static int vega20_smu_init(struct pp_hwmgr *hwmgr)
{
	struct vega20_smumgr *priv;
	unsigned long tools_size = 0x19000;
	int ret = 0;

	struct cgs_firmware_info info = {0};

	ret = cgs_get_firmware_info(hwmgr->device,
				smu7_convert_fw_type_to_cgs(UCODE_ID_SMU),
				&info);
	if (ret || !info.kptr)
		return -EINVAL;

	priv = kzalloc(sizeof(struct vega20_smumgr), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	hwmgr->smu_backend = priv;

	/* allocate space for pptable */
	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
			sizeof(PPTable_t),
			PAGE_SIZE,
			AMDGPU_GEM_DOMAIN_VRAM,
			&priv->smu_tables.entry[TABLE_PPTABLE].handle,
			&priv->smu_tables.entry[TABLE_PPTABLE].mc_addr,
			&priv->smu_tables.entry[TABLE_PPTABLE].table);
	if (ret)
		goto free_backend;

	priv->smu_tables.entry[TABLE_PPTABLE].version = 0x01;
	priv->smu_tables.entry[TABLE_PPTABLE].size = sizeof(PPTable_t);

	/* allocate space for watermarks table */
	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
			sizeof(Watermarks_t),
			PAGE_SIZE,
			AMDGPU_GEM_DOMAIN_VRAM,
			&priv->smu_tables.entry[TABLE_WATERMARKS].handle,
			&priv->smu_tables.entry[TABLE_WATERMARKS].mc_addr,
			&priv->smu_tables.entry[TABLE_WATERMARKS].table);
	if (ret)
		goto err0;

	priv->smu_tables.entry[TABLE_WATERMARKS].version = 0x01;
	priv->smu_tables.entry[TABLE_WATERMARKS].size = sizeof(Watermarks_t);

	/* allocate space for pmstatuslog table */
	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
			tools_size,
			PAGE_SIZE,
			AMDGPU_GEM_DOMAIN_VRAM,
			&priv->smu_tables.entry[TABLE_PMSTATUSLOG].handle,
			&priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr,
			&priv->smu_tables.entry[TABLE_PMSTATUSLOG].table);
	if (ret)
		goto err1;

	priv->smu_tables.entry[TABLE_PMSTATUSLOG].version = 0x01;
	priv->smu_tables.entry[TABLE_PMSTATUSLOG].size = tools_size;

	/* allocate space for OverDrive table */
	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
			sizeof(OverDriveTable_t),
			PAGE_SIZE,
			AMDGPU_GEM_DOMAIN_VRAM,
			&priv->smu_tables.entry[TABLE_OVERDRIVE].handle,
			&priv->smu_tables.entry[TABLE_OVERDRIVE].mc_addr,
			&priv->smu_tables.entry[TABLE_OVERDRIVE].table);
	if (ret)
		goto err2;

	priv->smu_tables.entry[TABLE_OVERDRIVE].version = 0x01;
	priv->smu_tables.entry[TABLE_OVERDRIVE].size = sizeof(OverDriveTable_t);

	/* allocate space for SmuMetrics table */
	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
			sizeof(SmuMetrics_t),
			PAGE_SIZE,
			AMDGPU_GEM_DOMAIN_VRAM,
			&priv->smu_tables.entry[TABLE_SMU_METRICS].handle,
			&priv->smu_tables.entry[TABLE_SMU_METRICS].mc_addr,
			&priv->smu_tables.entry[TABLE_SMU_METRICS].table);
	if (ret)
		goto err3;

	priv->smu_tables.entry[TABLE_SMU_METRICS].version = 0x01;
	priv->smu_tables.entry[TABLE_SMU_METRICS].size = sizeof(SmuMetrics_t);

	/* allocate space for ActivityMonitor table */
	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
			sizeof(DpmActivityMonitorCoeffInt_t),
			PAGE_SIZE,
			AMDGPU_GEM_DOMAIN_VRAM,
			&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].handle,
			&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr,
			&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table);
	if (ret)
		goto err4;

	priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].version = 0x01;
	priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size = sizeof(DpmActivityMonitorCoeffInt_t);

	return 0;

err4:
	amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_SMU_METRICS].handle,
			&priv->smu_tables.entry[TABLE_SMU_METRICS].mc_addr,
			&priv->smu_tables.entry[TABLE_SMU_METRICS].table);
err3:
	amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_OVERDRIVE].handle,
			&priv->smu_tables.entry[TABLE_OVERDRIVE].mc_addr,
			&priv->smu_tables.entry[TABLE_OVERDRIVE].table);
err2:
	amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PMSTATUSLOG].handle,
			&priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr,
			&priv->smu_tables.entry[TABLE_PMSTATUSLOG].table);
err1:
	amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_WATERMARKS].handle,
			&priv->smu_tables.entry[TABLE_WATERMARKS].mc_addr,
			&priv->smu_tables.entry[TABLE_WATERMARKS].table);
err0:
	amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle,
			&priv->smu_tables.entry[TABLE_PPTABLE].mc_addr,
			&priv->smu_tables.entry[TABLE_PPTABLE].table);
free_backend:
	kfree(hwmgr->smu_backend);
	/* Clear the stale pointer so vega20_smu_fini() does not free it again. */
	hwmgr->smu_backend = NULL;

	return -EINVAL;
}

static int vega20_smu_fini(struct pp_hwmgr *hwmgr)
{
	struct vega20_smumgr *priv =
			(struct vega20_smumgr *)(hwmgr->smu_backend);

	if (priv) {
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle,
				&priv->smu_tables.entry[TABLE_PPTABLE].mc_addr,
				&priv->smu_tables.entry[TABLE_PPTABLE].table);
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_WATERMARKS].handle,
				&priv->smu_tables.entry[TABLE_WATERMARKS].mc_addr,
				&priv->smu_tables.entry[TABLE_WATERMARKS].table);
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PMSTATUSLOG].handle,
				&priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr,
				&priv->smu_tables.entry[TABLE_PMSTATUSLOG].table);
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_OVERDRIVE].handle,
				&priv->smu_tables.entry[TABLE_OVERDRIVE].mc_addr,
				&priv->smu_tables.entry[TABLE_OVERDRIVE].table);
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_SMU_METRICS].handle,
				&priv->smu_tables.entry[TABLE_SMU_METRICS].mc_addr,
				&priv->smu_tables.entry[TABLE_SMU_METRICS].table);
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].handle,
				&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr,
				&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table);
		kfree(hwmgr->smu_backend);
		hwmgr->smu_backend = NULL;
	}
	return 0;
}

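/*
 * On Vega20 the SMC firmware is loaded by the PSP rather than by the
 * driver, so "starting" the SMU reduces to checking that it is already
 * running and programming the tools-buffer address.
 */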
static int vega20_start_smu(struct pp_hwmgr *hwmgr)
{
	int ret;

	ret = vega20_is_smc_ram_running(hwmgr);
	PP_ASSERT_WITH_CODE(ret,
			"[Vega20StartSmu] SMC is not running!",
			return -EINVAL);

	ret = vega20_set_tools_address(hwmgr);
	PP_ASSERT_WITH_CODE(!ret,
			"[Vega20StartSmu] Failed to set tools address!",
			return ret);

	return 0;
}

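/* DPM is considered running if any of the SMC_DPM_FEATURES bits is enabled. */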
static bool vega20_is_dpm_running(struct pp_hwmgr *hwmgr)
{
	uint64_t features_enabled = 0;

	vega20_get_enabled_smc_features(hwmgr, &features_enabled);

	if (features_enabled & SMC_DPM_FEATURES)
		return true;
	else
		return false;
}

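/*
 * Unified table read/write entry point: rw == true copies the table
 * from the SMC into the caller's buffer, rw == false pushes the
 * caller's buffer to the SMC.  Callers go through the smumgr layer,
 * e.g. (sketch) fetching the metrics table:
 *
 *	SmuMetrics_t metrics;
 *	smum_smc_table_manager(hwmgr, (uint8_t *)&metrics,
 *			TABLE_SMU_METRICS, true);
 */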
static int vega20_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table,
				    uint16_t table_id, bool rw)
{
	int ret;

	if (rw)
		ret = vega20_copy_table_from_smc(hwmgr, table, table_id);
	else
		ret = vega20_copy_table_to_smc(hwmgr, table, table_id);

	return ret;
}

const struct pp_smumgr_func vega20_smu_funcs = {
	.name = "vega20_smu",
	.smu_init = &vega20_smu_init,
	.smu_fini = &vega20_smu_fini,
	.start_smu = &vega20_start_smu,
	.request_smu_load_specific_fw = NULL,
	.send_msg_to_smc = &vega20_send_msg_to_smc,
	.send_msg_to_smc_with_parameter = &vega20_send_msg_to_smc_with_parameter,
	.download_pptable_settings = NULL,
	.upload_pptable_settings = NULL,
	.is_dpm_running = vega20_is_dpm_running,
	.get_argument = vega20_get_argument,
	.smc_table_manager = vega20_smc_table_manager,
};
    627