/* amdgpu_nv.c, revision 1.2 (NetBSD src, sys/external/bsd/drm2/dist/drm/amd/amdgpu) */
      1 /*	$NetBSD: amdgpu_nv.c,v 1.2 2021/12/18 23:44:58 riastradh Exp $	*/
      2 
      3 /*
      4  * Copyright 2019 Advanced Micro Devices, Inc.
      5  *
      6  * Permission is hereby granted, free of charge, to any person obtaining a
      7  * copy of this software and associated documentation files (the "Software"),
      8  * to deal in the Software without restriction, including without limitation
      9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
     10  * and/or sell copies of the Software, and to permit persons to whom the
     11  * Software is furnished to do so, subject to the following conditions:
     12  *
     13  * The above copyright notice and this permission notice shall be included in
     14  * all copies or substantial portions of the Software.
     15  *
     16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     22  * OTHER DEALINGS IN THE SOFTWARE.
     23  *
     24  */
     25 #include <sys/cdefs.h>
     26 __KERNEL_RCSID(0, "$NetBSD: amdgpu_nv.c,v 1.2 2021/12/18 23:44:58 riastradh Exp $");
     27 
     28 #include <linux/firmware.h>
     29 #include <linux/slab.h>
     30 #include <linux/module.h>
     31 #include <linux/pci.h>
     32 
     33 #include "amdgpu.h"
     34 #include "amdgpu_atombios.h"
     35 #include "amdgpu_ih.h"
     36 #include "amdgpu_uvd.h"
     37 #include "amdgpu_vce.h"
     38 #include "amdgpu_ucode.h"
     39 #include "amdgpu_psp.h"
     40 #include "amdgpu_smu.h"
     41 #include "atom.h"
     42 #include "amd_pcie.h"
     43 
     44 #include "gc/gc_10_1_0_offset.h"
     45 #include "gc/gc_10_1_0_sh_mask.h"
     46 #include "hdp/hdp_5_0_0_offset.h"
     47 #include "hdp/hdp_5_0_0_sh_mask.h"
     48 #include "smuio/smuio_11_0_0_offset.h"
     49 
     50 #include "soc15.h"
     51 #include "soc15_common.h"
     52 #include "gmc_v10_0.h"
     53 #include "gfxhub_v2_0.h"
     54 #include "mmhub_v2_0.h"
     55 #include "nbio_v2_3.h"
     56 #include "nv.h"
     57 #include "navi10_ih.h"
     58 #include "gfx_v10_0.h"
     59 #include "sdma_v5_0.h"
     60 #include "vcn_v2_0.h"
     61 #include "jpeg_v2_0.h"
     62 #include "dce_virtual.h"
     63 #include "mes_v10_1.h"
     64 #include "mxgpu_nv.h"
     65 
     66 static const struct amd_ip_funcs nv_common_ip_funcs;
     67 
     68 /*
     69  * Indirect registers accessor
     70  */
     71 static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
     72 {
     73 	unsigned long flags, address, data;
     74 	u32 r;
     75 	address = adev->nbio.funcs->get_pcie_index_offset(adev);
     76 	data = adev->nbio.funcs->get_pcie_data_offset(adev);
     77 
     78 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
     79 	WREG32(address, reg);
     80 	(void)RREG32(address);
     81 	r = RREG32(data);
     82 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
     83 	return r;
     84 }
     85 
     86 static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
     87 {
     88 	unsigned long flags, address, data;
     89 
     90 	address = adev->nbio.funcs->get_pcie_index_offset(adev);
     91 	data = adev->nbio.funcs->get_pcie_data_offset(adev);
     92 
     93 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
     94 	WREG32(address, reg);
     95 	(void)RREG32(address);
     96 	WREG32(data, v);
     97 	(void)RREG32(data);
     98 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
     99 }
    100 
    101 static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
    102 {
    103 	unsigned long flags, address, data;
    104 	u32 r;
    105 
    106 	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
    107 	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
    108 
    109 	spin_lock_irqsave(&adev->didt_idx_lock, flags);
    110 	WREG32(address, (reg));
    111 	r = RREG32(data);
    112 	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
    113 	return r;
    114 }
    115 
    116 static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
    117 {
    118 	unsigned long flags, address, data;
    119 
    120 	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
    121 	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
    122 
    123 	spin_lock_irqsave(&adev->didt_idx_lock, flags);
    124 	WREG32(address, (reg));
    125 	WREG32(data, (v));
    126 	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
    127 }
    128 
/* Report the ASIC memory size as seen by the NBIO block (asic_funcs hook). */
static u32 nv_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}
    133 
/* Return the reference (xtal) clock frequency from the SPLL settings. */
static u32 nv_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}
    138 
    139 
    140 void nv_grbm_select(struct amdgpu_device *adev,
    141 		     u32 me, u32 pipe, u32 queue, u32 vmid)
    142 {
    143 	u32 grbm_gfx_cntl = 0;
    144 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
    145 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
    146 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
    147 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
    148 
    149 	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
    150 }
    151 
/* Stub: VGA state control not implemented for Navi yet (asic_funcs hook). */
static void nv_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}
    156 
/* Stub: reading the vbios with the ASIC disabled is not implemented;
 * always reports failure so callers fall back to other bios sources. */
static bool nv_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}
    162 
    163 static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
    164 				  u8 *bios, u32 length_bytes)
    165 {
    166 	u32 *dw_ptr;
    167 	u32 i, length_dw;
    168 
    169 	if (bios == NULL)
    170 		return false;
    171 	if (length_bytes == 0)
    172 		return false;
    173 	/* APU vbios image is part of sbios image */
    174 	if (adev->flags & AMD_IS_APU)
    175 		return false;
    176 
    177 	dw_ptr = (u32 *)bios;
    178 	length_dw = ALIGN(length_bytes, 4) / 4;
    179 
    180 	/* set rom index to 0 */
    181 	WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
    182 	/* read out the rom data */
    183 	for (i = 0; i < length_dw; i++)
    184 		dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));
    185 
    186 	return true;
    187 }
    188 
/*
 * Whitelist of registers that may be read through nv_read_register()
 * (the userspace register-read ioctl path).  Anything not listed here
 * is rejected with -EINVAL.
 */
static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
#if 0	/* TODO: will set it when SDMA header is available */
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
#endif
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
};
    212 
    213 static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
    214 					 u32 sh_num, u32 reg_offset)
    215 {
    216 	uint32_t val;
    217 
    218 	mutex_lock(&adev->grbm_idx_mutex);
    219 	if (se_num != 0xffffffff || sh_num != 0xffffffff)
    220 		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
    221 
    222 	val = RREG32(reg_offset);
    223 
    224 	if (se_num != 0xffffffff || sh_num != 0xffffffff)
    225 		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
    226 	mutex_unlock(&adev->grbm_idx_mutex);
    227 	return val;
    228 }
    229 
    230 static uint32_t nv_get_register_value(struct amdgpu_device *adev,
    231 				      bool indexed, u32 se_num,
    232 				      u32 sh_num, u32 reg_offset)
    233 {
    234 	if (indexed) {
    235 		return nv_read_indexed_register(adev, se_num, sh_num, reg_offset);
    236 	} else {
    237 		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
    238 			return adev->gfx.config.gb_addr_config;
    239 		return RREG32(reg_offset);
    240 	}
    241 }
    242 
    243 static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
    244 			    u32 sh_num, u32 reg_offset, u32 *value)
    245 {
    246 	uint32_t i;
    247 	struct soc15_allowed_register_entry  *en;
    248 
    249 	*value = 0;
    250 	for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
    251 		en = &nv_allowed_read_registers[i];
    252 		if (reg_offset !=
    253 		    (adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset))
    254 			continue;
    255 
    256 		*value = nv_get_register_value(adev,
    257 					       nv_allowed_read_registers[i].grbm_indexed,
    258 					       se_num, sh_num, reg_offset);
    259 		return 0;
    260 	}
    261 	return -EINVAL;
    262 }
    263 
/* NOTE(review): dead code, compiled out.  PCI config reset has not worked
 * since vega10 (see the FIXME in nv_asic_reset); kept only as a reference
 * for a possible future re-enable. */
#if 0
static void nv_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = nbio_v2_3_get_memsize(adev);
		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

}
#endif
    288 
    289 static int nv_asic_mode1_reset(struct amdgpu_device *adev)
    290 {
    291 	u32 i;
    292 	int ret = 0;
    293 
    294 	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
    295 
    296 	dev_info(adev->dev, "GPU mode1 reset\n");
    297 
    298 	/* disable BM */
    299 	pci_clear_master(adev->pdev);
    300 
    301 	pci_save_state(adev->pdev);
    302 
    303 	ret = psp_gpu_reset(adev);
    304 	if (ret)
    305 		dev_err(adev->dev, "GPU mode1 reset failed\n");
    306 
    307 	pci_restore_state(adev->pdev);
    308 
    309 	/* wait for asic to come out of reset */
    310 	for (i = 0; i < adev->usec_timeout; i++) {
    311 		u32 memsize = adev->nbio.funcs->get_memsize(adev);
    312 
    313 		if (memsize != 0xffffffff)
    314 			break;
    315 		udelay(1);
    316 	}
    317 
    318 	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
    319 
    320 	return ret;
    321 }
    322 
    323 static bool nv_asic_supports_baco(struct amdgpu_device *adev)
    324 {
    325 	struct smu_context *smu = &adev->smu;
    326 
    327 	if (smu_baco_is_support(smu))
    328 		return true;
    329 	else
    330 		return false;
    331 }
    332 
    333 static enum amd_reset_method
    334 nv_asic_reset_method(struct amdgpu_device *adev)
    335 {
    336 	struct smu_context *smu = &adev->smu;
    337 
    338 	if (!amdgpu_sriov_vf(adev) && smu_baco_is_support(smu))
    339 		return AMD_RESET_METHOD_BACO;
    340 	else
    341 		return AMD_RESET_METHOD_MODE1;
    342 }
    343 
    344 static int nv_asic_reset(struct amdgpu_device *adev)
    345 {
    346 
    347 	/* FIXME: it doesn't work since vega10 */
    348 #if 0
    349 	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
    350 
    351 	nv_gpu_pci_config_reset(adev);
    352 
    353 	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
    354 #endif
    355 	int ret = 0;
    356 	struct smu_context *smu = &adev->smu;
    357 
    358 	if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
    359 		if (!adev->in_suspend)
    360 			amdgpu_inc_vram_lost(adev);
    361 		ret = smu_baco_enter(smu);
    362 		if (ret)
    363 			return ret;
    364 		ret = smu_baco_exit(smu);
    365 		if (ret)
    366 			return ret;
    367 	} else {
    368 		if (!adev->in_suspend)
    369 			amdgpu_inc_vram_lost(adev);
    370 		ret = nv_asic_mode1_reset(adev);
    371 	}
    372 
    373 	return ret;
    374 }
    375 
/* Stub: UVD clock programming not implemented on Navi; report success. */
static int nv_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/* todo */
	return 0;
}
    381 
/* Stub: VCE clock programming not implemented on Navi; report success. */
static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */
	return 0;
}
    387 
    388 static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
    389 {
    390 	if (pci_is_root_bus(adev->pdev->bus))
    391 		return;
    392 
    393 	if (amdgpu_pcie_gen2 == 0)
    394 		return;
    395 
    396 	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
    397 					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
    398 		return;
    399 
    400 	/* todo */
    401 }
    402 
/* Stub: ASPM (link power management) programming not implemented;
 * only honors the amdgpu_aspm=0 module parameter. */
static void nv_program_aspm(struct amdgpu_device *adev)
{

	if (amdgpu_aspm == 0)
		return;

	/* todo */
}
    411 
/* Enable/disable both the main and self-ring doorbell apertures in NBIO. */
static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}
    418 
/* IP block descriptor for the Navi "common" block (registered first in
 * nv_set_ip_blocks); hooks are in nv_common_ip_funcs below. */
static const struct amdgpu_ip_block_version nv_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &nv_common_ip_funcs,
};
    427 
    428 static int nv_reg_base_init(struct amdgpu_device *adev)
    429 {
    430 	int r;
    431 
    432 	if (amdgpu_discovery) {
    433 		r = amdgpu_discovery_reg_base_init(adev);
    434 		if (r) {
    435 			DRM_WARN("failed to init reg base from ip discovery table, "
    436 					"fallback to legacy init method\n");
    437 			goto legacy_init;
    438 		}
    439 
    440 		return 0;
    441 	}
    442 
    443 legacy_init:
    444 	switch (adev->asic_type) {
    445 	case CHIP_NAVI10:
    446 		navi10_reg_base_init(adev);
    447 		break;
    448 	case CHIP_NAVI14:
    449 		navi14_reg_base_init(adev);
    450 		break;
    451 	case CHIP_NAVI12:
    452 		navi12_reg_base_init(adev);
    453 		break;
    454 	default:
    455 		return -EINVAL;
    456 	}
    457 
    458 	return 0;
    459 }
    460 
/*
 * Register all hardware IP blocks for the detected Navi ASIC.
 *
 * The registration order matters: it is the order the driver will init
 * the blocks in (common -> gmc -> ih -> psp -> smu -> display -> gfx ->
 * sdma -> vcn -> jpeg -> mes).  The SMU block is added early for
 * PSP-loaded firmware and late for direct firmware loading, and skipped
 * entirely under SR-IOV (the host owns the SMU).
 *
 * Returns 0 on success, -EINVAL for an unsupported ASIC, or the error
 * from nv_reg_base_init().
 */
int nv_set_ip_blocks(struct amdgpu_device *adev)
{
	int r;

	/* Set IP register base before any HW register access */
	r = nv_reg_base_init(adev);
	if (r)
		return r;

	adev->nbio.funcs = &nbio_v2_3_funcs;
	adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;

	adev->nbio.funcs->detect_hw_virt(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_nv_virt_ops;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		/* SMU early for PSP firmware loading, bare-metal only */
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		/* SMU late for direct firmware loading, bare-metal only */
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
		if (adev->enable_mes)
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
		break;
	case CHIP_NAVI12:
		/* Same sequence as NAVI10/14, minus the MES block. */
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
    532 
/* Read the silicon revision id via the NBIO block. */
static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}
    537 
/* Flush the HDP (host data path) write cache, via NBIO; @ring may be
 * NULL for a direct MMIO flush. */
static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	adev->nbio.funcs->hdp_flush(adev, ring);
}
    542 
    543 static void nv_invalidate_hdp(struct amdgpu_device *adev,
    544 				struct amdgpu_ring *ring)
    545 {
    546 	if (!ring || !ring->funcs->emit_wreg) {
    547 		WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
    548 	} else {
    549 		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
    550 					HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
    551 	}
    552 }
    553 
/* Navi always requires a full ASIC reset (no per-IP soft reset support). */
static bool nv_need_full_reset(struct amdgpu_device *adev)
{
	return true;
}
    558 
    559 static void nv_get_pcie_usage(struct amdgpu_device *adev,
    560 			      uint64_t *count0,
    561 			      uint64_t *count1)
    562 {
    563 	/*TODO*/
    564 }
    565 
/*
 * Decide whether the ASIC must be reset during driver init (e.g. sOS
 * already running from a previous driver instance).  The detection via
 * the MP0 sign-of-life register is compiled out until mode1 reset is
 * functional, so this currently always answers "no".
 */
static bool nv_need_reset_on_init(struct amdgpu_device *adev)
{
#if 0
	u32 sol_reg;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check sOS sign of life register to confirm sys driver and sOS
	 * are already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;
#endif
	/* TODO: re-enable it when mode1 reset is functional */
	return false;
}
    584 
/* Stub backing the pcie_replay_count sysfs attribute; always reports 0
 * until the real counter is wired up. */
static uint64_t nv_get_pcie_replay_count(struct amdgpu_device *adev)
{

	/* TODO
	 * dummy implement for pcie_replay_count sysfs interface
	 * */

	return 0;
}
    594 
/*
 * Populate the per-engine doorbell index assignments for Navi.  These
 * are fixed slot numbers in the doorbell BAR, defined by the
 * AMDGPU_NAVI10_DOORBELL_* layout; the IP blocks read them back when
 * creating their rings.
 */
static void nv_init_doorbell_index(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
	adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
	adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
	adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
	adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
	adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
	adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
	adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;

	/* doorbell slots are 64-bit on Navi, hence the << 1 on the 32-bit max */
	adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
	adev->doorbell_index.sdma_doorbell_range = 20;
}
    623 
/* ASIC-level callback table for Navi, installed in nv_common_early_init(). */
static const struct amdgpu_asic_funcs nv_asic_funcs =
{
	.read_disabled_bios = &nv_read_disabled_bios,
	.read_bios_from_rom = &nv_read_bios_from_rom,
	.read_register = &nv_read_register,
	.reset = &nv_asic_reset,
	.reset_method = &nv_asic_reset_method,
	.set_vga_state = &nv_vga_set_state,
	.get_xclk = &nv_get_xclk,
	.set_uvd_clocks = &nv_set_uvd_clocks,
	.set_vce_clocks = &nv_set_vce_clocks,
	.get_config_memsize = &nv_get_config_memsize,
	.flush_hdp = &nv_flush_hdp,
	.invalidate_hdp = &nv_invalidate_hdp,
	.init_doorbell_index = &nv_init_doorbell_index,
	.need_full_reset = &nv_need_full_reset,
	.get_pcie_usage = &nv_get_pcie_usage,
	.need_reset_on_init = &nv_need_reset_on_init,
	.get_pcie_replay_count = &nv_get_pcie_replay_count,
	.supports_baco = &nv_asic_supports_baco,
};
    645 
/*
 * Early init for the common IP block: install the register accessors
 * and asic_funcs table, read the revision id, and set the per-ASIC
 * clock-gating (cg_flags) and power-gating (pg_flags) feature masks.
 *
 * Returns 0 on success, -EINVAL for an unsupported ASIC type.
 */
static int nv_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Remap target for HDP registers exposed to user processes. */
	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &nv_pcie_rreg;
	adev->pcie_wreg = &nv_pcie_wreg;

	/* TODO: will add them during VCN v2 implementation */
	adev->uvd_ctx_rreg = NULL;
	adev->uvd_ctx_wreg = NULL;

	adev->didt_rreg = &nv_didt_rreg;
	adev->didt_wreg = &nv_didt_wreg;

	adev->asic_funcs = &nv_asic_funcs;

	adev->rev_id = nv_get_rev_id(adev);
	adev->external_rev_id = 0xff;
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_NAVI14:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 20;
		break;
	case CHIP_NAVI12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB;
		/* guest vm gets 0xffffffff when reading RCC_DEV0_EPF0_STRAP0,
		 * as a consequence, the rev_id and external_rev_id are wrong.
		 * workaround it by hardcoding rev_id to 0 (default value).
		 */
		if (amdgpu_sriov_vf(adev))
			adev->rev_id = 0;
		adev->external_rev_id = adev->rev_id + 0xa;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_nv_mailbox_set_irq_funcs(adev);
	}

	return 0;
}
    754 
/* Late init: under SR-IOV, claim the host/guest mailbox interrupt. */
static int nv_common_late_init(void *handle)
{
	struct amdgpu_device *adev = handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_get_irq(adev);

	return 0;
}
    764 
/* SW init: under SR-IOV, register the mailbox interrupt source id. */
static int nv_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_add_irq_id(adev);

	return 0;
}
    774 
/* SW fini: nothing to tear down for the common block. */
static int nv_common_sw_fini(void *handle)
{
	return 0;
}
    779 
/*
 * HW init for the common block: bring up the PCIe link settings, program
 * the NBIO registers, remap the HDP register hole for user access, and
 * open the doorbell apertures.  Order matters: link/ASPM setup precedes
 * NBIO programming, and doorbells are enabled last.
 */
static int nv_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	nv_pcie_gen3_enable(adev);
	/* enable aspm */
	nv_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio.funcs->init_registers(adev);
	/* remap HDP registers to a hole in mmio space,
	 * for the purpose of expose those registers
	 * to process space
	 */
	if (adev->nbio.funcs->remap_hdp_registers)
		adev->nbio.funcs->remap_hdp_registers(adev);
	/* enable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, true);

	return 0;
}
    801 
/* HW fini: close the doorbell apertures (mirror of nv_common_hw_init). */
static int nv_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, false);

	return 0;
}
    811 
/* Suspend is just the hardware teardown path for this IP block. */
static int nv_common_suspend(void *handle)
{
	return nv_common_hw_fini(handle);
}
    818 
/* Resume is just the hardware bring-up path for this IP block. */
static int nv_common_resume(void *handle)
{
	return nv_common_hw_init(handle);
}
    825 
/* The common block has no busy state of its own; always idle. */
static bool nv_common_is_idle(void *handle)
{
	return true;
}
    830 
/* Nothing to wait for (see nv_common_is_idle); always succeeds. */
static int nv_common_wait_for_idle(void *handle)
{
	return 0;
}
    835 
/* No soft-reset support for the common block; reported as success. */
static int nv_common_soft_reset(void *handle)
{
	return 0;
}
    840 
/*
 * Configure HDP memory power gating (LS = light sleep, DS = deep sleep,
 * SD = shut down) for the IPH and RC memories.
 *
 * HDP 5.0 cannot switch power modes dynamically, so the sequence is:
 * force the memory clocks on, disable all gating modes, program exactly
 * one mode (priority LS > DS > SD per cg_flags), then restore the
 * original clock override.  @enable turns the selected mode on or off.
 */
static void nv_update_hdp_mem_power_gating(struct amdgpu_device *adev,
					   bool enable)
{
	uint32_t hdp_clk_cntl, hdp_clk_cntl1;
	uint32_t hdp_mem_pwr_cntl;

	/* nothing to do unless at least one gating mode is supported */
	if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_HDP_DS |
				AMD_CG_SUPPORT_HDP_SD)))
		return;

	/* hdp_clk_cntl1 keeps the original value for restoration at the end */
	hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);

	/* Before doing clock/power mode switch,
	 * forced on IPH & RC clock */
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     IPH_MEM_CLK_SOFT_OVERRIDE, 1);
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     RC_MEM_CLK_SOFT_OVERRIDE, 1);
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);

	/* HDP 5.0 doesn't support dynamic power mode switch,
	 * disable clock and power gating before any changing */
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_SD_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_SD_EN, 0);
	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* only one clock gating mode (LS/DS/SD) can be enabled */
	if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_LS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_LS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_DS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_SD_EN, enable);
		/* RC should not use shut down mode, fallback to ds */
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	}

	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* restore IPH & RC clock override after clock/power mode changing */
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl1);
}
    913 
    914 static void nv_update_hdp_clock_gating(struct amdgpu_device *adev,
    915 				       bool enable)
    916 {
    917 	uint32_t hdp_clk_cntl;
    918 
    919 	if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
    920 		return;
    921 
    922 	hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
    923 
    924 	if (enable) {
    925 		hdp_clk_cntl &=
    926 			~(uint32_t)
    927 			  (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
    928 			   HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
    929 			   HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
    930 			   HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
    931 			   HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
    932 			   HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
    933 	} else {
    934 		hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
    935 			HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
    936 			HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
    937 			HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
    938 			HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
    939 			HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
    940 	}
    941 
    942 	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
    943 }
    944 
    945 static int nv_common_set_clockgating_state(void *handle,
    946 					   enum amd_clockgating_state state)
    947 {
    948 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    949 
    950 	if (amdgpu_sriov_vf(adev))
    951 		return 0;
    952 
    953 	switch (adev->asic_type) {
    954 	case CHIP_NAVI10:
    955 	case CHIP_NAVI14:
    956 	case CHIP_NAVI12:
    957 		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
    958 				state == AMD_CG_STATE_GATE);
    959 		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
    960 				state == AMD_CG_STATE_GATE);
    961 		nv_update_hdp_mem_power_gating(adev,
    962 				   state == AMD_CG_STATE_GATE);
    963 		nv_update_hdp_clock_gating(adev,
    964 				state == AMD_CG_STATE_GATE);
    965 		break;
    966 	default:
    967 		break;
    968 	}
    969 	return 0;
    970 }
    971 
    972 static int nv_common_set_powergating_state(void *handle,
    973 					   enum amd_powergating_state state)
    974 {
    975 	/* TODO */
    976 	return 0;
    977 }
    978 
    979 static void nv_common_get_clockgating_state(void *handle, u32 *flags)
    980 {
    981 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    982 	uint32_t tmp;
    983 
    984 	if (amdgpu_sriov_vf(adev))
    985 		*flags = 0;
    986 
    987 	adev->nbio.funcs->get_clockgating_state(adev, flags);
    988 
    989 	/* AMD_CG_SUPPORT_HDP_MGCG */
    990 	tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
    991 	if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
    992 		     HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
    993 		     HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
    994 		     HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
    995 		     HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
    996 		     HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
    997 		*flags |= AMD_CG_SUPPORT_HDP_MGCG;
    998 
    999 	/* AMD_CG_SUPPORT_HDP_LS/DS/SD */
   1000 	tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
   1001 	if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK)
   1002 		*flags |= AMD_CG_SUPPORT_HDP_LS;
   1003 	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK)
   1004 		*flags |= AMD_CG_SUPPORT_HDP_DS;
   1005 	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK)
   1006 		*flags |= AMD_CG_SUPPORT_HDP_SD;
   1007 
   1008 	return;
   1009 }
   1010 
/*
 * IP-block callback table for the NV "common" block.  The amdgpu
 * device-init code drives these hooks through the generic IP-block
 * state machine; the implementations live earlier in this file.
 */
static const struct amd_ip_funcs nv_common_ip_funcs = {
	.name = "nv_common",
	.early_init = nv_common_early_init,
	.late_init = nv_common_late_init,
	.sw_init = nv_common_sw_init,
	.sw_fini = nv_common_sw_fini,
	.hw_init = nv_common_hw_init,
	.hw_fini = nv_common_hw_fini,
	.suspend = nv_common_suspend,
	.resume = nv_common_resume,
	.is_idle = nv_common_is_idle,
	.wait_for_idle = nv_common_wait_for_idle,
	.soft_reset = nv_common_soft_reset,
	.set_clockgating_state = nv_common_set_clockgating_state,
	.set_powergating_state = nv_common_set_powergating_state,
	.get_clockgating_state = nv_common_get_clockgating_state,
};
   1028