Home | History | Annotate | Line # | Download | only in amdgpu
      1 /*	$NetBSD: amdgpu_nv.c,v 1.3 2021/12/19 12:02:39 riastradh Exp $	*/
      2 
      3 /*
      4  * Copyright 2019 Advanced Micro Devices, Inc.
      5  *
      6  * Permission is hereby granted, free of charge, to any person obtaining a
      7  * copy of this software and associated documentation files (the "Software"),
      8  * to deal in the Software without restriction, including without limitation
      9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
     10  * and/or sell copies of the Software, and to permit persons to whom the
     11  * Software is furnished to do so, subject to the following conditions:
     12  *
     13  * The above copyright notice and this permission notice shall be included in
     14  * all copies or substantial portions of the Software.
     15  *
     16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     22  * OTHER DEALINGS IN THE SOFTWARE.
     23  *
     24  */
     25 #include <sys/cdefs.h>
     26 __KERNEL_RCSID(0, "$NetBSD: amdgpu_nv.c,v 1.3 2021/12/19 12:02:39 riastradh Exp $");
     27 
     28 #include <linux/firmware.h>
     29 #include <linux/slab.h>
     30 #include <linux/module.h>
     31 #include <linux/pci.h>
     32 
     33 #include "amdgpu.h"
     34 #include "amdgpu_atombios.h"
     35 #include "amdgpu_ih.h"
     36 #include "amdgpu_uvd.h"
     37 #include "amdgpu_vce.h"
     38 #include "amdgpu_ucode.h"
     39 #include "amdgpu_psp.h"
     40 #include "amdgpu_smu.h"
     41 #include "atom.h"
     42 #include "amd_pcie.h"
     43 
     44 #include "gc/gc_10_1_0_offset.h"
     45 #include "gc/gc_10_1_0_sh_mask.h"
     46 #include "hdp/hdp_5_0_0_offset.h"
     47 #include "hdp/hdp_5_0_0_sh_mask.h"
     48 #include "smuio/smuio_11_0_0_offset.h"
     49 
     50 #include "soc15.h"
     51 #include "soc15_common.h"
     52 #include "gmc_v10_0.h"
     53 #include "gfxhub_v2_0.h"
     54 #include "mmhub_v2_0.h"
     55 #include "nbio_v2_3.h"
     56 #include "nv.h"
     57 #include "navi10_ih.h"
     58 #include "gfx_v10_0.h"
     59 #include "sdma_v5_0.h"
     60 #include "vcn_v2_0.h"
     61 #include "jpeg_v2_0.h"
     62 #include "dce_virtual.h"
     63 #include "mes_v10_1.h"
     64 #include "mxgpu_nv.h"
     65 
     66 #include <linux/nbsd-namespace.h>
     67 
     68 static const struct amd_ip_funcs nv_common_ip_funcs;
     69 
     70 /*
     71  * Indirect registers accessor
     72  */
     73 static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
     74 {
     75 	unsigned long flags, address, data;
     76 	u32 r;
     77 	address = adev->nbio.funcs->get_pcie_index_offset(adev);
     78 	data = adev->nbio.funcs->get_pcie_data_offset(adev);
     79 
     80 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
     81 	WREG32(address, reg);
     82 	(void)RREG32(address);
     83 	r = RREG32(data);
     84 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
     85 	return r;
     86 }
     87 
     88 static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
     89 {
     90 	unsigned long flags, address, data;
     91 
     92 	address = adev->nbio.funcs->get_pcie_index_offset(adev);
     93 	data = adev->nbio.funcs->get_pcie_data_offset(adev);
     94 
     95 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
     96 	WREG32(address, reg);
     97 	(void)RREG32(address);
     98 	WREG32(data, v);
     99 	(void)RREG32(data);
    100 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
    101 }
    102 
    103 static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
    104 {
    105 	unsigned long flags, address, data;
    106 	u32 r;
    107 
    108 	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
    109 	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
    110 
    111 	spin_lock_irqsave(&adev->didt_idx_lock, flags);
    112 	WREG32(address, (reg));
    113 	r = RREG32(data);
    114 	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
    115 	return r;
    116 }
    117 
    118 static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
    119 {
    120 	unsigned long flags, address, data;
    121 
    122 	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
    123 	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
    124 
    125 	spin_lock_irqsave(&adev->didt_idx_lock, flags);
    126 	WREG32(address, (reg));
    127 	WREG32(data, (v));
    128 	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
    129 }
    130 
    131 static u32 nv_get_config_memsize(struct amdgpu_device *adev)
    132 {
    133 	return adev->nbio.funcs->get_memsize(adev);
    134 }
    135 
    136 static u32 nv_get_xclk(struct amdgpu_device *adev)
    137 {
    138 	return adev->clock.spll.reference_freq;
    139 }
    140 
    141 
    142 void nv_grbm_select(struct amdgpu_device *adev,
    143 		     u32 me, u32 pipe, u32 queue, u32 vmid)
    144 {
    145 	u32 grbm_gfx_cntl = 0;
    146 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
    147 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
    148 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
    149 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
    150 
    151 	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
    152 }
    153 
/* VGA state toggle: not implemented on this asic yet (todo). */
static void nv_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}
    158 
/* Reading the BIOS while disabled is not implemented (todo). */
static bool nv_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}
    164 
    165 static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
    166 				  u8 *bios, u32 length_bytes)
    167 {
    168 	u32 *dw_ptr;
    169 	u32 i, length_dw;
    170 
    171 	if (bios == NULL)
    172 		return false;
    173 	if (length_bytes == 0)
    174 		return false;
    175 	/* APU vbios image is part of sbios image */
    176 	if (adev->flags & AMD_IS_APU)
    177 		return false;
    178 
    179 	dw_ptr = (u32 *)bios;
    180 	length_dw = ALIGN(length_bytes, 4) / 4;
    181 
    182 	/* set rom index to 0 */
    183 	WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
    184 	/* read out the rom data */
    185 	for (i = 0; i < length_dw; i++)
    186 		dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));
    187 
    188 	return true;
    189 }
    190 
/*
 * Whitelist of registers that may be read through the read_register
 * asic callback; nv_read_register() rejects any offset not found in
 * this table.
 */
static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
#if 0	/* TODO: will set it when SDMA header is available */
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
#endif
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	/* GB_ADDR_CONFIG is answered from a cached copy, see
	 * nv_get_register_value() */
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
};
    214 
    215 static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
    216 					 u32 sh_num, u32 reg_offset)
    217 {
    218 	uint32_t val;
    219 
    220 	mutex_lock(&adev->grbm_idx_mutex);
    221 	if (se_num != 0xffffffff || sh_num != 0xffffffff)
    222 		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
    223 
    224 	val = RREG32(reg_offset);
    225 
    226 	if (se_num != 0xffffffff || sh_num != 0xffffffff)
    227 		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
    228 	mutex_unlock(&adev->grbm_idx_mutex);
    229 	return val;
    230 }
    231 
    232 static uint32_t nv_get_register_value(struct amdgpu_device *adev,
    233 				      bool indexed, u32 se_num,
    234 				      u32 sh_num, u32 reg_offset)
    235 {
    236 	if (indexed) {
    237 		return nv_read_indexed_register(adev, se_num, sh_num, reg_offset);
    238 	} else {
    239 		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
    240 			return adev->gfx.config.gb_addr_config;
    241 		return RREG32(reg_offset);
    242 	}
    243 }
    244 
    245 static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
    246 			    u32 sh_num, u32 reg_offset, u32 *value)
    247 {
    248 	uint32_t i;
    249 	struct soc15_allowed_register_entry  *en;
    250 
    251 	*value = 0;
    252 	for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
    253 		en = &nv_allowed_read_registers[i];
    254 		if (reg_offset !=
    255 		    (adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset))
    256 			continue;
    257 
    258 		*value = nv_get_register_value(adev,
    259 					       nv_allowed_read_registers[i].grbm_indexed,
    260 					       se_num, sh_num, reg_offset);
    261 		return 0;
    262 	}
    263 	return -EINVAL;
    264 }
    265 
#if 0
/*
 * Full-asic reset through PCI config space; compiled out -- see the
 * FIXME in nv_asic_reset(), this path hasn't worked since vega10.
 */
static void nv_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = nbio_v2_3_get_memsize(adev);
		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

}
#endif
    290 
/*
 * Mode1 reset: a full asic reset carried out by the PSP.  Bus
 * mastering is disabled and PCI config space is saved/restored around
 * the reset; afterwards we poll the nbio memory-size register until it
 * no longer reads all-ones, which signals the asic is back.
 * Returns the psp_gpu_reset() status.
 */
static int nv_asic_mode1_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	/* flag the asic as hung in the atombios scratch registers */
	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	dev_info(adev->dev, "GPU mode1 reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);

	pci_save_state(adev->pdev);

	ret = psp_gpu_reset(adev);
	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");

	pci_restore_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}
    324 
    325 static bool nv_asic_supports_baco(struct amdgpu_device *adev)
    326 {
    327 	struct smu_context *smu = &adev->smu;
    328 
    329 	if (smu_baco_is_support(smu))
    330 		return true;
    331 	else
    332 		return false;
    333 }
    334 
    335 static enum amd_reset_method
    336 nv_asic_reset_method(struct amdgpu_device *adev)
    337 {
    338 	struct smu_context *smu = &adev->smu;
    339 
    340 	if (!amdgpu_sriov_vf(adev) && smu_baco_is_support(smu))
    341 		return AMD_RESET_METHOD_BACO;
    342 	else
    343 		return AMD_RESET_METHOD_MODE1;
    344 }
    345 
/*
 * ASIC reset entry point.  BACO reset (enter then immediately exit
 * the BACO state via the SMU) is used when available, otherwise a PSP
 * mode1 reset.  VRAM contents are counted as lost in either case,
 * except while suspending.
 */
static int nv_asic_reset(struct amdgpu_device *adev)
{

	/* FIXME: it doesn't work since vega10 */
#if 0
	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	nv_gpu_pci_config_reset(adev);

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
#endif
	int ret = 0;
	struct smu_context *smu = &adev->smu;

	if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
		if (!adev->in_suspend)
			amdgpu_inc_vram_lost(adev);
		ret = smu_baco_enter(smu);
		if (ret)
			return ret;
		ret = smu_baco_exit(smu);
		if (ret)
			return ret;
	} else {
		if (!adev->in_suspend)
			amdgpu_inc_vram_lost(adev);
		ret = nv_asic_mode1_reset(adev);
	}

	return ret;
}
    377 
    378 static int nv_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
    379 {
    380 	/* todo */
    381 	return 0;
    382 }
    383 
    384 static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
    385 {
    386 	/* todo */
    387 	return 0;
    388 }
    389 
    390 static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
    391 {
    392 	if (pci_is_root_bus(adev->pdev->bus))
    393 		return;
    394 
    395 	if (amdgpu_pcie_gen2 == 0)
    396 		return;
    397 
    398 	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
    399 					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
    400 		return;
    401 
    402 	/* todo */
    403 }
    404 
    405 static void nv_program_aspm(struct amdgpu_device *adev)
    406 {
    407 
    408 	if (amdgpu_aspm == 0)
    409 		return;
    410 
    411 	/* todo */
    412 }
    413 
/*
 * Toggle both the main doorbell aperture and the self-ring aperture
 * in the nbio block to the same state.
 */
static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}
    420 
/* IP-block descriptor for the "common" SoC block, registered first in
 * nv_set_ip_blocks(). */
static const struct amdgpu_ip_block_version nv_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &nv_common_ip_funcs,
};
    429 
    430 static int nv_reg_base_init(struct amdgpu_device *adev)
    431 {
    432 	int r;
    433 
    434 	if (amdgpu_discovery) {
    435 		r = amdgpu_discovery_reg_base_init(adev);
    436 		if (r) {
    437 			DRM_WARN("failed to init reg base from ip discovery table, "
    438 					"fallback to legacy init method\n");
    439 			goto legacy_init;
    440 		}
    441 
    442 		return 0;
    443 	}
    444 
    445 legacy_init:
    446 	switch (adev->asic_type) {
    447 	case CHIP_NAVI10:
    448 		navi10_reg_base_init(adev);
    449 		break;
    450 	case CHIP_NAVI14:
    451 		navi14_reg_base_init(adev);
    452 		break;
    453 	case CHIP_NAVI12:
    454 		navi12_reg_base_init(adev);
    455 		break;
    456 	default:
    457 		return -EINVAL;
    458 	}
    459 
    460 	return 0;
    461 }
    462 
/*
 * Register the IP blocks for an NV-family asic.  The order below is
 * also the hardware init order, so it must not be rearranged
 * casually.  Note the SMU is added in one of two positions depending
 * on the firmware load type (right after PSP for PSP loading, after
 * SDMA for direct loading), and never under SR-IOV.
 */
int nv_set_ip_blocks(struct amdgpu_device *adev)
{
	int r;

	/* Set IP register base before any HW register access */
	r = nv_reg_base_init(adev);
	if (r)
		return r;

	adev->nbio.funcs = &nbio_v2_3_funcs;
	adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;

	adev->nbio.funcs->detect_hw_virt(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_nv_virt_ops;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
		if (adev->enable_mes)
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
		break;
	case CHIP_NAVI12:
		/* same as NAVI10/14 but without the MES block */
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
		    !amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
    534 
    535 static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
    536 {
    537 	return adev->nbio.funcs->get_rev_id(adev);
    538 }
    539 
    540 static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
    541 {
    542 	adev->nbio.funcs->hdp_flush(adev, ring);
    543 }
    544 
    545 static void nv_invalidate_hdp(struct amdgpu_device *adev,
    546 				struct amdgpu_ring *ring)
    547 {
    548 	if (!ring || !ring->funcs->emit_wreg) {
    549 		WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
    550 	} else {
    551 		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
    552 					HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
    553 	}
    554 }
    555 
/* This asic always requires a full reset (no soft-reset path). */
static bool nv_need_full_reset(struct amdgpu_device *adev)
{
	return true;
}
    560 
/*
 * PCIe bandwidth counters are not implemented for this asic yet
 * (TODO).  Report zero for both counters instead of leaving the
 * caller's output variables untouched/uninitialized, which the old
 * stub did.
 */
static void nv_get_pcie_usage(struct amdgpu_device *adev,
			      uint64_t *count0,
			      uint64_t *count1)
{
	if (count0)
		*count0 = 0;
	if (count1)
		*count1 = 0;
}
    567 
/*
 * Whether the asic needs a reset at driver init time.  The sOS
 * sign-of-life check is compiled out until mode1 reset is functional
 * on this asic, so the answer is always "no" for now.
 */
static bool nv_need_reset_on_init(struct amdgpu_device *adev)
{
#if 0
	u32 sol_reg;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check sOS sign of life register to confirm sys driver and sOS
	 * are already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;
#endif
	/* TODO: re-enable it when mode1 reset is functional */
	return false;
}
    586 
/*
 * Stub backing the pcie_replay_count sysfs node: no replay counter is
 * wired up on this asic yet, so always report zero (TODO).
 */
static uint64_t nv_get_pcie_replay_count(struct amdgpu_device *adev)
{
	return 0;
}
    596 
/*
 * Assign the fixed Navi10-family doorbell slot layout.  All values
 * are AMDGPU_NAVI10_DOORBELL_* constants; only the bookkeeping fields
 * at the bottom are computed.
 */
static void nv_init_doorbell_index(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
	adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
	adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
	adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
	adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
	adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
	adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
	adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;

	/* << 1: indices above are 32-bit units -- presumably doubled to
	 * address 64-bit doorbell slots; confirm against the doorbell
	 * manager if this is changed. */
	adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
	adev->doorbell_index.sdma_doorbell_range = 20;
}
    625 
/* ASIC-level callback table; installed into adev->asic_funcs by
 * nv_common_early_init(). */
static const struct amdgpu_asic_funcs nv_asic_funcs =
{
	.read_disabled_bios = &nv_read_disabled_bios,
	.read_bios_from_rom = &nv_read_bios_from_rom,
	.read_register = &nv_read_register,
	.reset = &nv_asic_reset,
	.reset_method = &nv_asic_reset_method,
	.set_vga_state = &nv_vga_set_state,
	.get_xclk = &nv_get_xclk,
	.set_uvd_clocks = &nv_set_uvd_clocks,
	.set_vce_clocks = &nv_set_vce_clocks,
	.get_config_memsize = &nv_get_config_memsize,
	.flush_hdp = &nv_flush_hdp,
	.invalidate_hdp = &nv_invalidate_hdp,
	.init_doorbell_index = &nv_init_doorbell_index,
	.need_full_reset = &nv_need_full_reset,
	.get_pcie_usage = &nv_get_pcie_usage,
	.need_reset_on_init = &nv_need_reset_on_init,
	.get_pcie_replay_count = &nv_get_pcie_replay_count,
	.supports_baco = &nv_asic_supports_baco,
};
    647 
/*
 * Early init for the common IP block: wire up the indirect register
 * accessors and asic callbacks, read the revision id, and set the
 * per-chip clock/power-gating support flags and external revision id.
 * Returns -EINVAL for an unrecognized asic type.
 */
static int nv_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* the HDP register remap hole sits just below 0x80000 */
	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &nv_pcie_rreg;
	adev->pcie_wreg = &nv_pcie_wreg;

	/* TODO: will add them during VCN v2 implementation */
	adev->uvd_ctx_rreg = NULL;
	adev->uvd_ctx_wreg = NULL;

	adev->didt_rreg = &nv_didt_rreg;
	adev->didt_wreg = &nv_didt_wreg;

	adev->asic_funcs = &nv_asic_funcs;

	adev->rev_id = nv_get_rev_id(adev);
	adev->external_rev_id = 0xff;
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_NAVI14:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 20;
		break;
	case CHIP_NAVI12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB;
		/* guest vm gets 0xffffffff when reading RCC_DEV0_EPF0_STRAP0,
		 * as a consequence, the rev_id and external_rev_id are wrong.
		 * workaround it by hardcoding rev_id to 0 (default value).
		 */
		if (amdgpu_sriov_vf(adev))
			adev->rev_id = 0;
		adev->external_rev_id = adev->rev_id + 0xa;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_nv_mailbox_set_irq_funcs(adev);
	}

	return 0;
}
    756 
/* Late init: under SR-IOV, claim the mailbox interrupt. */
static int nv_common_late_init(void *handle)
{
	struct amdgpu_device *adev = handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_get_irq(adev);

	return 0;
}
    766 
/* SW init: under SR-IOV, register the mailbox interrupt id. */
static int nv_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_add_irq_id(adev);

	return 0;
}
    776 
/* Nothing to tear down for the common block. */
static int nv_common_sw_fini(void *handle)
{
	return 0;
}
    781 
    782 static int nv_common_hw_init(void *handle)
    783 {
    784 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    785 
    786 	/* enable pcie gen2/3 link */
    787 	nv_pcie_gen3_enable(adev);
    788 	/* enable aspm */
    789 	nv_program_aspm(adev);
    790 	/* setup nbio registers */
    791 	adev->nbio.funcs->init_registers(adev);
    792 	/* remap HDP registers to a hole in mmio space,
    793 	 * for the purpose of expose those registers
    794 	 * to process space
    795 	 */
    796 	if (adev->nbio.funcs->remap_hdp_registers)
    797 		adev->nbio.funcs->remap_hdp_registers(adev);
    798 	/* enable the doorbell aperture */
    799 	nv_enable_doorbell_aperture(adev, true);
    800 
    801 	return 0;
    802 }
    803 
    804 static int nv_common_hw_fini(void *handle)
    805 {
    806 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    807 
    808 	/* disable the doorbell aperture */
    809 	nv_enable_doorbell_aperture(adev, false);
    810 
    811 	return 0;
    812 }
    813 
/* Suspend is simply a hw teardown for this block. */
static int nv_common_suspend(void *handle)
{
	return nv_common_hw_fini(handle);
}
    820 
/* Resume is simply a hw re-init for this block. */
static int nv_common_resume(void *handle)
{
	return nv_common_hw_init(handle);
}
    827 
/* The common block has no busy state to report. */
static bool nv_common_is_idle(void *handle)
{
	return true;
}
    832 
/* Nothing to wait on -- always idle. */
static int nv_common_wait_for_idle(void *handle)
{
	return 0;
}
    837 
/* No soft-reset support in the common block. */
static int nv_common_soft_reset(void *handle)
{
	return 0;
}
    842 
/*
 * Switch HDP memory power gating between the LS/DS/SD modes.  HDP 5.0
 * cannot switch power modes dynamically, so the sequence is: force the
 * IPH/RC memory clocks on, disable every gating mode, then (re)enable
 * exactly one mode according to cg_flags, and finally restore the
 * original clock-override settings.
 */
static void nv_update_hdp_mem_power_gating(struct amdgpu_device *adev,
					   bool enable)
{
	uint32_t hdp_clk_cntl, hdp_clk_cntl1;
	uint32_t hdp_mem_pwr_cntl;

	/* bail out unless at least one HDP gating mode is supported */
	if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_HDP_DS |
				AMD_CG_SUPPORT_HDP_SD)))
		return;

	/* hdp_clk_cntl1 keeps the original value for restoration below */
	hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
	hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);

	/* Before doing clock/power mode switch,
	 * forced on IPH & RC clock */
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     IPH_MEM_CLK_SOFT_OVERRIDE, 1);
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     RC_MEM_CLK_SOFT_OVERRIDE, 1);
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);

	/* HDP 5.0 doesn't support dynamic power mode switch,
	 * disable clock and power gating before any changing */
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 IPH_MEM_POWER_SD_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_SD_EN, 0);
	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* only one clock gating mode (LS/DS/SD) can be enabled */
	if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_LS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_LS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_DS_EN, enable);
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 IPH_MEM_POWER_SD_EN, enable);
		/* RC should not use shut down mode, fallback to ds */
		hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
						 HDP_MEM_POWER_CTRL,
						 RC_MEM_POWER_DS_EN, enable);
	}

	WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* restore IPH & RC clock override after clock/power mode changing */
	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl1);
}
    915 
    916 static void nv_update_hdp_clock_gating(struct amdgpu_device *adev,
    917 				       bool enable)
    918 {
    919 	uint32_t hdp_clk_cntl;
    920 
    921 	if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
    922 		return;
    923 
    924 	hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
    925 
    926 	if (enable) {
    927 		hdp_clk_cntl &=
    928 			~(uint32_t)
    929 			  (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
    930 			   HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
    931 			   HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
    932 			   HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
    933 			   HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
    934 			   HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
    935 	} else {
    936 		hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
    937 			HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
    938 			HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
    939 			HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
    940 			HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
    941 			HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
    942 	}
    943 
    944 	WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
    945 }
    946 
    947 static int nv_common_set_clockgating_state(void *handle,
    948 					   enum amd_clockgating_state state)
    949 {
    950 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    951 
    952 	if (amdgpu_sriov_vf(adev))
    953 		return 0;
    954 
    955 	switch (adev->asic_type) {
    956 	case CHIP_NAVI10:
    957 	case CHIP_NAVI14:
    958 	case CHIP_NAVI12:
    959 		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
    960 				state == AMD_CG_STATE_GATE);
    961 		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
    962 				state == AMD_CG_STATE_GATE);
    963 		nv_update_hdp_mem_power_gating(adev,
    964 				   state == AMD_CG_STATE_GATE);
    965 		nv_update_hdp_clock_gating(adev,
    966 				state == AMD_CG_STATE_GATE);
    967 		break;
    968 	default:
    969 		break;
    970 	}
    971 	return 0;
    972 }
    973 
/*
 * Powergating control for the NV common IP block.
 * Not implemented yet; returns 0 so the IP framework treats the
 * request as handled.
 */
static int nv_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	/* TODO */
	return 0;
}
    980 
    981 static void nv_common_get_clockgating_state(void *handle, u32 *flags)
    982 {
    983 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    984 	uint32_t tmp;
    985 
    986 	if (amdgpu_sriov_vf(adev))
    987 		*flags = 0;
    988 
    989 	adev->nbio.funcs->get_clockgating_state(adev, flags);
    990 
    991 	/* AMD_CG_SUPPORT_HDP_MGCG */
    992 	tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
    993 	if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
    994 		     HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
    995 		     HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
    996 		     HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
    997 		     HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
    998 		     HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
    999 		*flags |= AMD_CG_SUPPORT_HDP_MGCG;
   1000 
   1001 	/* AMD_CG_SUPPORT_HDP_LS/DS/SD */
   1002 	tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
   1003 	if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK)
   1004 		*flags |= AMD_CG_SUPPORT_HDP_LS;
   1005 	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK)
   1006 		*flags |= AMD_CG_SUPPORT_HDP_DS;
   1007 	else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK)
   1008 		*flags |= AMD_CG_SUPPORT_HDP_SD;
   1009 
   1010 	return;
   1011 }
   1012 
/* IP-block callback table registered for the NV SoC "common" IP block;
 * the amdgpu IP framework drives init/fini, suspend/resume, idle and
 * clock/powergating through these entry points.
 */
static const struct amd_ip_funcs nv_common_ip_funcs = {
	.name = "nv_common",
	.early_init = nv_common_early_init,
	.late_init = nv_common_late_init,
	.sw_init = nv_common_sw_init,
	.sw_fini = nv_common_sw_fini,
	.hw_init = nv_common_hw_init,
	.hw_fini = nv_common_hw_fini,
	.suspend = nv_common_suspend,
	.resume = nv_common_resume,
	.is_idle = nv_common_is_idle,
	.wait_for_idle = nv_common_wait_for_idle,
	.soft_reset = nv_common_soft_reset,
	.set_clockgating_state = nv_common_set_clockgating_state,
	.set_powergating_state = nv_common_set_powergating_state,
	.get_clockgating_state = nv_common_get_clockgating_state,
};
   1030