/*	$NetBSD: amdgpu_vce_v2_0.c,v 1.4 2021/12/19 12:21:29 riastradh Exp $	*/

/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_vce_v2_0.c,v 1.4 2021/12/19 12:21:29 riastradh Exp $");

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "cikd.h"
#include "vce/vce_2_0_d.h"
#include "vce/vce_2_0_sh_mask.h"
#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

#define VCE_V2_0_FW_SIZE	(256 * 1024)
#define VCE_V2_0_STACK_SIZE	(64 * 1024)
#define VCE_V2_0_DATA_SIZE	(23552 * AMDGPU_MAX_VCE_HANDLES)
#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK	0x02

static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev);

/**
 * vce_v2_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vce_v2_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->me == 0)
		return RREG32(mmVCE_RB_RPTR);
	else
		return RREG32(mmVCE_RB_RPTR2);
}

/**
 * vce_v2_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vce_v2_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->me == 0)
		return RREG32(mmVCE_RB_WPTR);
	else
		return RREG32(mmVCE_RB_WPTR2);
}

/**
 * vce_v2_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->me == 0)
		WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
	else
		WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
}

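/**
 * vce_v2_0_lmi_clean - poll for a clean LMI
 *
 * @adev: amdgpu_device pointer
 *
 * Polls VCE_LMI_STATUS until any of the bits in the 0x337f mask are set,
 * which the callers treat as the local memory interface being clean.
 * Returns 0 on success, -ETIMEDOUT if the status bits never show up.
 */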
static int vce_v2_0_lmi_clean(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			uint32_t status = RREG32(mmVCE_LMI_STATUS);

			if (status & 0x337f)
				return 0;
			mdelay(10);
		}
	}

	return -ETIMEDOUT;
}

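/**
 * vce_v2_0_firmware_loaded - wait for the VCPU to report loaded firmware
 *
 * @adev: amdgpu_device pointer
 *
 * Polls VCE_STATUS for the firmware-loaded bit, kicking the ECPU through
 * a soft reset between retries.  Returns 0 once the firmware reports in,
 * -ETIMEDOUT otherwise.
 */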
static int vce_v2_0_firmware_loaded(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			uint32_t status = RREG32(mmVCE_STATUS);

			if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
				return 0;
			mdelay(10);
		}

		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
		WREG32_P(mmVCE_SOFT_RESET,
			VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmVCE_SOFT_RESET, 0,
			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
		mdelay(10);
	}

	return -ETIMEDOUT;
}

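/**
 * vce_v2_0_disable_cg - override VCE clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Writes VCE_CGTT_CLK_OVERRIDE so the VCE clocks are forced on rather
 * than gated (the value 7 presumably sets the relevant override bits).
 */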
static void vce_v2_0_disable_cg(struct amdgpu_device *adev)
{
	WREG32(mmVCE_CGTT_CLK_OVERRIDE, 7);
}

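/**
 * vce_v2_0_init_cg - program initial clock gating values
 *
 * @adev: amdgpu_device pointer
 *
 * Seeds VCE_CLOCK_GATING_A/B and VCE_UENC_CLOCK_GATING with their
 * initial values before the block is started.
 */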
static void vce_v2_0_init_cg(struct amdgpu_device *adev)
{
	u32 tmp;

	tmp = RREG32(mmVCE_CLOCK_GATING_A);
	tmp &= ~0xfff;
	tmp |= ((0 << 0) | (4 << 4));
	tmp |= 0x40000;
	WREG32(mmVCE_CLOCK_GATING_A, tmp);

	tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
	tmp &= ~0xfff;
	tmp |= ((0 << 0) | (4 << 4));
	WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

	tmp = RREG32(mmVCE_CLOCK_GATING_B);
	tmp |= 0x10;
	tmp &= ~0x100000;
	WREG32(mmVCE_CLOCK_GATING_B, tmp);
}

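/**
 * vce_v2_0_mc_resume - program the memory controller interface
 *
 * @adev: amdgpu_device pointer
 *
 * Sets up the LMI registers and points the three VCPU cache windows
 * (firmware, stack and data) at the firmware BO before the VCPU is
 * taken out of reset.
 */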
static void vce_v2_0_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size, offset;

	WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
	WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
	WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
	WREG32(mmVCE_CLOCK_GATING_B, 0xf7);

	WREG32(mmVCE_LMI_CTRL, 0x00398000);
	WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
	WREG32(mmVCE_LMI_VM_CTRL, 0);

	WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));

	offset = AMDGPU_VCE_FIRMWARE_OFFSET;
	size = VCE_V2_0_FW_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

	offset += size;
	size = VCE_V2_0_STACK_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE1, size);

	offset += size;
	size = VCE_V2_0_DATA_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE2, size);

	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
	WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
}

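/**
 * vce_v2_0_is_idle - check VCE idle status
 *
 * @handle: amdgpu_device pointer
 *
 * Returns true when SRBM_STATUS2 no longer reports the VCE block as busy.
 */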
static bool vce_v2_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
}

static int vce_v2_0_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	unsigned i;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (vce_v2_0_is_idle(handle))
			return 0;
	}
	return -ETIMEDOUT;
}

/**
 * vce_v2_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCE block
 */
static int vce_v2_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int r;

	/* set BUSY flag */
	WREG32_P(mmVCE_STATUS, 1, ~1);

	vce_v2_0_init_cg(adev);
	vce_v2_0_disable_cg(adev);

	vce_v2_0_mc_resume(adev);

	ring = &adev->vce.ring[0];
	WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
	WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
	WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);

	ring = &adev->vce.ring[1];
	WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
	WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
	WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);

	WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);
	WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
	mdelay(100);
	WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);

	r = vce_v2_0_firmware_loaded(adev);

	/* clear BUSY flag */
	WREG32_P(mmVCE_STATUS, 0, ~1);

	if (r) {
		DRM_ERROR("VCE not responding, giving up!!!\n");
		return r;
	}

	return 0;
}

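/**
 * vce_v2_0_stop - stop the VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * If the LMI drains and the block goes idle, stalls the UMC/register bus,
 * disables the VCPU clock and puts the block into soft reset; otherwise
 * bails out without touching the hardware.
 */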
static int vce_v2_0_stop(struct amdgpu_device *adev)
{
	int i;
	int status;

	if (vce_v2_0_lmi_clean(adev)) {
		DRM_INFO("VCE is not idle\n");
		return 0;
	}

	if (vce_v2_0_wait_for_idle(adev)) {
		DRM_INFO("VCE is busy, can't set clock gating\n");
		return 0;
	}

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmVCE_LMI_CTRL2, 1 << 8, ~(1 << 8));

	for (i = 0; i < 100; ++i) {
		status = RREG32(mmVCE_LMI_STATUS);
		if (status & 0x240)
			break;
		mdelay(1);
	}

	WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x80001);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32_P(mmVCE_SOFT_RESET, 1, ~0x1);

	WREG32(mmVCE_STATUS, 0);

	return 0;
}

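/**
 * vce_v2_0_set_sw_cg - toggle software-controlled clock gating
 *
 * @adev: amdgpu_device pointer
 * @gated: enable or disable the gating
 *
 * Flips the software clock gating bits in VCE_CLOCK_GATING_B and the
 * UENC clock gating registers.
 */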
static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated)
{
	u32 tmp;

	if (gated) {
		tmp = RREG32(mmVCE_CLOCK_GATING_B);
		tmp |= 0xe70000;
		WREG32(mmVCE_CLOCK_GATING_B, tmp);

		tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
		tmp |= 0xff000000;
		WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

		tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		tmp &= ~0x3fc;
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);

		WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
	} else {
		tmp = RREG32(mmVCE_CLOCK_GATING_B);
		tmp |= 0xe7;
		tmp &= ~0xe70000;
		WREG32(mmVCE_CLOCK_GATING_B, tmp);

		tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
		tmp |= 0x1fe000;
		tmp &= ~0xff000000;
		WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

		tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		tmp |= 0x3fc;
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
	}
}

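/**
 * vce_v2_0_set_dyn_cg - toggle dynamic (hardware) clock gating
 *
 * @adev: amdgpu_device pointer
 * @gated: enable or disable the gating
 *
 * Configures VCE_CLOCK_GATING_B and the UENC clock gating registers for
 * dynamic gating, clearing the clock override when gating is enabled.
 */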
static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated)
{
	u32 orig, tmp;

	/* LMI_MC/LMI_UMC always set in dynamic,
	 * set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {0, 0}
	 */
	tmp = RREG32(mmVCE_CLOCK_GATING_B);
	tmp &= ~0x00060006;

	/* Exception for ECPU, IH, SEM, SYS blocks needs to be turned on/off by SW */
	if (gated) {
		tmp |= 0xe10000;
		WREG32(mmVCE_CLOCK_GATING_B, tmp);
	} else {
		tmp |= 0xe1;
		tmp &= ~0xe10000;
		WREG32(mmVCE_CLOCK_GATING_B, tmp);
	}

	orig = tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
	tmp &= ~0x1fe000;
	tmp &= ~0xff000000;
	if (tmp != orig)
		WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

	orig = tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
	tmp &= ~0x3fc;
	if (tmp != orig)
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);

	/* set VCE_UENC_REG_CLOCK_GATING always in dynamic mode */
	WREG32(mmVCE_UENC_REG_CLOCK_GATING, 0x00);

	if (gated)
		WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
}

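/**
 * vce_v2_0_enable_mgcg - enable/disable medium grain clock gating
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable gating
 * @sw_cg: use the software-controlled path instead of the dynamic one
 *
 * Dispatches to the SW or dynamic clock gating helpers depending on
 * whether VCE MGCG is supported and requested.
 */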
static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable,
								bool sw_cg)
{
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) {
		if (sw_cg)
			vce_v2_0_set_sw_cg(adev, true);
		else
			vce_v2_0_set_dyn_cg(adev, true);
	} else {
		vce_v2_0_disable_cg(adev);

		if (sw_cg)
			vce_v2_0_set_sw_cg(adev, false);
		else
			vce_v2_0_set_dyn_cg(adev, false);
	}
}

static int vce_v2_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->vce.num_rings = 2;

	vce_v2_0_set_ring_funcs(adev);
	vce_v2_0_set_irq_funcs(adev);

	return 0;
}

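/**
 * vce_v2_0_sw_init - software init
 *
 * @handle: amdgpu_device pointer
 *
 * Registers the VCE interrupt source, requests the firmware, allocates
 * the VCE BO and initializes the two VCE rings.
 */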
static int vce_v2_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* VCE */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 167, &adev->vce.irq);
	if (r)
		return r;

	r = amdgpu_vce_sw_init(adev, VCE_V2_0_FW_SIZE +
		VCE_V2_0_STACK_SIZE + VCE_V2_0_DATA_SIZE);
	if (r)
		return r;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	for (i = 0; i < adev->vce.num_rings; i++) {
		ring = &adev->vce.ring[i];
		snprintf(ring->name, sizeof(ring->name), "vce%d", i);
		r = amdgpu_ring_init(adev, ring, 512,
				     &adev->vce.irq, 0);
		if (r)
			return r;
	}

	r = amdgpu_vce_entity_init(adev);

	return r;
}

static int vce_v2_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	return amdgpu_vce_sw_fini(adev);
}

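/**
 * vce_v2_0_hw_init - hardware init
 *
 * @handle: amdgpu_device pointer
 *
 * Requests VCE clocks, enables clock gating and runs a ring test on
 * each VCE ring.
 */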
static int vce_v2_0_hw_init(void *handle)
{
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
	vce_v2_0_enable_mgcg(adev, true, false);

	for (i = 0; i < adev->vce.num_rings; i++) {
		r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
		if (r)
			return r;
	}

	DRM_INFO("VCE initialized successfully.\n");

	return 0;
}

static int vce_v2_0_hw_fini(void *handle)
{
	return 0;
}

static int vce_v2_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v2_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_vce_suspend(adev);
}

static int vce_v2_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	return vce_v2_0_hw_init(adev);
}

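/**
 * vce_v2_0_soft_reset - soft reset the VCE block
 *
 * @handle: amdgpu_device pointer
 *
 * Pulses the SRBM soft reset for VCE and restarts the block.
 */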
static int vce_v2_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_VCE, 1);
	mdelay(5);

	return vce_v2_0_start(adev);
}

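/**
 * vce_v2_0_set_interrupt_state - enable/disable the VCE interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @type: interrupt type
 * @state: requested interrupt state
 *
 * Toggles the system interrupt trap enable bit in VCE_SYS_INT_EN.
 */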
static int vce_v2_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	uint32_t val = 0;

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;

	WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
	return 0;
}

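/**
 * vce_v2_0_process_interrupt - handle a VCE interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Routes fence interrupts from the IH ring to the matching VCE ring.
 */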
static int vce_v2_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCE\n");
	switch (entry->src_data[0]) {
	case 0:
	case 1:
		amdgpu_fence_process(&adev->vce.ring[entry->src_data[0]]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static int vce_v2_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	bool gate = false;
	bool sw_cg = false;

	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_CG_STATE_GATE) {
		gate = true;
		sw_cg = true;
	}

	vce_v2_0_enable_mgcg(adev, gate, sw_cg);

	return 0;
}

static int vce_v2_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCE block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_PG_STATE_GATE)
		return vce_v2_0_stop(adev);
	else
		return vce_v2_0_start(adev);
}

static const struct amd_ip_funcs vce_v2_0_ip_funcs = {
	.name = "vce_v2_0",
	.early_init = vce_v2_0_early_init,
	.late_init = NULL,
	.sw_init = vce_v2_0_sw_init,
	.sw_fini = vce_v2_0_sw_fini,
	.hw_init = vce_v2_0_hw_init,
	.hw_fini = vce_v2_0_hw_fini,
	.suspend = vce_v2_0_suspend,
	.resume = vce_v2_0_resume,
	.is_idle = vce_v2_0_is_idle,
	.wait_for_idle = vce_v2_0_wait_for_idle,
	.soft_reset = vce_v2_0_soft_reset,
	.set_clockgating_state = vce_v2_0_set_clockgating_state,
	.set_powergating_state = vce_v2_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
	.type = AMDGPU_RING_TYPE_VCE,
	.align_mask = 0xf,
	.nop = VCE_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = vce_v2_0_ring_get_rptr,
	.get_wptr = vce_v2_0_ring_get_wptr,
	.set_wptr = vce_v2_0_ring_set_wptr,
	.parse_cs = amdgpu_vce_ring_parse_cs,
	.emit_frame_size = 6, /* amdgpu_vce_ring_emit_fence  x1 no user fence */
	.emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
	.emit_ib = amdgpu_vce_ring_emit_ib,
	.emit_fence = amdgpu_vce_ring_emit_fence,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vce_ring_begin_use,
	.end_use = amdgpu_vce_ring_end_use,
};

static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vce.num_rings; i++) {
		adev->vce.ring[i].funcs = &vce_v2_0_ring_funcs;
		adev->vce.ring[i].me = i;
	}
}

static const struct amdgpu_irq_src_funcs vce_v2_0_irq_funcs = {
	.set = vce_v2_0_set_interrupt_state,
	.process = vce_v2_0_process_interrupt,
};

static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vce.irq.num_types = 1;
	adev->vce.irq.funcs = &vce_v2_0_irq_funcs;
};

const struct amdgpu_ip_block_version vce_v2_0_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v2_0_ip_funcs,
};
    659