/*	$NetBSD: amdgpu_vcn.h,v 1.2 2021/12/18 23:44:58 riastradh Exp $	*/

/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __AMDGPU_VCN_H__
#define __AMDGPU_VCN_H__

/* Per-instance VCN stack and context save-area sizes. */
#define AMDGPU_VCN_STACK_SIZE		(128*1024)
#define AMDGPU_VCN_CONTEXT_SIZE 	(512*1024)

/* Byte offset of the firmware image inside the VCPU BO. */
#define AMDGPU_VCN_FIRMWARE_OFFSET	256
#define AMDGPU_VCN_MAX_ENC_RINGS	3

#define AMDGPU_MAX_VCN_INSTANCES	2
/*
 * Parenthesize the expansion: the bare product would associate wrongly
 * when this macro is used inside a larger expression (e.g. as the
 * divisor of a division, or mixed with shifts).
 */
#define AMDGPU_MAX_VCN_ENC_RINGS	(AMDGPU_VCN_MAX_ENC_RINGS * AMDGPU_MAX_VCN_INSTANCES)

/* Bits flagging a harvested (fused-off) VCN instance. */
#define AMDGPU_VCN_HARVEST_VCN0 (1 << 0)
#define AMDGPU_VCN_HARVEST_VCN1 (1 << 1)

/*
 * Decode ring packet opcodes.  VCN_DEC_KMD_CMD is the high-bit marker
 * for kernel-mode-driver generated commands (per the name; its use is
 * outside this header).
 */
#define VCN_DEC_KMD_CMD 		0x80000000
#define VCN_DEC_CMD_FENCE		0x00000000
#define VCN_DEC_CMD_TRAP		0x00000001
#define VCN_DEC_CMD_WRITE_REG		0x00000004
#define VCN_DEC_CMD_REG_READ_COND_WAIT	0x00000006
#define VCN_DEC_CMD_PACKET_START	0x0000000a
#define VCN_DEC_CMD_PACKET_END		0x0000000b

/* Encode ring packet opcodes. */
#define VCN_ENC_CMD_NO_OP		0x00000000
#define VCN_ENC_CMD_END 		0x00000001
#define VCN_ENC_CMD_IB			0x00000002
#define VCN_ENC_CMD_FENCE		0x00000003
#define VCN_ENC_CMD_TRAP		0x00000004
#define VCN_ENC_CMD_REG_WRITE		0x0000000b
#define VCN_ENC_CMD_REG_WAIT		0x0000000c

/*
 * VCN 2.0 aperture layout: SoC-visible base addresses of the video and
 * always-on (AON) register ranges, and the internal bases they are
 * rebased onto for DPG indirect access (see SOC15_DPG_MODE_OFFSET_2_0).
 */
#define VCN_VID_SOC_ADDRESS_2_0 	0x1fa00
#define VCN_AON_SOC_ADDRESS_2_0 	0x1f800
#define VCN_VID_IP_ADDRESS_2_0		0x0
#define VCN_AON_IP_ADDRESS_2_0		0x30000

/* UVD IB register-check and mask registers: dword offset + base index. */
#define mmUVD_RBC_XX_IB_REG_CHECK 					0x026b
#define mmUVD_RBC_XX_IB_REG_CHECK_BASE_IDX 				1
#define mmUVD_REG_XX_MASK 						0x026c
#define mmUVD_REG_XX_MASK_BASE_IDX 					1

/* 1 second timeout — presumably arms the idle_work delayed power-down;
 * the handler itself is not in this header (TODO confirm in amdgpu_vcn.c). */
#define VCN_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/*
 * Read a register while the engine is in DPG (dynamic power gating)
 * mode: program the UVD_DPG_LMA mask and control window with the
 * register's absolute offset, then read the result back through
 * mmUVD_DPG_LMA_DATA.  GCC statement expression — it evaluates to that
 * final read.  Relies on `adev` being in scope at the expansion site.
 */
#define RREG32_SOC15_DPG_MODE(ip, inst_idx, reg, mask, sram_sel) 			\
	({	WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_MASK, mask); 			\
		WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_CTL, 				\
			UVD_DPG_LMA_CTL__MASK_EN_MASK | 				\
			((adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg) 	\
			<< UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) | 			\
			(sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); 		\
		RREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_DATA); 			\
	})

/*
 * Write a register while in DPG mode: stage the value in
 * mmUVD_DPG_LMA_DATA, set the mask, then trigger the write by
 * programming the control register with the target offset.  Statement
 * order matters (data before control).  Relies on `adev` in scope.
 */
#define WREG32_SOC15_DPG_MODE(ip, inst_idx, reg, value, mask, sram_sel) 		\
	do { 										\
		WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_DATA, value); 			\
		WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_MASK, mask); 			\
		WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_CTL, 				\
			UVD_DPG_LMA_CTL__READ_WRITE_MASK | 				\
			((adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg) 	\
			<< UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) | 			\
			(sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); 		\
	} while (0)

/*
 * Translate a register's absolute SoC dword offset into the VCN 2.0
 * internal DPG offset: convert to a byte address, rebase the low 20
 * bits from the video or AON aperture onto the matching internal base
 * (pass other addresses through unchanged), and convert back to a
 * dword offset.  Statement expression; evaluates to the translated
 * internal_reg_offset.
 */
#define SOC15_DPG_MODE_OFFSET_2_0(ip, inst_idx, reg) 						\
	({											\
		uint32_t internal_reg_offset, addr;						\
		bool video_range, aon_range;							\
												\
		addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg);		\
		addr <<= 2; 									\
		video_range = ((((0xFFFFF & addr) >= (VCN_VID_SOC_ADDRESS_2_0)) && 		\
				((0xFFFFF & addr) < ((VCN_VID_SOC_ADDRESS_2_0 + 0x2600)))));	\
		aon_range   = ((((0xFFFFF & addr) >= (VCN_AON_SOC_ADDRESS_2_0)) && 		\
				((0xFFFFF & addr) < ((VCN_AON_SOC_ADDRESS_2_0 + 0x600)))));	\
		if (video_range) 								\
			internal_reg_offset = ((0xFFFFF & addr) - (VCN_VID_SOC_ADDRESS_2_0) + 	\
				(VCN_VID_IP_ADDRESS_2_0));					\
		else if (aon_range)								\
			internal_reg_offset = ((0xFFFFF & addr) - (VCN_AON_SOC_ADDRESS_2_0) + 	\
				(VCN_AON_IP_ADDRESS_2_0));					\
		else										\
			internal_reg_offset = (0xFFFFF & addr);					\
												\
		internal_reg_offset >>= 2;							\
	})

/*
 * VCN 2.0 DPG-mode register read: READ_WRITE=0 selects a read through
 * the LMA window; `offset` is an internal offset (see
 * SOC15_DPG_MODE_OFFSET_2_0).  Statement expression evaluating to the
 * mmUVD_DPG_LMA_DATA value.
 */
#define RREG32_SOC15_DPG_MODE_2_0(inst_idx, offset, mask_en) 					\
	({											\
		WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_CTL, 					\
			(0x0 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT |				\
			mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT |				\
			offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT));			\
		RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_DATA);				\
	})

/*
 * VCN 2.0 DPG-mode register write.  Direct path (!indirect): stage the
 * value then trigger via the LMA control window with READ_WRITE=1.
 * Indirect path: append an (offset, value) pair at the instance's DPG
 * SRAM write cursor instead — presumably applied to the hardware later
 * (TODO confirm against the dpg_sram_* users in the .c files).
 */
#define WREG32_SOC15_DPG_MODE_2_0(inst_idx, offset, value, mask_en, indirect)			\
	do {											\
		if (!indirect) {								\
			WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_DATA, value);			\
			WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_CTL, 				\
				(0x1 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT |			\
				 mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT |			\
				 offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT));		\
		} else {									\
			*adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = offset;		\
			*adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = value;			\
		}										\
	} while (0)

/*
 * Hardware magic values for engine power/busy status checks.  The
 * exact bit meanings come from the per-generation UVD/VCN register
 * specifications, not from anything visible in this header.
 */
enum engine_status_constants {
	UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON = 0x2AAAA0,
	UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON_2_0 = 0xAAAA0,	/* VCN 2.0 variant */
	UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON = 0x00000002,
	UVD_STATUS__UVD_BUSY = 0x00000004,
	GB_ADDR_CONFIG_DEFAULT = 0x26010011,
	UVD_STATUS__IDLE = 0x2,
	UVD_STATUS__BUSY = 0x5,
	UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF = 0x1,
	UVD_STATUS__RBC_BUSY = 0x1,
	UVD_PGFSM_STATUS_UVDJ_PWR_ON = 0,
};

/* DPG pause state of a sub-block: running (unpause) or paused. */
enum internal_dpg_state {
	VCN_DPG_STATE__UNPAUSE = 0,
	VCN_DPG_STATE__PAUSE,
};

/* Pause state tracked separately for the firmware-based path and JPEG. */
struct dpg_pause_state {
	enum internal_dpg_state fw_based;
	enum internal_dpg_state jpeg;
};

/*
 * Register offsets a VCN generation exposes for driving the rings;
 * filled in per hardware generation.  Two copies are kept in struct
 * amdgpu_vcn ("internal") and struct amdgpu_vcn_inst ("external") —
 * presumably internal DPG offsets vs. absolute ones (TODO confirm).
 */
struct amdgpu_vcn_reg{
	unsigned	data0;
	unsigned	data1;
	unsigned	cmd;
	unsigned	nop;
	unsigned	context_id;
	unsigned	ib_vmid;
	unsigned	ib_bar_low;
	unsigned	ib_bar_high;
	unsigned	ib_size;
	unsigned	gp_scratch8;
	unsigned	scratch9;
};

/* Per-instance VCN state (one per physical VCN engine). */
struct amdgpu_vcn_inst {
	struct amdgpu_bo	*vcpu_bo;	/* VCPU buffer object; presumably holds fw image — TODO confirm */
	void			*cpu_addr;	/* CPU mapping of vcpu_bo */
	uint64_t		gpu_addr;	/* GPU address of vcpu_bo */
	void			*saved_bo;	/* presumably a CPU-side save of vcpu_bo across suspend — TODO confirm */
	struct amdgpu_ring	ring_dec;	/* decode ring */
	struct amdgpu_ring	ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];	/* encode rings */
	struct amdgpu_irq_src	irq;
	struct amdgpu_vcn_reg	external;	/* generation-specific register offsets */
	struct amdgpu_bo	*dpg_sram_bo;	/* backing BO for indirect DPG writes */
	struct dpg_pause_state	pause_state;	/* current DPG pause state */
	void			*dpg_sram_cpu_addr;	/* CPU mapping of dpg_sram_bo */
	uint64_t		dpg_sram_gpu_addr;	/* GPU address of dpg_sram_bo */
	uint32_t		*dpg_sram_curr_addr;	/* write cursor used by WREG32_SOC15_DPG_MODE_2_0 (indirect) */
};

/* Top-level VCN block state, embedded in struct amdgpu_device. */
struct amdgpu_vcn {
	unsigned		fw_version;
	struct delayed_work	idle_work;	/* presumably delayed power-down, armed with VCN_IDLE_TIMEOUT — TODO confirm */
	const struct firmware	*fw;	/* VCN firmware */
	unsigned		num_enc_rings;	/* encode rings in use per instance */
	enum amd_powergating_state cur_state;	/* current powergating state */
	bool			indirect_sram;	/* selects the indirect DPG SRAM write path */

	uint8_t	num_vcn_inst;	/* populated entries in inst[], <= AMDGPU_MAX_VCN_INSTANCES */
	struct amdgpu_vcn_inst	 inst[AMDGPU_MAX_VCN_INSTANCES];
	struct amdgpu_vcn_reg	 internal;	/* see note on struct amdgpu_vcn_reg */
	struct drm_gpu_scheduler *vcn_enc_sched[AMDGPU_MAX_VCN_ENC_RINGS];
	struct drm_gpu_scheduler *vcn_dec_sched[AMDGPU_MAX_VCN_INSTANCES];
	uint32_t		 num_vcn_enc_sched;	/* valid entries in vcn_enc_sched[] */
	uint32_t		 num_vcn_dec_sched;	/* valid entries in vcn_dec_sched[] */

	unsigned	harvest_config;	/* AMDGPU_VCN_HARVEST_* bits for fused-off instances */
	/* Generation-specific hook to pause/unpause DPG mode for an instance. */
	int (*pause_dpg_mode)(struct amdgpu_device *adev,
		int inst_idx, struct dpg_pause_state *new_state);
};

/* Common VCN lifecycle helpers; all return 0 on success or a negative
 * errno-style code (per driver convention — implementations not
 * visible here). */
int amdgpu_vcn_sw_init(struct amdgpu_device *adev);
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev);
int amdgpu_vcn_suspend(struct amdgpu_device *adev);
int amdgpu_vcn_resume(struct amdgpu_device *adev);
/* Ring usage hooks — presumably cancel/re-arm idle_work around ring
 * activity (TODO confirm in amdgpu_vcn.c). */
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring);
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring);

/* Decode ring and indirect-buffer sanity tests. */
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring);
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout);

/* Encode ring and indirect-buffer sanity tests. */
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring);
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout);

#endif