/src/sys/external/bsd/drm2/dist/drm/amd/amdgpu/
amdgpu_sdma.c
     34 /* SDMA CSA resides in the 3rd page of CSA */
     38  * GPU SDMA IP block helper functions.
     46     for (i = 0; i < adev->sdma.num_instances; i++)
     47         if (ring == &adev->sdma.instance[i].ring ||
     48             ring == &adev->sdma.instance[i].page)
     49             return &adev->sdma.instance[i];
     59     for (i = 0; i < adev->sdma.num_instances; i++) {
     60         if (ring == &adev->sdma.instance[i].ring ||
     61             ring == &adev->sdma.instance[i].page) {
    106     if (!adev->sdma.ras_if)
    [all...]
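The matches at lines 46-49 and 59-61 are the body of the ring-to-instance lookup used throughout the SDMA files below. A minimal sketch of that helper, reconstructed from the matched lines (the NULL fall-through for an unmatched ring is an assumption):

    /*
     * Sketch: walk every SDMA instance and return the one owning this
     * ring. A ring can be either the instance's main ring or its page
     * queue, which is why both pointers are compared.
     */
    struct amdgpu_sdma_instance *
    amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring)
    {
        struct amdgpu_device *adev = ring->adev;
        int i;

        for (i = 0; i < adev->sdma.num_instances; i++)
            if (ring == &adev->sdma.instance[i].ring ||
                ring == &adev->sdma.instance[i].page)
                return &adev->sdma.instance[i];

        return NULL;
    }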
amdgpu_sdma_v2_4.c
     87  * sDMA - System DMA
     95  * (ring buffer, IBs, etc.), but sDMA has its own
     97  * used by the CP. sDMA supports copying data, writing
    122     for (i = 0; i < adev->sdma.num_instances; i++) {
    123         release_firmware(adev->sdma.instance[i].fw);
    124         adev->sdma.instance[i].fw = NULL;
    155     for (i = 0; i < adev->sdma.num_instances; i++) {
    160         err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
    163         err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
    166         hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data
    237     struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);    local
    755     struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);    local
    [all...]
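Lines 155-166 show the per-instance microcode load that the v2.4, v3.0, CIK, and v5.0 files all repeat. A hedged sketch of that flow, assuming a fixed fw_name; the wrapper name is hypothetical, and the per-instance firmware name selection and error unwinding done by the real driver are elided:

    /* Sketch only: load, validate, and record versions for each
     * SDMA instance's microcode image. */
    static int sdma_load_microcode_sketch(struct amdgpu_device *adev,
                                          const char *fw_name)
    {
        const struct sdma_firmware_header_v1_0 *hdr;
        int err, i;

        for (i = 0; i < adev->sdma.num_instances; i++) {
            /* fetch the instance's image via the firmware loader */
            err = request_firmware(&adev->sdma.instance[i].fw,
                                   fw_name, adev->dev);
            if (err)
                return err;
            /* sanity-check the ucode container */
            err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
            if (err)
                return err;
            /* record version fields from the little-endian header */
            hdr = (const struct sdma_firmware_header_v1_0 *)
                adev->sdma.instance[i].fw->data;
            adev->sdma.instance[i].fw_version =
                le32_to_cpu(hdr->header.ucode_version);
        }
        return 0;
    }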
amdgpu_sdma_v4_0.c
    524     for (i = 0; i < adev->sdma.num_instances; i++) {
    525         if (adev->sdma.instance[i].fw != NULL)
    526             release_firmware(adev->sdma.instance[i].fw);
    529        all SDMA instances */
    534     memset((void*)adev->sdma.instance, 0,
    590     err = request_firmware(&adev->sdma.instance[0].fw, fw_name, adev->dev);
    594     err = sdma_v4_0_init_inst_ctx(&adev->sdma.instance[0]);
    598     for (i = 1; i < adev->sdma.num_instances; i++) {
    601        for every SDMA instance */
    602         memcpy((void*)&adev->sdma.instance[i]
    781     struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);    local
    915     struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];    local
    959     struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];    local
   1685     struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);    local
   1982     u32 sdma[AMDGPU_MAX_SDMA_INSTANCES];    local
    [all...]
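SDMA v4 parts share one microcode image: lines 590-602 fetch and parse it once for instance 0, then clone the initialized context into every remaining instance rather than reloading. A sketch assembling those fragments; the wrapper name and the memcpy size argument are assumptions:

    /* Sketch: load one image, then duplicate its parsed context into
     * every remaining SDMA instance instead of re-requesting firmware. */
    static int sdma_v4_0_load_fw_sketch(struct amdgpu_device *adev,
                                        const char *fw_name)
    {
        int err, i;

        err = request_firmware(&adev->sdma.instance[0].fw,
                               fw_name, adev->dev);
        if (err)
            return err;

        err = sdma_v4_0_init_inst_ctx(&adev->sdma.instance[0]);
        if (err)
            return err;

        for (i = 1; i < adev->sdma.num_instances; i++)
            memcpy((void *)&adev->sdma.instance[i],
                   (void *)&adev->sdma.instance[0],
                   sizeof(struct amdgpu_sdma_instance));
        return 0;
    }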
amdgpu_cik_sdma.c
     81     for (i = 0; i < adev->sdma.num_instances; i++) {
     82         release_firmware(adev->sdma.instance[i].fw);
     83         adev->sdma.instance[i].fw = NULL;
     88  * sDMA - System DMA
     96  * (ring buffer, IBs, etc.), but sDMA has its own
     98  * used by the CP. sDMA supports copying data, writing
    140     for (i = 0; i < adev->sdma.num_instances; i++) {
    145         err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
    148         err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
    153     for (i = 0; i < adev->sdma.num_instances; i++)
    208     struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);    local
    815     struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);    local
    [all...]
amdgpu_sdma_v3_0.c
    188  * sDMA - System DMA
    196  * (ring buffer, IBs, etc.), but sDMA has its own
    198  * used by the CP. sDMA supports copying data, writing
    259     for (i = 0; i < adev->sdma.num_instances; i++) {
    260         release_firmware(adev->sdma.instance[i].fw);
    261         adev->sdma.instance[i].fw = NULL;
    313     for (i = 0; i < adev->sdma.num_instances; i++) {
    318         err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
    321         err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
    324         hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data
    411     struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);    local
   1026     struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);    local
    [all...]
amdgpu_sdma_v5_0.c
    200     for (i = 0; i < adev->sdma.num_instances; i++) {
    205         err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
    208         err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
    211         hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
    212         adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
    213         adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
    214         if (adev->sdma.instance[i].feature_version >= 20)
    215             adev->sdma.instance[i].burst_nop = true;
    222         info->fw = adev->sdma.instance[i].fw;
    231     for (i = 0; i < adev->sdma.num_instances; i++)
    365     struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);    local
   1100     struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);    local
    [all...]
amdgpu_si_dma.c
     54     u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
     62     u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
    123     for (i = 0; i < adev->sdma.num_instances; i++) {
    124         ring = &adev->sdma.instance[i].ring;
    143     for (i = 0; i < adev->sdma.num_instances; i++) {
    144         ring = &adev->sdma.instance[i].ring;
    361  * si_dma_vm_set_pte_pde - update the page tables using sDMA
    370  * Update the page tables using sDMA (SI).
    441  * si_dma_ring_emit_vm_flush - si vm flush using sDMA
    447  * using sDMA (SI)
    [all...]
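Lines 54 and 62 show how SI, with exactly two DMA engines, maps a ring to its engine index by a pointer comparison instead of the instance loop newer ASICs use. A sketch; the wrapper name is hypothetical:

    /* Sketch: derive the engine index ("me") on a two-engine part. */
    static u32 si_dma_ring_get_me_sketch(struct amdgpu_ring *ring)
    {
        struct amdgpu_device *adev = ring->adev;

        /* instance[0]'s ring is engine 0; anything else is engine 1 */
        return (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
    }

The resulting index then typically selects the per-engine register bank when reading or writing ring pointers.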
amdgpu_ctx.c
     98         scheds = adev->sdma.sdma_sched;
     99         num_scheds = adev->sdma.num_sdma_sched;
    656     for (i = 0; i < adev->sdma.num_instances; i++) {
    657         adev->sdma.sdma_sched[i] = &adev->sdma.instance[i].ring.sched;
    658         adev->sdma.num_sdma_sched++;
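Lines 656-658 build the scheduler table that lines 98-99 later hand out: each SDMA ring contributes its GPU scheduler, and contexts created with the SDMA HW IP are balanced across that table. A sketch of the build step (the wrapper name is hypothetical; the loop body is taken from the matches):

    /* Sketch: collect every SDMA ring's drm_gpu_scheduler so context
     * creation can load-balance entities across all instances. */
    static void amdgpu_sdma_fill_sched_list_sketch(struct amdgpu_device *adev)
    {
        int i;

        for (i = 0; i < adev->sdma.num_instances; i++) {
            adev->sdma.sdma_sched[i] = &adev->sdma.instance[i].ring.sched;
            adev->sdma.num_sdma_sched++;
        }
    }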
|
amdgpu_amdkfd.c
    158  * set for SDMA, VCN, and IH blocks.
    372         return adev->sdma.instance[0].fw_version;
    375         return adev->sdma.instance[1].fw_version;
    582         ring = &adev->sdma.instance[0].ring;
    585         ring = &adev->sdma.instance[1].ring;
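Lines 372 and 375 come from the KFD firmware-version query, which maps an engine enum onto the per-instance version field. A sketch of that branch; the KGD_ENGINE_* names come from the kgd/kfd interface and are an assumption here, as is the function name:

    /* Sketch: report the SDMA microcode version for the engine KFD
     * asks about; unknown engines report 0. */
    static uint32_t amdgpu_amdkfd_sdma_fw_version_sketch(
            struct amdgpu_device *adev, enum kgd_engine_type type)
    {
        switch (type) {
        case KGD_ENGINE_SDMA1:
            return adev->sdma.instance[0].fw_version;
        case KGD_ENGINE_SDMA2:
            return adev->sdma.instance[1].fw_version;
        default:
            return 0;
        }
    }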
|
amdgpu_ucode.h
    276     struct sdma_firmware_header_v1_0 sdma;    member in union:amdgpu_firmware_header
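The match marks the SDMA header as one arm of the firmware-header union. A minimal sketch of the idea; only the sdma member is taken from this listing, the other members and the size of raw are assumptions about the upstream header:

    /* Sketch: every ucode image starts with a common header; the union
     * lets the loader reinterpret the same bytes as the IP-specific
     * header once the type and version are known. */
    union amdgpu_firmware_header {
        struct common_firmware_header common;
        struct sdma_firmware_header_v1_0 sdma;
        /* ... one member per IP/header revision, elided ... */
        uint8_t raw[0x100];
    };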
|
amdgpu_kms.c
    291         if (query_fw->index >= adev->sdma.num_instances)
    293         fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
    294         fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
    350     for (i = 0; i < adev->sdma.num_instances; i++)
    351         if (adev->sdma.instance[i].ring.sched.ready)
   1389     /* SDMA */
   1391     for (i = 0; i < adev->sdma.num_instances; i++) {
   1396         seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n",
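Lines 291-294 are the SDMA branch of the firmware-info ioctl: the user-supplied index is range-checked before indexing the instance array. A sketch filling in the elided error return (the -EINVAL and the uapi type names are assumptions; the field accesses are from the matches):

    /* Sketch: validate the untrusted ioctl index, then copy out the
     * per-instance firmware and feature versions. */
    static int amdgpu_sdma_query_fw_sketch(struct amdgpu_device *adev,
            struct drm_amdgpu_query_fw *query_fw,
            struct drm_amdgpu_info_firmware *fw_info)
    {
        if (query_fw->index >= adev->sdma.num_instances)
            return -EINVAL;
        fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
        fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
        return 0;
    }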
|
amdgpu_cgs.c
    174             fw_version = adev->sdma.instance[0].fw_version;
    177             fw_version = adev->sdma.instance[1].fw_version;
|
amdgpu_ucode.c
    221         DRM_DEBUG("SDMA\n");
    240         DRM_ERROR("Unknown SDMA ucode version: %u.%u\n",
    418 FW_VERSION_ATTR(sdma_fw_version, 0444, sdma.instance[0].fw_version);
    419 FW_VERSION_ATTR(sdma2_fw_version, 0444, sdma.instance[1].fw_version);
|
amdgpu_ras.c
     53     "sdma",
    222  * sub_block_index: some IPs have subcomponents, say, GFX, sDMA.
    255  * block: umc, sdma, gfx, .........
    277  * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
    380  * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
    716     if (adev->sdma.funcs->query_ras_error_count) {
    717         for (i = 0; i < adev->sdma.num_instances; i++)
    718             adev->sdma.funcs->query_ras_error_count(adev, i,
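Lines 716-718 show the RAS counter sweep: the optional per-IP callback is invoked once per SDMA instance so error counts accumulate into one record. A sketch; the err_data accumulator argument and the wrapper name are assumptions:

    /* Sketch: query RAS error counts instance by instance. The
     * callback is optional; IP versions without RAS leave it NULL. */
    static void amdgpu_sdma_query_ras_errors_sketch(struct amdgpu_device *adev,
                                                    struct ras_err_data *err_data)
    {
        int i;

        if (adev->sdma.funcs->query_ras_error_count) {
            for (i = 0; i < adev->sdma.num_instances; i++)
                adev->sdma.funcs->query_ras_error_count(adev, i,
                                                        err_data);
        }
    }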
|
amdgpu_ring.c
     98  * This is the generic insert_nop function for rings except SDMA
    113  * This is the generic pad_ib function for rings except SDMA
    259     else if (ring == &adev->sdma.instance[0].page)
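Lines 98 and 113 note that SDMA rings do not use the generic NOP helpers: when the firmware sets burst_nop (see the sdma_v5_0 matches above), SDMA pads with a single burst-NOP packet instead of N individual NOPs. A sketch of the generic path, reconstructed under that reading:

    /* Sketch: the generic path simply emits "count" single NOP dwords;
     * SDMA overrides this with a burst-NOP packet when supported. */
    void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
    {
        int i;

        for (i = 0; i < count; i++)
            amdgpu_ring_write(ring, ring->funcs->nop);
    }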
|
amdgpu_soc15.c
   1285     /* sdma/ih doorbell ranges are programmed by the hypervisor */
   1287     for (i = 0; i < adev->sdma.num_instances; i++) {
   1288         ring = &adev->sdma.instance[i].ring;
   1319      * in SDMA/IH/MM/ACV range will be routed to CP. So
   1320      * we need to init SDMA/IH/MM/ACV doorbell range prior
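Lines 1287-1288 are the bare-metal half of doorbell setup: each SDMA ring gets its doorbell range programmed, while under SR-IOV the hypervisor owns it (line 1285). A sketch; the nbio callback name and its argument list are assumptions about the surrounding code:

    /* Sketch: program each SDMA instance's doorbell range through the
     * NBIO block on bare metal. */
    static void soc15_sdma_doorbell_init_sketch(struct amdgpu_device *adev)
    {
        struct amdgpu_ring *ring;
        int i;

        for (i = 0; i < adev->sdma.num_instances; i++) {
            ring = &adev->sdma.instance[i].ring;
            adev->nbio.funcs->sdma_doorbell_range(adev, i,
                ring->use_doorbell, ring->doorbell_index,
                adev->doorbell_index.sdma_doorbell_range);
        }
    }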
|
amdgpu.h
    903     /* sdma */
    904     struct amdgpu_sdma sdma;    member in struct:amdgpu_device
|
amdgpu_psp.c
   1251             adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
|
amdgpu_gfx_v10_0.c
   2134     /* Temporarily put sdma part here */
   2142     for (i = 0; i < adev->sdma.num_instances; i++) {
   2144             adev->sdma.instance[i].fw->data;
   2145         fw_data = (const __le32 *) (adev->sdma.instance[i].fw->data +
|
/src/sys/external/bsd/drm2/dist/drm/radeon/
radeon_ucode.h
     72 /* SDMA */
    218     struct sdma_firmware_header_v1_0 sdma;    member in union:radeon_firmware_header
|
/src/sys/external/gpl2/dts/dist/arch/arm/boot/dts/nxp/imx/
imx53-tx53.dtsi
    536 &sdma {
    537     fsl,sdma-ram-script-name = "sdma-imx53.bin";
|