/xsrc/external/mit/MesaLib.old/dist/src/gallium/drivers/radeonsi/
cik_sdma.c
     38  struct si_resource *sdst = si_resource(dst);   [local in function cik_sdma_copy_buffer]
     44  util_range_add(&sdst->valid_buffer_range, dst_offset,
     47  dst_offset += sdst->gpu_address;
     51  si_need_dma_space(ctx, ncopy * 7, sdst, ssrc);
    108  struct si_texture *sdst = (struct si_texture*)dst;   [local in function cik_sdma_copy_texture]
    109  unsigned bpp = sdst->surface.bpe;
    110  uint64_t dst_address = sdst->buffer.gpu_address +
    111  sdst->surface.u.legacy.level[dst_level].offset;
    114  unsigned dst_mode = sdst->surface.u.legacy.level[dst_level].mode;
    116  unsigned dst_tile_index = sdst [all...]

si_dma.c
     40  struct si_resource *sdst = si_resource(dst);   [local in function si_dma_copy_buffer]
     46  util_range_add(&sdst->valid_buffer_range, dst_offset,
     49  dst_offset += sdst->gpu_address;
     64  si_need_dma_space(ctx, ncopy * 5, sdst, ssrc);
     97  struct si_texture *sdst = (struct si_texture*)dst;   [local in function si_dma_copy_tile]
     98  unsigned dst_mode = sdst->surface.u.legacy.level[dst_level].mode;
    100  struct si_texture *linear = detile ? sdst : ssrc;
    101  struct si_texture *tiled = detile ? ssrc : sdst;
    155  si_need_dma_space(ctx, ncopy * 9, &sdst->buffer, &ssrc->buffer);
    190  struct si_texture *sdst   [local in function si_dma_copy] [all...]

si_dma_cs.c
     72  struct si_resource *sdst = si_resource(dst);   [local in function si_sdma_clear_buffer]
     86  util_range_add(&sdst->valid_buffer_range, offset, offset + size);
     88  offset += sdst->gpu_address;
     93  si_need_dma_space(sctx, ncopy * 4, sdst, NULL);
    111  si_need_dma_space(sctx, ncopy * 5, sdst, NULL);

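The three DMA paths above share one shape: the destination is cast to the driver's si_resource/si_texture type, the written range is recorded with util_range_add, and the transfer is split into fixed-size chunks so that ncopy times a per-packet dword count can be reserved up front via si_need_dma_space (the snippets reserve ncopy * 7, ncopy * 5, ncopy * 9, and ncopy * 4 dwords). Below is a minimal, self-contained sketch of that chunking arithmetic only; the per-packet byte limit, dword count, and helper names are illustrative and not the Mesa API.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical limit on how many bytes one DMA packet may move; the real
 * per-packet limit depends on the hardware generation. */
#define MAX_BYTES_PER_PACKET (1u << 20)
/* Dwords emitted per copy packet in this sketch. */
#define DWORDS_PER_PACKET 7

static unsigned div_round_up(uint64_t n, uint64_t d)
{
    return (unsigned)((n + d - 1) / d);
}

/* Split a byte-sized copy into packets and report how much command-stream
 * space must be reserved before any packet is written. */
static void plan_dma_copy(uint64_t dst_offset, uint64_t src_offset, uint64_t size)
{
    unsigned ncopy = div_round_up(size, MAX_BYTES_PER_PACKET);
    unsigned cs_dwords = ncopy * DWORDS_PER_PACKET;

    printf("need %u packets, reserve %u dwords\n", ncopy, cs_dwords);

    for (unsigned i = 0; i < ncopy; i++) {
        uint64_t bytes = size > MAX_BYTES_PER_PACKET ? MAX_BYTES_PER_PACKET : size;
        /* a real driver would emit one copy packet here for
         * [src_offset, src_offset + bytes) -> [dst_offset, dst_offset + bytes) */
        dst_offset += bytes;
        src_offset += bytes;
        size -= bytes;
    }
}

int main(void)
{
    plan_dma_copy(0, 0, (3u << 20) + 123);   /* 3 MiB + 123 bytes -> 4 packets */
    return 0;
}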
|
si_cp_dma.c
    215  struct si_resource *sdst = si_resource(dst);   [local in function si_cp_dma_clear_buffer]
    216  uint64_t va = (sdst ? sdst->gpu_address : 0) + offset;
    224  if (sdst)
    225  util_range_add(&sdst->valid_buffer_range, offset, offset + size);
    228  if (sdst && !(user_flags & SI_CPDMA_SKIP_GFX_SYNC)) {
    236  unsigned dma_flags = CP_DMA_CLEAR | (sdst ? 0 : CP_DMA_DST_IS_GDS);
    248  if (sdst && cache_policy != L2_BYPASS)
    249  sdst->TC_L2_dirty = true;

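A recurring detail in these buffer paths is util_range_add(&sdst->valid_buffer_range, offset, offset + size): every clear or copy records which span of the buffer now holds defined data, so later operations can cheaply test whether a region was ever written. The following is a simplified stand-alone model of that bookkeeping; the struct and function names are illustrative stand-ins for Mesa's util_range helpers.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified model of a "valid range": the span of a buffer that has been
 * written at least once. */
struct valid_range {
    uint64_t start;   /* inclusive */
    uint64_t end;     /* exclusive; start >= end means "nothing written yet" */
};

static void range_init(struct valid_range *r)
{
    r->start = UINT64_MAX;
    r->end = 0;
}

/* Grow the valid range to cover [start, end). */
static void range_add(struct valid_range *r, uint64_t start, uint64_t end)
{
    if (start < r->start) r->start = start;
    if (end > r->end)     r->end = end;
}

/* True if [start, end) overlaps data that was ever written; callers can use
 * this to skip synchronization for regions that were never touched. */
static bool range_intersects(const struct valid_range *r, uint64_t start, uint64_t end)
{
    return r->start < r->end && start < r->end && end > r->start;
}

int main(void)
{
    struct valid_range vr;
    range_init(&vr);
    range_add(&vr, 256, 512);                          /* e.g. a 256-byte clear */
    printf("%d\n", range_intersects(&vr, 0, 128));     /* 0: untouched region */
    printf("%d\n", range_intersects(&vr, 480, 600));   /* 1: overlaps the clear */
    return 0;
}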
|
si_test_dma.c
    223  struct si_texture *sdst;   [local in function si_test_dma]
    293  sdst = (struct si_texture*)dst;
    301  array_mode_to_string(sscreen, &sdst->surface),
    311  si_clear_buffer(sctx, dst, 0, sdst->surface.surf_size, &zero, 4,
    343  !sdst->surface.is_linear &&
    371  !sdst->surface.is_linear &&
|
si_fence.c
    196  struct si_multi_fence **sdst = (struct si_multi_fence **)dst;   [local in function si_fence_reference]
    199  if (pipe_reference(&(*sdst)->reference, &ssrc->reference)) {
    200  ws->fence_reference(&(*sdst)->gfx, NULL);
    201  ws->fence_reference(&(*sdst)->sdma, NULL);
    202  tc_unflushed_batch_token_reference(&(*sdst)->tc_token, NULL);
    203  si_resource_reference(&(*sdst)->fine.buf, NULL);
    204  FREE(*sdst);
    206  *sdst = ssrc;

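si_fence_reference follows the standard Gallium reference-counting pattern: pipe_reference() adds a reference to the incoming fence, drops one from the fence currently held, and returns true when the held fence just lost its last reference, at which point its sub-objects (the winsys fences, the unflushed-batch token, the fine-grained fence buffer) are released and the struct is freed. Here is a minimal stand-alone sketch of the same pattern; the types and helpers are simplified stand-ins, not the Mesa API.

#include <stdbool.h>
#include <stdlib.h>

struct refcount { int count; };

/* Simplified pipe_reference(): add a reference to src, drop one from dst,
 * and return true if dst just reached zero and must be destroyed. */
static bool reference_transfer(struct refcount *dst, struct refcount *src)
{
    if (dst == src)
        return false;
    if (src)
        src->count++;
    if (dst && --dst->count == 0)
        return true;
    return false;
}

/* Stand-in for si_multi_fence: a refcounted object owning sub-resources. */
struct fence {
    struct refcount reference;
    void *gfx_fence;   /* stands in for winsys fences, tc tokens, fine buffers */
};

static struct fence *fence_create(void)
{
    struct fence *f = calloc(1, sizeof(*f));
    f->reference.count = 1;
    return f;
}

/* Mirrors the shape of si_fence_reference: release *dst if this was its last
 * reference, then make *dst point at src. */
static void fence_reference(struct fence **dst, struct fence *src)
{
    if (reference_transfer(*dst ? &(*dst)->reference : NULL,
                           src ? &src->reference : NULL)) {
        free((*dst)->gfx_fence);
        free(*dst);
    }
    *dst = src;
}

int main(void)
{
    struct fence *a = fence_create();
    struct fence *ptr = NULL;
    fence_reference(&ptr, a);    /* ptr now holds a second reference */
    fence_reference(&ptr, NULL); /* drop it again */
    fence_reference(&a, NULL);   /* last reference gone; the fence is freed */
    return 0;
}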
|
si_buffer.c
    306  struct si_resource *sdst = si_resource(dst);   [local in function si_replace_buffer_storage]
    309  pb_reference(&sdst->buf, ssrc->buf);
    310  sdst->gpu_address = ssrc->gpu_address;
    311  sdst->b.b.bind = ssrc->b.b.bind;
    312  sdst->b.max_forced_staging_uploads = ssrc->b.max_forced_staging_uploads;
    313  sdst->max_forced_staging_uploads = ssrc->max_forced_staging_uploads;
    314  sdst->flags = ssrc->flags;
    316  assert(sdst->vram_usage == ssrc->vram_usage);
    317  assert(sdst->gart_usage == ssrc->gart_usage);
    318  assert(sdst [all...]

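si_replace_buffer_storage implements buffer renaming: the destination resource keeps its identity but is re-pointed at the source's backing buffer object, copying over the GPU address and flags, with asserts that externally visible properties did not change. Below is a small self-contained sketch of the idea; the buffer_object/resource types and the bo_reference helper are illustrative, not the real winsys interfaces.

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

/* Illustrative stand-ins for a winsys buffer object and a driver resource. */
struct buffer_object {
    int refcount;
};

struct resource {
    struct buffer_object *buf;   /* backing storage */
    uint64_t gpu_address;        /* cached address of buf */
    uint64_t bo_size;
    unsigned flags;
};

/* Drop the old backing buffer (freeing it on its last reference) and take a
 * reference to the new one, in the spirit of pb_reference()/radeon_bo_reference(). */
static void bo_reference(struct buffer_object **dst, struct buffer_object *src)
{
    if (src)
        src->refcount++;
    if (*dst && --(*dst)->refcount == 0)
        free(*dst);
    *dst = src;
}

/* Re-point dst at src's storage, in the spirit of si_replace_buffer_storage. */
static void replace_buffer_storage(struct resource *dst, struct resource *src)
{
    bo_reference(&dst->buf, src->buf);
    dst->gpu_address = src->gpu_address;
    dst->flags = src->flags;

    /* Renaming must not change the externally visible size. */
    assert(dst->bo_size == src->bo_size);
}

int main(void)
{
    struct buffer_object *old_bo = calloc(1, sizeof(*old_bo));
    struct buffer_object *new_bo = calloc(1, sizeof(*new_bo));
    old_bo->refcount = new_bo->refcount = 1;

    struct resource a = { .buf = old_bo, .gpu_address = 0x1000, .bo_size = 4096 };
    struct resource b = { .buf = new_bo, .gpu_address = 0x2000, .bo_size = 4096 };

    replace_buffer_storage(&a, &b);   /* a now aliases b's storage; old_bo is freed */
    return 0;
}
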
si_clear.c
    674  struct si_texture *sdst = (struct si_texture*)dst->texture;   [local in function si_clear_render_target]
    676  if (dst->texture->nr_samples <= 1 && !sdst->dcc_offset) {
|
si_blit.c
    906  struct si_texture *sdst = (struct si_texture*)dst;   [local in function si_resource_copy_region]
    923  !sdst->dcc_offset &&
|
/xsrc/external/mit/MesaLib/dist/src/gallium/drivers/radeonsi/
si_sdma_copy_image.c
    113  bool si_sdma_v4_v5_copy_texture(struct si_context *sctx, struct si_texture *sdst, struct si_texture *ssrc, bool is_v5)   [argument]
    115  unsigned bpp = sdst->surface.bpe;
    116  uint64_t dst_address = sdst->buffer.gpu_address + sdst->surface.u.gfx9.surf_offset;
    118  unsigned dst_pitch = sdst->surface.u.gfx9.surf_pitch;
    124  assert(!tmz || (sdst->buffer.flags & RADEON_FLAG_ENCRYPTED));
    127  if (ssrc->surface.is_linear && sdst->surface.is_linear) {
    136  dst_address += sdst->surface.u.gfx9.offset[0];
    153  if (ssrc->surface.is_linear != sdst->surface.is_linear) {
    154  struct si_texture *tiled = ssrc->surface.is_linear ? sdst
    225  cik_sdma_copy_texture(struct si_context *sctx, struct si_texture *sdst, struct si_texture *ssrc)   [argument] [all...]

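The control flow visible above picks an SDMA copy path from the two surfaces' tiling: a plain copy when both surfaces are linear, a tiled-vs-linear window copy when exactly one is tiled, and otherwise the function gives up so the caller can fall back to another blit path. The sketch below captures only that dispatch, under those assumptions; the enum and function names are illustrative, not the Mesa API.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative surface descriptor; only the field needed for path selection. */
struct surface {
    bool is_linear;
};

enum copy_path {
    COPY_LINEAR_TO_LINEAR,   /* plain linear copy packet */
    COPY_LINEAR_TILED,       /* tiled<->linear window copy packet */
    COPY_UNSUPPORTED         /* let the caller fall back to a gfx/compute blit */
};

/* Mirrors the dispatch visible in si_sdma_v4_v5_copy_texture: treat the
 * tiled surface specially, and refuse tiled-to-tiled here. */
static enum copy_path select_sdma_copy_path(const struct surface *src,
                                            const struct surface *dst)
{
    if (src->is_linear && dst->is_linear)
        return COPY_LINEAR_TO_LINEAR;
    if (src->is_linear != dst->is_linear)
        return COPY_LINEAR_TILED;
    return COPY_UNSUPPORTED;
}

int main(void)
{
    struct surface lin = { .is_linear = true }, tiled = { .is_linear = false };
    printf("%d %d %d\n",
           select_sdma_copy_path(&lin, &lin),     /* 0 */
           select_sdma_copy_path(&lin, &tiled),   /* 1 */
           select_sdma_copy_path(&tiled, &tiled));/* 2 */
    return 0;
}
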
si_test_blit.c
    204  struct si_texture *sdst;   [local in function si_test_blit]
    278  sdst = (struct si_texture *)dst;
    286  array_mode_to_string(sscreen, &sdst->surface), tsrc.width0, tsrc.height0,
    296  si_clear_buffer(sctx, dst, 0, sdst->surface.surf_size, &zero, 4, SI_OP_SYNC_BEFORE_AFTER,
    327  if (!ssrc->surface.is_linear && !sdst->surface.is_linear && rand() & 1) {
    353  if (ssrc->surface.is_linear && !sdst->surface.is_linear && rand() % 4 == 0) {
|
si_cp_dma.c
    193  struct si_resource *sdst = si_resource(dst);   [local in function si_cp_dma_clear_buffer]
    194  uint64_t va = (sdst ? sdst->gpu_address : 0) + offset;
    208  if (sdst) {
    209  util_range_add(dst, &sdst->valid_buffer_range, offset, offset + size);
    217  unsigned dma_flags = CP_DMA_CLEAR | (sdst ? 0 : CP_DMA_DST_IS_GDS);
    229  if (sdst && cache_policy != L2_BYPASS)
    230  sdst->TC_L2_dirty = true;
|
si_fence.c
    188  struct si_fence **sdst = (struct si_fence **)dst;   [local in function si_fence_reference]
    191  if (pipe_reference(&(*sdst)->reference, &ssrc->reference)) {
    192  ws->fence_reference(&(*sdst)->gfx, NULL);
    193  tc_unflushed_batch_token_reference(&(*sdst)->tc_token, NULL);
    194  si_resource_reference(&(*sdst)->fine.buf, NULL);
    195  FREE(*sdst);
    197  *sdst = ssrc;
|
si_buffer.c
    282  struct si_resource *sdst = si_resource(dst);   [local in function si_replace_buffer_storage]
    285  radeon_bo_reference(sctx->screen->ws, &sdst->buf, ssrc->buf);
    286  sdst->gpu_address = ssrc->gpu_address;
    287  sdst->b.b.bind = ssrc->b.b.bind;
    288  sdst->flags = ssrc->flags;
    290  assert(sdst->memory_usage_kb == ssrc->memory_usage_kb);
    291  assert(sdst->bo_size == ssrc->bo_size);
    292  assert(sdst->bo_alignment_log2 == ssrc->bo_alignment_log2);
    293  assert(sdst->domains == ssrc->domains);
|
si_compute_blit.c
    426  struct si_texture *sdst = (struct si_texture*)dst;   [local in function si_compute_copy_image]
    432  bool is_linear = ssrc->surface.is_linear || sdst->surface.is_linear;
    437  !vi_dcc_enabled(sdst, dst_level) &&
    485  sdst->surface.u.gfx9.color.dcc.pipe_aligned);
|
si_blit.c
    909  struct si_texture *sdst = (struct si_texture *)dst;   [local in function si_resource_copy_region]
    927  (!vi_dcc_enabled(sdst, dst_level) || sctx->chip_class >= GFX10) &&
   1227  struct si_texture *sdst = (struct si_texture *)info->dst.resource;   [local in function si_blit]
   1233  if (info->is_dri_blit_image && sdst->surface.is_linear &&
   1234  sctx->chip_class >= GFX7 && sdst->surface.flags & RADEON_SURF_IMPORTED) {
   1245  if (async_copy && sctx->chip_class < GFX10_3 && si_sdma_copy_image(sctx, sdst, ssrc))
|
si_clear.c
   1082  struct si_texture *sdst = (struct si_texture *)dst->texture;   [local in function si_clear_render_target]
   1085  (sctx->chip_class >= GFX10 || !vi_dcc_enabled(sdst, dst->u.tex.level))) {
|
/xsrc/external/mit/MesaLib.old/dist/src/gallium/drivers/nouveau/nv50/
nv50_surface.c
    954  struct ureg_dst sdst = ureg_writemask(data, TGSI_WRITEMASK_Y);   [local in function nv50_blitter_make_fp]
    979  ureg_I2F(ureg, sdst, ssrc);
|
/xsrc/external/mit/MesaLib/dist/src/gallium/drivers/nouveau/nv50/
nv50_surface.c
    974  struct ureg_dst sdst = ureg_writemask(data, TGSI_WRITEMASK_Y);   [local in function nv50_blitter_make_fp]
    999  ureg_I2F(ureg, sdst, ssrc);
|
/xsrc/external/mit/MesaLib/dist/docs/relnotes/
19.0.0.rst
   1810  - radeonsi: rename rsrc -> ssrc, rdst -> sdst
|