/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
22af69d88dSmrg * 23af69d88dSmrg * Authors: 24af69d88dSmrg * Rob Clark <robclark@freedesktop.org> 25af69d88dSmrg */ 26af69d88dSmrg 27af69d88dSmrg#include "pipe/p_state.h" 287ec681f3Smrg#include "util/format/u_format.h" 2901e04c3fSmrg#include "util/u_draw.h" 307ec681f3Smrg#include "util/u_helpers.h" 31af69d88dSmrg#include "util/u_memory.h" 32af69d88dSmrg#include "util/u_prim.h" 337ec681f3Smrg#include "util/u_string.h" 34af69d88dSmrg 359f464c52Smaya#include "freedreno_blitter.h" 36af69d88dSmrg#include "freedreno_context.h" 377ec681f3Smrg#include "freedreno_draw.h" 3801e04c3fSmrg#include "freedreno_fence.h" 3901e04c3fSmrg#include "freedreno_query_acc.h" 40af69d88dSmrg#include "freedreno_query_hw.h" 417ec681f3Smrg#include "freedreno_resource.h" 427ec681f3Smrg#include "freedreno_state.h" 43af69d88dSmrg#include "freedreno_util.h" 44af69d88dSmrg 4501e04c3fSmrgstatic void 467ec681f3Smrgresource_read(struct fd_batch *batch, struct pipe_resource *prsc) assert_dt 4701e04c3fSmrg{ 487ec681f3Smrg if (!prsc) 497ec681f3Smrg return; 507ec681f3Smrg fd_batch_resource_read(batch, fd_resource(prsc)); 5101e04c3fSmrg} 5201e04c3fSmrg 5301e04c3fSmrgstatic void 547ec681f3Smrgresource_written(struct fd_batch *batch, struct pipe_resource *prsc) assert_dt 5501e04c3fSmrg{ 567ec681f3Smrg if (!prsc) 577ec681f3Smrg return; 587ec681f3Smrg fd_batch_resource_write(batch, fd_resource(prsc)); 5901e04c3fSmrg} 60af69d88dSmrg 61af69d88dSmrgstatic void 627ec681f3Smrgbatch_draw_tracking_for_dirty_bits(struct fd_batch *batch) assert_dt 63af69d88dSmrg{ 647ec681f3Smrg struct fd_context *ctx = batch->ctx; 657ec681f3Smrg struct pipe_framebuffer_state *pfb = &batch->framebuffer; 667ec681f3Smrg unsigned buffers = 0, restore_buffers = 0; 677ec681f3Smrg 687ec681f3Smrg if (ctx->dirty & (FD_DIRTY_FRAMEBUFFER | FD_DIRTY_ZSA)) { 697ec681f3Smrg if (fd_depth_enabled(ctx)) { 707ec681f3Smrg if (fd_resource(pfb->zsbuf->texture)->valid) { 717ec681f3Smrg restore_buffers |= FD_BUFFER_DEPTH; 727ec681f3Smrg /* storing packed d/s 
depth also stores stencil, so we need 737ec681f3Smrg * the stencil restored too to avoid invalidating it. 747ec681f3Smrg */ 757ec681f3Smrg if (pfb->zsbuf->texture->format == PIPE_FORMAT_Z24_UNORM_S8_UINT) 767ec681f3Smrg restore_buffers |= FD_BUFFER_STENCIL; 777ec681f3Smrg } else { 787ec681f3Smrg batch->invalidated |= FD_BUFFER_DEPTH; 797ec681f3Smrg } 807ec681f3Smrg batch->gmem_reason |= FD_GMEM_DEPTH_ENABLED; 817ec681f3Smrg if (fd_depth_write_enabled(ctx)) { 827ec681f3Smrg buffers |= FD_BUFFER_DEPTH; 837ec681f3Smrg resource_written(batch, pfb->zsbuf->texture); 847ec681f3Smrg } else { 857ec681f3Smrg resource_read(batch, pfb->zsbuf->texture); 867ec681f3Smrg } 877ec681f3Smrg } 887ec681f3Smrg 897ec681f3Smrg if (fd_stencil_enabled(ctx)) { 907ec681f3Smrg if (fd_resource(pfb->zsbuf->texture)->valid) { 917ec681f3Smrg restore_buffers |= FD_BUFFER_STENCIL; 927ec681f3Smrg /* storing packed d/s stencil also stores depth, so we need 937ec681f3Smrg * the depth restored too to avoid invalidating it. 947ec681f3Smrg */ 957ec681f3Smrg if (pfb->zsbuf->texture->format == PIPE_FORMAT_Z24_UNORM_S8_UINT) 967ec681f3Smrg restore_buffers |= FD_BUFFER_DEPTH; 977ec681f3Smrg } else { 987ec681f3Smrg batch->invalidated |= FD_BUFFER_STENCIL; 997ec681f3Smrg } 1007ec681f3Smrg batch->gmem_reason |= FD_GMEM_STENCIL_ENABLED; 1017ec681f3Smrg buffers |= FD_BUFFER_STENCIL; 1027ec681f3Smrg resource_written(batch, pfb->zsbuf->texture); 1037ec681f3Smrg } 1047ec681f3Smrg } 1057ec681f3Smrg 1067ec681f3Smrg if (ctx->dirty & FD_DIRTY_FRAMEBUFFER) { 1077ec681f3Smrg for (unsigned i = 0; i < pfb->nr_cbufs; i++) { 1087ec681f3Smrg struct pipe_resource *surf; 1097ec681f3Smrg 1107ec681f3Smrg if (!pfb->cbufs[i]) 1117ec681f3Smrg continue; 1127ec681f3Smrg 1137ec681f3Smrg surf = pfb->cbufs[i]->texture; 1147ec681f3Smrg 1157ec681f3Smrg if (fd_resource(surf)->valid) { 1167ec681f3Smrg restore_buffers |= PIPE_CLEAR_COLOR0 << i; 1177ec681f3Smrg } else { 1187ec681f3Smrg batch->invalidated |= PIPE_CLEAR_COLOR0 << i; 
1197ec681f3Smrg } 1207ec681f3Smrg 1217ec681f3Smrg buffers |= PIPE_CLEAR_COLOR0 << i; 1227ec681f3Smrg 1237ec681f3Smrg if (ctx->dirty & FD_DIRTY_FRAMEBUFFER) 1247ec681f3Smrg resource_written(batch, pfb->cbufs[i]->texture); 1257ec681f3Smrg } 1267ec681f3Smrg } 1277ec681f3Smrg 1287ec681f3Smrg if (ctx->dirty & FD_DIRTY_BLEND) { 1297ec681f3Smrg if (ctx->blend->logicop_enable) 1307ec681f3Smrg batch->gmem_reason |= FD_GMEM_LOGICOP_ENABLED; 1317ec681f3Smrg for (unsigned i = 0; i < pfb->nr_cbufs; i++) { 1327ec681f3Smrg if (ctx->blend->rt[i].blend_enable) 1337ec681f3Smrg batch->gmem_reason |= FD_GMEM_BLEND_ENABLED; 1347ec681f3Smrg } 1357ec681f3Smrg } 1367ec681f3Smrg 1377ec681f3Smrg /* Mark SSBOs */ 1387ec681f3Smrg if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_SSBO) { 1397ec681f3Smrg const struct fd_shaderbuf_stateobj *so = 1407ec681f3Smrg &ctx->shaderbuf[PIPE_SHADER_FRAGMENT]; 1417ec681f3Smrg 1427ec681f3Smrg u_foreach_bit (i, so->enabled_mask & so->writable_mask) 1437ec681f3Smrg resource_written(batch, so->sb[i].buffer); 1447ec681f3Smrg 1457ec681f3Smrg u_foreach_bit (i, so->enabled_mask & ~so->writable_mask) 1467ec681f3Smrg resource_read(batch, so->sb[i].buffer); 1477ec681f3Smrg } 1487ec681f3Smrg 1497ec681f3Smrg if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_IMAGE) { 1507ec681f3Smrg u_foreach_bit (i, ctx->shaderimg[PIPE_SHADER_FRAGMENT].enabled_mask) { 1517ec681f3Smrg struct pipe_image_view *img = 1527ec681f3Smrg &ctx->shaderimg[PIPE_SHADER_FRAGMENT].si[i]; 1537ec681f3Smrg if (img->access & PIPE_IMAGE_ACCESS_WRITE) 1547ec681f3Smrg resource_written(batch, img->resource); 1557ec681f3Smrg else 1567ec681f3Smrg resource_read(batch, img->resource); 1577ec681f3Smrg } 1587ec681f3Smrg } 1597ec681f3Smrg 1607ec681f3Smrg u_foreach_bit (s, ctx->bound_shader_stages) { 1617ec681f3Smrg /* Mark constbuf as being read: */ 1627ec681f3Smrg if (ctx->dirty_shader[s] & FD_DIRTY_SHADER_CONST) { 1637ec681f3Smrg u_foreach_bit (i, ctx->constbuf[s].enabled_mask) 
1647ec681f3Smrg resource_read(batch, ctx->constbuf[s].cb[i].buffer); 1657ec681f3Smrg } 1667ec681f3Smrg 1677ec681f3Smrg /* Mark textures as being read */ 1687ec681f3Smrg if (ctx->dirty_shader[s] & FD_DIRTY_SHADER_TEX) { 1697ec681f3Smrg u_foreach_bit (i, ctx->tex[s].valid_textures) 1707ec681f3Smrg resource_read(batch, ctx->tex[s].textures[i]->texture); 1717ec681f3Smrg } 1727ec681f3Smrg } 1737ec681f3Smrg 1747ec681f3Smrg /* Mark VBOs as being read */ 1757ec681f3Smrg if (ctx->dirty & FD_DIRTY_VTXBUF) { 1767ec681f3Smrg u_foreach_bit (i, ctx->vtx.vertexbuf.enabled_mask) { 1777ec681f3Smrg assert(!ctx->vtx.vertexbuf.vb[i].is_user_buffer); 1787ec681f3Smrg resource_read(batch, ctx->vtx.vertexbuf.vb[i].buffer.resource); 1797ec681f3Smrg } 1807ec681f3Smrg } 1817ec681f3Smrg 1827ec681f3Smrg /* Mark streamout buffers as being written.. */ 1837ec681f3Smrg if (ctx->dirty & FD_DIRTY_STREAMOUT) { 1847ec681f3Smrg for (unsigned i = 0; i < ctx->streamout.num_targets; i++) 1857ec681f3Smrg if (ctx->streamout.targets[i]) 1867ec681f3Smrg resource_written(batch, ctx->streamout.targets[i]->buffer); 1877ec681f3Smrg } 1887ec681f3Smrg 1897ec681f3Smrg /* any buffers that haven't been cleared yet, we need to restore: */ 1907ec681f3Smrg batch->restore |= restore_buffers & (FD_BUFFER_ALL & ~batch->invalidated); 1917ec681f3Smrg /* and any buffers used, need to be resolved: */ 1927ec681f3Smrg batch->resolve |= buffers; 1937ec681f3Smrg} 1947ec681f3Smrg 1957ec681f3Smrgstatic void 1967ec681f3Smrgbatch_draw_tracking(struct fd_batch *batch, const struct pipe_draw_info *info, 1977ec681f3Smrg const struct pipe_draw_indirect_info *indirect) assert_dt 1987ec681f3Smrg{ 1997ec681f3Smrg struct fd_context *ctx = batch->ctx; 2007ec681f3Smrg 2017ec681f3Smrg /* NOTE: needs to be before resource_written(batch->query_buf), otherwise 2027ec681f3Smrg * query_buf may not be created yet. 
2037ec681f3Smrg */ 2047ec681f3Smrg fd_batch_update_queries(batch); 2057ec681f3Smrg 2067ec681f3Smrg /* 2077ec681f3Smrg * Figure out the buffers/features we need: 2087ec681f3Smrg */ 2097ec681f3Smrg 2107ec681f3Smrg fd_screen_lock(ctx->screen); 2117ec681f3Smrg 2127ec681f3Smrg if (ctx->dirty & FD_DIRTY_RESOURCE) 2137ec681f3Smrg batch_draw_tracking_for_dirty_bits(batch); 2147ec681f3Smrg 2157ec681f3Smrg /* Mark index buffer as being read */ 2167ec681f3Smrg if (info->index_size) 2177ec681f3Smrg resource_read(batch, info->index.resource); 2187ec681f3Smrg 2197ec681f3Smrg /* Mark indirect draw buffer as being read */ 2207ec681f3Smrg if (indirect) { 2217ec681f3Smrg if (indirect->buffer) 2227ec681f3Smrg resource_read(batch, indirect->buffer); 2237ec681f3Smrg if (indirect->count_from_stream_output) 2247ec681f3Smrg resource_read( 2257ec681f3Smrg batch, fd_stream_output_target(indirect->count_from_stream_output) 2267ec681f3Smrg ->offset_buf); 2277ec681f3Smrg } 2287ec681f3Smrg 2297ec681f3Smrg resource_written(batch, batch->query_buf); 2307ec681f3Smrg 2317ec681f3Smrg list_for_each_entry (struct fd_acc_query, aq, &ctx->acc_active_queries, node) 2327ec681f3Smrg resource_written(batch, aq->prsc); 2337ec681f3Smrg 2347ec681f3Smrg fd_screen_unlock(ctx->screen); 2357ec681f3Smrg} 2367ec681f3Smrg 2377ec681f3Smrgstatic void 2387ec681f3Smrgupdate_draw_stats(struct fd_context *ctx, const struct pipe_draw_info *info, 2397ec681f3Smrg const struct pipe_draw_start_count_bias *draws, 2407ec681f3Smrg unsigned num_draws) assert_dt 2417ec681f3Smrg{ 2427ec681f3Smrg ctx->stats.draw_calls++; 2437ec681f3Smrg 2447ec681f3Smrg if (ctx->screen->gen < 6) { 2457ec681f3Smrg /* Counting prims in sw doesn't work for GS and tesselation. For older 2467ec681f3Smrg * gens we don't have those stages and don't have the hw counters enabled, 2477ec681f3Smrg * so keep the count accurate for non-patch geometry. 
2487ec681f3Smrg */ 2497ec681f3Smrg unsigned prims = 0; 2507ec681f3Smrg if ((info->mode != PIPE_PRIM_PATCHES) && (info->mode != PIPE_PRIM_MAX)) { 2517ec681f3Smrg for (unsigned i = 0; i < num_draws; i++) { 2527ec681f3Smrg prims += u_reduced_prims_for_vertices(info->mode, draws[i].count); 2537ec681f3Smrg } 2547ec681f3Smrg } 2557ec681f3Smrg 2567ec681f3Smrg ctx->stats.prims_generated += prims; 2577ec681f3Smrg 2587ec681f3Smrg if (ctx->streamout.num_targets > 0) { 2597ec681f3Smrg /* Clip the prims we're writing to the size of the SO buffers. */ 2607ec681f3Smrg enum pipe_prim_type tf_prim = u_decomposed_prim(info->mode); 2617ec681f3Smrg unsigned verts_written = u_vertices_for_prims(tf_prim, prims); 2627ec681f3Smrg unsigned remaining_vert_space = 2637ec681f3Smrg ctx->streamout.max_tf_vtx - ctx->streamout.verts_written; 2647ec681f3Smrg if (verts_written > remaining_vert_space) { 2657ec681f3Smrg verts_written = remaining_vert_space; 2667ec681f3Smrg u_trim_pipe_prim(tf_prim, &remaining_vert_space); 2677ec681f3Smrg } 2687ec681f3Smrg ctx->streamout.verts_written += verts_written; 2697ec681f3Smrg 2707ec681f3Smrg ctx->stats.prims_emitted += 2717ec681f3Smrg u_reduced_prims_for_vertices(tf_prim, verts_written); 2727ec681f3Smrg } 2737ec681f3Smrg } 2747ec681f3Smrg} 2757ec681f3Smrg 2767ec681f3Smrgstatic void 2777ec681f3Smrgfd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info, 2787ec681f3Smrg unsigned drawid_offset, 2797ec681f3Smrg const struct pipe_draw_indirect_info *indirect, 2807ec681f3Smrg const struct pipe_draw_start_count_bias *draws, unsigned num_draws) in_dt 2817ec681f3Smrg{ 2827ec681f3Smrg struct fd_context *ctx = fd_context(pctx); 2837ec681f3Smrg 2847ec681f3Smrg /* for debugging problems with indirect draw, it is convenient 2857ec681f3Smrg * to be able to emulate it, to determine if game is feeding us 2867ec681f3Smrg * bogus data: 2877ec681f3Smrg */ 2887ec681f3Smrg if (indirect && indirect->buffer && FD_DBG(NOINDR)) { 2897ec681f3Smrg /* num_draws is only 
applicable for direct draws: */ 2907ec681f3Smrg assert(num_draws == 1); 2917ec681f3Smrg util_draw_indirect(pctx, info, indirect); 2927ec681f3Smrg return; 2937ec681f3Smrg } 2947ec681f3Smrg 2957ec681f3Smrg /* TODO: push down the region versions into the tiles */ 2967ec681f3Smrg if (!fd_render_condition_check(pctx)) 2977ec681f3Smrg return; 2987ec681f3Smrg 2997ec681f3Smrg /* Upload a user index buffer. */ 3007ec681f3Smrg struct pipe_resource *indexbuf = NULL; 3017ec681f3Smrg unsigned index_offset = 0; 3027ec681f3Smrg struct pipe_draw_info new_info; 3037ec681f3Smrg if (info->index_size) { 3047ec681f3Smrg if (info->has_user_indices) { 3057ec681f3Smrg if (num_draws > 1) { 3067ec681f3Smrg util_draw_multi(pctx, info, drawid_offset, indirect, draws, num_draws); 3077ec681f3Smrg return; 3087ec681f3Smrg } 3097ec681f3Smrg if (!util_upload_index_buffer(pctx, info, &draws[0], &indexbuf, 3107ec681f3Smrg &index_offset, 4)) 3117ec681f3Smrg return; 3127ec681f3Smrg new_info = *info; 3137ec681f3Smrg new_info.index.resource = indexbuf; 3147ec681f3Smrg new_info.has_user_indices = false; 3157ec681f3Smrg info = &new_info; 3167ec681f3Smrg } else { 3177ec681f3Smrg indexbuf = info->index.resource; 3187ec681f3Smrg } 3197ec681f3Smrg } 3207ec681f3Smrg 3217ec681f3Smrg if ((ctx->streamout.num_targets > 0) && (num_draws > 1)) { 3227ec681f3Smrg util_draw_multi(pctx, info, drawid_offset, indirect, draws, num_draws); 3237ec681f3Smrg return; 3247ec681f3Smrg } 3257ec681f3Smrg 3267ec681f3Smrg struct fd_batch *batch = fd_context_batch(ctx); 3277ec681f3Smrg 3287ec681f3Smrg batch_draw_tracking(batch, info, indirect); 3297ec681f3Smrg 3307ec681f3Smrg while (unlikely(!fd_batch_lock_submit(batch))) { 3317ec681f3Smrg /* The current batch was flushed in batch_draw_tracking() 3327ec681f3Smrg * so start anew. 
We know this won't happen a second time 3337ec681f3Smrg * since we are dealing with a fresh batch: 3347ec681f3Smrg */ 3357ec681f3Smrg fd_batch_reference(&batch, NULL); 3367ec681f3Smrg batch = fd_context_batch(ctx); 3377ec681f3Smrg batch_draw_tracking(batch, info, indirect); 3387ec681f3Smrg assert(ctx->batch == batch); 3397ec681f3Smrg } 3407ec681f3Smrg 3417ec681f3Smrg batch->num_draws++; 3427ec681f3Smrg 3437ec681f3Smrg /* Marking the batch as needing flush must come after the batch 3447ec681f3Smrg * dependency tracking (resource_read()/resource_write()), as that 3457ec681f3Smrg * can trigger a flush 3467ec681f3Smrg */ 3477ec681f3Smrg fd_batch_needs_flush(batch); 3487ec681f3Smrg 3497ec681f3Smrg struct pipe_framebuffer_state *pfb = &batch->framebuffer; 3507ec681f3Smrg DBG("%p: %ux%u num_draws=%u (%s/%s)", batch, pfb->width, pfb->height, 3517ec681f3Smrg batch->num_draws, 3527ec681f3Smrg util_format_short_name(pipe_surface_format(pfb->cbufs[0])), 3537ec681f3Smrg util_format_short_name(pipe_surface_format(pfb->zsbuf))); 3547ec681f3Smrg 3557ec681f3Smrg batch->cost += ctx->draw_cost; 3567ec681f3Smrg 3577ec681f3Smrg for (unsigned i = 0; i < num_draws; i++) { 3587ec681f3Smrg ctx->draw_vbo(ctx, info, drawid_offset, indirect, &draws[i], index_offset); 3597ec681f3Smrg 3607ec681f3Smrg batch->num_vertices += draws[i].count * info->instance_count; 3617ec681f3Smrg } 3627ec681f3Smrg 3637ec681f3Smrg if (unlikely(ctx->stats_users > 0)) 3647ec681f3Smrg update_draw_stats(ctx, info, draws, num_draws); 3657ec681f3Smrg 3667ec681f3Smrg for (unsigned i = 0; i < ctx->streamout.num_targets; i++) { 3677ec681f3Smrg assert(num_draws == 1); 3687ec681f3Smrg ctx->streamout.offsets[i] += draws[0].count; 3697ec681f3Smrg } 3707ec681f3Smrg 3717ec681f3Smrg if (FD_DBG(DDRAW)) 3727ec681f3Smrg fd_context_all_dirty(ctx); 3737ec681f3Smrg 3747ec681f3Smrg debug_assert(!batch->flushed); 3757ec681f3Smrg 3767ec681f3Smrg fd_batch_unlock_submit(batch); 3777ec681f3Smrg fd_batch_check_size(batch); 3787ec681f3Smrg 
fd_batch_reference(&batch, NULL); 3797ec681f3Smrg 3807ec681f3Smrg if (info == &new_info) 3817ec681f3Smrg pipe_resource_reference(&indexbuf, NULL); 3827ec681f3Smrg} 3837ec681f3Smrg 3847ec681f3Smrgstatic void 3857ec681f3Smrgbatch_clear_tracking(struct fd_batch *batch, unsigned buffers) assert_dt 3867ec681f3Smrg{ 3877ec681f3Smrg struct fd_context *ctx = batch->ctx; 3887ec681f3Smrg struct pipe_framebuffer_state *pfb = &batch->framebuffer; 3897ec681f3Smrg unsigned cleared_buffers; 3907ec681f3Smrg 3917ec681f3Smrg /* pctx->clear() is only for full-surface clears, so scissor is 3927ec681f3Smrg * equivalent to having GL_SCISSOR_TEST disabled: 3937ec681f3Smrg */ 3947ec681f3Smrg batch->max_scissor.minx = 0; 3957ec681f3Smrg batch->max_scissor.miny = 0; 3967ec681f3Smrg batch->max_scissor.maxx = pfb->width; 3977ec681f3Smrg batch->max_scissor.maxy = pfb->height; 3987ec681f3Smrg 3997ec681f3Smrg /* for bookkeeping about which buffers have been cleared (and thus 4007ec681f3Smrg * can fully or partially skip mem2gmem) we need to ignore buffers 4017ec681f3Smrg * that have already had a draw, in case apps do silly things like 4027ec681f3Smrg * clear after draw (ie. 
if you only clear the color buffer, but 4037ec681f3Smrg * something like alpha-test causes side effects from the draw in 4047ec681f3Smrg * the depth buffer, etc) 4057ec681f3Smrg */ 4067ec681f3Smrg cleared_buffers = buffers & (FD_BUFFER_ALL & ~batch->restore); 4077ec681f3Smrg batch->cleared |= buffers; 4087ec681f3Smrg batch->invalidated |= cleared_buffers; 4097ec681f3Smrg 4107ec681f3Smrg batch->resolve |= buffers; 4117ec681f3Smrg 4127ec681f3Smrg fd_screen_lock(ctx->screen); 4137ec681f3Smrg 4147ec681f3Smrg if (buffers & PIPE_CLEAR_COLOR) 4157ec681f3Smrg for (unsigned i = 0; i < pfb->nr_cbufs; i++) 4167ec681f3Smrg if (buffers & (PIPE_CLEAR_COLOR0 << i)) 4177ec681f3Smrg resource_written(batch, pfb->cbufs[i]->texture); 4187ec681f3Smrg 4197ec681f3Smrg if (buffers & (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL)) { 4207ec681f3Smrg resource_written(batch, pfb->zsbuf->texture); 4217ec681f3Smrg batch->gmem_reason |= FD_GMEM_CLEARS_DEPTH_STENCIL; 4227ec681f3Smrg } 4237ec681f3Smrg 4247ec681f3Smrg resource_written(batch, batch->query_buf); 4257ec681f3Smrg 4267ec681f3Smrg list_for_each_entry (struct fd_acc_query, aq, &ctx->acc_active_queries, node) 4277ec681f3Smrg resource_written(batch, aq->prsc); 4287ec681f3Smrg 4297ec681f3Smrg fd_screen_unlock(ctx->screen); 430af69d88dSmrg} 431af69d88dSmrg 432af69d88dSmrgstatic void 433af69d88dSmrgfd_clear(struct pipe_context *pctx, unsigned buffers, 4347ec681f3Smrg const struct pipe_scissor_state *scissor_state, 4357ec681f3Smrg const union pipe_color_union *color, double depth, 4367ec681f3Smrg unsigned stencil) in_dt 437af69d88dSmrg{ 4387ec681f3Smrg struct fd_context *ctx = fd_context(pctx); 4397ec681f3Smrg 4407ec681f3Smrg /* TODO: push down the region versions into the tiles */ 4417ec681f3Smrg if (!fd_render_condition_check(pctx)) 4427ec681f3Smrg return; 4437ec681f3Smrg 4447ec681f3Smrg struct fd_batch *batch = fd_context_batch(ctx); 4457ec681f3Smrg 4467ec681f3Smrg batch_clear_tracking(batch, buffers); 4477ec681f3Smrg 4487ec681f3Smrg while 
(unlikely(!fd_batch_lock_submit(batch))) { 4497ec681f3Smrg /* The current batch was flushed in batch_clear_tracking() 4507ec681f3Smrg * so start anew. We know this won't happen a second time 4517ec681f3Smrg * since we are dealing with a fresh batch: 4527ec681f3Smrg */ 4537ec681f3Smrg fd_batch_reference(&batch, NULL); 4547ec681f3Smrg batch = fd_context_batch(ctx); 4557ec681f3Smrg batch_clear_tracking(batch, buffers); 4567ec681f3Smrg assert(ctx->batch == batch); 4577ec681f3Smrg } 4587ec681f3Smrg 4597ec681f3Smrg /* Marking the batch as needing flush must come after the batch 4607ec681f3Smrg * dependency tracking (resource_read()/resource_write()), as that 4617ec681f3Smrg * can trigger a flush 4627ec681f3Smrg */ 4637ec681f3Smrg fd_batch_needs_flush(batch); 4647ec681f3Smrg 4657ec681f3Smrg struct pipe_framebuffer_state *pfb = &batch->framebuffer; 4667ec681f3Smrg DBG("%p: %x %ux%u depth=%f, stencil=%u (%s/%s)", batch, buffers, pfb->width, 4677ec681f3Smrg pfb->height, depth, stencil, 4687ec681f3Smrg util_format_short_name(pipe_surface_format(pfb->cbufs[0])), 4697ec681f3Smrg util_format_short_name(pipe_surface_format(pfb->zsbuf))); 4707ec681f3Smrg 4717ec681f3Smrg /* if per-gen backend doesn't implement ctx->clear() generic 4727ec681f3Smrg * blitter clear: 4737ec681f3Smrg */ 4747ec681f3Smrg bool fallback = true; 4757ec681f3Smrg 4767ec681f3Smrg if (ctx->clear) { 4777ec681f3Smrg fd_batch_update_queries(batch); 4787ec681f3Smrg 4797ec681f3Smrg if (ctx->clear(ctx, buffers, color, depth, stencil)) { 4807ec681f3Smrg if (FD_DBG(DCLEAR)) 4817ec681f3Smrg fd_context_all_dirty(ctx); 4827ec681f3Smrg 4837ec681f3Smrg fallback = false; 4847ec681f3Smrg } 4857ec681f3Smrg } 4867ec681f3Smrg 4877ec681f3Smrg debug_assert(!batch->flushed); 4887ec681f3Smrg 4897ec681f3Smrg fd_batch_unlock_submit(batch); 4907ec681f3Smrg fd_batch_check_size(batch); 4917ec681f3Smrg 4927ec681f3Smrg if (fallback) { 4937ec681f3Smrg fd_blitter_clear(pctx, buffers, color, depth, stencil); 4947ec681f3Smrg } 4957ec681f3Smrg 
4967ec681f3Smrg fd_batch_reference(&batch, NULL); 497af69d88dSmrg} 498af69d88dSmrg 499af69d88dSmrgstatic void 500af69d88dSmrgfd_clear_render_target(struct pipe_context *pctx, struct pipe_surface *ps, 5017ec681f3Smrg const union pipe_color_union *color, unsigned x, 5027ec681f3Smrg unsigned y, unsigned w, unsigned h, 5037ec681f3Smrg bool render_condition_enabled) 504af69d88dSmrg{ 5057ec681f3Smrg DBG("TODO: x=%u, y=%u, w=%u, h=%u", x, y, w, h); 506af69d88dSmrg} 507af69d88dSmrg 508af69d88dSmrgstatic void 509af69d88dSmrgfd_clear_depth_stencil(struct pipe_context *pctx, struct pipe_surface *ps, 5107ec681f3Smrg unsigned buffers, double depth, unsigned stencil, 5117ec681f3Smrg unsigned x, unsigned y, unsigned w, unsigned h, 5127ec681f3Smrg bool render_condition_enabled) 513af69d88dSmrg{ 5147ec681f3Smrg DBG("TODO: buffers=%u, depth=%f, stencil=%u, x=%u, y=%u, w=%u, h=%u", 5157ec681f3Smrg buffers, depth, stencil, x, y, w, h); 516af69d88dSmrg} 517af69d88dSmrg 51801e04c3fSmrgstatic void 5197ec681f3Smrgfd_launch_grid(struct pipe_context *pctx, 5207ec681f3Smrg const struct pipe_grid_info *info) in_dt 52101e04c3fSmrg{ 5227ec681f3Smrg struct fd_context *ctx = fd_context(pctx); 5237ec681f3Smrg const struct fd_shaderbuf_stateobj *so = 5247ec681f3Smrg &ctx->shaderbuf[PIPE_SHADER_COMPUTE]; 5257ec681f3Smrg struct fd_batch *batch, *save_batch = NULL; 5267ec681f3Smrg 5277ec681f3Smrg batch = fd_bc_alloc_batch(ctx, true); 5287ec681f3Smrg fd_batch_reference(&save_batch, ctx->batch); 5297ec681f3Smrg fd_batch_reference(&ctx->batch, batch); 5307ec681f3Smrg fd_context_all_dirty(ctx); 5317ec681f3Smrg 5327ec681f3Smrg fd_screen_lock(ctx->screen); 5337ec681f3Smrg 5347ec681f3Smrg /* Mark SSBOs */ 5357ec681f3Smrg u_foreach_bit (i, so->enabled_mask & so->writable_mask) 5367ec681f3Smrg resource_written(batch, so->sb[i].buffer); 5377ec681f3Smrg 5387ec681f3Smrg u_foreach_bit (i, so->enabled_mask & ~so->writable_mask) 5397ec681f3Smrg resource_read(batch, so->sb[i].buffer); 5407ec681f3Smrg 5417ec681f3Smrg 
u_foreach_bit (i, ctx->shaderimg[PIPE_SHADER_COMPUTE].enabled_mask) { 5427ec681f3Smrg struct pipe_image_view *img = &ctx->shaderimg[PIPE_SHADER_COMPUTE].si[i]; 5437ec681f3Smrg if (img->access & PIPE_IMAGE_ACCESS_WRITE) 5447ec681f3Smrg resource_written(batch, img->resource); 5457ec681f3Smrg else 5467ec681f3Smrg resource_read(batch, img->resource); 5477ec681f3Smrg } 5487ec681f3Smrg 5497ec681f3Smrg /* UBO's are read */ 5507ec681f3Smrg u_foreach_bit (i, ctx->constbuf[PIPE_SHADER_COMPUTE].enabled_mask) 5517ec681f3Smrg resource_read(batch, ctx->constbuf[PIPE_SHADER_COMPUTE].cb[i].buffer); 5527ec681f3Smrg 5537ec681f3Smrg /* Mark textures as being read */ 5547ec681f3Smrg u_foreach_bit (i, ctx->tex[PIPE_SHADER_COMPUTE].valid_textures) 5557ec681f3Smrg resource_read(batch, ctx->tex[PIPE_SHADER_COMPUTE].textures[i]->texture); 5567ec681f3Smrg 5577ec681f3Smrg /* For global buffers, we don't really know if read or written, so assume 5587ec681f3Smrg * the worst: 5597ec681f3Smrg */ 5607ec681f3Smrg u_foreach_bit (i, ctx->global_bindings.enabled_mask) 5617ec681f3Smrg resource_written(batch, ctx->global_bindings.buf[i]); 5627ec681f3Smrg 5637ec681f3Smrg if (info->indirect) 5647ec681f3Smrg resource_read(batch, info->indirect); 5657ec681f3Smrg 5667ec681f3Smrg fd_screen_unlock(ctx->screen); 5677ec681f3Smrg 5687ec681f3Smrg DBG("%p: work_dim=%u, block=%ux%ux%u, grid=%ux%ux%u", 5697ec681f3Smrg batch, info->work_dim, 5707ec681f3Smrg info->block[0], info->block[1], info->block[2], 5717ec681f3Smrg info->grid[0], info->grid[1], info->grid[2]); 5727ec681f3Smrg 5737ec681f3Smrg fd_batch_needs_flush(batch); 5747ec681f3Smrg ctx->launch_grid(ctx, info); 5757ec681f3Smrg 5767ec681f3Smrg fd_batch_flush(batch); 5777ec681f3Smrg 5787ec681f3Smrg fd_batch_reference(&ctx->batch, save_batch); 5797ec681f3Smrg fd_context_all_dirty(ctx); 5807ec681f3Smrg fd_batch_reference(&save_batch, NULL); 5817ec681f3Smrg fd_batch_reference(&batch, NULL); 58201e04c3fSmrg} 58301e04c3fSmrg 584af69d88dSmrgvoid 
585af69d88dSmrgfd_draw_init(struct pipe_context *pctx) 586af69d88dSmrg{ 5877ec681f3Smrg pctx->draw_vbo = fd_draw_vbo; 5887ec681f3Smrg pctx->clear = fd_clear; 5897ec681f3Smrg pctx->clear_render_target = fd_clear_render_target; 5907ec681f3Smrg pctx->clear_depth_stencil = fd_clear_depth_stencil; 5917ec681f3Smrg 5927ec681f3Smrg if (has_compute(fd_screen(pctx->screen))) { 5937ec681f3Smrg pctx->launch_grid = fd_launch_grid; 5947ec681f3Smrg } 595af69d88dSmrg} 596