/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "util/hash_table.h"
#include "util/list.h"
#include "util/set.h"
#include "util/u_string.h"

#include "freedreno_batch.h"
#include "freedreno_context.h"
#include "freedreno_fence.h"
#include "freedreno_query_hw.h"
#include "freedreno_resource.h"

static struct fd_ringbuffer *
alloc_ring(struct fd_batch *batch, unsigned sz, enum fd_ringbuffer_flags flags)
{
   struct fd_context *ctx = batch->ctx;

   /* if kernel is too old to support unlimited # of cmd buffers, we
    * have no option but to allocate large worst-case sizes so that
    * we don't need to grow the ringbuffer.  Performance is likely to
    * suffer, but there is no good alternative.
    *
    * Otherwise if supported, allocate a growable ring with initial
    * size of zero.
    */
   if ((fd_device_version(ctx->screen->dev) >= FD_VERSION_UNLIMITED_CMDS) &&
       !FD_DBG(NOGROW)) {
      flags |= FD_RINGBUFFER_GROWABLE;
      sz = 0;
   }

   return fd_submit_new_ringbuffer(batch->submit, sz, flags);
}

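/* Common setup for a new or recycled batch: create the kernel submit
 * and rings, and reset the per-batch bookkeeping.  Called from
 * fd_batch_create() and again from batch_reset().
 */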
static void
batch_init(struct fd_batch *batch)
{
   struct fd_context *ctx = batch->ctx;

   batch->submit = fd_submit_new(ctx->pipe);
   if (batch->nondraw) {
      batch->gmem = alloc_ring(batch, 0x1000, FD_RINGBUFFER_PRIMARY);
      batch->draw = alloc_ring(batch, 0x100000, 0);
   } else {
      batch->gmem = alloc_ring(batch, 0x100000, FD_RINGBUFFER_PRIMARY);
      batch->draw = alloc_ring(batch, 0x100000, 0);

      /* a6xx+ re-uses draw rb for both draw and binning pass: */
      if (ctx->screen->gen < 6) {
         batch->binning = alloc_ring(batch, 0x100000, 0);
      }
   }

   batch->in_fence_fd = -1;
   batch->fence = NULL;

   /* Work around problems on earlier gens with submit merging, etc,
    * by always creating a fence to request that the submit is flushed
    * immediately:
    */
   if (ctx->screen->gen < 6)
      batch->fence = fd_fence_create(batch);

   batch->cleared = 0;
   batch->fast_cleared = 0;
   batch->invalidated = 0;
   batch->restore = batch->resolve = 0;
   batch->needs_flush = false;
   batch->flushed = false;
   batch->gmem_reason = 0;
   batch->num_draws = 0;
   batch->num_vertices = 0;
   batch->num_bins_per_pipe = 0;
   batch->prim_strm_bits = 0;
   batch->draw_strm_bits = 0;

   fd_reset_wfi(batch);

   util_dynarray_init(&batch->draw_patches, NULL);
   util_dynarray_init(&batch->fb_read_patches, NULL);

   if (is_a2xx(ctx->screen)) {
      util_dynarray_init(&batch->shader_patches, NULL);
      util_dynarray_init(&batch->gmem_patches, NULL);
   }

   if (is_a3xx(ctx->screen))
      util_dynarray_init(&batch->rbrc_patches, NULL);

   assert(batch->resources->entries == 0);

   util_dynarray_init(&batch->samples, NULL);

   u_trace_init(&batch->trace, &ctx->trace_context);
   batch->last_timestamp_cmd = NULL;
}

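/* Allocate and initialize a new batch.  The caller receives the initial
 * reference.
 */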
struct fd_batch *
fd_batch_create(struct fd_context *ctx, bool nondraw)
{
   struct fd_batch *batch = CALLOC_STRUCT(fd_batch);

   if (!batch)
      return NULL;

   DBG("%p", batch);

   pipe_reference_init(&batch->reference, 1);
   batch->ctx = ctx;
   batch->nondraw = nondraw;

   simple_mtx_init(&batch->submit_lock, mtx_plain);

   batch->resources =
      _mesa_set_create(NULL, _mesa_hash_pointer, _mesa_key_pointer_equal);

   batch_init(batch);

   return batch;
}

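/* Release the rings and the kernel submit object.  No-op if the submit
 * has already been cleaned up (e.g. at flush time).
 */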
static void
cleanup_submit(struct fd_batch *batch)
{
   if (!batch->submit)
      return;

   fd_ringbuffer_del(batch->draw);
   fd_ringbuffer_del(batch->gmem);

   if (batch->binning) {
      fd_ringbuffer_del(batch->binning);
      batch->binning = NULL;
   }

   if (batch->prologue) {
      fd_ringbuffer_del(batch->prologue);
      batch->prologue = NULL;
   }

   if (batch->epilogue) {
      fd_ringbuffer_del(batch->epilogue);
      batch->epilogue = NULL;
   }

   if (batch->tile_setup) {
      fd_ringbuffer_del(batch->tile_setup);
      batch->tile_setup = NULL;
   }

   if (batch->tile_fini) {
      fd_ringbuffer_del(batch->tile_fini);
      batch->tile_fini = NULL;
   }

   if (batch->tessellation) {
      fd_bo_del(batch->tessfactor_bo);
      fd_bo_del(batch->tessparam_bo);
      fd_ringbuffer_del(batch->tess_addrs_constobj);
   }

   fd_submit_del(batch->submit);
   batch->submit = NULL;
}

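/* Undo batch_init(), releasing the fence, submit, rings, and any
 * remaining per-batch allocations.
 */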
static void
batch_fini(struct fd_batch *batch)
{
   DBG("%p", batch);

   pipe_resource_reference(&batch->query_buf, NULL);

   if (batch->in_fence_fd != -1)
      close(batch->in_fence_fd);

   /* in case batch wasn't flushed but fence was created: */
   if (batch->fence)
      fd_fence_set_batch(batch->fence, NULL);

   fd_fence_ref(&batch->fence, NULL);

   cleanup_submit(batch);

   util_dynarray_fini(&batch->draw_patches);
   util_dynarray_fini(&batch->fb_read_patches);

   if (is_a2xx(batch->ctx->screen)) {
      util_dynarray_fini(&batch->shader_patches);
      util_dynarray_fini(&batch->gmem_patches);
   }

   if (is_a3xx(batch->ctx->screen))
      util_dynarray_fini(&batch->rbrc_patches);

   while (batch->samples.size > 0) {
      struct fd_hw_sample *samp =
         util_dynarray_pop(&batch->samples, struct fd_hw_sample *);
      fd_hw_sample_reference(batch->ctx, &samp, NULL);
   }
   util_dynarray_fini(&batch->samples);

   u_trace_fini(&batch->trace);
}

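/* Flush every batch that this batch depends on, dropping our references
 * to them in the process.
 */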
static void
batch_flush_dependencies(struct fd_batch *batch) assert_dt
{
   struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
   struct fd_batch *dep;

   foreach_batch (dep, cache, batch->dependents_mask) {
      fd_batch_flush(dep);
      fd_batch_reference(&dep, NULL);
   }

   batch->dependents_mask = 0;
}

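/* Drop references to dependent batches without flushing them; used when
 * the batch is reset or destroyed.
 */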
static void
batch_reset_dependencies(struct fd_batch *batch)
{
   struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
   struct fd_batch *dep;

   foreach_batch (dep, cache, batch->dependents_mask) {
      fd_batch_reference(&dep, NULL);
   }

   batch->dependents_mask = 0;
}

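/* Remove all resources from the batch's resource set, clearing each
 * resource's tracking back-references to the batch.  Requires the
 * screen lock to be held.
 */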
static void
batch_reset_resources(struct fd_batch *batch)
{
   fd_screen_assert_locked(batch->ctx->screen);

   set_foreach (batch->resources, entry) {
      struct fd_resource *rsc = (struct fd_resource *)entry->key;
      _mesa_set_remove(batch->resources, entry);
      debug_assert(rsc->track->batch_mask & (1 << batch->idx));
      rsc->track->batch_mask &= ~(1 << batch->idx);
      if (rsc->track->write_batch == batch)
         fd_batch_reference_locked(&rsc->track->write_batch, NULL);
   }
}

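/* Recycle the batch in-place: drop dependencies and resources, then
 * tear down and re-initialize the batch state.
 */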
static void
batch_reset(struct fd_batch *batch) assert_dt
{
   DBG("%p", batch);

   batch_reset_dependencies(batch);

   fd_screen_lock(batch->ctx->screen);
   batch_reset_resources(batch);
   fd_screen_unlock(batch->ctx->screen);

   batch_fini(batch);
   batch_init(batch);
}

void
fd_batch_reset(struct fd_batch *batch)
{
   if (batch->needs_flush)
      batch_reset(batch);
}

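/* Called (via fd_batch_reference()) when the last reference is dropped.
 * The screen lock is held on entry, released around the teardown, and
 * re-acquired before returning.
 */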
void
__fd_batch_destroy(struct fd_batch *batch)
{
   struct fd_context *ctx = batch->ctx;

   DBG("%p", batch);

   fd_screen_assert_locked(batch->ctx->screen);

   fd_bc_invalidate_batch(batch, true);

   batch_reset_resources(batch);
   debug_assert(batch->resources->entries == 0);
   _mesa_set_destroy(batch->resources, NULL);

   fd_screen_unlock(ctx->screen);
   batch_reset_dependencies(batch);
   debug_assert(batch->dependents_mask == 0);

   util_copy_framebuffer_state(&batch->framebuffer, NULL);
   batch_fini(batch);

   simple_mtx_destroy(&batch->submit_lock);

   free(batch->key);
   free(batch);
   fd_screen_lock(ctx->screen);
}

void
__fd_batch_describe(char *buf, const struct fd_batch *batch)
{
   sprintf(buf, "fd_batch<%u>", batch->seqno);
}

/* Get per-batch prologue */
struct fd_ringbuffer *
fd_batch_get_prologue(struct fd_batch *batch)
{
   if (!batch->prologue)
      batch->prologue = alloc_ring(batch, 0x1000, 0);
   return batch->prologue;
}

/* Only called from fd_batch_flush() */
static void
batch_flush(struct fd_batch *batch) assert_dt
{
   DBG("%p: needs_flush=%d", batch, batch->needs_flush);

   if (!fd_batch_lock_submit(batch))
      return;

   batch->needs_flush = false;

   /* close out the draw cmds by making sure any active queries are
    * paused:
    */
   fd_batch_finish_queries(batch);

   batch_flush_dependencies(batch);

   fd_screen_lock(batch->ctx->screen);
   batch_reset_resources(batch);
   /* NOTE: even with remove=false, the batch is removed from the
    * hashtable, so future lookups won't cache-hit a flushed batch; but
    * the weak reference to the batch is kept, to avoid having multiple
    * batches with the same batch->idx, as that causes all sorts of
    * hilarity.
    */
   fd_bc_invalidate_batch(batch, false);
   batch->flushed = true;

   if (batch == batch->ctx->batch)
      fd_batch_reference_locked(&batch->ctx->batch, NULL);

   fd_screen_unlock(batch->ctx->screen);

   if (batch->fence)
      fd_fence_ref(&batch->ctx->last_fence, batch->fence);

   fd_gmem_render_tiles(batch);

   debug_assert(batch->reference.count > 0);

   cleanup_submit(batch);
   fd_batch_unlock_submit(batch);
}

/* NOTE: could drop the last ref to batch */
void
fd_batch_flush(struct fd_batch *batch)
{
   struct fd_batch *tmp = NULL;

   /* NOTE: we need to hold an extra ref across the body of flush,
    * since the last ref to this batch could be dropped when cleaning
    * up used_resources
    */
   fd_batch_reference(&tmp, batch);
   batch_flush(tmp);
   fd_batch_reference(&tmp, NULL);
}

/* find a batch's dependents mask, including recursive dependencies: */
static uint32_t
recursive_dependents_mask(struct fd_batch *batch)
{
   struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
   struct fd_batch *dep;
   uint32_t dependents_mask = batch->dependents_mask;

   foreach_batch (dep, cache, batch->dependents_mask)
      dependents_mask |= recursive_dependents_mask(dep);

   return dependents_mask;
}

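/* Make 'batch' depend on 'dep', taking a reference to it, so that 'dep'
 * gets flushed before 'batch'.
 */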
void
fd_batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
{
   fd_screen_assert_locked(batch->ctx->screen);

   if (batch->dependents_mask & (1 << dep->idx))
      return;

   /* a loop should not be possible */
   debug_assert(!((1 << batch->idx) & recursive_dependents_mask(dep)));

   struct fd_batch *other = NULL;
   fd_batch_reference_locked(&other, dep);
   batch->dependents_mask |= (1 << dep->idx);
   DBG("%p: added dependency on %p", batch, dep);
}

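/* Flush the batch currently writing 'rsc'.  The screen lock is dropped
 * around the flush and re-acquired before returning.
 */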
static void
flush_write_batch(struct fd_resource *rsc) assert_dt
{
   struct fd_batch *b = NULL;
   fd_batch_reference_locked(&b, rsc->track->write_batch);

   fd_screen_unlock(b->ctx->screen);
   fd_batch_flush(b);
   fd_screen_lock(b->ctx->screen);

   fd_batch_reference_locked(&b, NULL);
}

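/* Add 'rsc' to the batch's resource set and mark the batch in the
 * resource's batch_mask.
 */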
static void
fd_batch_add_resource(struct fd_batch *batch, struct fd_resource *rsc)
{
   if (likely(fd_batch_references_resource(batch, rsc))) {
      debug_assert(_mesa_set_search_pre_hashed(batch->resources, rsc->hash, rsc));
      return;
   }

   debug_assert(!_mesa_set_search(batch->resources, rsc));

   _mesa_set_add_pre_hashed(batch->resources, rsc->hash, rsc);
   rsc->track->batch_mask |= (1 << batch->idx);
}

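/* Mark 'rsc' as written by 'batch': any other batch writing the
 * resource is flushed, and any other batch reading it becomes a
 * dependency, to avoid write hazards.
 */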
void
fd_batch_resource_write(struct fd_batch *batch, struct fd_resource *rsc)
{
   fd_screen_assert_locked(batch->ctx->screen);

   DBG("%p: write %p", batch, rsc);

   /* Must do this before the early out, so we unset a previous resource
    * invalidate (which may have left the write_batch state in place).
    */
   rsc->valid = true;

   if (rsc->track->write_batch == batch)
      return;

   fd_batch_write_prep(batch, rsc);

   if (rsc->stencil)
      fd_batch_resource_write(batch, rsc->stencil);

   /* note, invalidate write batch, to avoid further writes to rsc
    * resulting in a write-after-read hazard.
    */
   /* if we are pending read or write by any other batch: */
   if (unlikely(rsc->track->batch_mask & ~(1 << batch->idx))) {
      struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
      struct fd_batch *dep;

      if (rsc->track->write_batch)
         flush_write_batch(rsc);

      foreach_batch (dep, cache, rsc->track->batch_mask) {
         struct fd_batch *b = NULL;
         if (dep == batch)
            continue;
         /* note that batch_add_dep could flush and unref dep, so
          * we need to hold a reference to keep it live for the
          * fd_bc_invalidate_batch()
          */
         fd_batch_reference(&b, dep);
         fd_batch_add_dep(batch, b);
         fd_bc_invalidate_batch(b, false);
         fd_batch_reference_locked(&b, NULL);
      }
   }
   fd_batch_reference_locked(&rsc->track->write_batch, batch);

   fd_batch_add_resource(batch, rsc);
}

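/* Mark 'rsc' as read by 'batch', flushing any other batch with a
 * pending write to the resource.
 */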
void
fd_batch_resource_read_slowpath(struct fd_batch *batch, struct fd_resource *rsc)
{
   fd_screen_assert_locked(batch->ctx->screen);

   if (rsc->stencil)
      fd_batch_resource_read(batch, rsc->stencil);

   DBG("%p: read %p", batch, rsc);

   /* If reading a resource pending a write, go ahead and flush the
    * writer.  This avoids situations where we end up having to
    * flush the current batch in _resource_used()
    */
   if (unlikely(rsc->track->write_batch && rsc->track->write_batch != batch))
      flush_write_batch(rsc);

   fd_batch_add_resource(batch, rsc);
}

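/* Flush the batch if it has grown too large: when the prim/draw stream
 * estimates exceed a fixed limit, or when the draw ring fails its size
 * check.  With FD_DBG(FLUSH), every call flushes.
 */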
void
fd_batch_check_size(struct fd_batch *batch)
{
   if (FD_DBG(FLUSH)) {
      fd_batch_flush(batch);
      return;
   }

   /* Place a reasonable upper bound on prim/draw stream buffer size: */
   const unsigned limit_bits = 8 * 8 * 1024 * 1024;
   if ((batch->prim_strm_bits > limit_bits) ||
       (batch->draw_strm_bits > limit_bits)) {
      fd_batch_flush(batch);
      return;
   }

   if (!fd_ringbuffer_check_size(batch->draw))
      fd_batch_flush(batch);
}

/* emit a WAIT_FOR_IDLE only if needed, i.e. if there has not already
 * been one since the last draw:
 */
void
fd_wfi(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
   if (batch->needs_wfi) {
      if (batch->ctx->screen->gen >= 5)
         OUT_WFI5(ring);
      else
         OUT_WFI(ring);
      batch->needs_wfi = false;
   }
}