/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file iris_batch.c
 *
 * Batchbuffer and command submission module.
 *
 * Every API draw call results in a number of GPU commands, which we
 * collect into a "batch buffer".  Typically, many draw calls are grouped
 * into a single batch to amortize command submission overhead.
 *
 * We submit batches to the kernel using the I915_GEM_EXECBUFFER2 ioctl.
 * One critical piece of data is the "validation list", which contains a
 * list of the buffer objects (BOs) which the GPU commands need.
 * The kernel will make sure these are resident and pinned at the correct
 * virtual memory address before executing our batch.  If a BO is not in
 * the validation list, it effectively does not exist, so take care.
 */

#include "iris_batch.h"
#include "iris_bufmgr.h"
#include "iris_context.h"
#include "iris_fence.h"

#include "drm-uapi/i915_drm.h"

#include "common/intel_aux_map.h"
#include "intel/common/intel_gem.h"
#include "util/hash_table.h"
#include "util/set.h"
#include "util/u_upload_mgr.h"
#include "main/macros.h"

#include <errno.h>
#include <xf86drm.h>

#if HAVE_VALGRIND
#include <valgrind.h>
#include <memcheck.h>
#define VG(x) x
#else
#define VG(x)
#endif

#define FILE_DEBUG_FLAG DEBUG_BUFMGR

static void
iris_batch_reset(struct iris_batch *batch);

static unsigned
num_fences(struct iris_batch *batch)
{
   return util_dynarray_num_elements(&batch->exec_fences,
                                     struct drm_i915_gem_exec_fence);
}

/**
 * Debugging code to dump the fence list, used by INTEL_DEBUG=submit.
 */
static void
dump_fence_list(struct iris_batch *batch)
{
   fprintf(stderr, "Fence list (length %u):      ", num_fences(batch));

   util_dynarray_foreach(&batch->exec_fences,
                         struct drm_i915_gem_exec_fence, f) {
      fprintf(stderr, "%s%u%s ",
              (f->flags & I915_EXEC_FENCE_WAIT) ? "..." : "",
              f->handle,
              (f->flags & I915_EXEC_FENCE_SIGNAL) ? "!" : "");
   }

   fprintf(stderr, "\n");
}

/**
 * Debugging code to dump the validation list, used by INTEL_DEBUG=submit.
 */
static void
dump_bo_list(struct iris_batch *batch)
{
   fprintf(stderr, "BO list (length %d):\n", batch->exec_count);

   for (int i = 0; i < batch->exec_count; i++) {
      struct iris_bo *bo = batch->exec_bos[i];
      struct iris_bo *backing = iris_get_backing_bo(bo);
      bool written = BITSET_TEST(batch->bos_written, i);

      fprintf(stderr, "[%2d]: %3d (%3d) %-14s @ 0x%016"PRIx64" (%-6s %8"PRIu64"B) %2d refs  %s\n",
              i,
              bo->gem_handle,
              backing->gem_handle,
              bo->name,
              bo->address,
              backing->real.local ? "local" : "system",
              bo->size,
              bo->refcount,
              written ? "(write)" : "");
   }
}

/**
 * Return BO information to the batch decoder (for debugging).
 */
static struct intel_batch_decode_bo
decode_get_bo(void *v_batch, bool ppgtt, uint64_t address)
{
   struct iris_batch *batch = v_batch;

   assert(ppgtt);

   for (int i = 0; i < batch->exec_count; i++) {
      struct iris_bo *bo = batch->exec_bos[i];
      /* The decoder zeroes out the top 16 bits, so we need to as well */
      uint64_t bo_address = bo->address & (~0ull >> 16);

      if (address >= bo_address && address < bo_address + bo->size) {
         return (struct intel_batch_decode_bo) {
            .addr = bo_address,
            .size = bo->size,
            .map = iris_bo_map(batch->dbg, bo, MAP_READ),
         };
      }
   }

   return (struct intel_batch_decode_bo) { };
}

static unsigned
decode_get_state_size(void *v_batch,
                      uint64_t address,
                      UNUSED uint64_t base_address)
{
   struct iris_batch *batch = v_batch;
   unsigned size = (uintptr_t)
      _mesa_hash_table_u64_search(batch->state_sizes, address);

   return size;
}

/**
 * Decode the current batch.
 */
static void
decode_batch(struct iris_batch *batch)
{
   void *map = iris_bo_map(batch->dbg, batch->exec_bos[0], MAP_READ);
   intel_print_batch(&batch->decoder, map, batch->primary_batch_size,
                     batch->exec_bos[0]->address, false);
}

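/**
 * Initialize a batch: allocate its command buffer, create a hardware
 * context, and set up fencing, validation list, and debug state.
 */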
void
iris_init_batch(struct iris_context *ice,
                enum iris_batch_name name,
                int priority)
{
   struct iris_batch *batch = &ice->batches[name];
   struct iris_screen *screen = (void *) ice->ctx.screen;

   batch->screen = screen;
   batch->dbg = &ice->dbg;
   batch->reset = &ice->reset;
   batch->state_sizes = ice->state.sizes;
   batch->name = name;
   batch->ice = ice;
   batch->contains_fence_signal = false;

   batch->fine_fences.uploader =
      u_upload_create(&ice->ctx, 4096, PIPE_BIND_CUSTOM,
                      PIPE_USAGE_STAGING, 0);
   iris_fine_fence_init(batch);

   batch->hw_ctx_id = iris_create_hw_context(screen->bufmgr);
   assert(batch->hw_ctx_id);

   iris_hw_context_set_priority(screen->bufmgr, batch->hw_ctx_id, priority);

   util_dynarray_init(&batch->exec_fences, ralloc_context(NULL));
   util_dynarray_init(&batch->syncobjs, ralloc_context(NULL));

   batch->exec_count = 0;
   batch->max_gem_handle = 0;
   batch->exec_array_size = 128;
   batch->exec_bos =
      malloc(batch->exec_array_size * sizeof(batch->exec_bos[0]));
   batch->bos_written =
      rzalloc_array(NULL, BITSET_WORD, BITSET_WORDS(batch->exec_array_size));

   batch->cache.render = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                                 _mesa_key_pointer_equal);

   memset(batch->other_batches, 0, sizeof(batch->other_batches));

   for (int i = 0, j = 0; i < IRIS_BATCH_COUNT; i++) {
      if (i != name)
         batch->other_batches[j++] = &ice->batches[i];
   }

   if (INTEL_DEBUG(DEBUG_ANY)) {
      const unsigned decode_flags =
         INTEL_BATCH_DECODE_FULL |
         (INTEL_DEBUG(DEBUG_COLOR) ? INTEL_BATCH_DECODE_IN_COLOR : 0) |
         INTEL_BATCH_DECODE_OFFSETS |
         INTEL_BATCH_DECODE_FLOATS;

      intel_batch_decode_ctx_init(&batch->decoder, &screen->devinfo,
                                  stderr, decode_flags, NULL,
                                  decode_get_bo, decode_get_state_size, batch);
      batch->decoder.dynamic_base = IRIS_MEMZONE_DYNAMIC_START;
      batch->decoder.instruction_base = IRIS_MEMZONE_SHADER_START;
      batch->decoder.max_vbo_decoded_lines = 32;
   }

   iris_init_batch_measure(ice, batch);

   iris_batch_reset(batch);
}

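/**
 * Find the index of a BO in the batch's validation list, or -1 if absent.
 *
 * The cached bo->index is checked first; it may be stale when the BO is
 * shared between multiple active batches.
 */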
static int
find_exec_index(struct iris_batch *batch, struct iris_bo *bo)
{
   unsigned index = READ_ONCE(bo->index);

   if (index < batch->exec_count && batch->exec_bos[index] == bo)
      return index;

   /* May have been shared between multiple active batches */
   for (index = 0; index < batch->exec_count; index++) {
      if (batch->exec_bos[index] == bo)
         return index;
   }

   return -1;
}

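/**
 * Grow the exec_bos array (and the matching bos_written bitset) until it
 * has room for at least count more entries.
 */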
static void
ensure_exec_obj_space(struct iris_batch *batch, uint32_t count)
{
   while (batch->exec_count + count > batch->exec_array_size) {
      unsigned old_size = batch->exec_array_size;

      batch->exec_array_size *= 2;
      batch->exec_bos =
         realloc(batch->exec_bos,
                 batch->exec_array_size * sizeof(batch->exec_bos[0]));
      batch->bos_written =
         rerzalloc(NULL, batch->bos_written, BITSET_WORD,
                   BITSET_WORDS(old_size),
                   BITSET_WORDS(batch->exec_array_size));
   }
}

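/**
 * Append a BO to the validation list, taking a reference and updating the
 * aperture estimate and the maximum GEM handle seen so far.
 */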
static void
add_bo_to_batch(struct iris_batch *batch, struct iris_bo *bo, bool writable)
{
   assert(batch->exec_array_size > batch->exec_count);

   iris_bo_reference(bo);

   batch->exec_bos[batch->exec_count] = bo;

   if (writable)
      BITSET_SET(batch->bos_written, batch->exec_count);

   bo->index = batch->exec_count;
   batch->exec_count++;
   batch->aperture_space += bo->size;

   batch->max_gem_handle =
      MAX2(batch->max_gem_handle, iris_get_backing_bo(bo)->gem_handle);
}

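/**
 * Flush any other batches that also reference this BO, when the combination
 * of accesses requires it (see the read/write cases described below).
 */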
static void
flush_for_cross_batch_dependencies(struct iris_batch *batch,
                                   struct iris_bo *bo,
                                   bool writable)
{
   if (batch->measure && bo == batch->measure->bo)
      return;

   /* When a batch uses a buffer for the first time, or newly writes a buffer
    * it had already referenced, we may need to flush other batches in order
    * to correctly synchronize them.
    */
   for (int b = 0; b < ARRAY_SIZE(batch->other_batches); b++) {
      struct iris_batch *other_batch = batch->other_batches[b];
      int other_index = find_exec_index(other_batch, bo);

      /* If the buffer is referenced by another batch, and either batch
       * intends to write it, then flush the other batch and synchronize.
       *
       * Consider these cases:
       *
       * 1. They read, we read   =>  No synchronization required.
       * 2. They read, we write  =>  Synchronize (they need the old value)
       * 3. They write, we read  =>  Synchronize (we need their new value)
       * 4. They write, we write =>  Synchronize (order writes)
       *
       * The read/read case is very common, as multiple batches usually
       * share a streaming state buffer or shader assembly buffer, and
       * we want to avoid synchronizing in this case.
       */
      if (other_index != -1 &&
          (writable || BITSET_TEST(other_batch->bos_written, other_index)))
         iris_batch_flush(other_batch);
   }
}

/**
 * Add a buffer to the current batch's validation list.
 *
 * You must call this on any BO you wish to use in this batch, to ensure
 * that it's resident when the GPU commands execute.
 */
void
iris_use_pinned_bo(struct iris_batch *batch,
                   struct iris_bo *bo,
                   bool writable, enum iris_domain access)
{
   assert(iris_get_backing_bo(bo)->real.kflags & EXEC_OBJECT_PINNED);
   assert(bo != batch->bo);

   /* Never mark the workaround BO with EXEC_OBJECT_WRITE.  We don't care
    * about the order of any writes to that buffer, and marking it writable
    * would introduce data dependencies between multiple batches which share
    * the buffer.  It is added directly to the batch via add_bo_to_batch()
    * when the batch is reset.
    */
   if (bo == batch->screen->workaround_bo)
      return;

   if (access < NUM_IRIS_DOMAINS) {
      assert(batch->sync_region_depth);
      iris_bo_bump_seqno(bo, batch->next_seqno, access);
   }

   int existing_index = find_exec_index(batch, bo);

   if (existing_index == -1) {
      flush_for_cross_batch_dependencies(batch, bo, writable);

      ensure_exec_obj_space(batch, 1);
      add_bo_to_batch(batch, bo, writable);
   } else if (writable && !BITSET_TEST(batch->bos_written, existing_index)) {
      flush_for_cross_batch_dependencies(batch, bo, writable);

      /* The BO is already in the list; mark it writable */
      BITSET_SET(batch->bos_written, existing_index);
   }
}

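/**
 * Allocate a new command buffer BO, map it, and add it to the
 * validation list.
 */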
static void
create_batch(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;

   /* TODO: We probably could suballocate batches... */
   batch->bo = iris_bo_alloc(bufmgr, "command buffer",
                             BATCH_SZ + BATCH_RESERVED, 1,
                             IRIS_MEMZONE_OTHER, BO_ALLOC_NO_SUBALLOC);
   iris_get_backing_bo(batch->bo)->real.kflags |= EXEC_OBJECT_CAPTURE;
   batch->map = iris_bo_map(NULL, batch->bo, MAP_READ | MAP_WRITE);
   batch->map_next = batch->map;

   ensure_exec_obj_space(batch, 1);
   add_bo_to_batch(batch, batch->bo, false);
}

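/**
 * If no-op mode is enabled, emit MI_BATCH_BUFFER_END at the start of the
 * batch so that none of the commands recorded afterwards will execute.
 */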
static void
iris_batch_maybe_noop(struct iris_batch *batch)
{
   /* We only insert the NOOP at the beginning of the batch. */
   assert(iris_batch_bytes_used(batch) == 0);

   if (batch->noop_enabled) {
      /* Emit MI_BATCH_BUFFER_END to prevent any further commands from being
       * executed.
       */
      uint32_t *map = batch->map_next;

      map[0] = (0xA << 23);

      batch->map_next += 4;
   }
}

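/**
 * Reset the batch to a fresh state: allocate a new command buffer, clear
 * the written-BO tracking, create a new signalling syncobj, and re-add
 * the workaround BO.
 */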
static void
iris_batch_reset(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;

   iris_bo_unreference(batch->bo);
   batch->primary_batch_size = 0;
   batch->total_chained_batch_size = 0;
   batch->contains_draw = false;
   batch->contains_fence_signal = false;
   batch->decoder.surface_base = batch->last_surface_base_address;

   create_batch(batch);
   assert(batch->bo->index == 0);

   memset(batch->bos_written, 0,
          sizeof(BITSET_WORD) * BITSET_WORDS(batch->exec_array_size));

   struct iris_syncobj *syncobj = iris_create_syncobj(bufmgr);
   iris_batch_add_syncobj(batch, syncobj, I915_EXEC_FENCE_SIGNAL);
   iris_syncobj_reference(bufmgr, &syncobj, NULL);

   assert(!batch->sync_region_depth);
   iris_batch_sync_boundary(batch);
   iris_batch_mark_reset_sync(batch);

   /* Always add the workaround BO; it contains a driver identifier at the
    * beginning, which is quite helpful for debugging error states.
    */
   add_bo_to_batch(batch, screen->workaround_bo, false);

   iris_batch_maybe_noop(batch);
}

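/**
 * Tear down a batch: drop all BO references and destroy the fences,
 * syncobjs, hardware context, and debug/measurement state.
 */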
void
iris_batch_free(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;

   for (int i = 0; i < batch->exec_count; i++) {
      iris_bo_unreference(batch->exec_bos[i]);
   }
   free(batch->exec_bos);
   ralloc_free(batch->bos_written);

   ralloc_free(batch->exec_fences.mem_ctx);

   pipe_resource_reference(&batch->fine_fences.ref.res, NULL);

   util_dynarray_foreach(&batch->syncobjs, struct iris_syncobj *, s)
      iris_syncobj_reference(bufmgr, s, NULL);
   ralloc_free(batch->syncobjs.mem_ctx);

   iris_fine_fence_reference(batch->screen, &batch->last_fence, NULL);
   u_upload_destroy(batch->fine_fences.uploader);

   iris_bo_unreference(batch->bo);
   batch->bo = NULL;
   batch->map = NULL;
   batch->map_next = NULL;

   iris_destroy_hw_context(bufmgr, batch->hw_ctx_id);

   iris_destroy_batch_measure(batch->measure);
   batch->measure = NULL;

   _mesa_hash_table_destroy(batch->cache.render, NULL);

   if (INTEL_DEBUG(DEBUG_ANY))
      intel_batch_decode_ctx_finish(&batch->decoder);
}

/**
 * If we've chained to a secondary batch, or are getting near to the end,
 * then flush.  This should only be called between draws.
 */
void
iris_batch_maybe_flush(struct iris_batch *batch, unsigned estimate)
{
   if (batch->bo != batch->exec_bos[0] ||
       iris_batch_bytes_used(batch) + estimate >= BATCH_SZ) {
      iris_batch_flush(batch);
   }
}

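/**
 * Record the size of the current command buffer, updating both the
 * primary batch size and the total chained batch size.
 */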
static void
record_batch_sizes(struct iris_batch *batch)
{
   unsigned batch_size = iris_batch_bytes_used(batch);

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->map, batch_size));

   if (batch->bo == batch->exec_bos[0])
      batch->primary_batch_size = batch_size;

   batch->total_chained_batch_size += batch_size;
}

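/**
 * Start a new command buffer and chain to it from the current one by
 * emitting MI_BATCH_BUFFER_START at the point where we left off.
 */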
void
iris_chain_to_new_batch(struct iris_batch *batch)
{
   uint32_t *cmd = batch->map_next;
   uint64_t *addr = batch->map_next + 4;
   batch->map_next += 12;

   record_batch_sizes(batch);

   /* No longer held by batch->bo, still held by validation list */
   iris_bo_unreference(batch->bo);
   create_batch(batch);

   /* Emit MI_BATCH_BUFFER_START to chain to another batch. */
   *cmd = (0x31 << 23) | (1 << 8) | (3 - 2);
   *addr = batch->bo->address;
}

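/**
 * Add the BOs backing the aux-map tables to the validation list, so they
 * are resident whenever the batch executes.
 */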
static void
add_aux_map_bos_to_batch(struct iris_batch *batch)
{
   void *aux_map_ctx = iris_bufmgr_get_aux_map_context(batch->screen->bufmgr);
   if (!aux_map_ctx)
      return;

   uint32_t count = intel_aux_map_get_num_buffers(aux_map_ctx);
   ensure_exec_obj_space(batch, count);
   intel_aux_map_fill_bos(aux_map_ctx,
                          (void**)&batch->exec_bos[batch->exec_count], count);
   for (uint32_t i = 0; i < count; i++) {
      struct iris_bo *bo = batch->exec_bos[batch->exec_count];
      add_bo_to_batch(batch, bo, false);
   }
}

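/**
 * Emit the end-of-batch fine-grained fence and remember it as the batch's
 * last fence.
 */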
static void
finish_seqno(struct iris_batch *batch)
{
   struct iris_fine_fence *sq = iris_fine_fence_new(batch, IRIS_FENCE_END);
   if (!sq)
      return;

   iris_fine_fence_reference(batch->screen, &batch->last_fence, sq);
   iris_fine_fence_reference(batch->screen, &sq, NULL);
}

/**
 * Terminate a batch with MI_BATCH_BUFFER_END.
 */
static void
iris_finish_batch(struct iris_batch *batch)
{
   const struct intel_device_info *devinfo = &batch->screen->devinfo;

   if (devinfo->ver == 12 && batch->name == IRIS_BATCH_RENDER) {
      /* We re-emit constants at the beginning of every batch as a hardware
       * bug workaround, so invalidate indirect state pointers in order to
       * save ourselves the overhead of restoring constants redundantly when
       * the next render batch is executed.
       */
      iris_emit_pipe_control_flush(batch, "ISP invalidate at batch end",
                                   PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE |
                                   PIPE_CONTROL_STALL_AT_SCOREBOARD |
                                   PIPE_CONTROL_CS_STALL);
   }

   add_aux_map_bos_to_batch(batch);

   finish_seqno(batch);

   /* Emit MI_BATCH_BUFFER_END to finish our batch. */
   uint32_t *map = batch->map_next;

   map[0] = (0xA << 23);

   batch->map_next += 4;

   record_batch_sizes(batch);
}

/**
 * Replace our current GEM context with a new one (in case it got banned).
 */
static bool
replace_hw_ctx(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;

   uint32_t new_ctx = iris_clone_hw_context(bufmgr, batch->hw_ctx_id);
   if (!new_ctx)
      return false;

   iris_destroy_hw_context(bufmgr, batch->hw_ctx_id);
   batch->hw_ctx_id = new_ctx;

   /* Notify the context that state must be re-initialized. */
   iris_lost_context_state(batch);

   return true;
}

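/**
 * Query the kernel for GPU reset statistics on this context, and replace
 * the context with a fresh one if a reset was observed.
 */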
enum pipe_reset_status
iris_batch_check_for_reset(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;
   enum pipe_reset_status status = PIPE_NO_RESET;
   struct drm_i915_reset_stats stats = { .ctx_id = batch->hw_ctx_id };

   if (intel_ioctl(screen->fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats))
      DBG("DRM_IOCTL_I915_GET_RESET_STATS failed: %s\n", strerror(errno));

   if (stats.batch_active != 0) {
      /* A reset was observed while a batch from this hardware context was
       * executing.  Assume that this context was at fault.
       */
      status = PIPE_GUILTY_CONTEXT_RESET;
   } else if (stats.batch_pending != 0) {
      /* A reset was observed while a batch from this context was in progress,
       * but the batch was not executing.  In this case, assume that the
       * context was not at fault.
       */
      status = PIPE_INNOCENT_CONTEXT_RESET;
   }

   if (status != PIPE_NO_RESET) {
      /* Our context is likely banned, or at least in an unknown state.
       * Throw it away and start with a fresh context.  Ideally this may
       * catch the problem before our next execbuf fails with -EIO.
       */
      replace_hw_ctx(batch);
   }

   return status;
}

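/**
 * Transfer a syncobj reference into the batch: add it to the batch's list
 * with the given flags (unless it is already there) and clear the source
 * pointer.
 */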
static void
move_syncobj_to_batch(struct iris_batch *batch,
                      struct iris_syncobj **p_syncobj,
                      unsigned flags)
{
   struct iris_bufmgr *bufmgr = batch->screen->bufmgr;

   if (!*p_syncobj)
      return;

   bool found = false;
   util_dynarray_foreach(&batch->syncobjs, struct iris_syncobj *, s) {
      if (*p_syncobj == *s) {
         found = true;
         break;
      }
   }

   if (!found)
      iris_batch_add_syncobj(batch, *p_syncobj, flags);

   iris_syncobj_reference(bufmgr, p_syncobj, NULL);
}

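/**
 * Update a BO's per-screen dependency syncobjs for this batch: wait on any
 * outstanding writers (and, for a write, on readers too), and record this
 * batch's signal syncobj as the new reader or writer.
 */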
static void
update_bo_syncobjs(struct iris_batch *batch, struct iris_bo *bo, bool write)
{
   struct iris_screen *screen = batch->screen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;

   /* Make sure bo->deps is big enough */
   if (screen->id >= bo->deps_size) {
      int new_size = screen->id + 1;
      bo->deps = realloc(bo->deps, new_size * sizeof(bo->deps[0]));
      memset(&bo->deps[bo->deps_size], 0,
             sizeof(bo->deps[0]) * (new_size - bo->deps_size));

      bo->deps_size = new_size;
   }

   /* When it comes to execbuf submission of non-shared buffers, we only need
    * to care about the reads and writes done by the other batches of our own
    * screen, and we also don't care about the reads and writes done by our
    * own batch, although we need to track them. Just note that other places of
    * our code may need to care about all the operations done by every batch
    * on every screen.
    */
   struct iris_bo_screen_deps *deps = &bo->deps[screen->id];
   int batch_idx = batch->name;

#if IRIS_BATCH_COUNT == 2
   /* Due to the above, we exploit the fact that IRIS_NUM_BATCHES is actually
    * 2, which means there's only one other batch we need to care about.
    */
   int other_batch_idx = 1 - batch_idx;
#else
   /* For IRIS_BATCH_COUNT == 3 we can do:
    *   int other_batch_idxs[IRIS_BATCH_COUNT - 1] = {
    *      (batch_idx ^ 1) & 1,
    *      (batch_idx ^ 2) & 2,
    *   };
    * For IRIS_BATCH_COUNT == 4 we can do:
    *   int other_batch_idxs[IRIS_BATCH_COUNT - 1] = {
    *      (batch_idx + 1) & 3,
    *      (batch_idx + 2) & 3,
    *      (batch_idx + 3) & 3,
    *   };
    */
#error "Implement me."
#endif

   /* If it is being written to by others, wait on it. */
   if (deps->write_syncobjs[other_batch_idx])
      move_syncobj_to_batch(batch, &deps->write_syncobjs[other_batch_idx],
                            I915_EXEC_FENCE_WAIT);

   /* If it's being written by our screen, wait on it too. This is relevant
    * when there are multiple contexts on the same screen. */
   if (deps->write_syncobjs[batch_idx])
      move_syncobj_to_batch(batch, &deps->write_syncobjs[batch_idx],
                            I915_EXEC_FENCE_WAIT);

   struct iris_syncobj *batch_syncobj = iris_batch_get_signal_syncobj(batch);

   if (write) {
      /* If we're writing to it, set our batch's syncobj as write_syncobj so
       * others can wait on us. Also wait on every reader we care about
       * before writing.
       */
      iris_syncobj_reference(bufmgr, &deps->write_syncobjs[batch_idx],
                             batch_syncobj);

      move_syncobj_to_batch(batch, &deps->read_syncobjs[other_batch_idx],
                            I915_EXEC_FENCE_WAIT);
      move_syncobj_to_batch(batch, &deps->read_syncobjs[batch_idx],
                            I915_EXEC_FENCE_WAIT);

   } else {
      /* If we're reading, replace the other read from our batch index. */
      iris_syncobj_reference(bufmgr, &deps->read_syncobjs[batch_idx],
                             batch_syncobj);
   }
}

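/**
 * Walk the validation list and update the dependency syncobjs for every BO
 * (except the workaround BO), under the bufmgr's bo_deps lock.
 */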
static void
update_batch_syncobjs(struct iris_batch *batch)
{
   struct iris_bufmgr *bufmgr = batch->screen->bufmgr;
   simple_mtx_t *bo_deps_lock = iris_bufmgr_get_bo_deps_lock(bufmgr);

   simple_mtx_lock(bo_deps_lock);

   for (int i = 0; i < batch->exec_count; i++) {
      struct iris_bo *bo = batch->exec_bos[i];
      bool write = BITSET_TEST(batch->bos_written, i);

      if (bo == batch->screen->workaround_bo)
         continue;

      update_bo_syncobjs(batch, bo, write);
   }
   simple_mtx_unlock(bo_deps_lock);
}

/**
 * Submit the batch to the GPU via execbuffer2.
 */
static int
submit_batch(struct iris_batch *batch)
{
   iris_bo_unmap(batch->bo);

   struct drm_i915_gem_exec_object2 *validation_list =
      malloc(batch->exec_count * sizeof(*validation_list));

   unsigned *index_for_handle =
      calloc(batch->max_gem_handle + 1, sizeof(unsigned));

   unsigned validation_count = 0;
   for (int i = 0; i < batch->exec_count; i++) {
      struct iris_bo *bo = iris_get_backing_bo(batch->exec_bos[i]);
      assert(bo->gem_handle != 0);

      bool written = BITSET_TEST(batch->bos_written, i);
      unsigned prev_index = index_for_handle[bo->gem_handle];
      if (prev_index > 0) {
         if (written)
            validation_list[prev_index].flags |= EXEC_OBJECT_WRITE;
      } else {
         index_for_handle[bo->gem_handle] = validation_count;
         validation_list[validation_count] =
            (struct drm_i915_gem_exec_object2) {
               .handle = bo->gem_handle,
               .offset = bo->address,
               .flags  = bo->real.kflags | (written ? EXEC_OBJECT_WRITE : 0) |
                         (iris_bo_is_external(bo) ? 0 : EXEC_OBJECT_ASYNC),
            };
         ++validation_count;
      }
   }

   free(index_for_handle);

   if (INTEL_DEBUG(DEBUG_BATCH | DEBUG_SUBMIT)) {
      dump_fence_list(batch);
      dump_bo_list(batch);
   }

   if (INTEL_DEBUG(DEBUG_BATCH)) {
      decode_batch(batch);
   }

   /* The requirements for using I915_EXEC_NO_RELOC are:
    *
    *   The addresses written in the objects must match the corresponding
    *   reloc.address which in turn must match the corresponding
    *   execobject.offset.
    *
    *   Any render targets written to in the batch must be flagged with
    *   EXEC_OBJECT_WRITE.
    *
    *   To avoid stalling, execobject.offset should match the current
    *   address of that object within the active context.
    */
   struct drm_i915_gem_execbuffer2 execbuf = {
      .buffers_ptr = (uintptr_t) validation_list,
      .buffer_count = validation_count,
      .batch_start_offset = 0,
      /* This must be QWord aligned. */
      .batch_len = ALIGN(batch->primary_batch_size, 8),
      .flags = I915_EXEC_RENDER |
               I915_EXEC_NO_RELOC |
               I915_EXEC_BATCH_FIRST |
               I915_EXEC_HANDLE_LUT,
      .rsvd1 = batch->hw_ctx_id, /* rsvd1 is actually the context ID */
   };

   if (num_fences(batch)) {
      execbuf.flags |= I915_EXEC_FENCE_ARRAY;
      execbuf.num_cliprects = num_fences(batch);
      execbuf.cliprects_ptr =
         (uintptr_t)util_dynarray_begin(&batch->exec_fences);
   }

   int ret = 0;
   if (!batch->screen->devinfo.no_hw &&
       intel_ioctl(batch->screen->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf))
      ret = -errno;

   for (int i = 0; i < batch->exec_count; i++) {
      struct iris_bo *bo = batch->exec_bos[i];

      bo->idle = false;
      bo->index = -1;

      iris_get_backing_bo(bo)->idle = false;

      iris_bo_unreference(bo);
   }

   free(validation_list);

   return ret;
}

static const char *
batch_name_to_string(enum iris_batch_name name)
{
   const char *names[IRIS_BATCH_COUNT] = {
      [IRIS_BATCH_RENDER]  = "render",
      [IRIS_BATCH_COMPUTE] = "compute",
   };
   return names[name];
}

/**
 * Flush the batch buffer, submitting it to the GPU and resetting it so
 * we're ready to emit the next batch.
 */
void
_iris_batch_flush(struct iris_batch *batch, const char *file, int line)
{
   struct iris_screen *screen = batch->screen;

   /* Even an empty batch must be submitted if it signals a fence. */
   if (iris_batch_bytes_used(batch) == 0 && !batch->contains_fence_signal)
      return;

   iris_measure_batch_end(batch->ice, batch);

   iris_finish_batch(batch);

   update_batch_syncobjs(batch);

   if (INTEL_DEBUG(DEBUG_BATCH | DEBUG_SUBMIT | DEBUG_PIPE_CONTROL)) {
      const char *basefile = strstr(file, "iris/");
      if (basefile)
         file = basefile + 5;

      fprintf(stderr, "%19s:%-3d: %s batch [%u] flush with %5db (%0.1f%%) "
              "(cmds), %4d BOs (%0.1fMb aperture)\n",
              file, line, batch_name_to_string(batch->name), batch->hw_ctx_id,
              batch->total_chained_batch_size,
              100.0f * batch->total_chained_batch_size / BATCH_SZ,
              batch->exec_count,
              (float) batch->aperture_space / (1024 * 1024));

   }

   int ret = submit_batch(batch);

   /* When batch submission fails, our end-of-batch syncobj remains
    * unsignalled, and in fact is not even considered submitted.
    *
    * In the hang recovery case (-EIO) or -ENOMEM, we recreate our context and
    * attempt to carry on.  In that case, we need to signal our syncobj,
    * dubiously claiming that this batch completed, because future batches may
    * depend on it.  If we don't, then execbuf would fail with -EINVAL for
    * those batches, because they depend on a syncobj that's considered to be
    * "never submitted".  This would lead to an abort().  So here, we signal
    * the failing batch's syncobj to try and allow further progress to be
    * made, knowing we may have broken our dependency tracking.
    */
   if (ret < 0)
      iris_syncobj_signal(screen->bufmgr, iris_batch_get_signal_syncobj(batch));

   batch->exec_count = 0;
   batch->max_gem_handle = 0;
   batch->aperture_space = 0;

   util_dynarray_foreach(&batch->syncobjs, struct iris_syncobj *, s)
      iris_syncobj_reference(screen->bufmgr, s, NULL);
   util_dynarray_clear(&batch->syncobjs);

   util_dynarray_clear(&batch->exec_fences);

   if (INTEL_DEBUG(DEBUG_SYNC)) {
      dbg_printf("waiting for idle\n");
      iris_bo_wait_rendering(batch->bo); /* if execbuf failed, this is a nop */
   }

   /* Start a new batch buffer. */
   iris_batch_reset(batch);

   /* EIO means our context is banned.  In this case, try and replace it
    * with a new logical context, and inform iris_context that all state
    * has been lost and needs to be re-initialized.  If this succeeds,
    * dubiously claim success...
    * Also handle ENOMEM here.
    */
   if ((ret == -EIO || ret == -ENOMEM) && replace_hw_ctx(batch)) {
      if (batch->reset->reset) {
         /* Tell gallium frontends the device is lost and it was our fault. */
         batch->reset->reset(batch->reset->data, PIPE_GUILTY_CONTEXT_RESET);
      }

      ret = 0;
   }

   if (ret < 0) {
#ifdef DEBUG
      const bool color = INTEL_DEBUG(DEBUG_COLOR);
      fprintf(stderr, "%siris: Failed to submit batchbuffer: %-80s%s\n",
              color ? "\e[1;41m" : "", strerror(-ret), color ? "\e[0m" : "");
#endif
      abort();
   }
}

/**
 * Does the current batch refer to the given BO?
 *
 * (In other words, is the BO in the current batch's validation list?)
 */
bool
iris_batch_references(struct iris_batch *batch, struct iris_bo *bo)
{
   return find_exec_index(batch, bo) != -1;
}

/**
 * Updates the state of the noop feature.  Returns true if there was a noop
 * transition that led to state invalidation.
 */
bool
iris_batch_prepare_noop(struct iris_batch *batch, bool noop_enable)
{
   if (batch->noop_enabled == noop_enable)
      return false;

   batch->noop_enabled = noop_enable;

   iris_batch_flush(batch);

   /* If the batch was empty, flush had no effect, so insert our noop. */
   if (iris_batch_bytes_used(batch) == 0)
      iris_batch_maybe_noop(batch);

   /* We only need to update the entire state if we transition from noop ->
    * not-noop.
    */
   return !batch->noop_enabled;
}