/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file iris_fence.c
 *
 * Fences for driver and IPC serialisation, scheduling and synchronisation.
 */

#include "drm-uapi/sync_file.h"
#include "util/u_debug.h"
#include "util/u_inlines.h"
#include "intel/common/intel_gem.h"

#include "iris_batch.h"
#include "iris_bufmgr.h"
#include "iris_context.h"
#include "iris_fence.h"
#include "iris_screen.h"

static uint32_t
gem_syncobj_create(int fd, uint32_t flags)
{
   struct drm_syncobj_create args = {
      .flags = flags,
   };

   intel_ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &args);

   return args.handle;
}

static void
gem_syncobj_destroy(int fd, uint32_t handle)
{
   struct drm_syncobj_destroy args = {
      .handle = handle,
   };

   intel_ioctl(fd, DRM_IOCTL_SYNCOBJ_DESTROY, &args);
}
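
/* An illustrative sketch (not driver code): the two wrappers above boil
 * down to plain ioctl(2) round-trips on the DRM fd, roughly:
 *
 *    struct drm_syncobj_create create = { .flags = 0 };
 *    ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &create);
 *    ...use create.handle...
 *    struct drm_syncobj_destroy destroy = { .handle = create.handle };
 *    ioctl(fd, DRM_IOCTL_SYNCOBJ_DESTROY, &destroy);
 *
 * intel_ioctl() is a thin wrapper around ioctl() that restarts the call
 * on EINTR/EAGAIN.
 */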

/**
 * Make a new sync-point.
 */
struct iris_syncobj *
iris_create_syncobj(struct iris_bufmgr *bufmgr)
{
   int fd = iris_bufmgr_get_fd(bufmgr);
   struct iris_syncobj *syncobj = malloc(sizeof(*syncobj));

   if (!syncobj)
      return NULL;

   syncobj->handle = gem_syncobj_create(fd, 0);
   assert(syncobj->handle);

   pipe_reference_init(&syncobj->ref, 1);

   return syncobj;
}

void
iris_syncobj_destroy(struct iris_bufmgr *bufmgr, struct iris_syncobj *syncobj)
{
   int fd = iris_bufmgr_get_fd(bufmgr);
   gem_syncobj_destroy(fd, syncobj->handle);
   free(syncobj);
}

void
iris_syncobj_signal(struct iris_bufmgr *bufmgr, struct iris_syncobj *syncobj)
{
   int fd = iris_bufmgr_get_fd(bufmgr);
   struct drm_syncobj_array args = {
      .handles = (uintptr_t)&syncobj->handle,
      .count_handles = 1,
   };

   if (intel_ioctl(fd, DRM_IOCTL_SYNCOBJ_SIGNAL, &args)) {
      fprintf(stderr, "failed to signal syncobj %"PRIu32"\n",
              syncobj->handle);
   }
}

/**
 * Add a sync-point to the batch, with the given flags.
 *
 * \p flags   One of I915_EXEC_FENCE_WAIT or I915_EXEC_FENCE_SIGNAL.
 */
void
iris_batch_add_syncobj(struct iris_batch *batch,
                       struct iris_syncobj *syncobj,
                       unsigned flags)
{
   struct drm_i915_gem_exec_fence *fence =
      util_dynarray_grow(&batch->exec_fences, struct drm_i915_gem_exec_fence, 1);

   *fence = (struct drm_i915_gem_exec_fence) {
      .handle = syncobj->handle,
      .flags = flags,
   };

   struct iris_syncobj **store =
      util_dynarray_grow(&batch->syncobjs, struct iris_syncobj *, 1);

   *store = NULL;
   iris_syncobj_reference(batch->screen->bufmgr, store, syncobj);
}
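
/* Illustrative usage (names hypothetical): to make one batch's work wait
 * on another's, the same syncobj is added to both ends, combining the two
 * flag values documented above:
 *
 *    struct iris_syncobj *sync = iris_create_syncobj(bufmgr);
 *    iris_batch_add_syncobj(render_batch, sync, I915_EXEC_FENCE_SIGNAL);
 *    iris_batch_add_syncobj(compute_batch, sync, I915_EXEC_FENCE_WAIT);
 *    iris_syncobj_reference(bufmgr, &sync, NULL);
 *
 * Dropping the local reference at the end is safe because each batch now
 * holds its own reference.
 */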

/**
 * Walk through a batch's dependencies (any I915_EXEC_FENCE_WAIT syncobjs)
 * and unreference any which have already passed.
 *
 * The compute batch, for example, may be used only rarely, and so can
 * accumulate references to stale render batches that are no longer of
 * interest; this lets us free those up.
 */
static void
clear_stale_syncobjs(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;

   int n = util_dynarray_num_elements(&batch->syncobjs, struct iris_syncobj *);

   assert(n == util_dynarray_num_elements(&batch->exec_fences,
                                          struct drm_i915_gem_exec_fence));

   /* Skip the first syncobj, as it's the signalling one. */
   for (int i = n - 1; i > 0; i--) {
      struct iris_syncobj **syncobj =
         util_dynarray_element(&batch->syncobjs, struct iris_syncobj *, i);
      struct drm_i915_gem_exec_fence *fence =
         util_dynarray_element(&batch->exec_fences,
                               struct drm_i915_gem_exec_fence, i);
      assert(fence->flags & I915_EXEC_FENCE_WAIT);

      if (iris_wait_syncobj(bufmgr, *syncobj, 0))
         continue;

      /* This sync object has already passed; there's no need to keep
       * marking it as a dependency, so we can stop holding the reference.
       */
      iris_syncobj_reference(bufmgr, syncobj, NULL);

      /* Remove it from the lists; move the last element here. */
      struct iris_syncobj **nth_syncobj =
         util_dynarray_pop_ptr(&batch->syncobjs, struct iris_syncobj *);
      struct drm_i915_gem_exec_fence *nth_fence =
         util_dynarray_pop_ptr(&batch->exec_fences,
                               struct drm_i915_gem_exec_fence);

      if (syncobj != nth_syncobj) {
         *syncobj = *nth_syncobj;
         memcpy(fence, nth_fence, sizeof(*fence));
      }
   }
}

/* ------------------------------------------------------------------- */

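/**
 * A fence as handed out to the gallium frontend.
 *
 * Wraps at most one fine-grained fence per batch.  For deferred flushes,
 * unflushed_ctx records the creating context until the actual flush
 * happens; it is cleared once the fence is truly flushed.
 */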
struct pipe_fence_handle {
   struct pipe_reference ref;

   struct pipe_context *unflushed_ctx;

   struct iris_fine_fence *fine[IRIS_BATCH_COUNT];
};

static void
iris_fence_destroy(struct pipe_screen *p_screen,
                   struct pipe_fence_handle *fence)
{
   struct iris_screen *screen = (struct iris_screen *)p_screen;

   for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++)
      iris_fine_fence_reference(screen, &fence->fine[i], NULL);

   free(fence);
}

static void
iris_fence_reference(struct pipe_screen *p_screen,
                     struct pipe_fence_handle **dst,
                     struct pipe_fence_handle *src)
{
   if (pipe_reference(*dst ? &(*dst)->ref : NULL,
                      src ? &src->ref : NULL))
      iris_fence_destroy(p_screen, *dst);

   *dst = src;
}

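/**
 * Wait on a single sync object.
 *
 * Note that \p timeout_nsec is an absolute CLOCK_MONOTONIC timestamp, as
 * DRM_IOCTL_SYNCOBJ_WAIT expects, so a timeout of 0 simply polls.
 * Returns true if the wait timed out or failed, false once the syncobj
 * has signaled (or when there is no syncobj to wait on).
 */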
bool
iris_wait_syncobj(struct iris_bufmgr *bufmgr,
                  struct iris_syncobj *syncobj,
                  int64_t timeout_nsec)
{
   if (!syncobj)
      return false;

   int fd = iris_bufmgr_get_fd(bufmgr);

   struct drm_syncobj_wait args = {
      .handles = (uintptr_t)&syncobj->handle,
      .count_handles = 1,
      .timeout_nsec = timeout_nsec,
   };
   return intel_ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &args);
}

#define CSI "\e["
#define BLUE_HEADER  CSI "0;97;44m"
#define NORMAL       CSI "0m"

static void
iris_fence_flush(struct pipe_context *ctx,
                 struct pipe_fence_handle **out_fence,
                 unsigned flags)
{
   struct iris_screen *screen = (void *) ctx->screen;
   struct iris_context *ice = (struct iris_context *)ctx;

   /* We require DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (kernel 5.2+) for
    * deferred flushes.  Just ignore the request to defer on older kernels.
    */
   if (!(screen->kernel_features & KERNEL_HAS_WAIT_FOR_SUBMIT))
      flags &= ~PIPE_FLUSH_DEFERRED;

   const bool deferred = flags & PIPE_FLUSH_DEFERRED;

   if (flags & PIPE_FLUSH_END_OF_FRAME) {
      ice->frame++;

      if (INTEL_DEBUG(DEBUG_SUBMIT)) {
         fprintf(stderr, "%s ::: FRAME %-10u (ctx %p)%-35c%s\n",
                 INTEL_DEBUG(DEBUG_COLOR) ? BLUE_HEADER : "",
                 ice->frame, ctx, ' ',
                 INTEL_DEBUG(DEBUG_COLOR) ? NORMAL : "");
      }
   }

   iris_flush_dirty_dmabufs(ice);

   if (!deferred) {
      for (unsigned i = 0; i < IRIS_BATCH_COUNT; i++)
         iris_batch_flush(&ice->batches[i]);
   }

   if (flags & PIPE_FLUSH_END_OF_FRAME) {
      iris_measure_frame_end(ice);
   }

   if (!out_fence)
      return;

   struct pipe_fence_handle *fence = calloc(1, sizeof(*fence));
   if (!fence)
      return;

   pipe_reference_init(&fence->ref, 1);

   if (deferred)
      fence->unflushed_ctx = ctx;

   for (unsigned b = 0; b < IRIS_BATCH_COUNT; b++) {
      struct iris_batch *batch = &ice->batches[b];

      if (deferred && iris_batch_bytes_used(batch) > 0) {
         struct iris_fine_fence *fine =
            iris_fine_fence_new(batch, IRIS_FENCE_BOTTOM_OF_PIPE);
         iris_fine_fence_reference(screen, &fence->fine[b], fine);
         iris_fine_fence_reference(screen, &fine, NULL);
      } else {
         /* This batch has no commands queued up (perhaps we just flushed,
          * or all the commands are on the other batch).  Wait for the last
          * syncobj on this engine - unless it's already finished by now.
          */
         if (iris_fine_fence_signaled(batch->last_fence))
            continue;

         iris_fine_fence_reference(screen, &fence->fine[b], batch->last_fence);
      }
   }

   iris_fence_reference(ctx->screen, out_fence, NULL);
   *out_fence = fence;
}
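
/* Illustrative frontend-side usage (not driver code): a caller that wants
 * a fence handle without forcing a submission yet would pass
 * PIPE_FLUSH_DEFERRED:
 *
 *    struct pipe_fence_handle *fence = NULL;
 *    ctx->flush(ctx, &fence, PIPE_FLUSH_DEFERRED);
 *
 * The batches are then flushed later - either explicitly, or on demand
 * when the fence is waited on via fence_finish().
 */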

static void
iris_fence_await(struct pipe_context *ctx,
                 struct pipe_fence_handle *fence)
{
   struct iris_context *ice = (struct iris_context *)ctx;

   /* Unflushed fences from the same context are no-ops. */
   if (ctx && ctx == fence->unflushed_ctx)
      return;

   /* XXX: We can't safely flush the other context, because it might be
    *      bound to another thread, and poking at its internals wouldn't
    *      be safe.  In the future we should use MI_SEMAPHORE_WAIT and
    *      block until the other job has been submitted, relying on
    *      kernel timeslicing to preempt us until the other job is
    *      actually flushed and the seqno finally passes.
    */
   if (fence->unflushed_ctx) {
      pipe_debug_message(&ice->dbg, CONFORMANCE, "%s",
                         "glWaitSync on unflushed fence from another context "
                         "is unlikely to work without kernel 5.8+\n");
   }

   for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
      struct iris_fine_fence *fine = fence->fine[i];

      if (iris_fine_fence_signaled(fine))
         continue;

      for (unsigned b = 0; b < IRIS_BATCH_COUNT; b++) {
         struct iris_batch *batch = &ice->batches[b];

         /* We're going to make any future work in this batch wait for our
          * fence to have gone by.  But any currently queued work doesn't
          * need to wait.  Flush the batch now, so it can happen sooner.
          */
         iris_batch_flush(batch);

         /* Before adding a new reference, clean out any stale ones. */
         clear_stale_syncobjs(batch);

         iris_batch_add_syncobj(batch, fine->syncobj, I915_EXEC_FENCE_WAIT);
      }
   }
}

#define NSEC_PER_SEC (1000 * USEC_PER_SEC)
#define USEC_PER_SEC (1000 * MSEC_PER_SEC)
#define MSEC_PER_SEC (1000)

static uint64_t
gettime_ns(void)
{
   struct timespec current;
   clock_gettime(CLOCK_MONOTONIC, &current);
   return (uint64_t)current.tv_sec * NSEC_PER_SEC + current.tv_nsec;
}

static uint64_t
rel2abs(uint64_t timeout)
{
   if (timeout == 0)
      return 0;

   uint64_t current_time = gettime_ns();
   uint64_t max_timeout = (uint64_t) INT64_MAX - current_time;

   timeout = MIN2(max_timeout, timeout);

   return current_time + timeout;
}
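
/* For example, with the clock at 100s, a relative timeout of 2s becomes
 * an absolute 102 * NSEC_PER_SEC.  The MIN2() clamp keeps the sum from
 * exceeding INT64_MAX - which the kernel treats as an effectively
 * unbounded wait - and prevents unsigned overflow for huge relative
 * timeouts such as PIPE_TIMEOUT_INFINITE.
 */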

static bool
iris_fence_finish(struct pipe_screen *p_screen,
                  struct pipe_context *ctx,
                  struct pipe_fence_handle *fence,
                  uint64_t timeout)
{
   ctx = threaded_context_unwrap_sync(ctx);

   struct iris_context *ice = (struct iris_context *)ctx;
   struct iris_screen *screen = (struct iris_screen *)p_screen;

   /* If we created the fence with PIPE_FLUSH_DEFERRED, we may not have
    * flushed yet.  Check if our syncobj is the current batch's signalling
    * syncobj - if so, we haven't flushed and need to now.
    *
    * The Gallium docs mention that a flush will occur if \p ctx matches
    * the context the fence was created with.  It may be NULL, so we check
    * that it matches first.
    */
   if (ctx && ctx == fence->unflushed_ctx) {
      for (unsigned i = 0; i < IRIS_BATCH_COUNT; i++) {
         struct iris_fine_fence *fine = fence->fine[i];

         if (iris_fine_fence_signaled(fine))
            continue;

         if (fine->syncobj == iris_batch_get_signal_syncobj(&ice->batches[i]))
            iris_batch_flush(&ice->batches[i]);
      }

      /* The fence is no longer deferred. */
      fence->unflushed_ctx = NULL;
   }

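   /* Collect the handles of any fine fences that haven't signaled yet;
    * batches whose work has already completed need no kernel wait at all.
    */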
   unsigned int handle_count = 0;
   uint32_t handles[ARRAY_SIZE(fence->fine)];
   for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
      struct iris_fine_fence *fine = fence->fine[i];

      if (iris_fine_fence_signaled(fine))
         continue;

      handles[handle_count++] = fine->syncobj->handle;
   }

   if (handle_count == 0)
      return true;

   struct drm_syncobj_wait args = {
      .handles = (uintptr_t)handles,
      .count_handles = handle_count,
      .timeout_nsec = rel2abs(timeout),
      .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL
   };

   if (fence->unflushed_ctx) {
      /* This fence had a deferred flush from another context.  We can't
       * safely flush it here, because the context might be bound to a
       * different thread, and poking at its internals wouldn't be safe.
       *
       * Instead, use the WAIT_FOR_SUBMIT flag to block and hope that
       * another thread submits the work.
       */
      args.flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;
   }

   return intel_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args) == 0;
}

static int
sync_merge_fd(int sync_fd, int new_fd)
{
   if (sync_fd == -1)
      return new_fd;

   if (new_fd == -1)
      return sync_fd;

   struct sync_merge_data args = {
      .name = "iris fence",
      .fd2 = new_fd,
      .fence = -1,
   };

   intel_ioctl(sync_fd, SYNC_IOC_MERGE, &args);
   close(new_fd);
   close(sync_fd);

   return args.fence;
}
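
/* Illustrative note: SYNC_IOC_MERGE yields a new sync_file that signals
 * only once both inputs have signalled, so folding each exported syncobj
 * fd into an accumulator - as iris_fence_get_fd() does below - produces a
 * single fd covering every batch (fd_a/fd_b hypothetical):
 *
 *    int fd = -1;
 *    fd = sync_merge_fd(fd, fd_a);   // first fd just passes through
 *    fd = sync_merge_fd(fd, fd_b);   // now a merged sync_file
 */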

static int
iris_fence_get_fd(struct pipe_screen *p_screen,
                  struct pipe_fence_handle *fence)
{
   struct iris_screen *screen = (struct iris_screen *)p_screen;
   int fd = -1;

   /* Deferred fences aren't supported. */
   if (fence->unflushed_ctx)
      return -1;

   for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
      struct iris_fine_fence *fine = fence->fine[i];

      if (iris_fine_fence_signaled(fine))
         continue;

      struct drm_syncobj_handle args = {
         .handle = fine->syncobj->handle,
         .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
         .fd = -1,
      };

      intel_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
      fd = sync_merge_fd(fd, args.fd);
   }

   if (fd == -1) {
      /* Our fence has no syncobjs recorded.  This means that all of the
       * batches had already completed, their syncobjs had been signalled,
       * and so we didn't bother to record them.  But we're being asked to
       * export such a fence.  So export a dummy already-signalled syncobj.
       */
      struct drm_syncobj_handle args = {
         .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE, .fd = -1,
      };

      args.handle = gem_syncobj_create(screen->fd, DRM_SYNCOBJ_CREATE_SIGNALED);
      intel_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
      gem_syncobj_destroy(screen->fd, args.handle);
      return args.fd;
   }

   return fd;
}

static void
iris_fence_create_fd(struct pipe_context *ctx,
                     struct pipe_fence_handle **out,
                     int fd,
                     enum pipe_fd_type type)
{
   assert(type == PIPE_FD_TYPE_NATIVE_SYNC || type == PIPE_FD_TYPE_SYNCOBJ);

   struct iris_screen *screen = (struct iris_screen *)ctx->screen;
   struct drm_syncobj_handle args = {
      .fd = fd,
   };

   if (type == PIPE_FD_TYPE_NATIVE_SYNC) {
      args.flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE;
      args.handle = gem_syncobj_create(screen->fd, DRM_SYNCOBJ_CREATE_SIGNALED);
   }

   if (intel_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args) == -1) {
      fprintf(stderr, "DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE failed: %s\n",
              strerror(errno));
      if (type == PIPE_FD_TYPE_NATIVE_SYNC)
         gem_syncobj_destroy(screen->fd, args.handle);
      *out = NULL;
      return;
   }

   struct iris_syncobj *syncobj = malloc(sizeof(*syncobj));
   if (!syncobj) {
      *out = NULL;
      return;
   }
   syncobj->handle = args.handle;
   pipe_reference_init(&syncobj->ref, 1);

   struct iris_fine_fence *fine = calloc(1, sizeof(*fine));
   if (!fine) {
      free(syncobj);
      *out = NULL;
      return;
   }

   static const uint32_t zero = 0;

   /* Fences work in terms of iris_fine_fence, but we don't actually have a
    * seqno for an imported fence.  So, create a fake one which always
    * returns as 'not signaled' so we fall back to using the sync object.
    */
   fine->seqno = UINT32_MAX;
   fine->map = &zero;
   fine->syncobj = syncobj;
   fine->flags = IRIS_FENCE_END;
   pipe_reference_init(&fine->reference, 1);

   struct pipe_fence_handle *fence = calloc(1, sizeof(*fence));
   if (!fence) {
      free(fine);
      free(syncobj);
      *out = NULL;
      return;
   }
   pipe_reference_init(&fence->ref, 1);
   fence->fine[0] = fine;

   *out = fence;
}
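
/* Illustrative frontend-side usage (names hypothetical): importing a
 * sync_file fd received from elsewhere - say, a compositor - so that
 * subsequent GPU work can be made to wait on it:
 *
 *    struct pipe_fence_handle *fence = NULL;
 *    ctx->create_fence_fd(ctx, &fence, imported_fd, PIPE_FD_TYPE_NATIVE_SYNC);
 *    ctx->fence_server_sync(ctx, fence);
 */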

static void
iris_fence_signal(struct pipe_context *ctx,
                  struct pipe_fence_handle *fence)
{
   struct iris_context *ice = (struct iris_context *)ctx;

   if (ctx == fence->unflushed_ctx)
      return;

   for (unsigned b = 0; b < IRIS_BATCH_COUNT; b++) {
      for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
         struct iris_fine_fence *fine = fence->fine[i];

         /* Skip fences that have already signaled. */
         if (iris_fine_fence_signaled(fine))
            continue;

         ice->batches[b].contains_fence_signal = true;
         iris_batch_add_syncobj(&ice->batches[b], fine->syncobj,
                                I915_EXEC_FENCE_SIGNAL);
      }
   }
}

void
iris_init_screen_fence_functions(struct pipe_screen *screen)
{
   screen->fence_reference = iris_fence_reference;
   screen->fence_finish = iris_fence_finish;
   screen->fence_get_fd = iris_fence_get_fd;
}

void
iris_init_context_fence_functions(struct pipe_context *ctx)
{
   ctx->flush = iris_fence_flush;
   ctx->create_fence_fd = iris_fence_create_fd;
   ctx->fence_server_sync = iris_fence_await;
   ctx->fence_server_signal = iris_fence_signal;
}