/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/**
 * This file implements VkQueue, VkFence, and VkSemaphore
 */

#include <fcntl.h>
#include <unistd.h>

#include "anv_private.h"
#include "vk_util.h"

#include "genxml/gen7_pack.h"

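/* Submit an execbuffer2 to the kernel (skipped entirely on no_hw devices)
 * and copy the kernel-assigned GPU offsets back into our anv_bo structs.
 * execbuf_bos must parallel the objects in execbuf->buffers_ptr entry for
 * entry.  Any ioctl failure marks the device lost since we cannot know the
 * real cause.
 */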
VkResult
anv_device_execbuf(struct anv_device *device,
                   struct drm_i915_gem_execbuffer2 *execbuf,
                   struct anv_bo **execbuf_bos)
{
   int ret = device->no_hw ? 0 : anv_gem_execbuffer(device, execbuf);
   if (ret != 0) {
      /* We don't know the real error. */
      return anv_device_set_lost(device, "execbuf2 failed: %m");
   }

   struct drm_i915_gem_exec_object2 *objects =
      (void *)(uintptr_t)execbuf->buffers_ptr;
   for (uint32_t k = 0; k < execbuf->buffer_count; k++) {
      if (execbuf_bos[k]->flags & EXEC_OBJECT_PINNED)
         assert(execbuf_bos[k]->offset == objects[k].offset);
      execbuf_bos[k]->offset = objects[k].offset;
   }

   return VK_SUCCESS;
}

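/* Copy a finished anv_batch into a freshly allocated BO from the batch BO
 * pool, submit it on the render ring, and block until it completes.
 * Intended for small one-off batches (e.g. device initialization) where
 * building a full command buffer would be overkill.
 */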
VkResult
anv_device_submit_simple_batch(struct anv_device *device,
                               struct anv_batch *batch)
{
   struct drm_i915_gem_execbuffer2 execbuf;
   struct drm_i915_gem_exec_object2 exec2_objects[1];
   struct anv_bo bo, *exec_bos[1];
   VkResult result = VK_SUCCESS;
   uint32_t size;

   /* Kernel driver requires 8 byte aligned batch length */
   size = align_u32(batch->next - batch->start, 8);
   result = anv_bo_pool_alloc(&device->batch_bo_pool, &bo, size);
   if (result != VK_SUCCESS)
      return result;

   memcpy(bo.map, batch->start, size);
   if (!device->info.has_llc)
      gen_flush_range(bo.map, size);

   exec_bos[0] = &bo;
   exec2_objects[0].handle = bo.gem_handle;
   exec2_objects[0].relocation_count = 0;
   exec2_objects[0].relocs_ptr = 0;
   exec2_objects[0].alignment = 0;
   exec2_objects[0].offset = bo.offset;
   exec2_objects[0].flags = bo.flags;
   exec2_objects[0].rsvd1 = 0;
   exec2_objects[0].rsvd2 = 0;

   execbuf.buffers_ptr = (uintptr_t) exec2_objects;
   execbuf.buffer_count = 1;
   execbuf.batch_start_offset = 0;
   execbuf.batch_len = size;
   execbuf.cliprects_ptr = 0;
   execbuf.num_cliprects = 0;
   execbuf.DR1 = 0;
   execbuf.DR4 = 0;

   execbuf.flags =
      I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
   execbuf.rsvd1 = device->context_id;
   execbuf.rsvd2 = 0;

   if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
      gen_print_batch(&device->decoder_ctx, bo.map, bo.size, bo.offset, false);

   result = anv_device_execbuf(device, &execbuf, exec_bos);
   if (result != VK_SUCCESS)
      goto fail;

   result = anv_device_wait(device, &bo, INT64_MAX);

 fail:
   anv_bo_pool_free(&device->batch_bo_pool, &bo);

   return result;
}

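/* vkQueueSubmit: each VkSubmitInfo may carry several command buffers which
 * we submit as one execbuf apiece.  Wait semaphores are attached only to
 * the first execbuf of a submit and signal semaphores only to the last;
 * the fence, if any, is attached to the very last execbuf of the call.
 */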
VkResult anv_QueueSubmit(
    VkQueue                                     _queue,
    uint32_t                                    submitCount,
    const VkSubmitInfo*                         pSubmits,
    VkFence                                     fence)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);
   struct anv_device *device = queue->device;

   /* Query for device status prior to submitting.  Technically, we don't need
    * to do this.  However, if we have a client that's submitting piles of
    * garbage, we would rather break as early as possible to keep the GPU
    * hanging contained.  If we don't check here, we'll either be waiting for
    * the kernel to kick us or we'll have to wait until the client waits on a
    * fence before we actually know whether or not we've hung.
    */
   VkResult result = anv_device_query_status(device);
   if (result != VK_SUCCESS)
      return result;

   /* We lock around QueueSubmit for three main reasons:
    *
    *  1) When a block pool is resized, we create a new gem handle with a
    *     different size and, in the case of surface states, possibly a
    *     different center offset but we re-use the same anv_bo struct when
    *     we do so.  If this happens in the middle of setting up an execbuf,
    *     we could end up with our list of BOs out of sync with our list of
    *     gem handles.
    *
    *  2) The algorithm we use for building the list of unique buffers isn't
    *     thread-safe.  While the client is supposed to synchronize around
    *     QueueSubmit, this would be extremely difficult to debug if it ever
    *     came up in the wild due to a broken app.  It's better to play it
    *     safe and just lock around QueueSubmit.
    *
    *  3) The anv_cmd_buffer_execbuf function may perform relocations in
    *     userspace.  Because the surface state buffer is shared between
    *     batches, we can't afford to have that happen from multiple threads
    *     at the same time.  Even though the user is supposed to ensure this
    *     doesn't happen, we play it safe as in (2) above.
    *
    * Since the only other things that ever take the device lock, such as
    * block pool resizes, happen rarely, the lock will almost never be
    * contended and taking it is not an expensive operation in this case.
    */
   pthread_mutex_lock(&device->mutex);

   if (fence && submitCount == 0) {
      /* If we don't have any command buffers, we need to submit a dummy
       * batch to give GEM something to wait on.  We could, potentially,
       * come up with something more efficient but this shouldn't be a
       * common case.
       */
      result = anv_cmd_buffer_execbuf(device, NULL, NULL, 0, NULL, 0, fence);
      goto out;
   }

   for (uint32_t i = 0; i < submitCount; i++) {
      /* Fence for this submit.  NULL for all but the last one */
      VkFence submit_fence = (i == submitCount - 1) ? fence : VK_NULL_HANDLE;

      if (pSubmits[i].commandBufferCount == 0) {
         /* If we don't have any command buffers, we need to submit a dummy
          * batch to give GEM something to wait on.  We could, potentially,
          * come up with something more efficient but this shouldn't be a
          * common case.
          */
         result = anv_cmd_buffer_execbuf(device, NULL,
                                         pSubmits[i].pWaitSemaphores,
                                         pSubmits[i].waitSemaphoreCount,
                                         pSubmits[i].pSignalSemaphores,
                                         pSubmits[i].signalSemaphoreCount,
                                         submit_fence);
         if (result != VK_SUCCESS)
            goto out;

         continue;
      }

      for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
         ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer,
                         pSubmits[i].pCommandBuffers[j]);
         assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
         assert(!anv_batch_has_error(&cmd_buffer->batch));

         /* Fence for this execbuf.  NULL for all but the last one */
         VkFence execbuf_fence =
            (j == pSubmits[i].commandBufferCount - 1) ?
            submit_fence : VK_NULL_HANDLE;

         const VkSemaphore *in_semaphores = NULL, *out_semaphores = NULL;
         uint32_t num_in_semaphores = 0, num_out_semaphores = 0;
         if (j == 0) {
            /* Only the first batch gets the in semaphores */
            in_semaphores = pSubmits[i].pWaitSemaphores;
            num_in_semaphores = pSubmits[i].waitSemaphoreCount;
         }

         if (j == pSubmits[i].commandBufferCount - 1) {
            /* Only the last batch gets the out semaphores */
            out_semaphores = pSubmits[i].pSignalSemaphores;
            num_out_semaphores = pSubmits[i].signalSemaphoreCount;
         }

         result = anv_cmd_buffer_execbuf(device, cmd_buffer,
                                         in_semaphores, num_in_semaphores,
                                         out_semaphores, num_out_semaphores,
                                         execbuf_fence);
         if (result != VK_SUCCESS)
            goto out;
      }
   }

   pthread_cond_broadcast(&device->queue_submit);

out:
   if (result != VK_SUCCESS) {
      /* In the case that something has gone wrong we may end up with an
       * inconsistent state from which it may not be trivial to recover.
       * For example, we might have computed address relocations and
       * any future attempt to re-submit this job will need to know about
       * this and avoid computing relocation addresses again.
       *
       * To avoid this sort of issue, we assume that if something went
       * wrong during submission we must already be in a really bad
       * situation anyway (such as being out of memory) and return
       * VK_ERROR_DEVICE_LOST to ensure that clients do not attempt to
       * submit the same job again to this device.
       */
      result = anv_device_set_lost(device, "vkQueueSubmit() failed");
   }

   pthread_mutex_unlock(&device->mutex);

   return result;
}

VkResult anv_QueueWaitIdle(
    VkQueue                                     _queue)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);

   return anv_DeviceWaitIdle(anv_device_to_handle(queue->device));
}

VkResult anv_CreateFence(
    VkDevice                                    _device,
    const VkFenceCreateInfo*                    pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkFence*                                    pFence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_fence *fence;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);

   fence = vk_zalloc2(&device->alloc, pAllocator, sizeof(*fence), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (fence == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   if (device->instance->physicalDevice.has_syncobj_wait) {
      fence->permanent.type = ANV_FENCE_TYPE_SYNCOBJ;

      uint32_t create_flags = 0;
      if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT)
         create_flags |= DRM_SYNCOBJ_CREATE_SIGNALED;

      fence->permanent.syncobj = anv_gem_syncobj_create(device, create_flags);
      if (!fence->permanent.syncobj)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   } else {
      fence->permanent.type = ANV_FENCE_TYPE_BO;

      VkResult result = anv_bo_pool_alloc(&device->batch_bo_pool,
                                          &fence->permanent.bo.bo, 4096);
      if (result != VK_SUCCESS)
         return result;

      if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
         fence->permanent.bo.state = ANV_BO_FENCE_STATE_SIGNALED;
      } else {
         fence->permanent.bo.state = ANV_BO_FENCE_STATE_RESET;
      }
   }

   *pFence = anv_fence_to_handle(fence);

   return VK_SUCCESS;
}

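/* Release whatever payload the given fence impl currently holds and return
 * it to ANV_FENCE_TYPE_NONE, so it is safe to clean up twice or to
 * overwrite with an imported payload.
 */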
static void
anv_fence_impl_cleanup(struct anv_device *device,
                       struct anv_fence_impl *impl)
{
   switch (impl->type) {
   case ANV_FENCE_TYPE_NONE:
      /* Dummy.  Nothing to do */
      break;

   case ANV_FENCE_TYPE_BO:
      anv_bo_pool_free(&device->batch_bo_pool, &impl->bo.bo);
      break;

   case ANV_FENCE_TYPE_SYNCOBJ:
      anv_gem_syncobj_destroy(device, impl->syncobj);
      break;

   case ANV_FENCE_TYPE_WSI:
      impl->fence_wsi->destroy(impl->fence_wsi);
      break;

   default:
      unreachable("Invalid fence type");
   }

   impl->type = ANV_FENCE_TYPE_NONE;
}

void anv_DestroyFence(
    VkDevice                                    _device,
    VkFence                                     _fence,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   if (!fence)
      return;

   anv_fence_impl_cleanup(device, &fence->temporary);
   anv_fence_impl_cleanup(device, &fence->permanent);

   vk_free2(&device->alloc, pAllocator, fence);
}

VkResult anv_ResetFences(
    VkDevice                                    _device,
    uint32_t                                    fenceCount,
    const VkFence*                              pFences)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);

      /* From the Vulkan 1.0.53 spec:
       *
       *    "If any member of pFences currently has its payload imported with
       *    temporary permanence, that fence’s prior permanent payload is
       *    first restored. The remaining operations described therefore
       *    operate on the restored payload."
       */
      if (fence->temporary.type != ANV_FENCE_TYPE_NONE)
         anv_fence_impl_cleanup(device, &fence->temporary);

      struct anv_fence_impl *impl = &fence->permanent;

      switch (impl->type) {
      case ANV_FENCE_TYPE_BO:
         impl->bo.state = ANV_BO_FENCE_STATE_RESET;
         break;

      case ANV_FENCE_TYPE_SYNCOBJ:
         anv_gem_syncobj_reset(device, impl->syncobj);
         break;

      default:
         unreachable("Invalid fence type");
      }
   }

   return VK_SUCCESS;
}

VkResult anv_GetFenceStatus(
    VkDevice                                    _device,
    VkFence                                     _fence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   if (anv_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   struct anv_fence_impl *impl =
      fence->temporary.type != ANV_FENCE_TYPE_NONE ?
      &fence->temporary : &fence->permanent;

   switch (impl->type) {
   case ANV_FENCE_TYPE_BO:
      /* BO fences don't support import/export */
      assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);
      switch (impl->bo.state) {
      case ANV_BO_FENCE_STATE_RESET:
         /* If it hasn't even been sent off to the GPU yet, it's not ready */
         return VK_NOT_READY;

      case ANV_BO_FENCE_STATE_SIGNALED:
         /* It's been signaled, return success */
         return VK_SUCCESS;

      case ANV_BO_FENCE_STATE_SUBMITTED: {
         VkResult result = anv_device_bo_busy(device, &impl->bo.bo);
         if (result == VK_SUCCESS) {
            impl->bo.state = ANV_BO_FENCE_STATE_SIGNALED;
            return VK_SUCCESS;
         } else {
            return result;
         }
      }
      default:
         unreachable("Invalid fence status");
      }

   case ANV_FENCE_TYPE_SYNCOBJ: {
      int ret = anv_gem_syncobj_wait(device, &impl->syncobj, 1, 0, true);
      if (ret == -1) {
         if (errno == ETIME) {
            return VK_NOT_READY;
         } else {
            /* We don't know the real error. */
            return anv_device_set_lost(device, "drm_syncobj_wait failed: %m");
         }
      } else {
         return VK_SUCCESS;
      }
   }

   default:
      unreachable("Invalid fence type");
   }
}

#define NSEC_PER_SEC 1000000000
#define INT_TYPE_MAX(type) ((1ull << (sizeof(type) * 8 - 1)) - 1)

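/* Sample CLOCK_MONOTONIC in nanoseconds.  The fence waits below all work in
 * terms of absolute deadlines on this clock so a single deadline can be
 * shared across several blocking calls (this assumes device->queue_submit
 * was created with a matching monotonic clock attribute).
 */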
static uint64_t
gettime_ns(void)
{
   struct timespec current;
   clock_gettime(CLOCK_MONOTONIC, &current);
   return (uint64_t)current.tv_sec * NSEC_PER_SEC + current.tv_nsec;
}

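/* Convert a relative timeout into an absolute deadline, clamping so that
 * current_time + timeout cannot exceed INT64_MAX.  A zero timeout stays
 * zero, which the wait paths effectively treat as a poll.
 */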
static uint64_t anv_get_absolute_timeout(uint64_t timeout)
{
   if (timeout == 0)
      return 0;
   uint64_t current_time = gettime_ns();
   uint64_t max_timeout = (uint64_t) INT64_MAX - current_time;

   timeout = MIN2(max_timeout, timeout);

   return (current_time + timeout);
}

static int64_t anv_get_relative_timeout(uint64_t abs_timeout)
{
   uint64_t now = gettime_ns();

   /* We don't want negative timeouts.
    *
    * DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and is
    * supposed to block indefinitely for timeouts < 0.  Unfortunately,
    * this was broken for a couple of kernel releases.  Since there's
    * no way to know whether or not the kernel we're using is one of
    * the broken ones, the best we can do is to clamp the timeout to
    * INT64_MAX.  This limits the maximum timeout from 584 years to
    * 292 years - likely not a big deal.
    */
   if (abs_timeout < now)
      return 0;

   uint64_t rel_timeout = abs_timeout - now;
   if (rel_timeout > (uint64_t) INT64_MAX)
      rel_timeout = INT64_MAX;

   return rel_timeout;
}

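/* Wait on a set of fences that are all backed by DRM syncobjs with a single
 * syncobj wait ioctl.  A temporary payload, if present, takes precedence
 * over the permanent one, matching Vulkan's import semantics.
 */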
static VkResult
anv_wait_for_syncobj_fences(struct anv_device *device,
                            uint32_t fenceCount,
                            const VkFence *pFences,
                            bool waitAll,
                            uint64_t abs_timeout_ns)
{
   uint32_t *syncobjs = vk_zalloc(&device->alloc,
                                  sizeof(*syncobjs) * fenceCount, 8,
                                  VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!syncobjs)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      assert(fence->permanent.type == ANV_FENCE_TYPE_SYNCOBJ);

      struct anv_fence_impl *impl =
         fence->temporary.type != ANV_FENCE_TYPE_NONE ?
         &fence->temporary : &fence->permanent;

      assert(impl->type == ANV_FENCE_TYPE_SYNCOBJ);
      syncobjs[i] = impl->syncobj;
   }

   /* The gem_syncobj_wait ioctl may return early due to an inherent
    * limitation in the way it computes timeouts.  Loop until we've actually
    * passed the timeout.
    */
   int ret;
   do {
      ret = anv_gem_syncobj_wait(device, syncobjs, fenceCount,
                                 abs_timeout_ns, waitAll);
   } while (ret == -1 && errno == ETIME && gettime_ns() < abs_timeout_ns);

   vk_free(&device->alloc, syncobjs);

   if (ret == -1) {
      if (errno == ETIME) {
         return VK_TIMEOUT;
      } else {
         /* We don't know the real error. */
         return anv_device_set_lost(device, "drm_syncobj_wait failed: %m");
      }
   } else {
      return VK_SUCCESS;
   }
}

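/* Wait on legacy BO-backed fences.  Submitted fences are waited on with the
 * kernel GEM wait; fences that have not been submitted yet are polled, with
 * a condition-variable sleep on device->queue_submit so we don't spin while
 * nothing is in flight.
 */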
static VkResult
anv_wait_for_bo_fences(struct anv_device *device,
                       uint32_t fenceCount,
                       const VkFence *pFences,
                       bool waitAll,
                       uint64_t abs_timeout_ns)
{
   VkResult result = VK_SUCCESS;
   uint32_t pending_fences = fenceCount;
   while (pending_fences) {
      pending_fences = 0;
      bool signaled_fences = false;
      for (uint32_t i = 0; i < fenceCount; i++) {
         ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);

         /* This function assumes that all fences are BO fences and that they
          * have no temporary state.  Since BO fences will never be exported,
          * this should be a safe assumption.
          */
         assert(fence->permanent.type == ANV_FENCE_TYPE_BO);
         assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);
         struct anv_fence_impl *impl = &fence->permanent;

         switch (impl->bo.state) {
         case ANV_BO_FENCE_STATE_RESET:
            /* This fence hasn't been submitted yet, we'll catch it the next
             * time around.  Yes, this may mean we dead-loop but, short of
             * lots of locking and a condition variable, there's not much that
             * we can do about that.
             */
            pending_fences++;
            continue;

         case ANV_BO_FENCE_STATE_SIGNALED:
            /* This fence is not pending.  If waitAll isn't set, we can return
             * early.  Otherwise, we have to keep going.
             */
            if (!waitAll) {
               result = VK_SUCCESS;
               goto done;
            }
            continue;

         case ANV_BO_FENCE_STATE_SUBMITTED:
            /* These are the fences we really care about.  Go ahead and wait
             * on it until we hit a timeout.
             */
            result = anv_device_wait(device, &impl->bo.bo,
                                     anv_get_relative_timeout(abs_timeout_ns));
            switch (result) {
            case VK_SUCCESS:
               impl->bo.state = ANV_BO_FENCE_STATE_SIGNALED;
               signaled_fences = true;
               if (!waitAll)
                  goto done;
               break;

            case VK_TIMEOUT:
               goto done;

            default:
               return result;
            }
         }
      }

      if (pending_fences && !signaled_fences) {
         /* If we've hit this then someone decided to vkWaitForFences before
          * they've actually submitted any of them to a queue.  This is a
          * fairly pessimal case, so it's ok to lock here and use a standard
          * pthreads condition variable.
          */
         pthread_mutex_lock(&device->mutex);

         /* It's possible that some of the fences have changed state since the
          * last time we checked.  Now that we have the lock, check for
          * pending fences again and don't wait if it's changed.
          */
         uint32_t now_pending_fences = 0;
         for (uint32_t i = 0; i < fenceCount; i++) {
            ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
            if (fence->permanent.bo.state == ANV_BO_FENCE_STATE_RESET)
               now_pending_fences++;
         }
         assert(now_pending_fences <= pending_fences);

         if (now_pending_fences == pending_fences) {
            struct timespec abstime = {
               .tv_sec = abs_timeout_ns / NSEC_PER_SEC,
               .tv_nsec = abs_timeout_ns % NSEC_PER_SEC,
            };

            MAYBE_UNUSED int ret;
            ret = pthread_cond_timedwait(&device->queue_submit,
                                         &device->mutex, &abstime);
            assert(ret != EINVAL);
            if (gettime_ns() >= abs_timeout_ns) {
               pthread_mutex_unlock(&device->mutex);
               result = VK_TIMEOUT;
               goto done;
            }
         }

         pthread_mutex_unlock(&device->mutex);
      }
   }

done:
   if (anv_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   return result;
}

static VkResult
anv_wait_for_wsi_fence(struct anv_device *device,
                       const VkFence _fence,
                       uint64_t abs_timeout)
{
   ANV_FROM_HANDLE(anv_fence, fence, _fence);
   struct anv_fence_impl *impl = &fence->permanent;

   return impl->fence_wsi->wait(impl->fence_wsi, abs_timeout);
}

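/* Generic fallback for a heterogeneous mix of fence types.  For "wait all"
 * we can simply wait on each fence in turn; for "wait any" there is no one
 * kernel primitive that covers every type, so we poll each fence with a
 * zero timeout until one signals or the deadline passes.
 */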
static VkResult
anv_wait_for_fences(struct anv_device *device,
                    uint32_t fenceCount,
                    const VkFence *pFences,
                    bool waitAll,
                    uint64_t abs_timeout)
{
   VkResult result = VK_SUCCESS;

   if (fenceCount <= 1 || waitAll) {
      for (uint32_t i = 0; i < fenceCount; i++) {
         ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
         switch (fence->permanent.type) {
         case ANV_FENCE_TYPE_BO:
            result = anv_wait_for_bo_fences(device, 1, &pFences[i],
                                            true, abs_timeout);
            break;
         case ANV_FENCE_TYPE_SYNCOBJ:
            result = anv_wait_for_syncobj_fences(device, 1, &pFences[i],
                                                 true, abs_timeout);
            break;
         case ANV_FENCE_TYPE_WSI:
            result = anv_wait_for_wsi_fence(device, pFences[i], abs_timeout);
            break;
         case ANV_FENCE_TYPE_NONE:
            result = VK_SUCCESS;
            break;
         }
         if (result != VK_SUCCESS)
            return result;
      }
   } else {
      do {
         for (uint32_t i = 0; i < fenceCount; i++) {
            if (anv_wait_for_fences(device, 1, &pFences[i], true, 0) == VK_SUCCESS)
               return VK_SUCCESS;
         }
      } while (gettime_ns() < abs_timeout);
      result = VK_TIMEOUT;
   }
   return result;
}

static bool anv_all_fences_syncobj(uint32_t fenceCount, const VkFence *pFences)
{
   for (uint32_t i = 0; i < fenceCount; ++i) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      if (fence->permanent.type != ANV_FENCE_TYPE_SYNCOBJ)
         return false;
   }
   return true;
}

static bool anv_all_fences_bo(uint32_t fenceCount, const VkFence *pFences)
{
   for (uint32_t i = 0; i < fenceCount; ++i) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      if (fence->permanent.type != ANV_FENCE_TYPE_BO)
         return false;
   }
   return true;
}

VkResult anv_WaitForFences(
    VkDevice                                    _device,
    uint32_t                                    fenceCount,
    const VkFence*                              pFences,
    VkBool32                                    waitAll,
    uint64_t                                    timeout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   if (anv_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   uint64_t abs_timeout = anv_get_absolute_timeout(timeout);
   if (anv_all_fences_syncobj(fenceCount, pFences)) {
      return anv_wait_for_syncobj_fences(device, fenceCount, pFences,
                                         waitAll, abs_timeout);
   } else if (anv_all_fences_bo(fenceCount, pFences)) {
      return anv_wait_for_bo_fences(device, fenceCount, pFences,
                                    waitAll, abs_timeout);
   } else {
      return anv_wait_for_fences(device, fenceCount, pFences,
                                 waitAll, abs_timeout);
   }
}

void anv_GetPhysicalDeviceExternalFenceProperties(
    VkPhysicalDevice                            physicalDevice,
    const VkPhysicalDeviceExternalFenceInfo*    pExternalFenceInfo,
    VkExternalFenceProperties*                  pExternalFenceProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);

   switch (pExternalFenceInfo->handleType) {
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT:
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT:
      if (device->has_syncobj_wait) {
         pExternalFenceProperties->exportFromImportedHandleTypes =
            VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT |
            VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
         pExternalFenceProperties->compatibleHandleTypes =
            VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT |
            VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
         pExternalFenceProperties->externalFenceFeatures =
            VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT |
            VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT;
         return;
      }
      break;

   default:
      break;
   }

   pExternalFenceProperties->exportFromImportedHandleTypes = 0;
   pExternalFenceProperties->compatibleHandleTypes = 0;
   pExternalFenceProperties->externalFenceFeatures = 0;
}

VkResult anv_ImportFenceFdKHR(
    VkDevice                                    _device,
    const VkImportFenceFdInfoKHR*               pImportFenceFdInfo)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, pImportFenceFdInfo->fence);
   int fd = pImportFenceFdInfo->fd;

   assert(pImportFenceFdInfo->sType ==
          VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR);

   struct anv_fence_impl new_impl = {
      .type = ANV_FENCE_TYPE_NONE,
   };

   switch (pImportFenceFdInfo->handleType) {
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT:
      new_impl.type = ANV_FENCE_TYPE_SYNCOBJ;

      new_impl.syncobj = anv_gem_syncobj_fd_to_handle(device, fd);
      if (!new_impl.syncobj)
         return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);

      break;

   case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT:
      /* Sync files are a bit tricky.  Because we want to continue using the
       * syncobj implementation of WaitForFences, we don't use the sync file
       * directly but instead import it into a syncobj.
       */
      new_impl.type = ANV_FENCE_TYPE_SYNCOBJ;

      new_impl.syncobj = anv_gem_syncobj_create(device, 0);
      if (!new_impl.syncobj)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      if (anv_gem_syncobj_import_sync_file(device, new_impl.syncobj, fd)) {
         anv_gem_syncobj_destroy(device, new_impl.syncobj);
         return vk_errorf(device->instance, NULL,
                          VK_ERROR_INVALID_EXTERNAL_HANDLE,
                          "syncobj sync file import failed: %m");
      }
      break;

   default:
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   /* From the Vulkan 1.0.53 spec:
    *
    *    "Importing a fence payload from a file descriptor transfers
    *    ownership of the file descriptor from the application to the
    *    Vulkan implementation. The application must not perform any
    *    operations on the file descriptor after a successful import."
    *
    * If the import fails, we leave the file descriptor open.
    */
   close(fd);

   if (pImportFenceFdInfo->flags & VK_FENCE_IMPORT_TEMPORARY_BIT) {
      anv_fence_impl_cleanup(device, &fence->temporary);
      fence->temporary = new_impl;
   } else {
      anv_fence_impl_cleanup(device, &fence->permanent);
      fence->permanent = new_impl;
   }

   return VK_SUCCESS;
}

VkResult anv_GetFenceFdKHR(
    VkDevice                                    _device,
    const VkFenceGetFdInfoKHR*                  pGetFdInfo,
    int*                                        pFd)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, pGetFdInfo->fence);

   assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR);

   struct anv_fence_impl *impl =
      fence->temporary.type != ANV_FENCE_TYPE_NONE ?
      &fence->temporary : &fence->permanent;

   assert(impl->type == ANV_FENCE_TYPE_SYNCOBJ);
   switch (pGetFdInfo->handleType) {
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT: {
      int fd = anv_gem_syncobj_handle_to_fd(device, impl->syncobj);
      if (fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);

      *pFd = fd;
      break;
   }

   case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT: {
      int fd = anv_gem_syncobj_export_sync_file(device, impl->syncobj);
      if (fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);

      *pFd = fd;
      break;
   }

   default:
      unreachable("Invalid fence export handle type");
   }

   /* From the Vulkan 1.0.53 spec:
    *
    *    "Export operations have the same transference as the specified handle
    *    type’s import operations. [...] If the fence was using a
    *    temporarily imported payload, the fence’s prior permanent payload
    *    will be restored."
    */
   if (impl == &fence->temporary)
      anv_fence_impl_cleanup(device, impl);

   return VK_SUCCESS;
}

// Queue semaphore functions

VkResult anv_CreateSemaphore(
    VkDevice                                    _device,
    const VkSemaphoreCreateInfo*                pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkSemaphore*                                pSemaphore)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_semaphore *semaphore;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO);

   semaphore = vk_alloc2(&device->alloc, pAllocator, sizeof(*semaphore), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (semaphore == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   const VkExportSemaphoreCreateInfo *export =
      vk_find_struct_const(pCreateInfo->pNext, EXPORT_SEMAPHORE_CREATE_INFO);
   VkExternalSemaphoreHandleTypeFlags handleTypes =
      export ? export->handleTypes : 0;

   if (handleTypes == 0) {
      /* The DRM execbuffer ioctl always executes in-order so long as you
       * stay on the same ring.  Since we don't expose the blit engine as a
       * DMA queue, a dummy no-op semaphore is a perfectly valid
       * implementation.
       */
      semaphore->permanent.type = ANV_SEMAPHORE_TYPE_DUMMY;
   } else if (handleTypes & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT) {
      assert(handleTypes == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT);
      if (device->instance->physicalDevice.has_syncobj) {
         semaphore->permanent.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;
         semaphore->permanent.syncobj = anv_gem_syncobj_create(device, 0);
         if (!semaphore->permanent.syncobj) {
            vk_free2(&device->alloc, pAllocator, semaphore);
            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
         }
      } else {
         semaphore->permanent.type = ANV_SEMAPHORE_TYPE_BO;
         VkResult result = anv_bo_cache_alloc(device, &device->bo_cache,
                                              4096, ANV_BO_EXTERNAL,
                                              &semaphore->permanent.bo);
         if (result != VK_SUCCESS) {
            vk_free2(&device->alloc, pAllocator, semaphore);
            return result;
         }

         /* If we're going to use this as a fence, we need to *not* have the
          * EXEC_OBJECT_ASYNC bit set.
          */
         assert(!(semaphore->permanent.bo->flags & EXEC_OBJECT_ASYNC));
      }
   } else if (handleTypes & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) {
      assert(handleTypes == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT);

      semaphore->permanent.type = ANV_SEMAPHORE_TYPE_SYNC_FILE;
      semaphore->permanent.fd = -1;
   } else {
      assert(!"Unknown handle type");
      vk_free2(&device->alloc, pAllocator, semaphore);
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   semaphore->temporary.type = ANV_SEMAPHORE_TYPE_NONE;

   *pSemaphore = anv_semaphore_to_handle(semaphore);

   return VK_SUCCESS;
}

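/* Release whatever payload the given semaphore impl currently holds and
 * return it to ANV_SEMAPHORE_TYPE_NONE, so it is safe to clean up again or
 * to overwrite with an imported payload.
 */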
static void
anv_semaphore_impl_cleanup(struct anv_device *device,
                           struct anv_semaphore_impl *impl)
{
   switch (impl->type) {
   case ANV_SEMAPHORE_TYPE_NONE:
   case ANV_SEMAPHORE_TYPE_DUMMY:
      /* Dummy.  Nothing to do */
      break;

   case ANV_SEMAPHORE_TYPE_BO:
      anv_bo_cache_release(device, &device->bo_cache, impl->bo);
      break;

   case ANV_SEMAPHORE_TYPE_SYNC_FILE:
      close(impl->fd);
      break;

   case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
      anv_gem_syncobj_destroy(device, impl->syncobj);
      break;

   default:
      unreachable("Invalid semaphore type");
   }

   impl->type = ANV_SEMAPHORE_TYPE_NONE;
}

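/* Drop a temporary payload, restoring the semaphore's permanent payload.
 * Intended to be called once a temporarily imported payload has been
 * consumed (e.g. after it has been waited on in a queue submission).
 */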
void
anv_semaphore_reset_temporary(struct anv_device *device,
                              struct anv_semaphore *semaphore)
{
   if (semaphore->temporary.type == ANV_SEMAPHORE_TYPE_NONE)
      return;

   anv_semaphore_impl_cleanup(device, &semaphore->temporary);
}

void anv_DestroySemaphore(
    VkDevice                                    _device,
    VkSemaphore                                 _semaphore,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, _semaphore);

   if (semaphore == NULL)
      return;

   anv_semaphore_impl_cleanup(device, &semaphore->temporary);
   anv_semaphore_impl_cleanup(device, &semaphore->permanent);

   vk_free2(&device->alloc, pAllocator, semaphore);
}

void anv_GetPhysicalDeviceExternalSemaphoreProperties(
    VkPhysicalDevice                            physicalDevice,
    const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
    VkExternalSemaphoreProperties*               pExternalSemaphoreProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);

   switch (pExternalSemaphoreInfo->handleType) {
   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
      pExternalSemaphoreProperties->exportFromImportedHandleTypes =
         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
      pExternalSemaphoreProperties->compatibleHandleTypes =
         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
      pExternalSemaphoreProperties->externalSemaphoreFeatures =
         VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
         VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
      return;

   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
      if (device->has_exec_fence) {
         pExternalSemaphoreProperties->exportFromImportedHandleTypes =
            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
         pExternalSemaphoreProperties->compatibleHandleTypes =
            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
         pExternalSemaphoreProperties->externalSemaphoreFeatures =
            VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
            VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
         return;
      }
      break;

   default:
      break;
   }

   pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
   pExternalSemaphoreProperties->compatibleHandleTypes = 0;
   pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
}

VkResult anv_ImportSemaphoreFdKHR(
    VkDevice                                    _device,
    const VkImportSemaphoreFdInfoKHR*           pImportSemaphoreFdInfo)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, pImportSemaphoreFdInfo->semaphore);
   int fd = pImportSemaphoreFdInfo->fd;

   struct anv_semaphore_impl new_impl = {
      .type = ANV_SEMAPHORE_TYPE_NONE,
   };

   switch (pImportSemaphoreFdInfo->handleType) {
   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
      if (device->instance->physicalDevice.has_syncobj) {
         new_impl.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;

         new_impl.syncobj = anv_gem_syncobj_fd_to_handle(device, fd);
         if (!new_impl.syncobj)
            return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
      } else {
         new_impl.type = ANV_SEMAPHORE_TYPE_BO;

         VkResult result = anv_bo_cache_import(device, &device->bo_cache,
                                               fd, ANV_BO_EXTERNAL,
                                               &new_impl.bo);
         if (result != VK_SUCCESS)
            return result;

         if (new_impl.bo->size < 4096) {
            anv_bo_cache_release(device, &device->bo_cache, new_impl.bo);
            return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
         }

         /* If we're going to use this as a fence, we need to *not* have the
          * EXEC_OBJECT_ASYNC bit set.
          */
         assert(!(new_impl.bo->flags & EXEC_OBJECT_ASYNC));
      }

      /* From the Vulkan spec:
       *
       *    "Importing semaphore state from a file descriptor transfers
       *    ownership of the file descriptor from the application to the
       *    Vulkan implementation. The application must not perform any
       *    operations on the file descriptor after a successful import."
       *
       * If the import fails, we leave the file descriptor open.
       */
      close(fd);
      break;

   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
      new_impl = (struct anv_semaphore_impl) {
         .type = ANV_SEMAPHORE_TYPE_SYNC_FILE,
         .fd = fd,
      };
      break;

   default:
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT) {
      anv_semaphore_impl_cleanup(device, &semaphore->temporary);
      semaphore->temporary = new_impl;
   } else {
      anv_semaphore_impl_cleanup(device, &semaphore->permanent);
      semaphore->permanent = new_impl;
   }

   return VK_SUCCESS;
}

VkResult anv_GetSemaphoreFdKHR(
    VkDevice                                    _device,
    const VkSemaphoreGetFdInfoKHR*              pGetFdInfo,
    int*                                        pFd)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, pGetFdInfo->semaphore);
   VkResult result;
   int fd;

   assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR);

   struct anv_semaphore_impl *impl =
      semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
      &semaphore->temporary : &semaphore->permanent;

   switch (impl->type) {
   case ANV_SEMAPHORE_TYPE_BO:
      result = anv_bo_cache_export(device, &device->bo_cache, impl->bo, pFd);
      if (result != VK_SUCCESS)
         return result;
      break;

   case ANV_SEMAPHORE_TYPE_SYNC_FILE:
      /* There are two reasons why this could happen:
       *
       *  1) The user is trying to export without submitting something that
       *     signals the semaphore.  If this is the case, it's their bug so
       *     what we return here doesn't matter.
       *
       *  2) The kernel didn't give us a file descriptor.  The most likely
       *     reason for this is running out of file descriptors.
       */
      if (impl->fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);

      *pFd = impl->fd;

      /* From the Vulkan 1.0.53 spec:
       *
       *    "...exporting a semaphore payload to a handle with copy
       *    transference has the same side effects on the source
       *    semaphore’s payload as executing a semaphore wait operation."
       *
       * In other words, it may still be a SYNC_FD semaphore, but it's now
       * considered to have been waited on and no longer has a sync file
       * attached.
       */
      impl->fd = -1;
      return VK_SUCCESS;

   case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
      fd = anv_gem_syncobj_handle_to_fd(device, impl->syncobj);
      if (fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);
      *pFd = fd;
      break;

   default:
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   /* From the Vulkan 1.0.53 spec:
    *
    *    "Export operations have the same transference as the specified handle
    *    type’s import operations. [...] If the semaphore was using a
    *    temporarily imported payload, the semaphore’s prior permanent payload
    *    will be restored."
    */
   if (impl == &semaphore->temporary)
      anv_semaphore_impl_cleanup(device, impl);

   return VK_SUCCESS;
}