/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/**
 * This file implements VkQueue, VkFence, and VkSemaphore
 */

#include <fcntl.h>
#include <unistd.h>

#include "anv_private.h"
#include "vk_util.h"

#include "genxml/gen7_pack.h"

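/* Submit an execbuf2 to the kernel and, on success, record the final GPU
 * offsets the kernel chose for each BO.  On failure, the device is marked
 * lost, since we cannot tell how far the submission got.
 */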
VkResult
anv_device_execbuf(struct anv_device *device,
                   struct drm_i915_gem_execbuffer2 *execbuf,
                   struct anv_bo **execbuf_bos)
{
   int ret = device->no_hw ? 0 : anv_gem_execbuffer(device, execbuf);
   if (ret != 0) {
      /* We don't know the real error. */
      return anv_device_set_lost(device, "execbuf2 failed: %m");
   }

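   /* On success, the kernel writes the final offset of each exec object
    * back into the array we passed in.  Mirror those offsets into our
    * anv_bo structs so later submissions see each BO's actual placement.
    */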
   struct drm_i915_gem_exec_object2 *objects =
      (void *)(uintptr_t)execbuf->buffers_ptr;
   for (uint32_t k = 0; k < execbuf->buffer_count; k++) {
      if (execbuf_bos[k]->flags & EXEC_OBJECT_PINNED)
         assert(execbuf_bos[k]->offset == objects[k].offset);
      execbuf_bos[k]->offset = objects[k].offset;
   }

   return VK_SUCCESS;
}

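/* Copy a batch into a BO freshly allocated from the batch BO pool, submit
 * it on the render ring, and synchronously wait for it to complete.
 */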
VkResult
anv_device_submit_simple_batch(struct anv_device *device,
                               struct anv_batch *batch)
{
   struct drm_i915_gem_execbuffer2 execbuf;
   struct drm_i915_gem_exec_object2 exec2_objects[1];
   struct anv_bo bo, *exec_bos[1];
   VkResult result = VK_SUCCESS;
   uint32_t size;

   /* Kernel driver requires 8 byte aligned batch length */
   size = align_u32(batch->next - batch->start, 8);
   result = anv_bo_pool_alloc(&device->batch_bo_pool, &bo, size);
   if (result != VK_SUCCESS)
      return result;

   memcpy(bo.map, batch->start, size);
   if (!device->info.has_llc)
      gen_flush_range(bo.map, size);

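   /* Build a one-entry exec object list containing just the batch BO. */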
   exec_bos[0] = &bo;
   exec2_objects[0].handle = bo.gem_handle;
   exec2_objects[0].relocation_count = 0;
   exec2_objects[0].relocs_ptr = 0;
   exec2_objects[0].alignment = 0;
   exec2_objects[0].offset = bo.offset;
   exec2_objects[0].flags = bo.flags;
   exec2_objects[0].rsvd1 = 0;
   exec2_objects[0].rsvd2 = 0;

   execbuf.buffers_ptr = (uintptr_t) exec2_objects;
   execbuf.buffer_count = 1;
   execbuf.batch_start_offset = 0;
   execbuf.batch_len = size;
   execbuf.cliprects_ptr = 0;
   execbuf.num_cliprects = 0;
   execbuf.DR1 = 0;
   execbuf.DR4 = 0;

   execbuf.flags =
      I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
   execbuf.rsvd1 = device->context_id;
   execbuf.rsvd2 = 0;

   if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
      gen_print_batch(&device->decoder_ctx, bo.map, bo.size, bo.offset, false);

   result = anv_device_execbuf(device, &execbuf, exec_bos);
   if (result != VK_SUCCESS)
      goto fail;

   result = anv_device_wait(device, &bo, INT64_MAX);

 fail:
   anv_bo_pool_free(&device->batch_bo_pool, &bo);

   return result;
}

VkResult anv_QueueSubmit(
    VkQueue                                     _queue,
    uint32_t                                    submitCount,
    const VkSubmitInfo*                         pSubmits,
    VkFence                                     fence)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);
   struct anv_device *device = queue->device;

   /* Query for device status prior to submitting.  Technically, we don't need
    * to do this.  However, if we have a client that's submitting piles of
    * garbage, we would rather break as early as possible to keep the GPU
    * hanging contained.  If we don't check here, we'll either be waiting for
    * the kernel to kick us or we'll have to wait until the client waits on a
    * fence before we actually know whether or not we've hung.
    */
   VkResult result = anv_device_query_status(device);
   if (result != VK_SUCCESS)
      return result;

   /* We lock around QueueSubmit for three main reasons:
    *
    *  1) When a block pool is resized, we create a new gem handle with a
    *     different size and, in the case of surface states, possibly a
    *     different center offset but we re-use the same anv_bo struct when
    *     we do so.  If this happens in the middle of setting up an execbuf,
    *     we could end up with our list of BOs out of sync with our list of
    *     gem handles.
    *
    *  2) The algorithm we use for building the list of unique buffers isn't
    *     thread-safe.  While the client is supposed to synchronize around
    *     QueueSubmit, this would be extremely difficult to debug if it ever
    *     came up in the wild due to a broken app.  It's better to play it
    *     safe and just lock around QueueSubmit.
    *
    *  3) The anv_cmd_buffer_execbuf function may perform relocations in
    *     userspace.  Due to the fact that the surface state buffer is shared
    *     between batches, we can't afford to have that happen from multiple
    *     threads at the same time.  Even though the user is supposed to
    *     ensure this doesn't happen, we play it safe as in (2) above.
    *
    * Since the only other things that ever take the device lock, such as
    * block pool resizes, happen only rarely, the lock will almost never be
    * contended, so taking it isn't really an expensive operation in this
    * case.
    */
   pthread_mutex_lock(&device->mutex);

   if (fence && submitCount == 0) {
      /* If we don't have any command buffers, we need to submit a dummy
       * batch to give GEM something to wait on.  We could, potentially,
       * come up with something more efficient but this shouldn't be a
       * common case.
       */
      result = anv_cmd_buffer_execbuf(device, NULL, NULL, 0, NULL, 0, fence);
      goto out;
   }

   for (uint32_t i = 0; i < submitCount; i++) {
      /* Fence for this submit.  NULL for all but the last one */
      VkFence submit_fence = (i == submitCount - 1) ? fence : VK_NULL_HANDLE;

      if (pSubmits[i].commandBufferCount == 0) {
         /* If we don't have any command buffers, we need to submit a dummy
          * batch to give GEM something to wait on.  We could, potentially,
          * come up with something more efficient but this shouldn't be a
          * common case.
          */
         result = anv_cmd_buffer_execbuf(device, NULL,
                                         pSubmits[i].pWaitSemaphores,
                                         pSubmits[i].waitSemaphoreCount,
                                         pSubmits[i].pSignalSemaphores,
                                         pSubmits[i].signalSemaphoreCount,
                                         submit_fence);
         if (result != VK_SUCCESS)
            goto out;

         continue;
      }

      for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
         ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer,
                         pSubmits[i].pCommandBuffers[j]);
         assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
         assert(!anv_batch_has_error(&cmd_buffer->batch));

         /* Fence for this execbuf.  NULL for all but the last one */
         VkFence execbuf_fence =
            (j == pSubmits[i].commandBufferCount - 1) ?
            submit_fence : VK_NULL_HANDLE;

         const VkSemaphore *in_semaphores = NULL, *out_semaphores = NULL;
         uint32_t num_in_semaphores = 0, num_out_semaphores = 0;
         if (j == 0) {
            /* Only the first batch gets the in semaphores */
            in_semaphores = pSubmits[i].pWaitSemaphores;
            num_in_semaphores = pSubmits[i].waitSemaphoreCount;
         }

         if (j == pSubmits[i].commandBufferCount - 1) {
            /* Only the last batch gets the out semaphores */
            out_semaphores = pSubmits[i].pSignalSemaphores;
            num_out_semaphores = pSubmits[i].signalSemaphoreCount;
         }

         result = anv_cmd_buffer_execbuf(device, cmd_buffer,
                                         in_semaphores, num_in_semaphores,
                                         out_semaphores, num_out_semaphores,
                                         execbuf_fence);
         if (result != VK_SUCCESS)
            goto out;
      }
   }

   pthread_cond_broadcast(&device->queue_submit);

out:
   if (result != VK_SUCCESS) {
      /* In the case that something has gone wrong we may end up with an
       * inconsistent state from which it may not be trivial to recover.
       * For example, we might have computed address relocations, and
       * any future attempt to re-submit this job would need to know about
       * that and avoid computing relocation addresses again.
       *
       * To avoid this sort of issue, we assume that if something went
       * wrong during submission we must already be in a really bad situation
       * anyway (such as being out of memory) and return
       * VK_ERROR_DEVICE_LOST to ensure that clients do not attempt to
       * submit the same job again to this device.
       */
      result = anv_device_set_lost(device, "vkQueueSubmit() failed");
   }

   pthread_mutex_unlock(&device->mutex);

   return result;
}

VkResult anv_QueueWaitIdle(
    VkQueue                                     _queue)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);

   return anv_DeviceWaitIdle(anv_device_to_handle(queue->device));
}

VkResult anv_CreateFence(
    VkDevice                                    _device,
    const VkFenceCreateInfo*                    pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkFence*                                    pFence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_fence *fence;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);

   fence = vk_zalloc2(&device->alloc, pAllocator, sizeof(*fence), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (fence == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   if (device->instance->physicalDevice.has_syncobj_wait) {
      fence->permanent.type = ANV_FENCE_TYPE_SYNCOBJ;

      uint32_t create_flags = 0;
      if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT)
         create_flags |= DRM_SYNCOBJ_CREATE_SIGNALED;

      fence->permanent.syncobj = anv_gem_syncobj_create(device, create_flags);
      if (!fence->permanent.syncobj)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   } else {
      fence->permanent.type = ANV_FENCE_TYPE_BO;

      VkResult result = anv_bo_pool_alloc(&device->batch_bo_pool,
                                          &fence->permanent.bo.bo, 4096);
      if (result != VK_SUCCESS)
         return result;

      if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
         fence->permanent.bo.state = ANV_BO_FENCE_STATE_SIGNALED;
      } else {
         fence->permanent.bo.state = ANV_BO_FENCE_STATE_RESET;
      }
   }

   *pFence = anv_fence_to_handle(fence);

   return VK_SUCCESS;
}

static void
anv_fence_impl_cleanup(struct anv_device *device,
                       struct anv_fence_impl *impl)
{
   switch (impl->type) {
   case ANV_FENCE_TYPE_NONE:
      /* Dummy.  Nothing to do */
      break;

   case ANV_FENCE_TYPE_BO:
      anv_bo_pool_free(&device->batch_bo_pool, &impl->bo.bo);
      break;

   case ANV_FENCE_TYPE_SYNCOBJ:
      anv_gem_syncobj_destroy(device, impl->syncobj);
      break;

   case ANV_FENCE_TYPE_WSI:
      impl->fence_wsi->destroy(impl->fence_wsi);
      break;

   default:
      unreachable("Invalid fence type");
   }

   impl->type = ANV_FENCE_TYPE_NONE;
}

void anv_DestroyFence(
    VkDevice                                    _device,
    VkFence                                     _fence,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   if (!fence)
      return;

   anv_fence_impl_cleanup(device, &fence->temporary);
   anv_fence_impl_cleanup(device, &fence->permanent);

   vk_free2(&device->alloc, pAllocator, fence);
}

VkResult anv_ResetFences(
    VkDevice                                    _device,
    uint32_t                                    fenceCount,
    const VkFence*                              pFences)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);

      /* From the Vulkan 1.0.53 spec:
       *
       *    "If any member of pFences currently has its payload imported with
       *    temporary permanence, that fence’s prior permanent payload is
       *    first restored. The remaining operations described therefore
       *    operate on the restored payload."
       */
      if (fence->temporary.type != ANV_FENCE_TYPE_NONE)
         anv_fence_impl_cleanup(device, &fence->temporary);

      struct anv_fence_impl *impl = &fence->permanent;

      switch (impl->type) {
      case ANV_FENCE_TYPE_BO:
         impl->bo.state = ANV_BO_FENCE_STATE_RESET;
         break;

      case ANV_FENCE_TYPE_SYNCOBJ:
         anv_gem_syncobj_reset(device, impl->syncobj);
         break;

      default:
         unreachable("Invalid fence type");
      }
   }

   return VK_SUCCESS;
}

VkResult anv_GetFenceStatus(
    VkDevice                                    _device,
    VkFence                                     _fence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   if (anv_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   struct anv_fence_impl *impl =
      fence->temporary.type != ANV_FENCE_TYPE_NONE ?
      &fence->temporary : &fence->permanent;

   switch (impl->type) {
   case ANV_FENCE_TYPE_BO:
      /* BO fences don't support import/export */
      assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);
      switch (impl->bo.state) {
      case ANV_BO_FENCE_STATE_RESET:
         /* If it hasn't even been sent off to the GPU yet, it's not ready */
         return VK_NOT_READY;

      case ANV_BO_FENCE_STATE_SIGNALED:
         /* It's been signaled, return success */
         return VK_SUCCESS;

      case ANV_BO_FENCE_STATE_SUBMITTED: {
         VkResult result = anv_device_bo_busy(device, &impl->bo.bo);
         if (result == VK_SUCCESS) {
            impl->bo.state = ANV_BO_FENCE_STATE_SIGNALED;
            return VK_SUCCESS;
         } else {
            return result;
         }
      }
      default:
         unreachable("Invalid fence status");
      }

   case ANV_FENCE_TYPE_SYNCOBJ: {
      int ret = anv_gem_syncobj_wait(device, &impl->syncobj, 1, 0, true);
      if (ret == -1) {
         if (errno == ETIME) {
            return VK_NOT_READY;
         } else {
            /* We don't know the real error. */
            return anv_device_set_lost(device, "drm_syncobj_wait failed: %m");
         }
      } else {
         return VK_SUCCESS;
      }
   }

   default:
      unreachable("Invalid fence type");
   }
}

#define NSEC_PER_SEC 1000000000
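/* Maximum value representable by a signed integer type of the given size. */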
#define INT_TYPE_MAX(type) ((1ull << (sizeof(type) * 8 - 1)) - 1)

static uint64_t
gettime_ns(void)
{
   struct timespec current;
   clock_gettime(CLOCK_MONOTONIC, &current);
   return (uint64_t)current.tv_sec * NSEC_PER_SEC + current.tv_nsec;
}

static uint64_t anv_get_absolute_timeout(uint64_t timeout)
{
   if (timeout == 0)
      return 0;
   uint64_t current_time = gettime_ns();
   uint64_t max_timeout = (uint64_t) INT64_MAX - current_time;

   timeout = MIN2(max_timeout, timeout);

   return (current_time + timeout);
}

static int64_t anv_get_relative_timeout(uint64_t abs_timeout)
{
   uint64_t now = gettime_ns();

   /* We don't want negative timeouts.
    *
    * DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and is
    * supposed to block indefinitely for timeouts < 0.  Unfortunately,
    * this was broken for a couple of kernel releases.  Since there's
    * no way to know whether or not the kernel we're using is one of
    * the broken ones, the best we can do is to clamp the timeout to
    * INT64_MAX.  This limits the maximum timeout from 584 years to
    * 292 years - likely not a big deal.
    */
   if (abs_timeout < now)
      return 0;

   uint64_t rel_timeout = abs_timeout - now;
   if (rel_timeout > (uint64_t) INT64_MAX)
      rel_timeout = INT64_MAX;

   return rel_timeout;
}

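/* Wait on a set of fences that are all backed by DRM syncobjs.  This lets
 * the kernel handle "wait all" vs. "wait any" in a single syncobj wait call.
 */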
static VkResult
anv_wait_for_syncobj_fences(struct anv_device *device,
                            uint32_t fenceCount,
                            const VkFence *pFences,
                            bool waitAll,
                            uint64_t abs_timeout_ns)
{
   uint32_t *syncobjs = vk_zalloc(&device->alloc,
                                  sizeof(*syncobjs) * fenceCount, 8,
                                  VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (!syncobjs)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      assert(fence->permanent.type == ANV_FENCE_TYPE_SYNCOBJ);

      struct anv_fence_impl *impl =
         fence->temporary.type != ANV_FENCE_TYPE_NONE ?
         &fence->temporary : &fence->permanent;

      assert(impl->type == ANV_FENCE_TYPE_SYNCOBJ);
      syncobjs[i] = impl->syncobj;
   }

   /* The gem_syncobj_wait ioctl may return early due to an inherent
    * limitation in the way it computes timeouts.  Loop until we've actually
    * passed the timeout.
    */
   int ret;
   do {
      ret = anv_gem_syncobj_wait(device, syncobjs, fenceCount,
                                 abs_timeout_ns, waitAll);
   } while (ret == -1 && errno == ETIME && gettime_ns() < abs_timeout_ns);

   vk_free(&device->alloc, syncobjs);

   if (ret == -1) {
      if (errno == ETIME) {
         return VK_TIMEOUT;
      } else {
         /* We don't know the real error. */
         return anv_device_set_lost(device, "drm_syncobj_wait failed: %m");
      }
   } else {
      return VK_SUCCESS;
   }
}

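/* Wait on a set of fences that are all backed by GEM BOs.  There is no
 * single kernel primitive covering multiple BO fences, so this polls each
 * fence, waiting on submitted BOs and sleeping on the device's queue-submit
 * condition variable for fences that haven't been submitted yet.
 */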
static VkResult
anv_wait_for_bo_fences(struct anv_device *device,
                       uint32_t fenceCount,
                       const VkFence *pFences,
                       bool waitAll,
                       uint64_t abs_timeout_ns)
{
   VkResult result = VK_SUCCESS;
   uint32_t pending_fences = fenceCount;
   while (pending_fences) {
      pending_fences = 0;
      bool signaled_fences = false;
      for (uint32_t i = 0; i < fenceCount; i++) {
         ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);

         /* This function assumes that all fences are BO fences and that they
          * have no temporary state.  Since BO fences will never be exported,
          * this should be a safe assumption.
          */
         assert(fence->permanent.type == ANV_FENCE_TYPE_BO);
         assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);
         struct anv_fence_impl *impl = &fence->permanent;

         switch (impl->bo.state) {
         case ANV_BO_FENCE_STATE_RESET:
            /* This fence hasn't been submitted yet, we'll catch it the next
             * time around.  Yes, this may mean we dead-loop but, short of
             * lots of locking and a condition variable, there's not much that
             * we can do about that.
             */
            pending_fences++;
            continue;

         case ANV_BO_FENCE_STATE_SIGNALED:
            /* This fence is not pending.  If waitAll isn't set, we can return
             * early.  Otherwise, we have to keep going.
             */
            if (!waitAll) {
               result = VK_SUCCESS;
               goto done;
            }
            continue;

         case ANV_BO_FENCE_STATE_SUBMITTED:
            /* These are the fences we really care about.  Go ahead and wait
             * on it until we hit a timeout.
             */
            result = anv_device_wait(device, &impl->bo.bo,
                                     anv_get_relative_timeout(abs_timeout_ns));
            switch (result) {
            case VK_SUCCESS:
               impl->bo.state = ANV_BO_FENCE_STATE_SIGNALED;
               signaled_fences = true;
               if (!waitAll)
                  goto done;
               break;

            case VK_TIMEOUT:
               goto done;

            default:
               return result;
            }
         }
      }

      if (pending_fences && !signaled_fences) {
         /* If we've hit this then someone decided to vkWaitForFences before
          * they've actually submitted any of them to a queue.  This is a
          * fairly pessimal case, so it's ok to lock here and use a standard
          * pthreads condition variable.
          */
         pthread_mutex_lock(&device->mutex);

         /* It's possible that some of the fences have changed state since the
          * last time we checked.  Now that we have the lock, check for
          * pending fences again and don't wait if it's changed.
          */
         uint32_t now_pending_fences = 0;
         for (uint32_t i = 0; i < fenceCount; i++) {
            ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
            if (fence->permanent.bo.state == ANV_BO_FENCE_STATE_RESET)
               now_pending_fences++;
         }
         assert(now_pending_fences <= pending_fences);

         if (now_pending_fences == pending_fences) {
            struct timespec abstime = {
               .tv_sec = abs_timeout_ns / NSEC_PER_SEC,
               .tv_nsec = abs_timeout_ns % NSEC_PER_SEC,
            };

            MAYBE_UNUSED int ret;
            ret = pthread_cond_timedwait(&device->queue_submit,
                                         &device->mutex, &abstime);
            assert(ret != EINVAL);
            if (gettime_ns() >= abs_timeout_ns) {
               pthread_mutex_unlock(&device->mutex);
               result = VK_TIMEOUT;
               goto done;
            }
         }

         pthread_mutex_unlock(&device->mutex);
      }
   }

done:
   if (anv_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   return result;
}

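/* Waiting on a WSI fence is delegated entirely to the window-system
 * integration layer's own wait implementation.
 */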
static VkResult
anv_wait_for_wsi_fence(struct anv_device *device,
                       const VkFence _fence,
                       uint64_t abs_timeout)
{
   ANV_FROM_HANDLE(anv_fence, fence, _fence);
   struct anv_fence_impl *impl = &fence->permanent;

   return impl->fence_wsi->wait(impl->fence_wsi, abs_timeout);
}

static VkResult
anv_wait_for_fences(struct anv_device *device,
                    uint32_t fenceCount,
                    const VkFence *pFences,
                    bool waitAll,
                    uint64_t abs_timeout)
{
   VkResult result = VK_SUCCESS;

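   /* If we have to wait for all fences (or there's only one), we can simply
    * wait on each fence in turn against the same absolute timeout.
    */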
   if (fenceCount <= 1 || waitAll) {
      for (uint32_t i = 0; i < fenceCount; i++) {
         ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
         switch (fence->permanent.type) {
         case ANV_FENCE_TYPE_BO:
            result = anv_wait_for_bo_fences(device, 1, &pFences[i],
                                            true, abs_timeout);
            break;
         case ANV_FENCE_TYPE_SYNCOBJ:
            result = anv_wait_for_syncobj_fences(device, 1, &pFences[i],
                                                 true, abs_timeout);
            break;
         case ANV_FENCE_TYPE_WSI:
            result = anv_wait_for_wsi_fence(device, pFences[i], abs_timeout);
            break;
         case ANV_FENCE_TYPE_NONE:
            result = VK_SUCCESS;
            break;
         }
         if (result != VK_SUCCESS)
            return result;
      }
   } else {
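      /* "Wait any" on a mixed set of fence types: poll each fence with a
       * zero timeout until one of them signals or the deadline passes.
       */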
      do {
         for (uint32_t i = 0; i < fenceCount; i++) {
            if (anv_wait_for_fences(device, 1, &pFences[i], true, 0) == VK_SUCCESS)
               return VK_SUCCESS;
         }
      } while (gettime_ns() < abs_timeout);
      result = VK_TIMEOUT;
   }
   return result;
}

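/* Returns true if every fence's permanent payload is a DRM syncobj. */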
static bool anv_all_fences_syncobj(uint32_t fenceCount, const VkFence *pFences)
{
   for (uint32_t i = 0; i < fenceCount; ++i) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      if (fence->permanent.type != ANV_FENCE_TYPE_SYNCOBJ)
         return false;
   }
   return true;
}

static bool anv_all_fences_bo(uint32_t fenceCount, const VkFence *pFences)
{
   for (uint32_t i = 0; i < fenceCount; ++i) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      if (fence->permanent.type != ANV_FENCE_TYPE_BO)
         return false;
   }
   return true;
}

VkResult anv_WaitForFences(
    VkDevice                                    _device,
    uint32_t                                    fenceCount,
    const VkFence*                              pFences,
    VkBool32                                    waitAll,
    uint64_t                                    timeout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   if (anv_device_is_lost(device))
      return VK_ERROR_DEVICE_LOST;

   uint64_t abs_timeout = anv_get_absolute_timeout(timeout);
   if (anv_all_fences_syncobj(fenceCount, pFences)) {
      return anv_wait_for_syncobj_fences(device, fenceCount, pFences,
                                         waitAll, abs_timeout);
   } else if (anv_all_fences_bo(fenceCount, pFences)) {
      return anv_wait_for_bo_fences(device, fenceCount, pFences,
                                    waitAll, abs_timeout);
   } else {
      return anv_wait_for_fences(device, fenceCount, pFences,
                                 waitAll, abs_timeout);
   }
}

void anv_GetPhysicalDeviceExternalFenceProperties(
    VkPhysicalDevice                            physicalDevice,
    const VkPhysicalDeviceExternalFenceInfo*    pExternalFenceInfo,
    VkExternalFenceProperties*                  pExternalFenceProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);

   switch (pExternalFenceInfo->handleType) {
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT:
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT:
      if (device->has_syncobj_wait) {
         pExternalFenceProperties->exportFromImportedHandleTypes =
            VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT |
            VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
         pExternalFenceProperties->compatibleHandleTypes =
            VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT |
            VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
         pExternalFenceProperties->externalFenceFeatures =
            VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT |
            VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT;
         return;
      }
      break;

   default:
      break;
   }

   pExternalFenceProperties->exportFromImportedHandleTypes = 0;
   pExternalFenceProperties->compatibleHandleTypes = 0;
   pExternalFenceProperties->externalFenceFeatures = 0;
}

VkResult anv_ImportFenceFdKHR(
    VkDevice                                    _device,
    const VkImportFenceFdInfoKHR*               pImportFenceFdInfo)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, pImportFenceFdInfo->fence);
   int fd = pImportFenceFdInfo->fd;

   assert(pImportFenceFdInfo->sType ==
          VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR);

   struct anv_fence_impl new_impl = {
      .type = ANV_FENCE_TYPE_NONE,
   };

   switch (pImportFenceFdInfo->handleType) {
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT:
      new_impl.type = ANV_FENCE_TYPE_SYNCOBJ;

      new_impl.syncobj = anv_gem_syncobj_fd_to_handle(device, fd);
      if (!new_impl.syncobj)
         return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);

      break;

   case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT:
      /* Sync files are a bit tricky.  Because we want to continue using the
       * syncobj implementation of WaitForFences, we don't use the sync file
       * directly but instead import it into a syncobj.
       */
      new_impl.type = ANV_FENCE_TYPE_SYNCOBJ;

      new_impl.syncobj = anv_gem_syncobj_create(device, 0);
      if (!new_impl.syncobj)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      if (anv_gem_syncobj_import_sync_file(device, new_impl.syncobj, fd)) {
         anv_gem_syncobj_destroy(device, new_impl.syncobj);
         return vk_errorf(device->instance, NULL,
                          VK_ERROR_INVALID_EXTERNAL_HANDLE,
                          "syncobj sync file import failed: %m");
      }
      break;

   default:
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   /* From the Vulkan 1.0.53 spec:
    *
    *    "Importing a fence payload from a file descriptor transfers
    *    ownership of the file descriptor from the application to the
    *    Vulkan implementation. The application must not perform any
    *    operations on the file descriptor after a successful import."
    *
    * If the import fails, we leave the file descriptor open.
    */
   close(fd);

   if (pImportFenceFdInfo->flags & VK_FENCE_IMPORT_TEMPORARY_BIT) {
      anv_fence_impl_cleanup(device, &fence->temporary);
      fence->temporary = new_impl;
   } else {
      anv_fence_impl_cleanup(device, &fence->permanent);
      fence->permanent = new_impl;
   }

   return VK_SUCCESS;
}

VkResult anv_GetFenceFdKHR(
    VkDevice                                    _device,
    const VkFenceGetFdInfoKHR*                  pGetFdInfo,
    int*                                        pFd)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, pGetFdInfo->fence);

   assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR);

   struct anv_fence_impl *impl =
      fence->temporary.type != ANV_FENCE_TYPE_NONE ?
      &fence->temporary : &fence->permanent;

   assert(impl->type == ANV_FENCE_TYPE_SYNCOBJ);
   switch (pGetFdInfo->handleType) {
   case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT: {
      int fd = anv_gem_syncobj_handle_to_fd(device, impl->syncobj);
      if (fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);

      *pFd = fd;
      break;
   }

   case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT: {
      int fd = anv_gem_syncobj_export_sync_file(device, impl->syncobj);
      if (fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);

      *pFd = fd;
      break;
   }

   default:
      unreachable("Invalid fence export handle type");
   }

   /* From the Vulkan 1.0.53 spec:
    *
    *    "Export operations have the same transference as the specified handle
    *    type’s import operations. [...] If the fence was using a
    *    temporarily imported payload, the fence’s prior permanent payload
    *    will be restored."
    */
   if (impl == &fence->temporary)
      anv_fence_impl_cleanup(device, impl);

   return VK_SUCCESS;
}

// Queue semaphore functions

VkResult anv_CreateSemaphore(
    VkDevice                                    _device,
    const VkSemaphoreCreateInfo*                pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkSemaphore*                                pSemaphore)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_semaphore *semaphore;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO);

   semaphore = vk_alloc2(&device->alloc, pAllocator, sizeof(*semaphore), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (semaphore == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   const VkExportSemaphoreCreateInfo *export =
      vk_find_struct_const(pCreateInfo->pNext, EXPORT_SEMAPHORE_CREATE_INFO);
   VkExternalSemaphoreHandleTypeFlags handleTypes =
      export ? export->handleTypes : 0;

   if (handleTypes == 0) {
      /* The DRM execbuffer ioctl always executes in-order so long as you
       * stay on the same ring.  Since we don't expose the blit engine as a
       * DMA queue, a dummy no-op semaphore is a perfectly valid
       * implementation.
       */
      semaphore->permanent.type = ANV_SEMAPHORE_TYPE_DUMMY;
   } else if (handleTypes & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT) {
      assert(handleTypes == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT);
      if (device->instance->physicalDevice.has_syncobj) {
         semaphore->permanent.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;
         semaphore->permanent.syncobj = anv_gem_syncobj_create(device, 0);
         if (!semaphore->permanent.syncobj) {
            vk_free2(&device->alloc, pAllocator, semaphore);
            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
         }
      } else {
         semaphore->permanent.type = ANV_SEMAPHORE_TYPE_BO;
         VkResult result = anv_bo_cache_alloc(device, &device->bo_cache,
                                              4096, ANV_BO_EXTERNAL,
                                              &semaphore->permanent.bo);
         if (result != VK_SUCCESS) {
            vk_free2(&device->alloc, pAllocator, semaphore);
            return result;
         }

         /* If we're going to use this as a fence, we need to *not* have the
          * EXEC_OBJECT_ASYNC bit set.
          */
         assert(!(semaphore->permanent.bo->flags & EXEC_OBJECT_ASYNC));
      }
   } else if (handleTypes & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) {
      assert(handleTypes == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT);

      semaphore->permanent.type = ANV_SEMAPHORE_TYPE_SYNC_FILE;
      semaphore->permanent.fd = -1;
   } else {
      assert(!"Unknown handle type");
      vk_free2(&device->alloc, pAllocator, semaphore);
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   semaphore->temporary.type = ANV_SEMAPHORE_TYPE_NONE;

   *pSemaphore = anv_semaphore_to_handle(semaphore);

   return VK_SUCCESS;
}

static void
anv_semaphore_impl_cleanup(struct anv_device *device,
                           struct anv_semaphore_impl *impl)
{
   switch (impl->type) {
   case ANV_SEMAPHORE_TYPE_NONE:
   case ANV_SEMAPHORE_TYPE_DUMMY:
      /* Dummy.  Nothing to do */
      break;

   case ANV_SEMAPHORE_TYPE_BO:
      anv_bo_cache_release(device, &device->bo_cache, impl->bo);
      break;

   case ANV_SEMAPHORE_TYPE_SYNC_FILE:
      close(impl->fd);
      break;

   case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
      anv_gem_syncobj_destroy(device, impl->syncobj);
      break;

   default:
      unreachable("Invalid semaphore type");
   }

   impl->type = ANV_SEMAPHORE_TYPE_NONE;
}

void
anv_semaphore_reset_temporary(struct anv_device *device,
                              struct anv_semaphore *semaphore)
{
   if (semaphore->temporary.type == ANV_SEMAPHORE_TYPE_NONE)
      return;

   anv_semaphore_impl_cleanup(device, &semaphore->temporary);
}

void anv_DestroySemaphore(
    VkDevice                                    _device,
    VkSemaphore                                 _semaphore,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, _semaphore);

   if (semaphore == NULL)
      return;

   anv_semaphore_impl_cleanup(device, &semaphore->temporary);
   anv_semaphore_impl_cleanup(device, &semaphore->permanent);

   vk_free2(&device->alloc, pAllocator, semaphore);
}

void anv_GetPhysicalDeviceExternalSemaphoreProperties(
    VkPhysicalDevice                            physicalDevice,
    const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
    VkExternalSemaphoreProperties*               pExternalSemaphoreProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);

   switch (pExternalSemaphoreInfo->handleType) {
   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
      pExternalSemaphoreProperties->exportFromImportedHandleTypes =
         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
      pExternalSemaphoreProperties->compatibleHandleTypes =
         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
      pExternalSemaphoreProperties->externalSemaphoreFeatures =
         VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
         VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
      return;

   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
      if (device->has_exec_fence) {
         pExternalSemaphoreProperties->exportFromImportedHandleTypes =
            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
         pExternalSemaphoreProperties->compatibleHandleTypes =
            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
         pExternalSemaphoreProperties->externalSemaphoreFeatures =
            VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
            VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
         return;
      }
      break;

   default:
      break;
   }

   pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
   pExternalSemaphoreProperties->compatibleHandleTypes = 0;
   pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
}

VkResult anv_ImportSemaphoreFdKHR(
    VkDevice                                    _device,
    const VkImportSemaphoreFdInfoKHR*           pImportSemaphoreFdInfo)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, pImportSemaphoreFdInfo->semaphore);
   int fd = pImportSemaphoreFdInfo->fd;

   struct anv_semaphore_impl new_impl = {
      .type = ANV_SEMAPHORE_TYPE_NONE,
   };

   switch (pImportSemaphoreFdInfo->handleType) {
   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
      if (device->instance->physicalDevice.has_syncobj) {
         new_impl.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;

         new_impl.syncobj = anv_gem_syncobj_fd_to_handle(device, fd);
         if (!new_impl.syncobj)
            return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
      } else {
         new_impl.type = ANV_SEMAPHORE_TYPE_BO;

         VkResult result = anv_bo_cache_import(device, &device->bo_cache,
                                               fd, ANV_BO_EXTERNAL,
                                               &new_impl.bo);
         if (result != VK_SUCCESS)
            return result;

         if (new_impl.bo->size < 4096) {
            anv_bo_cache_release(device, &device->bo_cache, new_impl.bo);
            return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
         }

         /* If we're going to use this as a fence, we need to *not* have the
          * EXEC_OBJECT_ASYNC bit set.
          */
         assert(!(new_impl.bo->flags & EXEC_OBJECT_ASYNC));
      }

      /* From the Vulkan spec:
       *
       *    "Importing semaphore state from a file descriptor transfers
       *    ownership of the file descriptor from the application to the
       *    Vulkan implementation. The application must not perform any
       *    operations on the file descriptor after a successful import."
       *
       * If the import fails, we leave the file descriptor open.
       */
      close(fd);
      break;

   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
      new_impl = (struct anv_semaphore_impl) {
         .type = ANV_SEMAPHORE_TYPE_SYNC_FILE,
         .fd = fd,
      };
      break;

   default:
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT) {
      anv_semaphore_impl_cleanup(device, &semaphore->temporary);
      semaphore->temporary = new_impl;
   } else {
      anv_semaphore_impl_cleanup(device, &semaphore->permanent);
      semaphore->permanent = new_impl;
   }

   return VK_SUCCESS;
}

VkResult anv_GetSemaphoreFdKHR(
    VkDevice                                    _device,
    const VkSemaphoreGetFdInfoKHR*              pGetFdInfo,
    int*                                        pFd)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_semaphore, semaphore, pGetFdInfo->semaphore);
   VkResult result;
   int fd;

   assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR);

   struct anv_semaphore_impl *impl =
      semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
      &semaphore->temporary : &semaphore->permanent;

   switch (impl->type) {
   case ANV_SEMAPHORE_TYPE_BO:
      result = anv_bo_cache_export(device, &device->bo_cache, impl->bo, pFd);
      if (result != VK_SUCCESS)
         return result;
      break;

   case ANV_SEMAPHORE_TYPE_SYNC_FILE:
      /* There are two reasons why this could happen:
       *
       *  1) The user is trying to export without submitting something that
       *     signals the semaphore.  If this is the case, it's their bug so
       *     what we return here doesn't matter.
       *
       *  2) The kernel didn't give us a file descriptor.  The most likely
       *     reason for this is running out of file descriptors.
       */
      if (impl->fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);

      *pFd = impl->fd;

      /* From the Vulkan 1.0.53 spec:
       *
       *    "...exporting a semaphore payload to a handle with copy
       *    transference has the same side effects on the source
       *    semaphore’s payload as executing a semaphore wait operation."
       *
       * In other words, it may still be a SYNC_FD semaphore, but it's now
       * considered to have been waited on and no longer has a sync file
       * attached.
       */
      impl->fd = -1;
      return VK_SUCCESS;

   case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
      fd = anv_gem_syncobj_handle_to_fd(device, impl->syncobj);
      if (fd < 0)
         return vk_error(VK_ERROR_TOO_MANY_OBJECTS);
      *pFd = fd;
      break;

   default:
      return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   }

   /* From the Vulkan 1.0.53 spec:
    *
    *    "Export operations have the same transference as the specified handle
    *    type’s import operations. [...] If the semaphore was using a
    *    temporarily imported payload, the semaphore’s prior permanent payload
    *    will be restored."
    */
   if (impl == &semaphore->temporary)
      anv_semaphore_impl_cleanup(device, impl);

   return VK_SUCCESS;
}