/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 */

#ifndef VN_RENDERER_H
#define VN_RENDERER_H

#include "vn_common.h"

struct vn_renderer_shmem {
   struct vn_refcount refcount;

   uint32_t res_id;
   size_t mmap_size; /* for internal use only (i.e., munmap) */
   void *mmap_ptr;
};
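
/*
 * Illustrative sketch (not part of the API): a typical shmem lifecycle using
 * the inline helpers declared later in this header.  The shmem is CPU-mapped
 * on creation and stays mapped until the last reference is dropped.
 * renderer, data and data_size are assumed to exist in the caller.
 *
 *    struct vn_renderer_shmem *shmem =
 *       vn_renderer_shmem_create(renderer, 4096);
 *    if (shmem) {
 *       memcpy(shmem->mmap_ptr, data, data_size);  // CPU access via mmap_ptr
 *       vn_renderer_shmem_ref(renderer, shmem);    // take an extra reference
 *       vn_renderer_shmem_unref(renderer, shmem);
 *       vn_renderer_shmem_unref(renderer, shmem);  // last unref destroys it
 *    }
 */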

struct vn_renderer_bo {
   struct vn_refcount refcount;

   uint32_t res_id;
   /* for internal use only */
   size_t mmap_size;
   void *mmap_ptr;
};

/*
 * A sync consists of a uint64_t counter.  The counter can be updated by the
 * CPU or by the GPU.  It can also be waited on by the CPU or by the GPU
 * until it reaches certain values.
 *
 * This is modeled after the timeline VkSemaphore rather than the timeline
 * drm_syncobj.  The main difference is that a drm_syncobj can have the
 * unsignaled value 0.
 */
struct vn_renderer_sync {
   uint32_t sync_id;
};
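
/*
 * Illustrative sketch (not part of the API): the counter semantics, using
 * the inline helpers declared later in this header.  renderer is assumed to
 * exist in the caller.
 *
 *    struct vn_renderer_sync *sync;
 *    if (vn_renderer_sync_create(renderer, 0, 0, &sync) != VK_SUCCESS)
 *       return;
 *
 *    // the CPU signals value 1; the GPU could do the same via a submit
 *    // whose batch lists the sync in syncs[]/sync_values[]
 *    vn_renderer_sync_write(renderer, sync, 1);
 *
 *    uint64_t val;
 *    vn_renderer_sync_read(renderer, sync, &val);   // val >= 1
 *
 *    vn_renderer_sync_destroy(renderer, sync);
 */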

struct vn_renderer_info {
   struct {
      uint16_t vendor_id;
      uint16_t device_id;

      bool has_bus_info;
      uint16_t domain;
      uint8_t bus;
      uint8_t device;
      uint8_t function;
   } pci;

   bool has_dma_buf_import;
   bool has_cache_management;
   bool has_external_sync;
   bool has_implicit_fencing;

   uint32_t max_sync_queue_count;

   /* hw capset */
   uint32_t wire_format_version;
   uint32_t vk_xml_version;
   uint32_t vk_ext_command_serialization_spec_version;
   uint32_t vk_mesa_venus_protocol_spec_version;
};

struct vn_renderer_submit_batch {
   const void *cs_data;
   size_t cs_size;

   /*
    * Submit cs to the virtual sync queue identified by sync_queue_index.
    * The virtual queue is assumed to be associated with the physical VkQueue
    * identified by vk_queue_id.  After execution completes on the VkQueue,
    * the virtual sync queue is signaled.
    *
    * sync_queue_index must be less than max_sync_queue_count.
    *
    * vk_queue_id specifies the object id of a VkQueue.
    *
    * When sync_queue_cpu is true, the batch targets the special CPU sync
    * queue, and sync_queue_index/vk_queue_id are ignored.  TODO revisit this
    * later.
    */
   uint32_t sync_queue_index;
   bool sync_queue_cpu;
   vn_object_id vk_queue_id;

   /* syncs to update when the virtual sync queue is signaled */
   struct vn_renderer_sync *const *syncs;
   /* TODO allow NULL when syncs are all binary? */
   const uint64_t *sync_values;
   uint32_t sync_count;
};
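
/*
 * Illustrative sketch (not part of the API): filling a batch that targets
 * virtual sync queue 0 of the VkQueue whose object id is queue_id, and that
 * updates one timeline sync to value 2 once the VkQueue finishes.  cs_data,
 * cs_size, queue_id and sync are assumed to exist in the caller.
 *
 *    const struct vn_renderer_submit_batch batch = {
 *       .cs_data = cs_data,
 *       .cs_size = cs_size,
 *       .sync_queue_index = 0,
 *       .vk_queue_id = queue_id,
 *       .syncs = &sync,
 *       .sync_values = &(const uint64_t){ 2 },
 *       .sync_count = 1,
 *    };
 */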

struct vn_renderer_submit {
   /* BOs to pin and to fence implicitly
    *
    * TODO track all bos and automatically pin them.  We don't do it yet
    * because each vn_command_buffer owns a bo.  We can probably make do by
    * returning the bos to a bo cache and excluding the bo cache from
    * pinning.
    */
   struct vn_renderer_bo *const *bos;
   uint32_t bo_count;

   const struct vn_renderer_submit_batch *batches;
   uint32_t batch_count;
};

struct vn_renderer_wait {
   bool wait_any;
   uint64_t timeout;

   struct vn_renderer_sync *const *syncs;
   /* TODO allow NULL when syncs are all binary? */
   const uint64_t *sync_values;
   uint32_t sync_count;
};
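
/*
 * Illustrative sketch (not part of the API): waiting until either of two
 * syncs reaches its target value.  sync_a, sync_b and renderer are assumed
 * to exist in the caller; the timeout is assumed to be in nanoseconds.
 *
 *    struct vn_renderer_sync *syncs[2] = { sync_a, sync_b };
 *    const uint64_t sync_values[2] = { 1, 10 };
 *    const struct vn_renderer_wait wait = {
 *       .wait_any = true,
 *       .timeout = 1000ull * 1000 * 1000,
 *       .syncs = syncs,
 *       .sync_values = sync_values,
 *       .sync_count = 2,
 *    };
 *    VkResult result = vn_renderer_wait(renderer, &wait);
 *    // result is VK_SUCCESS or VK_TIMEOUT unless the wait failed
 */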

struct vn_renderer_ops {
   void (*destroy)(struct vn_renderer *renderer,
                   const VkAllocationCallbacks *alloc);

   void (*get_info)(struct vn_renderer *renderer,
                    struct vn_renderer_info *info);

   VkResult (*submit)(struct vn_renderer *renderer,
                      const struct vn_renderer_submit *submit);

   /*
    * On success, returns VK_SUCCESS or VK_TIMEOUT.  On failure, returns
    * VK_ERROR_DEVICE_LOST or an out-of-device/host-memory error.
    */
   VkResult (*wait)(struct vn_renderer *renderer,
                    const struct vn_renderer_wait *wait);
};

struct vn_renderer_shmem_ops {
   struct vn_renderer_shmem *(*create)(struct vn_renderer *renderer,
                                       size_t size);
   void (*destroy)(struct vn_renderer *renderer,
                   struct vn_renderer_shmem *shmem);
};

struct vn_renderer_bo_ops {
   VkResult (*create_from_device_memory)(
      struct vn_renderer *renderer,
      VkDeviceSize size,
      vn_object_id mem_id,
      VkMemoryPropertyFlags flags,
      VkExternalMemoryHandleTypeFlags external_handles,
      struct vn_renderer_bo **out_bo);

   VkResult (*create_from_dma_buf)(struct vn_renderer *renderer,
                                   VkDeviceSize size,
                                   int fd,
                                   VkMemoryPropertyFlags flags,
                                   struct vn_renderer_bo **out_bo);

   bool (*destroy)(struct vn_renderer *renderer, struct vn_renderer_bo *bo);

   int (*export_dma_buf)(struct vn_renderer *renderer,
                         struct vn_renderer_bo *bo);

   /* map is not thread-safe */
   void *(*map)(struct vn_renderer *renderer, struct vn_renderer_bo *bo);

   void (*flush)(struct vn_renderer *renderer,
                 struct vn_renderer_bo *bo,
                 VkDeviceSize offset,
                 VkDeviceSize size);
   void (*invalidate)(struct vn_renderer *renderer,
                      struct vn_renderer_bo *bo,
                      VkDeviceSize offset,
                      VkDeviceSize size);
};
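
/*
 * Illustrative sketch (not part of the API): CPU access to a mappable bo via
 * the inline helpers declared later in this header.  Presumably, for
 * non-coherent memory, the caller flushes after a CPU write and invalidates
 * before a CPU read; a renderer may turn these into no-ops when the memory
 * is coherent.  renderer, bo, data and data_size are assumed to exist in the
 * caller.
 *
 *    void *ptr = vn_renderer_bo_map(renderer, bo);   // not thread-safe
 *    if (ptr) {
 *       memcpy(ptr, data, data_size);                       // CPU write
 *       vn_renderer_bo_flush(renderer, bo, 0, data_size);
 *
 *       vn_renderer_bo_invalidate(renderer, bo, 0, data_size);
 *       memcpy(data, ptr, data_size);                       // CPU read
 *    }
 */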

enum vn_renderer_sync_flags {
   VN_RENDERER_SYNC_SHAREABLE = 1u << 0,
   VN_RENDERER_SYNC_BINARY = 1u << 1,
};

struct vn_renderer_sync_ops {
   VkResult (*create)(struct vn_renderer *renderer,
                      uint64_t initial_val,
                      uint32_t flags,
                      struct vn_renderer_sync **out_sync);

   VkResult (*create_from_syncobj)(struct vn_renderer *renderer,
                                   int fd,
                                   bool sync_file,
                                   struct vn_renderer_sync **out_sync);
   void (*destroy)(struct vn_renderer *renderer,
                   struct vn_renderer_sync *sync);

   int (*export_syncobj)(struct vn_renderer *renderer,
                         struct vn_renderer_sync *sync,
                         bool sync_file);

   /* reset the counter */
   VkResult (*reset)(struct vn_renderer *renderer,
                     struct vn_renderer_sync *sync,
                     uint64_t initial_val);

   /* read the current value from the counter */
   VkResult (*read)(struct vn_renderer *renderer,
                    struct vn_renderer_sync *sync,
                    uint64_t *val);

   /* write a new value (larger than the current one) to the counter */
   VkResult (*write)(struct vn_renderer *renderer,
                     struct vn_renderer_sync *sync,
                     uint64_t val);
};

struct vn_renderer {
   struct vn_renderer_ops ops;
   struct vn_renderer_shmem_ops shmem_ops;
   struct vn_renderer_bo_ops bo_ops;
   struct vn_renderer_sync_ops sync_ops;
};

VkResult
vn_renderer_create_virtgpu(struct vn_instance *instance,
                           const VkAllocationCallbacks *alloc,
                           struct vn_renderer **renderer);

VkResult
vn_renderer_create_vtest(struct vn_instance *instance,
                         const VkAllocationCallbacks *alloc,
                         struct vn_renderer **renderer);

static inline VkResult
vn_renderer_create(struct vn_instance *instance,
                   const VkAllocationCallbacks *alloc,
                   struct vn_renderer **renderer)
{
   if (VN_DEBUG(VTEST)) {
      VkResult result = vn_renderer_create_vtest(instance, alloc, renderer);
      if (result == VK_SUCCESS)
         return VK_SUCCESS;
   }

   return vn_renderer_create_virtgpu(instance, alloc, renderer);
}

static inline void
vn_renderer_destroy(struct vn_renderer *renderer,
                    const VkAllocationCallbacks *alloc)
{
   renderer->ops.destroy(renderer, alloc);
}

static inline void
vn_renderer_get_info(struct vn_renderer *renderer,
                     struct vn_renderer_info *info)
{
   renderer->ops.get_info(renderer, info);
}
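
/*
 * Illustrative sketch (not part of the API): bringing up a renderer, e.g.
 * during instance initialization, and querying its capabilities.  instance
 * and alloc are assumed to exist in the caller.
 *
 *    struct vn_renderer *renderer;
 *    VkResult result = vn_renderer_create(instance, alloc, &renderer);
 *    if (result != VK_SUCCESS)
 *       return result;
 *
 *    struct vn_renderer_info info;
 *    vn_renderer_get_info(renderer, &info);
 *    if (!info.has_dma_buf_import) {
 *       // e.g., skip advertising dma-buf-based external memory
 *    }
 *
 *    // ... later ...
 *    vn_renderer_destroy(renderer, alloc);
 */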

static inline VkResult
vn_renderer_submit(struct vn_renderer *renderer,
                   const struct vn_renderer_submit *submit)
{
   return renderer->ops.submit(renderer, submit);
}

static inline VkResult
vn_renderer_submit_simple(struct vn_renderer *renderer,
                          const void *cs_data,
                          size_t cs_size)
{
   const struct vn_renderer_submit submit = {
      .batches =
         &(const struct vn_renderer_submit_batch){
            .cs_data = cs_data,
            .cs_size = cs_size,
         },
      .batch_count = 1,
   };
   return vn_renderer_submit(renderer, &submit);
}

static inline VkResult
vn_renderer_wait(struct vn_renderer *renderer,
                 const struct vn_renderer_wait *wait)
{
   return renderer->ops.wait(renderer, wait);
}

static inline struct vn_renderer_shmem *
vn_renderer_shmem_create(struct vn_renderer *renderer, size_t size)
{
   struct vn_renderer_shmem *shmem =
      renderer->shmem_ops.create(renderer, size);
   if (shmem) {
      assert(vn_refcount_is_valid(&shmem->refcount));
      assert(shmem->res_id);
      assert(shmem->mmap_size >= size);
      assert(shmem->mmap_ptr);
   }

   return shmem;
}

static inline struct vn_renderer_shmem *
vn_renderer_shmem_ref(struct vn_renderer *renderer,
                      struct vn_renderer_shmem *shmem)
{
   vn_refcount_inc(&shmem->refcount);
   return shmem;
}

static inline void
vn_renderer_shmem_unref(struct vn_renderer *renderer,
                        struct vn_renderer_shmem *shmem)
{
   if (vn_refcount_dec(&shmem->refcount))
      renderer->shmem_ops.destroy(renderer, shmem);
}

static inline VkResult
vn_renderer_bo_create_from_device_memory(
   struct vn_renderer *renderer,
   VkDeviceSize size,
   vn_object_id mem_id,
   VkMemoryPropertyFlags flags,
   VkExternalMemoryHandleTypeFlags external_handles,
   struct vn_renderer_bo **out_bo)
{
   struct vn_renderer_bo *bo;
   VkResult result = renderer->bo_ops.create_from_device_memory(
      renderer, size, mem_id, flags, external_handles, &bo);
   if (result != VK_SUCCESS)
      return result;

   assert(vn_refcount_is_valid(&bo->refcount));
   assert(bo->res_id);
   assert(!bo->mmap_size || bo->mmap_size >= size);

   *out_bo = bo;
   return VK_SUCCESS;
}

static inline VkResult
vn_renderer_bo_create_from_dma_buf(struct vn_renderer *renderer,
                                   VkDeviceSize size,
                                   int fd,
                                   VkMemoryPropertyFlags flags,
                                   struct vn_renderer_bo **out_bo)
{
   struct vn_renderer_bo *bo;
   VkResult result =
      renderer->bo_ops.create_from_dma_buf(renderer, size, fd, flags, &bo);
   if (result != VK_SUCCESS)
      return result;

   assert(vn_refcount_is_valid(&bo->refcount));
   assert(bo->res_id);
   assert(!bo->mmap_size || bo->mmap_size >= size);

   *out_bo = bo;
   return VK_SUCCESS;
}

static inline struct vn_renderer_bo *
vn_renderer_bo_ref(struct vn_renderer *renderer, struct vn_renderer_bo *bo)
{
   vn_refcount_inc(&bo->refcount);
   return bo;
}

static inline bool
vn_renderer_bo_unref(struct vn_renderer *renderer, struct vn_renderer_bo *bo)
{
   if (vn_refcount_dec(&bo->refcount))
      return renderer->bo_ops.destroy(renderer, bo);
   return false;
}

static inline int
vn_renderer_bo_export_dma_buf(struct vn_renderer *renderer,
                              struct vn_renderer_bo *bo)
{
   return renderer->bo_ops.export_dma_buf(renderer, bo);
}

static inline void *
vn_renderer_bo_map(struct vn_renderer *renderer, struct vn_renderer_bo *bo)
{
   return renderer->bo_ops.map(renderer, bo);
}

static inline void
vn_renderer_bo_flush(struct vn_renderer *renderer,
                     struct vn_renderer_bo *bo,
                     VkDeviceSize offset,
                     VkDeviceSize size)
{
   renderer->bo_ops.flush(renderer, bo, offset, size);
}

static inline void
vn_renderer_bo_invalidate(struct vn_renderer *renderer,
                          struct vn_renderer_bo *bo,
                          VkDeviceSize offset,
                          VkDeviceSize size)
{
   renderer->bo_ops.invalidate(renderer, bo, offset, size);
}

static inline VkResult
vn_renderer_sync_create(struct vn_renderer *renderer,
                        uint64_t initial_val,
                        uint32_t flags,
                        struct vn_renderer_sync **out_sync)
{
   return renderer->sync_ops.create(renderer, initial_val, flags, out_sync);
}

static inline VkResult
vn_renderer_sync_create_from_syncobj(struct vn_renderer *renderer,
                                     int fd,
                                     bool sync_file,
                                     struct vn_renderer_sync **out_sync)
{
   return renderer->sync_ops.create_from_syncobj(renderer, fd, sync_file,
                                                 out_sync);
}

static inline void
vn_renderer_sync_destroy(struct vn_renderer *renderer,
                         struct vn_renderer_sync *sync)
{
   renderer->sync_ops.destroy(renderer, sync);
}

static inline int
vn_renderer_sync_export_syncobj(struct vn_renderer *renderer,
                                struct vn_renderer_sync *sync,
                                bool sync_file)
{
   return renderer->sync_ops.export_syncobj(renderer, sync, sync_file);
}

static inline VkResult
vn_renderer_sync_reset(struct vn_renderer *renderer,
                       struct vn_renderer_sync *sync,
                       uint64_t initial_val)
{
   return renderer->sync_ops.reset(renderer, sync, initial_val);
}

static inline VkResult
vn_renderer_sync_read(struct vn_renderer *renderer,
                      struct vn_renderer_sync *sync,
                      uint64_t *val)
{
   return renderer->sync_ops.read(renderer, sync, val);
}

static inline VkResult
vn_renderer_sync_write(struct vn_renderer *renderer,
                       struct vn_renderer_sync *sync,
                       uint64_t val)
{
   return renderer->sync_ops.write(renderer, sync, val);
}

static inline VkResult
vn_renderer_submit_simple_sync(struct vn_renderer *renderer,
                               const void *cs_data,
                               size_t cs_size)
{
   struct vn_renderer_sync *sync;
   VkResult result =
      vn_renderer_sync_create(renderer, 0, VN_RENDERER_SYNC_BINARY, &sync);
   if (result != VK_SUCCESS)
      return result;

   const struct vn_renderer_submit submit = {
      .batches =
         &(const struct vn_renderer_submit_batch){
            .cs_data = cs_data,
            .cs_size = cs_size,
            .sync_queue_cpu = true,
            .syncs = &sync,
            .sync_values = &(const uint64_t){ 1 },
            .sync_count = 1,
         },
      .batch_count = 1,
   };
   const struct vn_renderer_wait wait = {
      .timeout = UINT64_MAX,
      .syncs = &sync,
      .sync_values = &(const uint64_t){ 1 },
      .sync_count = 1,
   };

   result = vn_renderer_submit(renderer, &submit);
   if (result == VK_SUCCESS)
      result = vn_renderer_wait(renderer, &wait);

   vn_renderer_sync_destroy(renderer, sync);

   return result;
}

#endif /* VN_RENDERER_H */