/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 */

#ifndef VN_RENDERER_H
#define VN_RENDERER_H

#include "vn_common.h"

struct vn_renderer_shmem {
   struct vn_refcount refcount;

   uint32_t res_id;
   size_t mmap_size; /* for internal use only (i.e., munmap) */
   void *mmap_ptr;
};

struct vn_renderer_bo {
   struct vn_refcount refcount;

   uint32_t res_id;
   /* for internal use only */
   size_t mmap_size;
   void *mmap_ptr;
};

/*
 * A sync consists of a uint64_t counter.  The counter can be updated by the
 * CPU or by the GPU.  It can also be waited on by the CPU or by the GPU
 * until it reaches certain values.
 *
 * This is modeled after the timeline VkSemaphore rather than the timeline
 * drm_syncobj.  The main difference is that a drm_syncobj can have the
 * unsignaled value 0.
 */
struct vn_renderer_sync {
   uint32_t sync_id;
};
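
/*
 * A minimal usage sketch of the counter model, using the sync_ops wrappers
 * declared later in this header (a valid renderer is assumed and error
 * handling is omitted):
 *
 *    struct vn_renderer_sync *sync;
 *    vn_renderer_sync_create(renderer, 0, VN_RENDERER_SYNC_SHAREABLE, &sync);
 *    vn_renderer_sync_write(renderer, sync, 1);   // advance the counter
 *
 *    uint64_t val;
 *    vn_renderer_sync_read(renderer, sync, &val); // observe the counter
 *    vn_renderer_sync_destroy(renderer, sync);
 */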

struct vn_renderer_info {
   struct {
      uint16_t vendor_id;
      uint16_t device_id;

      bool has_bus_info;
      uint16_t domain;
      uint8_t bus;
      uint8_t device;
      uint8_t function;
   } pci;

   bool has_dma_buf_import;
   bool has_cache_management;
   bool has_external_sync;
   bool has_implicit_fencing;

   uint32_t max_sync_queue_count;

   /* hw capset */
   uint32_t wire_format_version;
   uint32_t vk_xml_version;
   uint32_t vk_ext_command_serialization_spec_version;
   uint32_t vk_mesa_venus_protocol_spec_version;
};
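
/*
 * A sketch of how a caller might gate optional paths on the capability bits
 * above (the renderer pointer and the surrounding function are assumed):
 *
 *    struct vn_renderer_info info;
 *    vn_renderer_get_info(renderer, &info);
 *    if (!info.has_dma_buf_import)
 *       return VK_ERROR_FEATURE_NOT_PRESENT;  // skip dma-buf import paths
 */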

struct vn_renderer_submit_batch {
   const void *cs_data;
   size_t cs_size;

   /*
    * Submit cs to the virtual sync queue identified by sync_queue_index.
    * The virtual queue is assumed to be associated with the physical VkQueue
    * identified by vk_queue_id.  After the execution completes on the
    * VkQueue, the virtual sync queue is signaled.
    *
    * sync_queue_index must be less than max_sync_queue_count.
    *
    * vk_queue_id specifies the object id of a VkQueue.
    *
    * When sync_queue_cpu is true, it specifies the special CPU sync queue,
    * and sync_queue_index/vk_queue_id are ignored.  TODO revisit this later
    */
   uint32_t sync_queue_index;
   bool sync_queue_cpu;
   vn_object_id vk_queue_id;

   /* syncs to update when the virtual sync queue is signaled */
   struct vn_renderer_sync *const *syncs;
   /* TODO allow NULL when syncs are all binary? */
   const uint64_t *sync_values;
   uint32_t sync_count;
};
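
/*
 * A sketch of a single batch targeting virtual sync queue 0; cs_data,
 * cs_size, sync, and vk_queue_id are assumed to come from the caller:
 *
 *    const struct vn_renderer_submit_batch batch = {
 *       .cs_data = cs_data,
 *       .cs_size = cs_size,
 *       .sync_queue_index = 0,       // must be < max_sync_queue_count
 *       .vk_queue_id = vk_queue_id,  // object id of the associated VkQueue
 *       .syncs = &sync,
 *       .sync_values = &(const uint64_t){ 1 },
 *       .sync_count = 1,
 *    };
 */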

struct vn_renderer_submit {
   /* BOs to pin and to fence implicitly
    *
    * TODO track all bos and automatically pin them.  We don't do it yet
    * because each vn_command_buffer owns a bo.  We can probably make do by
    * returning the bos to a bo cache and excluding the bo cache from
    * pinning.
    */
   struct vn_renderer_bo *const *bos;
   uint32_t bo_count;

   const struct vn_renderer_submit_batch *batches;
   uint32_t batch_count;
};

struct vn_renderer_wait {
   bool wait_any;
   uint64_t timeout;

   struct vn_renderer_sync *const *syncs;
   /* TODO allow NULL when syncs are all binary? */
   const uint64_t *sync_values;
   uint32_t sync_count;
};
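
/*
 * A sketch of waiting for two syncs to both reach their target values, with
 * a one-second timeout (syncs[] and values[] are assumed; the timeout unit
 * is assumed to be nanoseconds):
 *
 *    const struct vn_renderer_wait wait = {
 *       .wait_any = false,                // wait for all syncs
 *       .timeout = 1000ull * 1000 * 1000,
 *       .syncs = syncs,
 *       .sync_values = values,
 *       .sync_count = 2,
 *    };
 *    VkResult result = vn_renderer_wait(renderer, &wait);
 *    // result may legitimately be VK_TIMEOUT; see vn_renderer_ops::wait
 */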

struct vn_renderer_ops {
   void (*destroy)(struct vn_renderer *renderer,
                   const VkAllocationCallbacks *alloc);

   void (*get_info)(struct vn_renderer *renderer,
                    struct vn_renderer_info *info);

   VkResult (*submit)(struct vn_renderer *renderer,
                      const struct vn_renderer_submit *submit);

   /*
    * On success, returns VK_SUCCESS or VK_TIMEOUT.  On failure, returns
    * VK_ERROR_DEVICE_LOST, VK_ERROR_OUT_OF_DEVICE_MEMORY, or
    * VK_ERROR_OUT_OF_HOST_MEMORY.
    */
   VkResult (*wait)(struct vn_renderer *renderer,
                    const struct vn_renderer_wait *wait);
};

struct vn_renderer_shmem_ops {
   struct vn_renderer_shmem *(*create)(struct vn_renderer *renderer,
                                       size_t size);
   void (*destroy)(struct vn_renderer *renderer,
                   struct vn_renderer_shmem *shmem);
};

struct vn_renderer_bo_ops {
   VkResult (*create_from_device_memory)(
      struct vn_renderer *renderer,
      VkDeviceSize size,
      vn_object_id mem_id,
      VkMemoryPropertyFlags flags,
      VkExternalMemoryHandleTypeFlags external_handles,
      struct vn_renderer_bo **out_bo);

   VkResult (*create_from_dma_buf)(struct vn_renderer *renderer,
                                   VkDeviceSize size,
                                   int fd,
                                   VkMemoryPropertyFlags flags,
                                   struct vn_renderer_bo **out_bo);

   bool (*destroy)(struct vn_renderer *renderer, struct vn_renderer_bo *bo);

   int (*export_dma_buf)(struct vn_renderer *renderer,
                         struct vn_renderer_bo *bo);

   /* map is not thread-safe */
   void *(*map)(struct vn_renderer *renderer, struct vn_renderer_bo *bo);

   void (*flush)(struct vn_renderer *renderer,
                 struct vn_renderer_bo *bo,
                 VkDeviceSize offset,
                 VkDeviceSize size);
   void (*invalidate)(struct vn_renderer *renderer,
                      struct vn_renderer_bo *bo,
                      VkDeviceSize offset,
                      VkDeviceSize size);
};

enum vn_renderer_sync_flags {
   VN_RENDERER_SYNC_SHAREABLE = 1u << 0,
   VN_RENDERER_SYNC_BINARY = 1u << 1,
};
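
/*
 * A sketch of creating a binary, shareable sync and exporting it as a sync
 * file; fd ownership and lifetime are backend-specific assumptions, and
 * error handling is omitted:
 *
 *    struct vn_renderer_sync *sync;
 *    vn_renderer_sync_create(renderer, 0,
 *                            VN_RENDERER_SYNC_SHAREABLE |
 *                               VN_RENDERER_SYNC_BINARY,
 *                            &sync);
 *    int fd = vn_renderer_sync_export_syncobj(renderer, sync, true);
 */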

struct vn_renderer_sync_ops {
   VkResult (*create)(struct vn_renderer *renderer,
                      uint64_t initial_val,
                      uint32_t flags,
                      struct vn_renderer_sync **out_sync);

   VkResult (*create_from_syncobj)(struct vn_renderer *renderer,
                                   int fd,
                                   bool sync_file,
                                   struct vn_renderer_sync **out_sync);
   void (*destroy)(struct vn_renderer *renderer,
                   struct vn_renderer_sync *sync);

   int (*export_syncobj)(struct vn_renderer *renderer,
                         struct vn_renderer_sync *sync,
                         bool sync_file);

   /* reset the counter */
   VkResult (*reset)(struct vn_renderer *renderer,
                     struct vn_renderer_sync *sync,
                     uint64_t initial_val);

   /* read the current value from the counter */
   VkResult (*read)(struct vn_renderer *renderer,
                    struct vn_renderer_sync *sync,
                    uint64_t *val);

   /* write a new value (larger than the current one) to the counter */
   VkResult (*write)(struct vn_renderer *renderer,
                     struct vn_renderer_sync *sync,
                     uint64_t val);
};

struct vn_renderer {
   struct vn_renderer_ops ops;
   struct vn_renderer_shmem_ops shmem_ops;
   struct vn_renderer_bo_ops bo_ops;
   struct vn_renderer_sync_ops sync_ops;
};

VkResult
vn_renderer_create_virtgpu(struct vn_instance *instance,
                           const VkAllocationCallbacks *alloc,
                           struct vn_renderer **renderer);

VkResult
vn_renderer_create_vtest(struct vn_instance *instance,
                         const VkAllocationCallbacks *alloc,
                         struct vn_renderer **renderer);

static inline VkResult
vn_renderer_create(struct vn_instance *instance,
                   const VkAllocationCallbacks *alloc,
                   struct vn_renderer **renderer)
{
   if (VN_DEBUG(VTEST)) {
      VkResult result = vn_renderer_create_vtest(instance, alloc, renderer);
      if (result == VK_SUCCESS)
         return VK_SUCCESS;
   }

   return vn_renderer_create_virtgpu(instance, alloc, renderer);
}
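
/*
 * A typical setup/teardown sketch; instance and alloc are assumed to come
 * from the caller:
 *
 *    struct vn_renderer *renderer;
 *    VkResult result = vn_renderer_create(instance, alloc, &renderer);
 *    if (result != VK_SUCCESS)
 *       return result;
 *    ...
 *    vn_renderer_destroy(renderer, alloc);
 */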

static inline void
vn_renderer_destroy(struct vn_renderer *renderer,
                    const VkAllocationCallbacks *alloc)
{
   renderer->ops.destroy(renderer, alloc);
}

static inline void
vn_renderer_get_info(struct vn_renderer *renderer,
                     struct vn_renderer_info *info)
{
   renderer->ops.get_info(renderer, info);
}

static inline VkResult
vn_renderer_submit(struct vn_renderer *renderer,
                   const struct vn_renderer_submit *submit)
{
   return renderer->ops.submit(renderer, submit);
}

static inline VkResult
vn_renderer_submit_simple(struct vn_renderer *renderer,
                          const void *cs_data,
                          size_t cs_size)
{
   const struct vn_renderer_submit submit = {
      .batches =
         &(const struct vn_renderer_submit_batch){
            .cs_data = cs_data,
            .cs_size = cs_size,
         },
      .batch_count = 1,
   };
   return vn_renderer_submit(renderer, &submit);
}

static inline VkResult
vn_renderer_wait(struct vn_renderer *renderer,
                 const struct vn_renderer_wait *wait)
{
   return renderer->ops.wait(renderer, wait);
}

static inline struct vn_renderer_shmem *
vn_renderer_shmem_create(struct vn_renderer *renderer, size_t size)
{
   struct vn_renderer_shmem *shmem =
      renderer->shmem_ops.create(renderer, size);
   if (shmem) {
      assert(vn_refcount_is_valid(&shmem->refcount));
      assert(shmem->res_id);
      assert(shmem->mmap_size >= size);
      assert(shmem->mmap_ptr);
   }

   return shmem;
}

static inline struct vn_renderer_shmem *
vn_renderer_shmem_ref(struct vn_renderer *renderer,
                      struct vn_renderer_shmem *shmem)
{
   vn_refcount_inc(&shmem->refcount);
   return shmem;
}

static inline void
vn_renderer_shmem_unref(struct vn_renderer *renderer,
                        struct vn_renderer_shmem *shmem)
{
   if (vn_refcount_dec(&shmem->refcount))
      renderer->shmem_ops.destroy(renderer, shmem);
}
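
/*
 * A sketch of the shmem lifecycle: create, write through the CPU mapping,
 * and drop the reference.  The size, data, and data_size are assumptions;
 * mmap_ptr is guaranteed valid on success by vn_renderer_shmem_create:
 *
 *    struct vn_renderer_shmem *shmem =
 *       vn_renderer_shmem_create(renderer, 4096);
 *    if (shmem) {
 *       memcpy(shmem->mmap_ptr, data, data_size);
 *       vn_renderer_shmem_unref(renderer, shmem);
 *    }
 */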

static inline VkResult
vn_renderer_bo_create_from_device_memory(
   struct vn_renderer *renderer,
   VkDeviceSize size,
   vn_object_id mem_id,
   VkMemoryPropertyFlags flags,
   VkExternalMemoryHandleTypeFlags external_handles,
   struct vn_renderer_bo **out_bo)
{
   struct vn_renderer_bo *bo;
   VkResult result = renderer->bo_ops.create_from_device_memory(
      renderer, size, mem_id, flags, external_handles, &bo);
   if (result != VK_SUCCESS)
      return result;

   assert(vn_refcount_is_valid(&bo->refcount));
   assert(bo->res_id);
   assert(!bo->mmap_size || bo->mmap_size >= size);

   *out_bo = bo;
   return VK_SUCCESS;
}

static inline VkResult
vn_renderer_bo_create_from_dma_buf(struct vn_renderer *renderer,
                                   VkDeviceSize size,
                                   int fd,
                                   VkMemoryPropertyFlags flags,
                                   struct vn_renderer_bo **out_bo)
{
   struct vn_renderer_bo *bo;
   VkResult result =
      renderer->bo_ops.create_from_dma_buf(renderer, size, fd, flags, &bo);
   if (result != VK_SUCCESS)
      return result;

   assert(vn_refcount_is_valid(&bo->refcount));
   assert(bo->res_id);
   assert(!bo->mmap_size || bo->mmap_size >= size);

   *out_bo = bo;
   return VK_SUCCESS;
}
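
/*
 * A sketch of importing a dma-buf and later re-exporting it; fd ownership
 * rules are backend-specific and assumed here, and the property flags are
 * left empty for illustration:
 *
 *    struct vn_renderer_bo *bo;
 *    VkResult result = vn_renderer_bo_create_from_dma_buf(
 *       renderer, size, fd, 0, &bo);  // 0: no property flags requested
 *    if (result == VK_SUCCESS) {
 *       int out_fd = vn_renderer_bo_export_dma_buf(renderer, bo);
 *       ...
 *       vn_renderer_bo_unref(renderer, bo);
 *    }
 */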

static inline struct vn_renderer_bo *
vn_renderer_bo_ref(struct vn_renderer *renderer, struct vn_renderer_bo *bo)
{
   vn_refcount_inc(&bo->refcount);
   return bo;
}

static inline bool
vn_renderer_bo_unref(struct vn_renderer *renderer, struct vn_renderer_bo *bo)
{
   if (vn_refcount_dec(&bo->refcount))
      return renderer->bo_ops.destroy(renderer, bo);
   return false;
}

static inline int
vn_renderer_bo_export_dma_buf(struct vn_renderer *renderer,
                              struct vn_renderer_bo *bo)
{
   return renderer->bo_ops.export_dma_buf(renderer, bo);
}

static inline void *
vn_renderer_bo_map(struct vn_renderer *renderer, struct vn_renderer_bo *bo)
{
   return renderer->bo_ops.map(renderer, bo);
}

static inline void
vn_renderer_bo_flush(struct vn_renderer *renderer,
                     struct vn_renderer_bo *bo,
                     VkDeviceSize offset,
                     VkDeviceSize size)
{
   renderer->bo_ops.flush(renderer, bo, offset, size);
}

static inline void
vn_renderer_bo_invalidate(struct vn_renderer *renderer,
                          struct vn_renderer_bo *bo,
                          VkDeviceSize offset,
                          VkDeviceSize size)
{
   renderer->bo_ops.invalidate(renderer, bo, offset, size);
}
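
/*
 * A sketch of the usual discipline for non-coherent mappings: flush after
 * CPU writes, invalidate before CPU reads (bo, data, and size are assumed):
 *
 *    void *ptr = vn_renderer_bo_map(renderer, bo);  // map is not thread-safe
 *    memcpy(ptr, data, size);
 *    vn_renderer_bo_flush(renderer, bo, 0, size);   // make CPU writes visible
 *
 *    vn_renderer_bo_invalidate(renderer, bo, 0, size);
 *    memcpy(data, ptr, size);                       // read back fresh data
 */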

static inline VkResult
vn_renderer_sync_create(struct vn_renderer *renderer,
                        uint64_t initial_val,
                        uint32_t flags,
                        struct vn_renderer_sync **out_sync)
{
   return renderer->sync_ops.create(renderer, initial_val, flags, out_sync);
}

static inline VkResult
vn_renderer_sync_create_from_syncobj(struct vn_renderer *renderer,
                                     int fd,
                                     bool sync_file,
                                     struct vn_renderer_sync **out_sync)
{
   return renderer->sync_ops.create_from_syncobj(renderer, fd, sync_file,
                                                 out_sync);
}

static inline void
vn_renderer_sync_destroy(struct vn_renderer *renderer,
                         struct vn_renderer_sync *sync)
{
   renderer->sync_ops.destroy(renderer, sync);
}

static inline int
vn_renderer_sync_export_syncobj(struct vn_renderer *renderer,
                                struct vn_renderer_sync *sync,
                                bool sync_file)
{
   return renderer->sync_ops.export_syncobj(renderer, sync, sync_file);
}

static inline VkResult
vn_renderer_sync_reset(struct vn_renderer *renderer,
                       struct vn_renderer_sync *sync,
                       uint64_t initial_val)
{
   return renderer->sync_ops.reset(renderer, sync, initial_val);
}

static inline VkResult
vn_renderer_sync_read(struct vn_renderer *renderer,
                      struct vn_renderer_sync *sync,
                      uint64_t *val)
{
   return renderer->sync_ops.read(renderer, sync, val);
}

static inline VkResult
vn_renderer_sync_write(struct vn_renderer *renderer,
                       struct vn_renderer_sync *sync,
                       uint64_t val)
{
   return renderer->sync_ops.write(renderer, sync, val);
}

static inline VkResult
vn_renderer_submit_simple_sync(struct vn_renderer *renderer,
                               const void *cs_data,
                               size_t cs_size)
{
   struct vn_renderer_sync *sync;
   VkResult result =
      vn_renderer_sync_create(renderer, 0, VN_RENDERER_SYNC_BINARY, &sync);
   if (result != VK_SUCCESS)
      return result;

   const struct vn_renderer_submit submit = {
      .batches =
         &(const struct vn_renderer_submit_batch){
            .cs_data = cs_data,
            .cs_size = cs_size,
            .sync_queue_cpu = true,
            .syncs = &sync,
            .sync_values = &(const uint64_t){ 1 },
            .sync_count = 1,
         },
      .batch_count = 1,
   };
   const struct vn_renderer_wait wait = {
      .timeout = UINT64_MAX,
      .syncs = &sync,
      .sync_values = &(const uint64_t){ 1 },
      .sync_count = 1,
   };

   result = vn_renderer_submit(renderer, &submit);
   if (result == VK_SUCCESS)
      result = vn_renderer_wait(renderer, &wait);

   vn_renderer_sync_destroy(renderer, sync);

   return result;
}

#endif /* VN_RENDERER_H */