1b8e80941Smrg/*
2b8e80941Smrg * Copyright © 2016 Red Hat.
3b8e80941Smrg * Copyright © 2016 Bas Nieuwenhuizen
4b8e80941Smrg *
5b8e80941Smrg * based in part on anv driver which is:
6b8e80941Smrg * Copyright © 2015 Intel Corporation
7b8e80941Smrg *
8b8e80941Smrg * Permission is hereby granted, free of charge, to any person obtaining a
9b8e80941Smrg * copy of this software and associated documentation files (the "Software"),
10b8e80941Smrg * to deal in the Software without restriction, including without limitation
11b8e80941Smrg * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12b8e80941Smrg * and/or sell copies of the Software, and to permit persons to whom the
13b8e80941Smrg * Software is furnished to do so, subject to the following conditions:
14b8e80941Smrg *
15b8e80941Smrg * The above copyright notice and this permission notice (including the next
16b8e80941Smrg * paragraph) shall be included in all copies or substantial portions of the
17b8e80941Smrg * Software.
18b8e80941Smrg *
19b8e80941Smrg * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20b8e80941Smrg * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21b8e80941Smrg * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
22b8e80941Smrg * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23b8e80941Smrg * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24b8e80941Smrg * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25b8e80941Smrg * DEALINGS IN THE SOFTWARE.
26b8e80941Smrg */
27b8e80941Smrg
28b8e80941Smrg#include "tu_private.h"
29b8e80941Smrg
30b8e80941Smrg#include <fcntl.h>
31b8e80941Smrg#include <libsync.h>
32b8e80941Smrg#include <stdbool.h>
33b8e80941Smrg#include <string.h>
34b8e80941Smrg#include <sys/mman.h>
35b8e80941Smrg#include <sys/sysinfo.h>
36b8e80941Smrg#include <unistd.h>
37b8e80941Smrg#include <xf86drm.h>
38b8e80941Smrg
39b8e80941Smrg#include "util/debug.h"
40b8e80941Smrg#include "util/disk_cache.h"
41b8e80941Smrg#include "util/strtod.h"
42b8e80941Smrg#include "vk_format.h"
43b8e80941Smrg#include "vk_util.h"
44b8e80941Smrg
45b8e80941Smrg#include "drm/msm_drm.h"
46b8e80941Smrg
static int
tu_device_get_cache_uuid(uint16_t family, void *uuid)
{
   /* Build the pipeline-cache UUID from the Mesa build timestamp plus the
    * GPU family, so caches are invalidated on driver rebuilds and are not
    * shared across different GPU families.
    *
    * Returns 0 on success, -1 if the build timestamp is unavailable.
    */
   uint32_t mesa_timestamp;
   uint16_t f = family;
   memset(uuid, 0, VK_UUID_SIZE);
   if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
                                          &mesa_timestamp))
      return -1;

   /* Layout: bytes 0-3 = build timestamp, bytes 4-5 = GPU family,
    * bytes 6.. = "tu" tag; the remainder stays zero from the memset above.
    */
   memcpy(uuid, &mesa_timestamp, 4);
   memcpy((char *) uuid + 4, &f, 2);
   /* NOTE(review): the bound is VK_UUID_SIZE - 10 rather than the
    * VK_UUID_SIZE - 6 bytes actually remaining; harmless since "tu" fits
    * either way, but confirm the intended size.
    */
   snprintf((char *) uuid + 6, VK_UUID_SIZE - 10, "tu");
   return 0;
}
62b8e80941Smrg
63b8e80941Smrgstatic void
64b8e80941Smrgtu_get_driver_uuid(void *uuid)
65b8e80941Smrg{
66b8e80941Smrg   memset(uuid, 0, VK_UUID_SIZE);
67b8e80941Smrg   snprintf(uuid, VK_UUID_SIZE, "freedreno");
68b8e80941Smrg}
69b8e80941Smrg
static void
tu_get_device_uuid(void *uuid)
{
   /* Stub: no per-device identity is encoded yet, so the device UUID is
    * all zeros.
    */
   memset(uuid, 0, VK_UUID_SIZE);
}
75b8e80941Smrg
76b8e80941Smrgstatic VkResult
77b8e80941Smrgtu_bo_init(struct tu_device *dev,
78b8e80941Smrg           struct tu_bo *bo,
79b8e80941Smrg           uint32_t gem_handle,
80b8e80941Smrg           uint64_t size)
81b8e80941Smrg{
82b8e80941Smrg   uint64_t iova = tu_gem_info_iova(dev, gem_handle);
83b8e80941Smrg   if (!iova)
84b8e80941Smrg      return VK_ERROR_OUT_OF_DEVICE_MEMORY;
85b8e80941Smrg
86b8e80941Smrg   *bo = (struct tu_bo) {
87b8e80941Smrg      .gem_handle = gem_handle,
88b8e80941Smrg      .size = size,
89b8e80941Smrg      .iova = iova,
90b8e80941Smrg   };
91b8e80941Smrg
92b8e80941Smrg   return VK_SUCCESS;
93b8e80941Smrg}
94b8e80941Smrg
95b8e80941SmrgVkResult
96b8e80941Smrgtu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
97b8e80941Smrg{
98b8e80941Smrg   /* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
99b8e80941Smrg    * always sets `flags = MSM_BO_WC`, and we copy that behavior here.
100b8e80941Smrg    */
101b8e80941Smrg   uint32_t gem_handle = tu_gem_new(dev, size, MSM_BO_WC);
102b8e80941Smrg   if (!gem_handle)
103b8e80941Smrg      return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
104b8e80941Smrg
105b8e80941Smrg   VkResult result = tu_bo_init(dev, bo, gem_handle, size);
106b8e80941Smrg   if (result != VK_SUCCESS) {
107b8e80941Smrg      tu_gem_close(dev, gem_handle);
108b8e80941Smrg      return vk_error(dev->instance, result);
109b8e80941Smrg   }
110b8e80941Smrg
111b8e80941Smrg   return VK_SUCCESS;
112b8e80941Smrg}
113b8e80941Smrg
114b8e80941SmrgVkResult
115b8e80941Smrgtu_bo_init_dmabuf(struct tu_device *dev,
116b8e80941Smrg                  struct tu_bo *bo,
117b8e80941Smrg                  uint64_t size,
118b8e80941Smrg                  int fd)
119b8e80941Smrg{
120b8e80941Smrg   uint32_t gem_handle = tu_gem_import_dmabuf(dev, fd, size);
121b8e80941Smrg   if (!gem_handle)
122b8e80941Smrg      return vk_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
123b8e80941Smrg
124b8e80941Smrg   VkResult result = tu_bo_init(dev, bo, gem_handle, size);
125b8e80941Smrg   if (result != VK_SUCCESS) {
126b8e80941Smrg      tu_gem_close(dev, gem_handle);
127b8e80941Smrg      return vk_error(dev->instance, result);
128b8e80941Smrg   }
129b8e80941Smrg
130b8e80941Smrg   return VK_SUCCESS;
131b8e80941Smrg}
132b8e80941Smrg
int
tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo)
{
   /* Thin wrapper: export the BO's GEM handle as a dma-buf and forward
    * tu_gem_export_dmabuf()'s return value unchanged.
    */
   return tu_gem_export_dmabuf(dev, bo->gem_handle);
}
138b8e80941Smrg
139b8e80941SmrgVkResult
140b8e80941Smrgtu_bo_map(struct tu_device *dev, struct tu_bo *bo)
141b8e80941Smrg{
142b8e80941Smrg   if (bo->map)
143b8e80941Smrg      return VK_SUCCESS;
144b8e80941Smrg
145b8e80941Smrg   uint64_t offset = tu_gem_info_offset(dev, bo->gem_handle);
146b8e80941Smrg   if (!offset)
147b8e80941Smrg      return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
148b8e80941Smrg
149b8e80941Smrg   /* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
150b8e80941Smrg   void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
151b8e80941Smrg                    dev->physical_device->local_fd, offset);
152b8e80941Smrg   if (map == MAP_FAILED)
153b8e80941Smrg      return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);
154b8e80941Smrg
155b8e80941Smrg   bo->map = map;
156b8e80941Smrg   return VK_SUCCESS;
157b8e80941Smrg}
158b8e80941Smrg
159b8e80941Smrgvoid
160b8e80941Smrgtu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
161b8e80941Smrg{
162b8e80941Smrg   assert(bo->gem_handle);
163b8e80941Smrg
164b8e80941Smrg   if (bo->map)
165b8e80941Smrg      munmap(bo->map, bo->size);
166b8e80941Smrg
167b8e80941Smrg   tu_gem_close(dev, bo->gem_handle);
168b8e80941Smrg}
169b8e80941Smrg
170b8e80941Smrgstatic VkResult
171b8e80941Smrgtu_physical_device_init(struct tu_physical_device *device,
172b8e80941Smrg                        struct tu_instance *instance,
173b8e80941Smrg                        drmDevicePtr drm_device)
174b8e80941Smrg{
175b8e80941Smrg   const char *path = drm_device->nodes[DRM_NODE_RENDER];
176b8e80941Smrg   VkResult result = VK_SUCCESS;
177b8e80941Smrg   drmVersionPtr version;
178b8e80941Smrg   int fd;
179b8e80941Smrg   int master_fd = -1;
180b8e80941Smrg
181b8e80941Smrg   fd = open(path, O_RDWR | O_CLOEXEC);
182b8e80941Smrg   if (fd < 0) {
183b8e80941Smrg      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
184b8e80941Smrg                       "failed to open device %s", path);
185b8e80941Smrg   }
186b8e80941Smrg
187b8e80941Smrg   /* Version 1.3 added MSM_INFO_IOVA. */
188b8e80941Smrg   const int min_version_major = 1;
189b8e80941Smrg   const int min_version_minor = 3;
190b8e80941Smrg
191b8e80941Smrg   version = drmGetVersion(fd);
192b8e80941Smrg   if (!version) {
193b8e80941Smrg      close(fd);
194b8e80941Smrg      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
195b8e80941Smrg                       "failed to query kernel driver version for device %s",
196b8e80941Smrg                       path);
197b8e80941Smrg   }
198b8e80941Smrg
199b8e80941Smrg   if (strcmp(version->name, "msm")) {
200b8e80941Smrg      drmFreeVersion(version);
201b8e80941Smrg      close(fd);
202b8e80941Smrg      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
203b8e80941Smrg                       "device %s does not use the msm kernel driver", path);
204b8e80941Smrg   }
205b8e80941Smrg
206b8e80941Smrg   if (version->version_major != min_version_major ||
207b8e80941Smrg       version->version_minor < min_version_minor) {
208b8e80941Smrg      result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
209b8e80941Smrg                         "kernel driver for device %s has version %d.%d, "
210b8e80941Smrg                         "but Vulkan requires version >= %d.%d",
211b8e80941Smrg                         path, version->version_major, version->version_minor,
212b8e80941Smrg                         min_version_major, min_version_minor);
213b8e80941Smrg      drmFreeVersion(version);
214b8e80941Smrg      close(fd);
215b8e80941Smrg      return result;
216b8e80941Smrg   }
217b8e80941Smrg
218b8e80941Smrg   drmFreeVersion(version);
219b8e80941Smrg
220b8e80941Smrg   if (instance->debug_flags & TU_DEBUG_STARTUP)
221b8e80941Smrg      tu_logi("Found compatible device '%s'.", path);
222b8e80941Smrg
223b8e80941Smrg   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
224b8e80941Smrg   device->instance = instance;
225b8e80941Smrg   assert(strlen(path) < ARRAY_SIZE(device->path));
226b8e80941Smrg   strncpy(device->path, path, ARRAY_SIZE(device->path));
227b8e80941Smrg
228b8e80941Smrg   if (instance->enabled_extensions.KHR_display) {
229b8e80941Smrg      master_fd =
230b8e80941Smrg         open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
231b8e80941Smrg      if (master_fd >= 0) {
232b8e80941Smrg         /* TODO: free master_fd is accel is not working? */
233b8e80941Smrg      }
234b8e80941Smrg   }
235b8e80941Smrg
236b8e80941Smrg   device->master_fd = master_fd;
237b8e80941Smrg   device->local_fd = fd;
238b8e80941Smrg
239b8e80941Smrg   if (tu_drm_get_gpu_id(device, &device->gpu_id)) {
240b8e80941Smrg      if (instance->debug_flags & TU_DEBUG_STARTUP)
241b8e80941Smrg         tu_logi("Could not query the GPU ID");
242b8e80941Smrg      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
243b8e80941Smrg                         "could not get GPU ID");
244b8e80941Smrg      goto fail;
245b8e80941Smrg   }
246b8e80941Smrg
247b8e80941Smrg   if (tu_drm_get_gmem_size(device, &device->gmem_size)) {
248b8e80941Smrg      if (instance->debug_flags & TU_DEBUG_STARTUP)
249b8e80941Smrg         tu_logi("Could not query the GMEM size");
250b8e80941Smrg      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
251b8e80941Smrg                         "could not get GMEM size");
252b8e80941Smrg      goto fail;
253b8e80941Smrg   }
254b8e80941Smrg
255b8e80941Smrg   memset(device->name, 0, sizeof(device->name));
256b8e80941Smrg   sprintf(device->name, "FD%d", device->gpu_id);
257b8e80941Smrg
258b8e80941Smrg   switch (device->gpu_id) {
259b8e80941Smrg   case 630:
260b8e80941Smrg      device->tile_align_w = 32;
261b8e80941Smrg      device->tile_align_h = 32;
262b8e80941Smrg      break;
263b8e80941Smrg   default:
264b8e80941Smrg      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
265b8e80941Smrg                         "device %s is unsupported", device->name);
266b8e80941Smrg      goto fail;
267b8e80941Smrg   }
268b8e80941Smrg   if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
269b8e80941Smrg      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
270b8e80941Smrg                         "cannot generate UUID");
271b8e80941Smrg      goto fail;
272b8e80941Smrg   }
273b8e80941Smrg
274b8e80941Smrg   /* The gpu id is already embedded in the uuid so we just pass "tu"
275b8e80941Smrg    * when creating the cache.
276b8e80941Smrg    */
277b8e80941Smrg   char buf[VK_UUID_SIZE * 2 + 1];
278b8e80941Smrg   disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
279b8e80941Smrg   device->disk_cache = disk_cache_create(device->name, buf, 0);
280b8e80941Smrg
281b8e80941Smrg   fprintf(stderr, "WARNING: tu is not a conformant vulkan implementation, "
282b8e80941Smrg                   "testing use only.\n");
283b8e80941Smrg
284b8e80941Smrg   tu_get_driver_uuid(&device->device_uuid);
285b8e80941Smrg   tu_get_device_uuid(&device->device_uuid);
286b8e80941Smrg
287b8e80941Smrg   tu_fill_device_extension_table(device, &device->supported_extensions);
288b8e80941Smrg
289b8e80941Smrg   if (result != VK_SUCCESS) {
290b8e80941Smrg      vk_error(instance, result);
291b8e80941Smrg      goto fail;
292b8e80941Smrg   }
293b8e80941Smrg
294b8e80941Smrg   result = tu_wsi_init(device);
295b8e80941Smrg   if (result != VK_SUCCESS) {
296b8e80941Smrg      vk_error(instance, result);
297b8e80941Smrg      goto fail;
298b8e80941Smrg   }
299b8e80941Smrg
300b8e80941Smrg   return VK_SUCCESS;
301b8e80941Smrg
302b8e80941Smrgfail:
303b8e80941Smrg   close(fd);
304b8e80941Smrg   if (master_fd != -1)
305b8e80941Smrg      close(master_fd);
306b8e80941Smrg   return result;
307b8e80941Smrg}
308b8e80941Smrg
309b8e80941Smrgstatic void
310b8e80941Smrgtu_physical_device_finish(struct tu_physical_device *device)
311b8e80941Smrg{
312b8e80941Smrg   tu_wsi_finish(device);
313b8e80941Smrg
314b8e80941Smrg   disk_cache_destroy(device->disk_cache);
315b8e80941Smrg   close(device->local_fd);
316b8e80941Smrg   if (device->master_fd != -1)
317b8e80941Smrg      close(device->master_fd);
318b8e80941Smrg}
319b8e80941Smrg
320b8e80941Smrgstatic void *
321b8e80941Smrgdefault_alloc_func(void *pUserData,
322b8e80941Smrg                   size_t size,
323b8e80941Smrg                   size_t align,
324b8e80941Smrg                   VkSystemAllocationScope allocationScope)
325b8e80941Smrg{
326b8e80941Smrg   return malloc(size);
327b8e80941Smrg}
328b8e80941Smrg
329b8e80941Smrgstatic void *
330b8e80941Smrgdefault_realloc_func(void *pUserData,
331b8e80941Smrg                     void *pOriginal,
332b8e80941Smrg                     size_t size,
333b8e80941Smrg                     size_t align,
334b8e80941Smrg                     VkSystemAllocationScope allocationScope)
335b8e80941Smrg{
336b8e80941Smrg   return realloc(pOriginal, size);
337b8e80941Smrg}
338b8e80941Smrg
/* Default VkAllocationCallbacks free hook: plain free. */
static void
default_free_func(void *pUserData, void *pMemory)
{
   (void) pUserData;

   free(pMemory);
}
344b8e80941Smrg
/* Fallback VkAllocationCallbacks used whenever the application supplies no
 * allocator of its own.
 */
static const VkAllocationCallbacks default_alloc = {
   .pUserData = NULL,
   .pfnAllocation = default_alloc_func,
   .pfnReallocation = default_realloc_func,
   .pfnFree = default_free_func,
};
351b8e80941Smrg
/* Names recognized in the TU_DEBUG environment variable, mapped to their
 * debug flags.  The list is NULL-terminated for parse_debug_string().
 */
static const struct debug_control tu_debug_options[] = {
   { "startup", TU_DEBUG_STARTUP },
   { "nir", TU_DEBUG_NIR },
   { "ir3", TU_DEBUG_IR3 },
   { NULL, 0 }
};
358b8e80941Smrg
const char *
tu_get_debug_option_name(int id)
{
   /* Map a debug-flag index back to its TU_DEBUG option name.  The final
    * array entry is the NULL terminator, hence the "- 1" bound.
    */
   assert(id < ARRAY_SIZE(tu_debug_options) - 1);
   return tu_debug_options[id].string;
}
365b8e80941Smrg
366b8e80941Smrgstatic int
367b8e80941Smrgtu_get_instance_extension_index(const char *name)
368b8e80941Smrg{
369b8e80941Smrg   for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
370b8e80941Smrg      if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
371b8e80941Smrg         return i;
372b8e80941Smrg   }
373b8e80941Smrg   return -1;
374b8e80941Smrg}
375b8e80941Smrg
376b8e80941SmrgVkResult
377b8e80941Smrgtu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
378b8e80941Smrg                  const VkAllocationCallbacks *pAllocator,
379b8e80941Smrg                  VkInstance *pInstance)
380b8e80941Smrg{
381b8e80941Smrg   struct tu_instance *instance;
382b8e80941Smrg   VkResult result;
383b8e80941Smrg
384b8e80941Smrg   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
385b8e80941Smrg
386b8e80941Smrg   uint32_t client_version;
387b8e80941Smrg   if (pCreateInfo->pApplicationInfo &&
388b8e80941Smrg       pCreateInfo->pApplicationInfo->apiVersion != 0) {
389b8e80941Smrg      client_version = pCreateInfo->pApplicationInfo->apiVersion;
390b8e80941Smrg   } else {
391b8e80941Smrg      tu_EnumerateInstanceVersion(&client_version);
392b8e80941Smrg   }
393b8e80941Smrg
394b8e80941Smrg   instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
395b8e80941Smrg                         VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
396b8e80941Smrg   if (!instance)
397b8e80941Smrg      return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
398b8e80941Smrg
399b8e80941Smrg   instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
400b8e80941Smrg
401b8e80941Smrg   if (pAllocator)
402b8e80941Smrg      instance->alloc = *pAllocator;
403b8e80941Smrg   else
404b8e80941Smrg      instance->alloc = default_alloc;
405b8e80941Smrg
406b8e80941Smrg   instance->api_version = client_version;
407b8e80941Smrg   instance->physical_device_count = -1;
408b8e80941Smrg
409b8e80941Smrg   instance->debug_flags =
410b8e80941Smrg      parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);
411b8e80941Smrg
412b8e80941Smrg   if (instance->debug_flags & TU_DEBUG_STARTUP)
413b8e80941Smrg      tu_logi("Created an instance");
414b8e80941Smrg
415b8e80941Smrg   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
416b8e80941Smrg      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
417b8e80941Smrg      int index = tu_get_instance_extension_index(ext_name);
418b8e80941Smrg
419b8e80941Smrg      if (index < 0 || !tu_supported_instance_extensions.extensions[index]) {
420b8e80941Smrg         vk_free2(&default_alloc, pAllocator, instance);
421b8e80941Smrg         return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
422b8e80941Smrg      }
423b8e80941Smrg
424b8e80941Smrg      instance->enabled_extensions.extensions[index] = true;
425b8e80941Smrg   }
426b8e80941Smrg
427b8e80941Smrg   result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
428b8e80941Smrg   if (result != VK_SUCCESS) {
429b8e80941Smrg      vk_free2(&default_alloc, pAllocator, instance);
430b8e80941Smrg      return vk_error(instance, result);
431b8e80941Smrg   }
432b8e80941Smrg
433b8e80941Smrg   _mesa_locale_init();
434b8e80941Smrg
435b8e80941Smrg   VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
436b8e80941Smrg
437b8e80941Smrg   *pInstance = tu_instance_to_handle(instance);
438b8e80941Smrg
439b8e80941Smrg   return VK_SUCCESS;
440b8e80941Smrg}
441b8e80941Smrg
442b8e80941Smrgvoid
443b8e80941Smrgtu_DestroyInstance(VkInstance _instance,
444b8e80941Smrg                   const VkAllocationCallbacks *pAllocator)
445b8e80941Smrg{
446b8e80941Smrg   TU_FROM_HANDLE(tu_instance, instance, _instance);
447b8e80941Smrg
448b8e80941Smrg   if (!instance)
449b8e80941Smrg      return;
450b8e80941Smrg
451b8e80941Smrg   for (int i = 0; i < instance->physical_device_count; ++i) {
452b8e80941Smrg      tu_physical_device_finish(instance->physical_devices + i);
453b8e80941Smrg   }
454b8e80941Smrg
455b8e80941Smrg   VG(VALGRIND_DESTROY_MEMPOOL(instance));
456b8e80941Smrg
457b8e80941Smrg   _mesa_locale_fini();
458b8e80941Smrg
459b8e80941Smrg   vk_debug_report_instance_destroy(&instance->debug_report_callbacks);
460b8e80941Smrg
461b8e80941Smrg   vk_free(&instance->alloc, instance);
462b8e80941Smrg}
463b8e80941Smrg
464b8e80941Smrgstatic VkResult
465b8e80941Smrgtu_enumerate_devices(struct tu_instance *instance)
466b8e80941Smrg{
467b8e80941Smrg   /* TODO: Check for more devices ? */
468b8e80941Smrg   drmDevicePtr devices[8];
469b8e80941Smrg   VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
470b8e80941Smrg   int max_devices;
471b8e80941Smrg
472b8e80941Smrg   instance->physical_device_count = 0;
473b8e80941Smrg
474b8e80941Smrg   max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
475b8e80941Smrg
476b8e80941Smrg   if (instance->debug_flags & TU_DEBUG_STARTUP)
477b8e80941Smrg      tu_logi("Found %d drm nodes", max_devices);
478b8e80941Smrg
479b8e80941Smrg   if (max_devices < 1)
480b8e80941Smrg      return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);
481b8e80941Smrg
482b8e80941Smrg   for (unsigned i = 0; i < (unsigned) max_devices; i++) {
483b8e80941Smrg      if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
484b8e80941Smrg          devices[i]->bustype == DRM_BUS_PLATFORM) {
485b8e80941Smrg
486b8e80941Smrg         result = tu_physical_device_init(
487b8e80941Smrg            instance->physical_devices + instance->physical_device_count,
488b8e80941Smrg            instance, devices[i]);
489b8e80941Smrg         if (result == VK_SUCCESS)
490b8e80941Smrg            ++instance->physical_device_count;
491b8e80941Smrg         else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
492b8e80941Smrg            break;
493b8e80941Smrg      }
494b8e80941Smrg   }
495b8e80941Smrg   drmFreeDevices(devices, max_devices);
496b8e80941Smrg
497b8e80941Smrg   return result;
498b8e80941Smrg}
499b8e80941Smrg
500b8e80941SmrgVkResult
501b8e80941Smrgtu_EnumeratePhysicalDevices(VkInstance _instance,
502b8e80941Smrg                            uint32_t *pPhysicalDeviceCount,
503b8e80941Smrg                            VkPhysicalDevice *pPhysicalDevices)
504b8e80941Smrg{
505b8e80941Smrg   TU_FROM_HANDLE(tu_instance, instance, _instance);
506b8e80941Smrg   VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);
507b8e80941Smrg
508b8e80941Smrg   VkResult result;
509b8e80941Smrg
510b8e80941Smrg   if (instance->physical_device_count < 0) {
511b8e80941Smrg      result = tu_enumerate_devices(instance);
512b8e80941Smrg      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
513b8e80941Smrg         return result;
514b8e80941Smrg   }
515b8e80941Smrg
516b8e80941Smrg   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
517b8e80941Smrg      vk_outarray_append(&out, p)
518b8e80941Smrg      {
519b8e80941Smrg         *p = tu_physical_device_to_handle(instance->physical_devices + i);
520b8e80941Smrg      }
521b8e80941Smrg   }
522b8e80941Smrg
523b8e80941Smrg   return vk_outarray_status(&out);
524b8e80941Smrg}
525b8e80941Smrg
526b8e80941SmrgVkResult
527b8e80941Smrgtu_EnumeratePhysicalDeviceGroups(
528b8e80941Smrg   VkInstance _instance,
529b8e80941Smrg   uint32_t *pPhysicalDeviceGroupCount,
530b8e80941Smrg   VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
531b8e80941Smrg{
532b8e80941Smrg   TU_FROM_HANDLE(tu_instance, instance, _instance);
533b8e80941Smrg   VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
534b8e80941Smrg                    pPhysicalDeviceGroupCount);
535b8e80941Smrg   VkResult result;
536b8e80941Smrg
537b8e80941Smrg   if (instance->physical_device_count < 0) {
538b8e80941Smrg      result = tu_enumerate_devices(instance);
539b8e80941Smrg      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
540b8e80941Smrg         return result;
541b8e80941Smrg   }
542b8e80941Smrg
543b8e80941Smrg   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
544b8e80941Smrg      vk_outarray_append(&out, p)
545b8e80941Smrg      {
546b8e80941Smrg         p->physicalDeviceCount = 1;
547b8e80941Smrg         p->physicalDevices[0] =
548b8e80941Smrg            tu_physical_device_to_handle(instance->physical_devices + i);
549b8e80941Smrg         p->subsetAllocation = false;
550b8e80941Smrg      }
551b8e80941Smrg   }
552b8e80941Smrg
553b8e80941Smrg   return vk_outarray_status(&out);
554b8e80941Smrg}
555b8e80941Smrg
556b8e80941Smrgvoid
557b8e80941Smrgtu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
558b8e80941Smrg                             VkPhysicalDeviceFeatures *pFeatures)
559b8e80941Smrg{
560b8e80941Smrg   memset(pFeatures, 0, sizeof(*pFeatures));
561b8e80941Smrg
562b8e80941Smrg   *pFeatures = (VkPhysicalDeviceFeatures) {
563b8e80941Smrg      .robustBufferAccess = false,
564b8e80941Smrg      .fullDrawIndexUint32 = false,
565b8e80941Smrg      .imageCubeArray = false,
566b8e80941Smrg      .independentBlend = false,
567b8e80941Smrg      .geometryShader = false,
568b8e80941Smrg      .tessellationShader = false,
569b8e80941Smrg      .sampleRateShading = false,
570b8e80941Smrg      .dualSrcBlend = false,
571b8e80941Smrg      .logicOp = false,
572b8e80941Smrg      .multiDrawIndirect = false,
573b8e80941Smrg      .drawIndirectFirstInstance = false,
574b8e80941Smrg      .depthClamp = false,
575b8e80941Smrg      .depthBiasClamp = false,
576b8e80941Smrg      .fillModeNonSolid = false,
577b8e80941Smrg      .depthBounds = false,
578b8e80941Smrg      .wideLines = false,
579b8e80941Smrg      .largePoints = false,
580b8e80941Smrg      .alphaToOne = false,
581b8e80941Smrg      .multiViewport = false,
582b8e80941Smrg      .samplerAnisotropy = false,
583b8e80941Smrg      .textureCompressionETC2 = false,
584b8e80941Smrg      .textureCompressionASTC_LDR = false,
585b8e80941Smrg      .textureCompressionBC = false,
586b8e80941Smrg      .occlusionQueryPrecise = false,
587b8e80941Smrg      .pipelineStatisticsQuery = false,
588b8e80941Smrg      .vertexPipelineStoresAndAtomics = false,
589b8e80941Smrg      .fragmentStoresAndAtomics = false,
590b8e80941Smrg      .shaderTessellationAndGeometryPointSize = false,
591b8e80941Smrg      .shaderImageGatherExtended = false,
592b8e80941Smrg      .shaderStorageImageExtendedFormats = false,
593b8e80941Smrg      .shaderStorageImageMultisample = false,
594b8e80941Smrg      .shaderUniformBufferArrayDynamicIndexing = false,
595b8e80941Smrg      .shaderSampledImageArrayDynamicIndexing = false,
596b8e80941Smrg      .shaderStorageBufferArrayDynamicIndexing = false,
597b8e80941Smrg      .shaderStorageImageArrayDynamicIndexing = false,
598b8e80941Smrg      .shaderStorageImageReadWithoutFormat = false,
599b8e80941Smrg      .shaderStorageImageWriteWithoutFormat = false,
600b8e80941Smrg      .shaderClipDistance = false,
601b8e80941Smrg      .shaderCullDistance = false,
602b8e80941Smrg      .shaderFloat64 = false,
603b8e80941Smrg      .shaderInt64 = false,
604b8e80941Smrg      .shaderInt16 = false,
605b8e80941Smrg      .sparseBinding = false,
606b8e80941Smrg      .variableMultisampleRate = false,
607b8e80941Smrg      .inheritedQueries = false,
608b8e80941Smrg   };
609b8e80941Smrg}
610b8e80941Smrg
/* vkGetPhysicalDeviceFeatures2: walk the extension chain and fill in each
 * recognized feature struct.  Every optional feature is currently reported
 * as unsupported (false); unrecognized sTypes are skipped, as the spec
 * requires.  Finally, the core features member is filled in.
 */
void
tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
                              VkPhysicalDeviceFeatures2 *pFeatures)
{
   vk_foreach_struct(ext, pFeatures->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES: {
         VkPhysicalDeviceVariablePointersFeatures *features = (void *) ext;
         features->variablePointersStorageBuffer = false;
         features->variablePointers = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES: {
         VkPhysicalDeviceMultiviewFeatures *features =
            (VkPhysicalDeviceMultiviewFeatures *) ext;
         features->multiview = false;
         features->multiviewGeometryShader = false;
         features->multiviewTessellationShader = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES: {
         VkPhysicalDeviceShaderDrawParametersFeatures *features =
            (VkPhysicalDeviceShaderDrawParametersFeatures *) ext;
         features->shaderDrawParameters = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
         VkPhysicalDeviceProtectedMemoryFeatures *features =
            (VkPhysicalDeviceProtectedMemoryFeatures *) ext;
         features->protectedMemory = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
         VkPhysicalDevice16BitStorageFeatures *features =
            (VkPhysicalDevice16BitStorageFeatures *) ext;
         features->storageBuffer16BitAccess = false;
         features->uniformAndStorageBuffer16BitAccess = false;
         features->storagePushConstant16 = false;
         features->storageInputOutput16 = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
         VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
            (VkPhysicalDeviceSamplerYcbcrConversionFeatures *) ext;
         features->samplerYcbcrConversion = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
         VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
            (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *) ext;
         features->shaderInputAttachmentArrayDynamicIndexing = false;
         features->shaderUniformTexelBufferArrayDynamicIndexing = false;
         features->shaderStorageTexelBufferArrayDynamicIndexing = false;
         features->shaderUniformBufferArrayNonUniformIndexing = false;
         features->shaderSampledImageArrayNonUniformIndexing = false;
         features->shaderStorageBufferArrayNonUniformIndexing = false;
         features->shaderStorageImageArrayNonUniformIndexing = false;
         features->shaderInputAttachmentArrayNonUniformIndexing = false;
         features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
         features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
         features->descriptorBindingUniformBufferUpdateAfterBind = false;
         features->descriptorBindingSampledImageUpdateAfterBind = false;
         features->descriptorBindingStorageImageUpdateAfterBind = false;
         features->descriptorBindingStorageBufferUpdateAfterBind = false;
         features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
         features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
         features->descriptorBindingUpdateUnusedWhilePending = false;
         features->descriptorBindingPartiallyBound = false;
         features->descriptorBindingVariableDescriptorCount = false;
         features->runtimeDescriptorArray = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
         VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
            (VkPhysicalDeviceConditionalRenderingFeaturesEXT *) ext;
         features->conditionalRendering = false;
         features->inheritedConditionalRendering = false;
         break;
      }
      default:
         break;
      }
   }
   /* The callee returns void; this "return" is just a tail call. */
   return tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
}
697b8e80941Smrg
/* Fill in the core VkPhysicalDeviceProperties: API/driver versions, device
 * identity, and the full VkPhysicalDeviceLimits table.  Many limits are
 * still placeholders (see FIXME/FINISHME markers) pending real hardware
 * characterization.
 */
void
tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
                               VkPhysicalDeviceProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   /* 0xf = 1x/2x/4x/8x sample counts advertised for most formats below. */
   VkSampleCountFlags sample_counts = 0xf;

   /* make sure that the entire descriptor set is addressable with a signed
    * 32-bit int. So the sum of all limits scaled by descriptor size has to
    * be at most 2 GiB. the combined image & samples object count as one of
    * both. This limit is for the pipeline layout, not for the set layout, but
    * there is no set limit, so we just set a pipeline limit. I don't think
    * any app is going to hit this soon. */
   size_t max_descriptor_set_size =
      ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
      (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
       32 /* storage buffer, 32 due to potential space wasted on alignment */ +
       32 /* sampler, largest when combined with image */ +
       64 /* sampled image */ + 64 /* storage image */);

   VkPhysicalDeviceLimits limits = {
      .maxImageDimension1D = (1 << 14),
      .maxImageDimension2D = (1 << 14),
      .maxImageDimension3D = (1 << 11),
      .maxImageDimensionCube = (1 << 14),
      .maxImageArrayLayers = (1 << 11),
      .maxTexelBufferElements = 128 * 1024 * 1024,
      .maxUniformBufferRange = UINT32_MAX,
      .maxStorageBufferRange = UINT32_MAX,
      .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
      .maxMemoryAllocationCount = UINT32_MAX,
      .maxSamplerAllocationCount = 64 * 1024,
      .bufferImageGranularity = 64,          /* A cache line */
      .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
      .maxBoundDescriptorSets = MAX_SETS,
      .maxPerStageDescriptorSamplers = max_descriptor_set_size,
      .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
      .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
      .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
      .maxPerStageResources = max_descriptor_set_size,
      .maxDescriptorSetSamplers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
      .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
      .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
      .maxDescriptorSetSampledImages = max_descriptor_set_size,
      .maxDescriptorSetStorageImages = max_descriptor_set_size,
      .maxDescriptorSetInputAttachments = max_descriptor_set_size,
      .maxVertexInputAttributes = 32,
      .maxVertexInputBindings = 32,
      .maxVertexInputAttributeOffset = 2047,
      .maxVertexInputBindingStride = 2048,
      .maxVertexOutputComponents = 128,
      .maxTessellationGenerationLevel = 64,
      .maxTessellationPatchSize = 32,
      .maxTessellationControlPerVertexInputComponents = 128,
      .maxTessellationControlPerVertexOutputComponents = 128,
      .maxTessellationControlPerPatchOutputComponents = 120,
      .maxTessellationControlTotalOutputComponents = 4096,
      .maxTessellationEvaluationInputComponents = 128,
      .maxTessellationEvaluationOutputComponents = 128,
      .maxGeometryShaderInvocations = 127,
      .maxGeometryInputComponents = 64,
      .maxGeometryOutputComponents = 128,
      .maxGeometryOutputVertices = 256,
      .maxGeometryTotalOutputComponents = 1024,
      .maxFragmentInputComponents = 128,
      .maxFragmentOutputAttachments = 8,
      .maxFragmentDualSrcAttachments = 1,
      .maxFragmentCombinedOutputResources = 8,
      .maxComputeSharedMemorySize = 32768,
      .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
      .maxComputeWorkGroupInvocations = 2048,
      .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
      .subPixelPrecisionBits = 4 /* FIXME */,
      .subTexelPrecisionBits = 4 /* FIXME */,
      .mipmapPrecisionBits = 4 /* FIXME */,
      .maxDrawIndexedIndexValue = UINT32_MAX,
      .maxDrawIndirectCount = UINT32_MAX,
      .maxSamplerLodBias = 16,
      .maxSamplerAnisotropy = 16,
      .maxViewports = MAX_VIEWPORTS,
      .maxViewportDimensions = { (1 << 14), (1 << 14) },
      .viewportBoundsRange = { INT16_MIN, INT16_MAX },
      .viewportSubPixelBits = 8,
      .minMemoryMapAlignment = 4096, /* A page */
      .minTexelBufferOffsetAlignment = 1,
      .minUniformBufferOffsetAlignment = 4,
      .minStorageBufferOffsetAlignment = 4,
      .minTexelOffset = -32,
      .maxTexelOffset = 31,
      .minTexelGatherOffset = -32,
      .maxTexelGatherOffset = 31,
      .minInterpolationOffset = -2,
      .maxInterpolationOffset = 2,
      .subPixelInterpolationOffsetBits = 8,
      .maxFramebufferWidth = (1 << 14),
      .maxFramebufferHeight = (1 << 14),
      .maxFramebufferLayers = (1 << 10),
      .framebufferColorSampleCounts = sample_counts,
      .framebufferDepthSampleCounts = sample_counts,
      .framebufferStencilSampleCounts = sample_counts,
      .framebufferNoAttachmentsSampleCounts = sample_counts,
      .maxColorAttachments = MAX_RTS,
      .sampledImageColorSampleCounts = sample_counts,
      .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .sampledImageDepthSampleCounts = sample_counts,
      .sampledImageStencilSampleCounts = sample_counts,
      .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .maxSampleMaskWords = 1,
      .timestampComputeAndGraphics = true,
      .timestampPeriod = 1,
      .maxClipDistances = 8,
      .maxCullDistances = 8,
      .maxCombinedClipAndCullDistances = 8,
      .discreteQueuePriorities = 1,
      .pointSizeRange = { 0.125, 255.875 },
      .lineWidthRange = { 0.0, 7.9921875 },
      .pointSizeGranularity = (1.0 / 8.0),
      .lineWidthGranularity = (1.0 / 128.0),
      .strictLines = false, /* FINISHME */
      .standardSampleLocations = true,
      .optimalBufferCopyOffsetAlignment = 128,
      .optimalBufferCopyRowPitchAlignment = 128,
      .nonCoherentAtomSize = 64,
   };

   *pProperties = (VkPhysicalDeviceProperties) {
      .apiVersion = tu_physical_device_api_version(pdevice),
      .driverVersion = vk_get_driver_version(),
      .vendorID = 0, /* TODO */
      .deviceID = 0,
      .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
      .limits = limits,
      .sparseProperties = { 0 },
   };

   /* deviceName/pipelineCacheUUID are fixed-size arrays inside the struct
    * and so are copied rather than assigned. */
   strcpy(pProperties->deviceName, pdevice->name);
   memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
}
840b8e80941Smrg
/* VkGetPhysicalDeviceProperties2: fill the core properties, then walk the
 * pNext chain and fill every extension property struct we recognize.
 * Unknown sTypes are silently skipped, as the spec requires.
 */
void
tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
                                VkPhysicalDeviceProperties2 *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);

   vk_foreach_struct(ext, pProperties->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
         VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
            (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
         properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES: {
         VkPhysicalDeviceIDProperties *properties =
            (VkPhysicalDeviceIDProperties *) ext;
         memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
         memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
         /* LUIDs are a Windows concept; we never report one. */
         properties->deviceLUIDValid = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES: {
         VkPhysicalDeviceMultiviewProperties *properties =
            (VkPhysicalDeviceMultiviewProperties *) ext;
         properties->maxMultiviewViewCount = MAX_VIEWS;
         properties->maxMultiviewInstanceIndex = INT_MAX;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES: {
         VkPhysicalDevicePointClippingProperties *properties =
            (VkPhysicalDevicePointClippingProperties *) ext;
         properties->pointClippingBehavior =
            VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
         VkPhysicalDeviceMaintenance3Properties *properties =
            (VkPhysicalDeviceMaintenance3Properties *) ext;
         /* Make sure everything is addressable by a signed 32-bit int, and
          * our largest descriptors are 96 bytes. */
         properties->maxPerSetDescriptors = (1ull << 31) / 96;
         /* Our buffer size fields allow only this much */
         properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
         break;
      }
      default:
         break;
      }
   }
}
894b8e80941Smrg
/* The single queue family we expose: one graphics+compute+transfer queue.
 * Transfer granularity of 1x1x1 means copies can target any texel.
 */
static const VkQueueFamilyProperties tu_queue_family_properties = {
   .queueFlags =
      VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
   .queueCount = 1,
   .timestampValidBits = 64,
   .minImageTransferGranularity = { 1, 1, 1 },
};
902b8e80941Smrg
/* Report our one queue family, honoring the Vulkan two-call
 * count-query/fill protocol via the outarray helpers. */
void
tu_GetPhysicalDeviceQueueFamilyProperties(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p) { *p = tu_queue_family_properties; }
}
913b8e80941Smrg
/* Same as tu_GetPhysicalDeviceQueueFamilyProperties but wrapped in the
 * extensible VkQueueFamilyProperties2 container; we fill no pNext extras. */
void
tu_GetPhysicalDeviceQueueFamilyProperties2(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties2 *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p)
   {
      p->queueFamilyProperties = tu_queue_family_properties;
   }
}
927b8e80941Smrg
/* Size of the single memory heap we advertise, derived from system RAM
 * (the GPU shares it with the CPU): half of the first 4 GiB, 3/4 of
 * anything larger.
 */
static uint64_t
tu_get_system_heap_size(void)   /* (void): avoid a pre-C23 unprototyped decl */
{
   struct sysinfo info;
   sysinfo(&info);

   /* totalram is expressed in units of mem_unit bytes. */
   uint64_t total_ram = (uint64_t) info.totalram * (uint64_t) info.mem_unit;

   /* We don't want to burn too much ram with the GPU.  If the user has 4GiB
    * or less, we use at most half.  If they have more than 4GiB, we use 3/4.
    */
   uint64_t available_ram;
   if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
      available_ram = total_ram / 2;
   else
      available_ram = total_ram * 3 / 4;

   return available_ram;
}
947b8e80941Smrg
948b8e80941Smrgvoid
949b8e80941Smrgtu_GetPhysicalDeviceMemoryProperties(
950b8e80941Smrg   VkPhysicalDevice physicalDevice,
951b8e80941Smrg   VkPhysicalDeviceMemoryProperties *pMemoryProperties)
952b8e80941Smrg{
953b8e80941Smrg   pMemoryProperties->memoryHeapCount = 1;
954b8e80941Smrg   pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
955b8e80941Smrg   pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;
956b8e80941Smrg
957b8e80941Smrg   pMemoryProperties->memoryTypeCount = 1;
958b8e80941Smrg   pMemoryProperties->memoryTypes[0].propertyFlags =
959b8e80941Smrg      VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
960b8e80941Smrg      VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
961b8e80941Smrg      VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
962b8e80941Smrg   pMemoryProperties->memoryTypes[0].heapIndex = 0;
963b8e80941Smrg}
964b8e80941Smrg
965b8e80941Smrgvoid
966b8e80941Smrgtu_GetPhysicalDeviceMemoryProperties2(
967b8e80941Smrg   VkPhysicalDevice physicalDevice,
968b8e80941Smrg   VkPhysicalDeviceMemoryProperties2 *pMemoryProperties)
969b8e80941Smrg{
970b8e80941Smrg   return tu_GetPhysicalDeviceMemoryProperties(
971b8e80941Smrg      physicalDevice, &pMemoryProperties->memoryProperties);
972b8e80941Smrg}
973b8e80941Smrg
974b8e80941Smrgstatic VkResult
975b8e80941Smrgtu_queue_init(struct tu_device *device,
976b8e80941Smrg              struct tu_queue *queue,
977b8e80941Smrg              uint32_t queue_family_index,
978b8e80941Smrg              int idx,
979b8e80941Smrg              VkDeviceQueueCreateFlags flags)
980b8e80941Smrg{
981b8e80941Smrg   queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
982b8e80941Smrg   queue->device = device;
983b8e80941Smrg   queue->queue_family_index = queue_family_index;
984b8e80941Smrg   queue->queue_idx = idx;
985b8e80941Smrg   queue->flags = flags;
986b8e80941Smrg
987b8e80941Smrg   int ret = tu_drm_submitqueue_new(device, 0, &queue->msm_queue_id);
988b8e80941Smrg   if (ret)
989b8e80941Smrg      return VK_ERROR_INITIALIZATION_FAILED;
990b8e80941Smrg
991b8e80941Smrg   tu_fence_init(&queue->submit_fence, false);
992b8e80941Smrg
993b8e80941Smrg   return VK_SUCCESS;
994b8e80941Smrg}
995b8e80941Smrg
/* Tear down a queue created by tu_queue_init: release the submit fence,
 * then close the kernel submit queue. */
static void
tu_queue_finish(struct tu_queue *queue)
{
   tu_fence_finish(&queue->submit_fence);
   tu_drm_submitqueue_close(queue->device, queue->msm_queue_id);
}
1002b8e80941Smrg
1003b8e80941Smrgstatic int
1004b8e80941Smrgtu_get_device_extension_index(const char *name)
1005b8e80941Smrg{
1006b8e80941Smrg   for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
1007b8e80941Smrg      if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
1008b8e80941Smrg         return i;
1009b8e80941Smrg   }
1010b8e80941Smrg   return -1;
1011b8e80941Smrg}
1012b8e80941Smrg
1013b8e80941SmrgVkResult
1014b8e80941Smrgtu_CreateDevice(VkPhysicalDevice physicalDevice,
1015b8e80941Smrg                const VkDeviceCreateInfo *pCreateInfo,
1016b8e80941Smrg                const VkAllocationCallbacks *pAllocator,
1017b8e80941Smrg                VkDevice *pDevice)
1018b8e80941Smrg{
1019b8e80941Smrg   TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
1020b8e80941Smrg   VkResult result;
1021b8e80941Smrg   struct tu_device *device;
1022b8e80941Smrg
1023b8e80941Smrg   /* Check enabled features */
1024b8e80941Smrg   if (pCreateInfo->pEnabledFeatures) {
1025b8e80941Smrg      VkPhysicalDeviceFeatures supported_features;
1026b8e80941Smrg      tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
1027b8e80941Smrg      VkBool32 *supported_feature = (VkBool32 *) &supported_features;
1028b8e80941Smrg      VkBool32 *enabled_feature = (VkBool32 *) pCreateInfo->pEnabledFeatures;
1029b8e80941Smrg      unsigned num_features =
1030b8e80941Smrg         sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
1031b8e80941Smrg      for (uint32_t i = 0; i < num_features; i++) {
1032b8e80941Smrg         if (enabled_feature[i] && !supported_feature[i])
1033b8e80941Smrg            return vk_error(physical_device->instance,
1034b8e80941Smrg                            VK_ERROR_FEATURE_NOT_PRESENT);
1035b8e80941Smrg      }
1036b8e80941Smrg   }
1037b8e80941Smrg
1038b8e80941Smrg   device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
1039b8e80941Smrg                       sizeof(*device), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1040b8e80941Smrg   if (!device)
1041b8e80941Smrg      return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1042b8e80941Smrg
1043b8e80941Smrg   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1044b8e80941Smrg   device->instance = physical_device->instance;
1045b8e80941Smrg   device->physical_device = physical_device;
1046b8e80941Smrg
1047b8e80941Smrg   if (pAllocator)
1048b8e80941Smrg      device->alloc = *pAllocator;
1049b8e80941Smrg   else
1050b8e80941Smrg      device->alloc = physical_device->instance->alloc;
1051b8e80941Smrg
1052b8e80941Smrg   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
1053b8e80941Smrg      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
1054b8e80941Smrg      int index = tu_get_device_extension_index(ext_name);
1055b8e80941Smrg      if (index < 0 ||
1056b8e80941Smrg          !physical_device->supported_extensions.extensions[index]) {
1057b8e80941Smrg         vk_free(&device->alloc, device);
1058b8e80941Smrg         return vk_error(physical_device->instance,
1059b8e80941Smrg                         VK_ERROR_EXTENSION_NOT_PRESENT);
1060b8e80941Smrg      }
1061b8e80941Smrg
1062b8e80941Smrg      device->enabled_extensions.extensions[index] = true;
1063b8e80941Smrg   }
1064b8e80941Smrg
1065b8e80941Smrg   for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
1066b8e80941Smrg      const VkDeviceQueueCreateInfo *queue_create =
1067b8e80941Smrg         &pCreateInfo->pQueueCreateInfos[i];
1068b8e80941Smrg      uint32_t qfi = queue_create->queueFamilyIndex;
1069b8e80941Smrg      device->queues[qfi] = vk_alloc(
1070b8e80941Smrg         &device->alloc, queue_create->queueCount * sizeof(struct tu_queue),
1071b8e80941Smrg         8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1072b8e80941Smrg      if (!device->queues[qfi]) {
1073b8e80941Smrg         result = VK_ERROR_OUT_OF_HOST_MEMORY;
1074b8e80941Smrg         goto fail;
1075b8e80941Smrg      }
1076b8e80941Smrg
1077b8e80941Smrg      memset(device->queues[qfi], 0,
1078b8e80941Smrg             queue_create->queueCount * sizeof(struct tu_queue));
1079b8e80941Smrg
1080b8e80941Smrg      device->queue_count[qfi] = queue_create->queueCount;
1081b8e80941Smrg
1082b8e80941Smrg      for (unsigned q = 0; q < queue_create->queueCount; q++) {
1083b8e80941Smrg         result = tu_queue_init(device, &device->queues[qfi][q], qfi, q,
1084b8e80941Smrg                                queue_create->flags);
1085b8e80941Smrg         if (result != VK_SUCCESS)
1086b8e80941Smrg            goto fail;
1087b8e80941Smrg      }
1088b8e80941Smrg   }
1089b8e80941Smrg
1090b8e80941Smrg   device->compiler = ir3_compiler_create(NULL, physical_device->gpu_id);
1091b8e80941Smrg   if (!device->compiler)
1092b8e80941Smrg      goto fail;
1093b8e80941Smrg
1094b8e80941Smrg   VkPipelineCacheCreateInfo ci;
1095b8e80941Smrg   ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
1096b8e80941Smrg   ci.pNext = NULL;
1097b8e80941Smrg   ci.flags = 0;
1098b8e80941Smrg   ci.pInitialData = NULL;
1099b8e80941Smrg   ci.initialDataSize = 0;
1100b8e80941Smrg   VkPipelineCache pc;
1101b8e80941Smrg   result =
1102b8e80941Smrg      tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
1103b8e80941Smrg   if (result != VK_SUCCESS)
1104b8e80941Smrg      goto fail;
1105b8e80941Smrg
1106b8e80941Smrg   device->mem_cache = tu_pipeline_cache_from_handle(pc);
1107b8e80941Smrg
1108b8e80941Smrg   *pDevice = tu_device_to_handle(device);
1109b8e80941Smrg   return VK_SUCCESS;
1110b8e80941Smrg
1111b8e80941Smrgfail:
1112b8e80941Smrg   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
1113b8e80941Smrg      for (unsigned q = 0; q < device->queue_count[i]; q++)
1114b8e80941Smrg         tu_queue_finish(&device->queues[i][q]);
1115b8e80941Smrg      if (device->queue_count[i])
1116b8e80941Smrg         vk_free(&device->alloc, device->queues[i]);
1117b8e80941Smrg   }
1118b8e80941Smrg
1119b8e80941Smrg   if (device->compiler)
1120b8e80941Smrg      ralloc_free(device->compiler);
1121b8e80941Smrg
1122b8e80941Smrg   vk_free(&device->alloc, device);
1123b8e80941Smrg   return result;
1124b8e80941Smrg}
1125b8e80941Smrg
/* Destroy a device created by tu_CreateDevice.  Per the Vulkan spec,
 * destroying VK_NULL_HANDLE is a no-op.  pAllocator is unused because the
 * device remembers the allocator it was created with.
 */
void
tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   if (!device)
      return;

   /* Finish and free every queue in every family. */
   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);
   }

   /* the compiler does not use pAllocator */
   ralloc_free(device->compiler);

   VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
   tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);

   vk_free(&device->alloc, device);
}
1149b8e80941Smrg
/* We implement no instance layers, so always report zero. */
VkResult
tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
                                    VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}
1157b8e80941Smrg
/* We implement no device layers, so always report zero. */
VkResult
tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
                                  uint32_t *pPropertyCount,
                                  VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}
1166b8e80941Smrg
/* Fetch a queue by (family, index), checking that the requested flags match
 * the flags the queue was created with — otherwise the spec requires
 * returning VK_NULL_HANDLE. */
void
tu_GetDeviceQueue2(VkDevice _device,
                   const VkDeviceQueueInfo2 *pQueueInfo,
                   VkQueue *pQueue)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_queue *queue;

   queue =
      &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
   if (pQueueInfo->flags != queue->flags) {
      /* From the Vulkan 1.1.70 spec:
       *
       * "The queue returned by vkGetDeviceQueue2 must have the same
       * flags value from this structure as that used at device
       * creation time in a VkDeviceQueueCreateInfo instance. If no
       * matching flags were specified at device creation time then
       * pQueue will return VK_NULL_HANDLE."
       */
      *pQueue = VK_NULL_HANDLE;
      return;
   }

   *pQueue = tu_queue_to_handle(queue);
}
1192b8e80941Smrg
1193b8e80941Smrgvoid
1194b8e80941Smrgtu_GetDeviceQueue(VkDevice _device,
1195b8e80941Smrg                  uint32_t queueFamilyIndex,
1196b8e80941Smrg                  uint32_t queueIndex,
1197b8e80941Smrg                  VkQueue *pQueue)
1198b8e80941Smrg{
1199b8e80941Smrg   const VkDeviceQueueInfo2 info =
1200b8e80941Smrg      (VkDeviceQueueInfo2) { .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
1201b8e80941Smrg                             .queueFamilyIndex = queueFamilyIndex,
1202b8e80941Smrg                             .queueIndex = queueIndex };
1203b8e80941Smrg
1204b8e80941Smrg   tu_GetDeviceQueue2(_device, &info, pQueue);
1205b8e80941Smrg}
1206b8e80941Smrg
1207b8e80941SmrgVkResult
1208b8e80941Smrgtu_QueueSubmit(VkQueue _queue,
1209b8e80941Smrg               uint32_t submitCount,
1210b8e80941Smrg               const VkSubmitInfo *pSubmits,
1211b8e80941Smrg               VkFence _fence)
1212b8e80941Smrg{
1213b8e80941Smrg   TU_FROM_HANDLE(tu_queue, queue, _queue);
1214b8e80941Smrg
1215b8e80941Smrg   for (uint32_t i = 0; i < submitCount; ++i) {
1216b8e80941Smrg      const VkSubmitInfo *submit = pSubmits + i;
1217b8e80941Smrg      const bool last_submit = (i == submitCount - 1);
1218b8e80941Smrg      struct tu_bo_list bo_list;
1219b8e80941Smrg      tu_bo_list_init(&bo_list);
1220b8e80941Smrg
1221b8e80941Smrg      uint32_t entry_count = 0;
1222b8e80941Smrg      for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
1223b8e80941Smrg         TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
1224b8e80941Smrg         entry_count += cmdbuf->cs.entry_count;
1225b8e80941Smrg      }
1226b8e80941Smrg
1227b8e80941Smrg      struct drm_msm_gem_submit_cmd cmds[entry_count];
1228b8e80941Smrg      uint32_t entry_idx = 0;
1229b8e80941Smrg      for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
1230b8e80941Smrg         TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
1231b8e80941Smrg         struct tu_cs *cs = &cmdbuf->cs;
1232b8e80941Smrg         for (unsigned i = 0; i < cs->entry_count; ++i, ++entry_idx) {
1233b8e80941Smrg            cmds[entry_idx].type = MSM_SUBMIT_CMD_BUF;
1234b8e80941Smrg            cmds[entry_idx].submit_idx =
1235b8e80941Smrg               tu_bo_list_add(&bo_list, cs->entries[i].bo,
1236b8e80941Smrg                              MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
1237b8e80941Smrg            cmds[entry_idx].submit_offset = cs->entries[i].offset;
1238b8e80941Smrg            cmds[entry_idx].size = cs->entries[i].size;
1239b8e80941Smrg            cmds[entry_idx].pad = 0;
1240b8e80941Smrg            cmds[entry_idx].nr_relocs = 0;
1241b8e80941Smrg            cmds[entry_idx].relocs = 0;
1242b8e80941Smrg         }
1243b8e80941Smrg
1244b8e80941Smrg         tu_bo_list_merge(&bo_list, &cmdbuf->bo_list);
1245b8e80941Smrg      }
1246b8e80941Smrg
1247b8e80941Smrg      uint32_t flags = MSM_PIPE_3D0;
1248b8e80941Smrg      if (last_submit) {
1249b8e80941Smrg         flags |= MSM_SUBMIT_FENCE_FD_OUT;
1250b8e80941Smrg      }
1251b8e80941Smrg
1252b8e80941Smrg      struct drm_msm_gem_submit req = {
1253b8e80941Smrg         .flags = flags,
1254b8e80941Smrg         .queueid = queue->msm_queue_id,
1255b8e80941Smrg         .bos = (uint64_t)(uintptr_t) bo_list.bo_infos,
1256b8e80941Smrg         .nr_bos = bo_list.count,
1257b8e80941Smrg         .cmds = (uint64_t)(uintptr_t)cmds,
1258b8e80941Smrg         .nr_cmds = entry_count,
1259b8e80941Smrg      };
1260b8e80941Smrg
1261b8e80941Smrg      int ret = drmCommandWriteRead(queue->device->physical_device->local_fd,
1262b8e80941Smrg                                    DRM_MSM_GEM_SUBMIT,
1263b8e80941Smrg                                    &req, sizeof(req));
1264b8e80941Smrg      if (ret) {
1265b8e80941Smrg         fprintf(stderr, "submit failed: %s\n", strerror(errno));
1266b8e80941Smrg         abort();
1267b8e80941Smrg      }
1268b8e80941Smrg
1269b8e80941Smrg      tu_bo_list_destroy(&bo_list);
1270b8e80941Smrg
1271b8e80941Smrg      if (last_submit) {
1272b8e80941Smrg         /* no need to merge fences as queue execution is serialized */
1273b8e80941Smrg         tu_fence_update_fd(&queue->submit_fence, req.fence_fd);
1274b8e80941Smrg      }
1275b8e80941Smrg   }
1276b8e80941Smrg
1277b8e80941Smrg   if (_fence != VK_NULL_HANDLE) {
1278b8e80941Smrg      TU_FROM_HANDLE(tu_fence, fence, _fence);
1279b8e80941Smrg      tu_fence_copy(fence, &queue->submit_fence);
1280b8e80941Smrg   }
1281b8e80941Smrg
1282b8e80941Smrg   return VK_SUCCESS;
1283b8e80941Smrg}
1284b8e80941Smrg
/* Wait for the queue to drain.  Since execution on a queue is serialized,
 * waiting on the fence of the most recent submit suffices. */
VkResult
tu_QueueWaitIdle(VkQueue _queue)
{
   TU_FROM_HANDLE(tu_queue, queue, _queue);

   tu_fence_wait_idle(&queue->submit_fence);

   return VK_SUCCESS;
}
1294b8e80941Smrg
/* Device-wide idle: wait on every queue of every queue family in turn. */
VkResult
tu_DeviceWaitIdle(VkDevice _device)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++) {
         tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
      }
   }
   return VK_SUCCESS;
}
1307b8e80941Smrg
/* List the instance extensions this driver supports, using the standard
 * two-call count/fill protocol. */
VkResult
tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
                                        uint32_t *pPropertyCount,
                                        VkExtensionProperties *pProperties)
{
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers, so any layer name is unknown. */
   if (pLayerName)
      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
      if (tu_supported_instance_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
      }
   }

   /* VK_INCOMPLETE when the caller's array was too small. */
   return vk_outarray_status(&out);
}
1327b8e80941Smrg
1328b8e80941SmrgVkResult
1329b8e80941Smrgtu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
1330b8e80941Smrg                                      const char *pLayerName,
1331b8e80941Smrg                                      uint32_t *pPropertyCount,
1332b8e80941Smrg                                      VkExtensionProperties *pProperties)
1333b8e80941Smrg{
1334b8e80941Smrg   /* We spport no lyaers */
1335b8e80941Smrg   TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
1336b8e80941Smrg   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
1337b8e80941Smrg
1338b8e80941Smrg   /* We spport no lyaers */
1339b8e80941Smrg   if (pLayerName)
1340b8e80941Smrg      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1341b8e80941Smrg
1342b8e80941Smrg   for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
1343b8e80941Smrg      if (device->supported_extensions.extensions[i]) {
1344b8e80941Smrg         vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
1345b8e80941Smrg      }
1346b8e80941Smrg   }
1347b8e80941Smrg
1348b8e80941Smrg   return vk_outarray_status(&out);
1349b8e80941Smrg}
1350b8e80941Smrg
1351b8e80941SmrgPFN_vkVoidFunction
1352b8e80941Smrgtu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
1353b8e80941Smrg{
1354b8e80941Smrg   TU_FROM_HANDLE(tu_instance, instance, _instance);
1355b8e80941Smrg
1356b8e80941Smrg   return tu_lookup_entrypoint_checked(
1357b8e80941Smrg      pName, instance ? instance->api_version : 0,
1358b8e80941Smrg      instance ? &instance->enabled_extensions : NULL, NULL);
1359b8e80941Smrg}
1360b8e80941Smrg
1361b8e80941Smrg/* The loader wants us to expose a second GetInstanceProcAddr function
1362b8e80941Smrg * to work around certain LD_PRELOAD issues seen in apps.
1363b8e80941Smrg */
1364b8e80941SmrgPUBLIC
1365b8e80941SmrgVKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1366b8e80941Smrgvk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);
1367b8e80941Smrg
1368b8e80941SmrgPUBLIC
1369b8e80941SmrgVKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
1370b8e80941Smrgvk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
1371b8e80941Smrg{
1372b8e80941Smrg   return tu_GetInstanceProcAddr(instance, pName);
1373b8e80941Smrg}
1374b8e80941Smrg
1375b8e80941SmrgPFN_vkVoidFunction
1376b8e80941Smrgtu_GetDeviceProcAddr(VkDevice _device, const char *pName)
1377b8e80941Smrg{
1378b8e80941Smrg   TU_FROM_HANDLE(tu_device, device, _device);
1379b8e80941Smrg
1380b8e80941Smrg   return tu_lookup_entrypoint_checked(pName, device->instance->api_version,
1381b8e80941Smrg                                       &device->instance->enabled_extensions,
1382b8e80941Smrg                                       &device->enabled_extensions);
1383b8e80941Smrg}
1384b8e80941Smrg
1385b8e80941Smrgstatic VkResult
1386b8e80941Smrgtu_alloc_memory(struct tu_device *device,
1387b8e80941Smrg                const VkMemoryAllocateInfo *pAllocateInfo,
1388b8e80941Smrg                const VkAllocationCallbacks *pAllocator,
1389b8e80941Smrg                VkDeviceMemory *pMem)
1390b8e80941Smrg{
1391b8e80941Smrg   struct tu_device_memory *mem;
1392b8e80941Smrg   VkResult result;
1393b8e80941Smrg
1394b8e80941Smrg   assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
1395b8e80941Smrg
1396b8e80941Smrg   if (pAllocateInfo->allocationSize == 0) {
1397b8e80941Smrg      /* Apparently, this is allowed */
1398b8e80941Smrg      *pMem = VK_NULL_HANDLE;
1399b8e80941Smrg      return VK_SUCCESS;
1400b8e80941Smrg   }
1401b8e80941Smrg
1402b8e80941Smrg   mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
1403b8e80941Smrg                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1404b8e80941Smrg   if (mem == NULL)
1405b8e80941Smrg      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1406b8e80941Smrg
1407b8e80941Smrg   const VkImportMemoryFdInfoKHR *fd_info =
1408b8e80941Smrg      vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
1409b8e80941Smrg   if (fd_info && !fd_info->handleType)
1410b8e80941Smrg      fd_info = NULL;
1411b8e80941Smrg
1412b8e80941Smrg   if (fd_info) {
1413b8e80941Smrg      assert(fd_info->handleType ==
1414b8e80941Smrg                VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
1415b8e80941Smrg             fd_info->handleType ==
1416b8e80941Smrg                VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
1417b8e80941Smrg
1418b8e80941Smrg      /*
1419b8e80941Smrg       * TODO Importing the same fd twice gives us the same handle without
1420b8e80941Smrg       * reference counting.  We need to maintain a per-instance handle-to-bo
1421b8e80941Smrg       * table and add reference count to tu_bo.
1422b8e80941Smrg       */
1423b8e80941Smrg      result = tu_bo_init_dmabuf(device, &mem->bo,
1424b8e80941Smrg                                 pAllocateInfo->allocationSize, fd_info->fd);
1425b8e80941Smrg      if (result == VK_SUCCESS) {
1426b8e80941Smrg         /* take ownership and close the fd */
1427b8e80941Smrg         close(fd_info->fd);
1428b8e80941Smrg      }
1429b8e80941Smrg   } else {
1430b8e80941Smrg      result =
1431b8e80941Smrg         tu_bo_init_new(device, &mem->bo, pAllocateInfo->allocationSize);
1432b8e80941Smrg   }
1433b8e80941Smrg
1434b8e80941Smrg   if (result != VK_SUCCESS) {
1435b8e80941Smrg      vk_free2(&device->alloc, pAllocator, mem);
1436b8e80941Smrg      return result;
1437b8e80941Smrg   }
1438b8e80941Smrg
1439b8e80941Smrg   mem->size = pAllocateInfo->allocationSize;
1440b8e80941Smrg   mem->type_index = pAllocateInfo->memoryTypeIndex;
1441b8e80941Smrg
1442b8e80941Smrg   mem->map = NULL;
1443b8e80941Smrg   mem->user_ptr = NULL;
1444b8e80941Smrg
1445b8e80941Smrg   *pMem = tu_device_memory_to_handle(mem);
1446b8e80941Smrg
1447b8e80941Smrg   return VK_SUCCESS;
1448b8e80941Smrg}
1449b8e80941Smrg
1450b8e80941SmrgVkResult
1451b8e80941Smrgtu_AllocateMemory(VkDevice _device,
1452b8e80941Smrg                  const VkMemoryAllocateInfo *pAllocateInfo,
1453b8e80941Smrg                  const VkAllocationCallbacks *pAllocator,
1454b8e80941Smrg                  VkDeviceMemory *pMem)
1455b8e80941Smrg{
1456b8e80941Smrg   TU_FROM_HANDLE(tu_device, device, _device);
1457b8e80941Smrg   return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
1458b8e80941Smrg}
1459b8e80941Smrg
1460b8e80941Smrgvoid
1461b8e80941Smrgtu_FreeMemory(VkDevice _device,
1462b8e80941Smrg              VkDeviceMemory _mem,
1463b8e80941Smrg              const VkAllocationCallbacks *pAllocator)
1464b8e80941Smrg{
1465b8e80941Smrg   TU_FROM_HANDLE(tu_device, device, _device);
1466b8e80941Smrg   TU_FROM_HANDLE(tu_device_memory, mem, _mem);
1467b8e80941Smrg
1468b8e80941Smrg   if (mem == NULL)
1469b8e80941Smrg      return;
1470b8e80941Smrg
1471b8e80941Smrg   tu_bo_finish(device, &mem->bo);
1472b8e80941Smrg   vk_free2(&device->alloc, pAllocator, mem);
1473b8e80941Smrg}
1474b8e80941Smrg
1475b8e80941SmrgVkResult
1476b8e80941Smrgtu_MapMemory(VkDevice _device,
1477b8e80941Smrg             VkDeviceMemory _memory,
1478b8e80941Smrg             VkDeviceSize offset,
1479b8e80941Smrg             VkDeviceSize size,
1480b8e80941Smrg             VkMemoryMapFlags flags,
1481b8e80941Smrg             void **ppData)
1482b8e80941Smrg{
1483b8e80941Smrg   TU_FROM_HANDLE(tu_device, device, _device);
1484b8e80941Smrg   TU_FROM_HANDLE(tu_device_memory, mem, _memory);
1485b8e80941Smrg   VkResult result;
1486b8e80941Smrg
1487b8e80941Smrg   if (mem == NULL) {
1488b8e80941Smrg      *ppData = NULL;
1489b8e80941Smrg      return VK_SUCCESS;
1490b8e80941Smrg   }
1491b8e80941Smrg
1492b8e80941Smrg   if (mem->user_ptr) {
1493b8e80941Smrg      *ppData = mem->user_ptr;
1494b8e80941Smrg   } else if (!mem->map) {
1495b8e80941Smrg      result = tu_bo_map(device, &mem->bo);
1496b8e80941Smrg      if (result != VK_SUCCESS)
1497b8e80941Smrg         return result;
1498b8e80941Smrg      *ppData = mem->map = mem->bo.map;
1499b8e80941Smrg   } else
1500b8e80941Smrg      *ppData = mem->map;
1501b8e80941Smrg
1502b8e80941Smrg   if (*ppData) {
1503b8e80941Smrg      *ppData += offset;
1504b8e80941Smrg      return VK_SUCCESS;
1505b8e80941Smrg   }
1506b8e80941Smrg
1507b8e80941Smrg   return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
1508b8e80941Smrg}
1509b8e80941Smrg
1510b8e80941Smrgvoid
1511b8e80941Smrgtu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
1512b8e80941Smrg{
1513b8e80941Smrg   /* I do not see any unmapping done by the freedreno Gallium driver. */
1514b8e80941Smrg}
1515b8e80941Smrg
1516b8e80941SmrgVkResult
1517b8e80941Smrgtu_FlushMappedMemoryRanges(VkDevice _device,
1518b8e80941Smrg                           uint32_t memoryRangeCount,
1519b8e80941Smrg                           const VkMappedMemoryRange *pMemoryRanges)
1520b8e80941Smrg{
1521b8e80941Smrg   return VK_SUCCESS;
1522b8e80941Smrg}
1523b8e80941Smrg
1524b8e80941SmrgVkResult
1525b8e80941Smrgtu_InvalidateMappedMemoryRanges(VkDevice _device,
1526b8e80941Smrg                                uint32_t memoryRangeCount,
1527b8e80941Smrg                                const VkMappedMemoryRange *pMemoryRanges)
1528b8e80941Smrg{
1529b8e80941Smrg   return VK_SUCCESS;
1530b8e80941Smrg}
1531b8e80941Smrg
1532b8e80941Smrgvoid
1533b8e80941Smrgtu_GetBufferMemoryRequirements(VkDevice _device,
1534b8e80941Smrg                               VkBuffer _buffer,
1535b8e80941Smrg                               VkMemoryRequirements *pMemoryRequirements)
1536b8e80941Smrg{
1537b8e80941Smrg   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
1538b8e80941Smrg
1539b8e80941Smrg   pMemoryRequirements->memoryTypeBits = 1;
1540b8e80941Smrg   pMemoryRequirements->alignment = 16;
1541b8e80941Smrg   pMemoryRequirements->size =
1542b8e80941Smrg      align64(buffer->size, pMemoryRequirements->alignment);
1543b8e80941Smrg}
1544b8e80941Smrg
1545b8e80941Smrgvoid
1546b8e80941Smrgtu_GetBufferMemoryRequirements2(
1547b8e80941Smrg   VkDevice device,
1548b8e80941Smrg   const VkBufferMemoryRequirementsInfo2 *pInfo,
1549b8e80941Smrg   VkMemoryRequirements2 *pMemoryRequirements)
1550b8e80941Smrg{
1551b8e80941Smrg   tu_GetBufferMemoryRequirements(device, pInfo->buffer,
1552b8e80941Smrg                                  &pMemoryRequirements->memoryRequirements);
1553b8e80941Smrg}
1554b8e80941Smrg
1555b8e80941Smrgvoid
1556b8e80941Smrgtu_GetImageMemoryRequirements(VkDevice _device,
1557b8e80941Smrg                              VkImage _image,
1558b8e80941Smrg                              VkMemoryRequirements *pMemoryRequirements)
1559b8e80941Smrg{
1560b8e80941Smrg   TU_FROM_HANDLE(tu_image, image, _image);
1561b8e80941Smrg
1562b8e80941Smrg   pMemoryRequirements->memoryTypeBits = 1;
1563b8e80941Smrg   pMemoryRequirements->size = image->size;
1564b8e80941Smrg   pMemoryRequirements->alignment = image->alignment;
1565b8e80941Smrg}
1566b8e80941Smrg
1567b8e80941Smrgvoid
1568b8e80941Smrgtu_GetImageMemoryRequirements2(VkDevice device,
1569b8e80941Smrg                               const VkImageMemoryRequirementsInfo2 *pInfo,
1570b8e80941Smrg                               VkMemoryRequirements2 *pMemoryRequirements)
1571b8e80941Smrg{
1572b8e80941Smrg   tu_GetImageMemoryRequirements(device, pInfo->image,
1573b8e80941Smrg                                 &pMemoryRequirements->memoryRequirements);
1574b8e80941Smrg}
1575b8e80941Smrg
1576b8e80941Smrgvoid
1577b8e80941Smrgtu_GetImageSparseMemoryRequirements(
1578b8e80941Smrg   VkDevice device,
1579b8e80941Smrg   VkImage image,
1580b8e80941Smrg   uint32_t *pSparseMemoryRequirementCount,
1581b8e80941Smrg   VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
1582b8e80941Smrg{
1583b8e80941Smrg   tu_stub();
1584b8e80941Smrg}
1585b8e80941Smrg
1586b8e80941Smrgvoid
1587b8e80941Smrgtu_GetImageSparseMemoryRequirements2(
1588b8e80941Smrg   VkDevice device,
1589b8e80941Smrg   const VkImageSparseMemoryRequirementsInfo2 *pInfo,
1590b8e80941Smrg   uint32_t *pSparseMemoryRequirementCount,
1591b8e80941Smrg   VkSparseImageMemoryRequirements2 *pSparseMemoryRequirements)
1592b8e80941Smrg{
1593b8e80941Smrg   tu_stub();
1594b8e80941Smrg}
1595b8e80941Smrg
1596b8e80941Smrgvoid
1597b8e80941Smrgtu_GetDeviceMemoryCommitment(VkDevice device,
1598b8e80941Smrg                             VkDeviceMemory memory,
1599b8e80941Smrg                             VkDeviceSize *pCommittedMemoryInBytes)
1600b8e80941Smrg{
1601b8e80941Smrg   *pCommittedMemoryInBytes = 0;
1602b8e80941Smrg}
1603b8e80941Smrg
1604b8e80941SmrgVkResult
1605b8e80941Smrgtu_BindBufferMemory2(VkDevice device,
1606b8e80941Smrg                     uint32_t bindInfoCount,
1607b8e80941Smrg                     const VkBindBufferMemoryInfo *pBindInfos)
1608b8e80941Smrg{
1609b8e80941Smrg   for (uint32_t i = 0; i < bindInfoCount; ++i) {
1610b8e80941Smrg      TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
1611b8e80941Smrg      TU_FROM_HANDLE(tu_buffer, buffer, pBindInfos[i].buffer);
1612b8e80941Smrg
1613b8e80941Smrg      if (mem) {
1614b8e80941Smrg         buffer->bo = &mem->bo;
1615b8e80941Smrg         buffer->bo_offset = pBindInfos[i].memoryOffset;
1616b8e80941Smrg      } else {
1617b8e80941Smrg         buffer->bo = NULL;
1618b8e80941Smrg      }
1619b8e80941Smrg   }
1620b8e80941Smrg   return VK_SUCCESS;
1621b8e80941Smrg}
1622b8e80941Smrg
1623b8e80941SmrgVkResult
1624b8e80941Smrgtu_BindBufferMemory(VkDevice device,
1625b8e80941Smrg                    VkBuffer buffer,
1626b8e80941Smrg                    VkDeviceMemory memory,
1627b8e80941Smrg                    VkDeviceSize memoryOffset)
1628b8e80941Smrg{
1629b8e80941Smrg   const VkBindBufferMemoryInfo info = {
1630b8e80941Smrg      .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO,
1631b8e80941Smrg      .buffer = buffer,
1632b8e80941Smrg      .memory = memory,
1633b8e80941Smrg      .memoryOffset = memoryOffset
1634b8e80941Smrg   };
1635b8e80941Smrg
1636b8e80941Smrg   return tu_BindBufferMemory2(device, 1, &info);
1637b8e80941Smrg}
1638b8e80941Smrg
1639b8e80941SmrgVkResult
1640b8e80941Smrgtu_BindImageMemory2(VkDevice device,
1641b8e80941Smrg                    uint32_t bindInfoCount,
1642b8e80941Smrg                    const VkBindImageMemoryInfo *pBindInfos)
1643b8e80941Smrg{
1644b8e80941Smrg   for (uint32_t i = 0; i < bindInfoCount; ++i) {
1645b8e80941Smrg      TU_FROM_HANDLE(tu_image, image, pBindInfos[i].image);
1646b8e80941Smrg      TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
1647b8e80941Smrg
1648b8e80941Smrg      if (mem) {
1649b8e80941Smrg         image->bo = &mem->bo;
1650b8e80941Smrg         image->bo_offset = pBindInfos[i].memoryOffset;
1651b8e80941Smrg      } else {
1652b8e80941Smrg         image->bo = NULL;
1653b8e80941Smrg         image->bo_offset = 0;
1654b8e80941Smrg      }
1655b8e80941Smrg   }
1656b8e80941Smrg
1657b8e80941Smrg   return VK_SUCCESS;
1658b8e80941Smrg}
1659b8e80941Smrg
1660b8e80941SmrgVkResult
1661b8e80941Smrgtu_BindImageMemory(VkDevice device,
1662b8e80941Smrg                   VkImage image,
1663b8e80941Smrg                   VkDeviceMemory memory,
1664b8e80941Smrg                   VkDeviceSize memoryOffset)
1665b8e80941Smrg{
1666b8e80941Smrg   const VkBindImageMemoryInfo info = {
1667b8e80941Smrg      .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO,
1668b8e80941Smrg      .image = image,
1669b8e80941Smrg      .memory = memory,
1670b8e80941Smrg      .memoryOffset = memoryOffset
1671b8e80941Smrg   };
1672b8e80941Smrg
1673b8e80941Smrg   return tu_BindImageMemory2(device, 1, &info);
1674b8e80941Smrg}
1675b8e80941Smrg
1676b8e80941SmrgVkResult
1677b8e80941Smrgtu_QueueBindSparse(VkQueue _queue,
1678b8e80941Smrg                   uint32_t bindInfoCount,
1679b8e80941Smrg                   const VkBindSparseInfo *pBindInfo,
1680b8e80941Smrg                   VkFence _fence)
1681b8e80941Smrg{
1682b8e80941Smrg   return VK_SUCCESS;
1683b8e80941Smrg}
1684b8e80941Smrg
1685b8e80941Smrg// Queue semaphore functions
1686b8e80941Smrg
1687b8e80941SmrgVkResult
1688b8e80941Smrgtu_CreateSemaphore(VkDevice _device,
1689b8e80941Smrg                   const VkSemaphoreCreateInfo *pCreateInfo,
1690b8e80941Smrg                   const VkAllocationCallbacks *pAllocator,
1691b8e80941Smrg                   VkSemaphore *pSemaphore)
1692b8e80941Smrg{
1693b8e80941Smrg   TU_FROM_HANDLE(tu_device, device, _device);
1694b8e80941Smrg
1695b8e80941Smrg   struct tu_semaphore *sem =
1696b8e80941Smrg      vk_alloc2(&device->alloc, pAllocator, sizeof(*sem), 8,
1697b8e80941Smrg                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1698b8e80941Smrg   if (!sem)
1699b8e80941Smrg      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1700b8e80941Smrg
1701b8e80941Smrg   *pSemaphore = tu_semaphore_to_handle(sem);
1702b8e80941Smrg   return VK_SUCCESS;
1703b8e80941Smrg}
1704b8e80941Smrg
1705b8e80941Smrgvoid
1706b8e80941Smrgtu_DestroySemaphore(VkDevice _device,
1707b8e80941Smrg                    VkSemaphore _semaphore,
1708b8e80941Smrg                    const VkAllocationCallbacks *pAllocator)
1709b8e80941Smrg{
1710b8e80941Smrg   TU_FROM_HANDLE(tu_device, device, _device);
1711b8e80941Smrg   TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);
1712b8e80941Smrg   if (!_semaphore)
1713b8e80941Smrg      return;
1714b8e80941Smrg
1715b8e80941Smrg   vk_free2(&device->alloc, pAllocator, sem);
1716b8e80941Smrg}
1717b8e80941Smrg
1718b8e80941SmrgVkResult
1719b8e80941Smrgtu_CreateEvent(VkDevice _device,
1720b8e80941Smrg               const VkEventCreateInfo *pCreateInfo,
1721b8e80941Smrg               const VkAllocationCallbacks *pAllocator,
1722b8e80941Smrg               VkEvent *pEvent)
1723b8e80941Smrg{
1724b8e80941Smrg   TU_FROM_HANDLE(tu_device, device, _device);
1725b8e80941Smrg   struct tu_event *event =
1726b8e80941Smrg      vk_alloc2(&device->alloc, pAllocator, sizeof(*event), 8,
1727b8e80941Smrg                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1728b8e80941Smrg
1729b8e80941Smrg   if (!event)
1730b8e80941Smrg      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1731b8e80941Smrg
1732b8e80941Smrg   *pEvent = tu_event_to_handle(event);
1733b8e80941Smrg
1734b8e80941Smrg   return VK_SUCCESS;
1735b8e80941Smrg}
1736b8e80941Smrg
1737b8e80941Smrgvoid
1738b8e80941Smrgtu_DestroyEvent(VkDevice _device,
1739b8e80941Smrg                VkEvent _event,
1740b8e80941Smrg                const VkAllocationCallbacks *pAllocator)
1741b8e80941Smrg{
1742b8e80941Smrg   TU_FROM_HANDLE(tu_device, device, _device);
1743b8e80941Smrg   TU_FROM_HANDLE(tu_event, event, _event);
1744b8e80941Smrg
1745b8e80941Smrg   if (!event)
1746b8e80941Smrg      return;
1747b8e80941Smrg   vk_free2(&device->alloc, pAllocator, event);
1748b8e80941Smrg}
1749b8e80941Smrg
1750b8e80941SmrgVkResult
1751b8e80941Smrgtu_GetEventStatus(VkDevice _device, VkEvent _event)
1752b8e80941Smrg{
1753b8e80941Smrg   TU_FROM_HANDLE(tu_event, event, _event);
1754b8e80941Smrg
1755b8e80941Smrg   if (*event->map == 1)
1756b8e80941Smrg      return VK_EVENT_SET;
1757b8e80941Smrg   return VK_EVENT_RESET;
1758b8e80941Smrg}
1759b8e80941Smrg
1760b8e80941SmrgVkResult
1761b8e80941Smrgtu_SetEvent(VkDevice _device, VkEvent _event)
1762b8e80941Smrg{
1763b8e80941Smrg   TU_FROM_HANDLE(tu_event, event, _event);
1764b8e80941Smrg   *event->map = 1;
1765b8e80941Smrg
1766b8e80941Smrg   return VK_SUCCESS;
1767b8e80941Smrg}
1768b8e80941Smrg
1769b8e80941SmrgVkResult
1770b8e80941Smrgtu_ResetEvent(VkDevice _device, VkEvent _event)
1771b8e80941Smrg{
1772b8e80941Smrg   TU_FROM_HANDLE(tu_event, event, _event);
1773b8e80941Smrg   *event->map = 0;
1774b8e80941Smrg
1775b8e80941Smrg   return VK_SUCCESS;
1776b8e80941Smrg}
1777b8e80941Smrg
1778b8e80941SmrgVkResult
1779b8e80941Smrgtu_CreateBuffer(VkDevice _device,
1780b8e80941Smrg                const VkBufferCreateInfo *pCreateInfo,
1781b8e80941Smrg                const VkAllocationCallbacks *pAllocator,
1782b8e80941Smrg                VkBuffer *pBuffer)
1783b8e80941Smrg{
1784b8e80941Smrg   TU_FROM_HANDLE(tu_device, device, _device);
1785b8e80941Smrg   struct tu_buffer *buffer;
1786b8e80941Smrg
1787b8e80941Smrg   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
1788b8e80941Smrg
1789b8e80941Smrg   buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
1790b8e80941Smrg                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1791b8e80941Smrg   if (buffer == NULL)
1792b8e80941Smrg      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1793b8e80941Smrg
1794b8e80941Smrg   buffer->size = pCreateInfo->size;
1795b8e80941Smrg   buffer->usage = pCreateInfo->usage;
1796b8e80941Smrg   buffer->flags = pCreateInfo->flags;
1797b8e80941Smrg
1798b8e80941Smrg   *pBuffer = tu_buffer_to_handle(buffer);
1799b8e80941Smrg
1800b8e80941Smrg   return VK_SUCCESS;
1801b8e80941Smrg}
1802b8e80941Smrg
1803b8e80941Smrgvoid
1804b8e80941Smrgtu_DestroyBuffer(VkDevice _device,
1805b8e80941Smrg                 VkBuffer _buffer,
1806b8e80941Smrg                 const VkAllocationCallbacks *pAllocator)
1807b8e80941Smrg{
1808b8e80941Smrg   TU_FROM_HANDLE(tu_device, device, _device);
1809b8e80941Smrg   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
1810b8e80941Smrg
1811b8e80941Smrg   if (!buffer)
1812b8e80941Smrg      return;
1813b8e80941Smrg
1814b8e80941Smrg   vk_free2(&device->alloc, pAllocator, buffer);
1815b8e80941Smrg}
1816b8e80941Smrg
1817b8e80941Smrgstatic uint32_t
1818b8e80941Smrgtu_surface_max_layer_count(struct tu_image_view *iview)
1819b8e80941Smrg{
1820b8e80941Smrg   return iview->type == VK_IMAGE_VIEW_TYPE_3D
1821b8e80941Smrg             ? iview->extent.depth
1822b8e80941Smrg             : (iview->base_layer + iview->layer_count);
1823b8e80941Smrg}
1824b8e80941Smrg
1825b8e80941SmrgVkResult
1826b8e80941Smrgtu_CreateFramebuffer(VkDevice _device,
1827b8e80941Smrg                     const VkFramebufferCreateInfo *pCreateInfo,
1828b8e80941Smrg                     const VkAllocationCallbacks *pAllocator,
1829b8e80941Smrg                     VkFramebuffer *pFramebuffer)
1830b8e80941Smrg{
1831b8e80941Smrg   TU_FROM_HANDLE(tu_device, device, _device);
1832b8e80941Smrg   struct tu_framebuffer *framebuffer;
1833b8e80941Smrg
1834b8e80941Smrg   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
1835b8e80941Smrg
1836b8e80941Smrg   size_t size = sizeof(*framebuffer) + sizeof(struct tu_attachment_info) *
1837b8e80941Smrg                                           pCreateInfo->attachmentCount;
1838b8e80941Smrg   framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
1839b8e80941Smrg                           VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1840b8e80941Smrg   if (framebuffer == NULL)
1841b8e80941Smrg      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1842b8e80941Smrg
1843b8e80941Smrg   framebuffer->attachment_count = pCreateInfo->attachmentCount;
1844b8e80941Smrg   framebuffer->width = pCreateInfo->width;
1845b8e80941Smrg   framebuffer->height = pCreateInfo->height;
1846b8e80941Smrg   framebuffer->layers = pCreateInfo->layers;
1847b8e80941Smrg   for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
1848b8e80941Smrg      VkImageView _iview = pCreateInfo->pAttachments[i];
1849b8e80941Smrg      struct tu_image_view *iview = tu_image_view_from_handle(_iview);
1850b8e80941Smrg      framebuffer->attachments[i].attachment = iview;
1851b8e80941Smrg
1852b8e80941Smrg      framebuffer->width = MIN2(framebuffer->width, iview->extent.width);
1853b8e80941Smrg      framebuffer->height = MIN2(framebuffer->height, iview->extent.height);
1854b8e80941Smrg      framebuffer->layers =
1855b8e80941Smrg         MIN2(framebuffer->layers, tu_surface_max_layer_count(iview));
1856b8e80941Smrg   }
1857b8e80941Smrg
1858b8e80941Smrg   *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
1859b8e80941Smrg   return VK_SUCCESS;
1860b8e80941Smrg}
1861b8e80941Smrg
1862b8e80941Smrgvoid
1863b8e80941Smrgtu_DestroyFramebuffer(VkDevice _device,
1864b8e80941Smrg                      VkFramebuffer _fb,
1865b8e80941Smrg                      const VkAllocationCallbacks *pAllocator)
1866b8e80941Smrg{
1867b8e80941Smrg   TU_FROM_HANDLE(tu_device, device, _device);
1868b8e80941Smrg   TU_FROM_HANDLE(tu_framebuffer, fb, _fb);
1869b8e80941Smrg
1870b8e80941Smrg   if (!fb)
1871b8e80941Smrg      return;
1872b8e80941Smrg   vk_free2(&device->alloc, pAllocator, fb);
1873b8e80941Smrg}
1874b8e80941Smrg
1875b8e80941Smrgstatic void
1876b8e80941Smrgtu_init_sampler(struct tu_device *device,
1877b8e80941Smrg                struct tu_sampler *sampler,
1878b8e80941Smrg                const VkSamplerCreateInfo *pCreateInfo)
1879b8e80941Smrg{
1880b8e80941Smrg}
1881b8e80941Smrg
1882b8e80941SmrgVkResult
1883b8e80941Smrgtu_CreateSampler(VkDevice _device,
1884b8e80941Smrg                 const VkSamplerCreateInfo *pCreateInfo,
1885b8e80941Smrg                 const VkAllocationCallbacks *pAllocator,
1886b8e80941Smrg                 VkSampler *pSampler)
1887b8e80941Smrg{
1888b8e80941Smrg   TU_FROM_HANDLE(tu_device, device, _device);
1889b8e80941Smrg   struct tu_sampler *sampler;
1890b8e80941Smrg
1891b8e80941Smrg   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
1892b8e80941Smrg
1893b8e80941Smrg   sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
1894b8e80941Smrg                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1895b8e80941Smrg   if (!sampler)
1896b8e80941Smrg      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1897b8e80941Smrg
1898b8e80941Smrg   tu_init_sampler(device, sampler, pCreateInfo);
1899b8e80941Smrg   *pSampler = tu_sampler_to_handle(sampler);
1900b8e80941Smrg
1901b8e80941Smrg   return VK_SUCCESS;
1902b8e80941Smrg}
1903b8e80941Smrg
1904b8e80941Smrgvoid
1905b8e80941Smrgtu_DestroySampler(VkDevice _device,
1906b8e80941Smrg                  VkSampler _sampler,
1907b8e80941Smrg                  const VkAllocationCallbacks *pAllocator)
1908b8e80941Smrg{
1909b8e80941Smrg   TU_FROM_HANDLE(tu_device, device, _device);
1910b8e80941Smrg   TU_FROM_HANDLE(tu_sampler, sampler, _sampler);
1911b8e80941Smrg
1912b8e80941Smrg   if (!sampler)
1913b8e80941Smrg      return;
1914b8e80941Smrg   vk_free2(&device->alloc, pAllocator, sampler);
1915b8e80941Smrg}
1916b8e80941Smrg
1917b8e80941Smrg/* vk_icd.h does not declare this function, so we declare it here to
1918b8e80941Smrg * suppress Wmissing-prototypes.
1919b8e80941Smrg */
1920b8e80941SmrgPUBLIC VKAPI_ATTR VkResult VKAPI_CALL
1921b8e80941Smrgvk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);
1922b8e80941Smrg
1923b8e80941SmrgPUBLIC VKAPI_ATTR VkResult VKAPI_CALL
1924b8e80941Smrgvk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
1925b8e80941Smrg{
1926b8e80941Smrg   /* For the full details on loader interface versioning, see
1927b8e80941Smrg    * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
1928b8e80941Smrg    * What follows is a condensed summary, to help you navigate the large and
1929b8e80941Smrg    * confusing official doc.
1930b8e80941Smrg    *
1931b8e80941Smrg    *   - Loader interface v0 is incompatible with later versions. We don't
1932b8e80941Smrg    *     support it.
1933b8e80941Smrg    *
1934b8e80941Smrg    *   - In loader interface v1:
1935b8e80941Smrg    *       - The first ICD entrypoint called by the loader is
1936b8e80941Smrg    *         vk_icdGetInstanceProcAddr(). The ICD must statically expose this
1937b8e80941Smrg    *         entrypoint.
1938b8e80941Smrg    *       - The ICD must statically expose no other Vulkan symbol unless it
1939b8e80941Smrg    * is linked with -Bsymbolic.
1940b8e80941Smrg    *       - Each dispatchable Vulkan handle created by the ICD must be
1941b8e80941Smrg    *         a pointer to a struct whose first member is VK_LOADER_DATA. The
1942b8e80941Smrg    *         ICD must initialize VK_LOADER_DATA.loadMagic to
1943b8e80941Smrg    * ICD_LOADER_MAGIC.
1944b8e80941Smrg    *       - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
1945b8e80941Smrg    *         vkDestroySurfaceKHR(). The ICD must be capable of working with
1946b8e80941Smrg    *         such loader-managed surfaces.
1947b8e80941Smrg    *
1948b8e80941Smrg    *    - Loader interface v2 differs from v1 in:
1949b8e80941Smrg    *       - The first ICD entrypoint called by the loader is
1950b8e80941Smrg    *         vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
1951b8e80941Smrg    *         statically expose this entrypoint.
1952b8e80941Smrg    *
1953b8e80941Smrg    *    - Loader interface v3 differs from v2 in:
1954b8e80941Smrg    *        - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
1955b8e80941Smrg    *          vkDestroySurfaceKHR(), and other API which uses VKSurfaceKHR,
1956b8e80941Smrg    *          because the loader no longer does so.
1957b8e80941Smrg    */
1958b8e80941Smrg   *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
1959b8e80941Smrg   return VK_SUCCESS;
1960b8e80941Smrg}
1961b8e80941Smrg
1962b8e80941SmrgVkResult
1963b8e80941Smrgtu_GetMemoryFdKHR(VkDevice _device,
1964b8e80941Smrg                  const VkMemoryGetFdInfoKHR *pGetFdInfo,
1965b8e80941Smrg                  int *pFd)
1966b8e80941Smrg{
1967b8e80941Smrg   TU_FROM_HANDLE(tu_device, device, _device);
1968b8e80941Smrg   TU_FROM_HANDLE(tu_device_memory, memory, pGetFdInfo->memory);
1969b8e80941Smrg
1970b8e80941Smrg   assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);
1971b8e80941Smrg
1972b8e80941Smrg   /* At the moment, we support only the below handle types. */
1973b8e80941Smrg   assert(pGetFdInfo->handleType ==
1974b8e80941Smrg             VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
1975b8e80941Smrg          pGetFdInfo->handleType ==
1976b8e80941Smrg             VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
1977b8e80941Smrg
1978b8e80941Smrg   int prime_fd = tu_bo_export_dmabuf(device, &memory->bo);
1979b8e80941Smrg   if (prime_fd < 0)
1980b8e80941Smrg      return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
1981b8e80941Smrg
1982b8e80941Smrg   *pFd = prime_fd;
1983b8e80941Smrg   return VK_SUCCESS;
1984b8e80941Smrg}
1985b8e80941Smrg
1986b8e80941SmrgVkResult
1987b8e80941Smrgtu_GetMemoryFdPropertiesKHR(VkDevice _device,
1988b8e80941Smrg                            VkExternalMemoryHandleTypeFlagBits handleType,
1989b8e80941Smrg                            int fd,
1990b8e80941Smrg                            VkMemoryFdPropertiesKHR *pMemoryFdProperties)
1991b8e80941Smrg{
1992b8e80941Smrg   assert(handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
1993b8e80941Smrg   pMemoryFdProperties->memoryTypeBits = 1;
1994b8e80941Smrg   return VK_SUCCESS;
1995b8e80941Smrg}
1996b8e80941Smrg
1997b8e80941Smrgvoid
1998b8e80941Smrgtu_GetPhysicalDeviceExternalSemaphoreProperties(
1999b8e80941Smrg   VkPhysicalDevice physicalDevice,
2000b8e80941Smrg   const VkPhysicalDeviceExternalSemaphoreInfo *pExternalSemaphoreInfo,
2001b8e80941Smrg   VkExternalSemaphoreProperties *pExternalSemaphoreProperties)
2002b8e80941Smrg{
2003b8e80941Smrg   pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
2004b8e80941Smrg   pExternalSemaphoreProperties->compatibleHandleTypes = 0;
2005b8e80941Smrg   pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
2006b8e80941Smrg}
2007b8e80941Smrg
2008b8e80941Smrgvoid
2009b8e80941Smrgtu_GetPhysicalDeviceExternalFenceProperties(
2010b8e80941Smrg   VkPhysicalDevice physicalDevice,
2011b8e80941Smrg   const VkPhysicalDeviceExternalFenceInfo *pExternalFenceInfo,
2012b8e80941Smrg   VkExternalFenceProperties *pExternalFenceProperties)
2013b8e80941Smrg{
2014b8e80941Smrg   pExternalFenceProperties->exportFromImportedHandleTypes = 0;
2015b8e80941Smrg   pExternalFenceProperties->compatibleHandleTypes = 0;
2016b8e80941Smrg   pExternalFenceProperties->externalFenceFeatures = 0;
2017b8e80941Smrg}
2018b8e80941Smrg
2019b8e80941SmrgVkResult
2020b8e80941Smrgtu_CreateDebugReportCallbackEXT(
2021b8e80941Smrg   VkInstance _instance,
2022b8e80941Smrg   const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
2023b8e80941Smrg   const VkAllocationCallbacks *pAllocator,
2024b8e80941Smrg   VkDebugReportCallbackEXT *pCallback)
2025b8e80941Smrg{
2026b8e80941Smrg   TU_FROM_HANDLE(tu_instance, instance, _instance);
2027b8e80941Smrg   return vk_create_debug_report_callback(&instance->debug_report_callbacks,
2028b8e80941Smrg                                          pCreateInfo, pAllocator,
2029b8e80941Smrg                                          &instance->alloc, pCallback);
2030b8e80941Smrg}
2031b8e80941Smrg
2032b8e80941Smrgvoid
2033b8e80941Smrgtu_DestroyDebugReportCallbackEXT(VkInstance _instance,
2034b8e80941Smrg                                 VkDebugReportCallbackEXT _callback,
2035b8e80941Smrg                                 const VkAllocationCallbacks *pAllocator)
2036b8e80941Smrg{
2037b8e80941Smrg   TU_FROM_HANDLE(tu_instance, instance, _instance);
2038b8e80941Smrg   vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
2039b8e80941Smrg                                    _callback, pAllocator, &instance->alloc);
2040b8e80941Smrg}
2041b8e80941Smrg
2042b8e80941Smrgvoid
2043b8e80941Smrgtu_DebugReportMessageEXT(VkInstance _instance,
2044b8e80941Smrg                         VkDebugReportFlagsEXT flags,
2045b8e80941Smrg                         VkDebugReportObjectTypeEXT objectType,
2046b8e80941Smrg                         uint64_t object,
2047b8e80941Smrg                         size_t location,
2048b8e80941Smrg                         int32_t messageCode,
2049b8e80941Smrg                         const char *pLayerPrefix,
2050b8e80941Smrg                         const char *pMessage)
2051b8e80941Smrg{
2052b8e80941Smrg   TU_FROM_HANDLE(tu_instance, instance, _instance);
2053b8e80941Smrg   vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
2054b8e80941Smrg                   object, location, messageCode, pLayerPrefix, pMessage);
2055b8e80941Smrg}
2056b8e80941Smrg
2057b8e80941Smrgvoid
2058b8e80941Smrgtu_GetDeviceGroupPeerMemoryFeatures(
2059b8e80941Smrg   VkDevice device,
2060b8e80941Smrg   uint32_t heapIndex,
2061b8e80941Smrg   uint32_t localDeviceIndex,
2062b8e80941Smrg   uint32_t remoteDeviceIndex,
2063b8e80941Smrg   VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
2064b8e80941Smrg{
2065b8e80941Smrg   assert(localDeviceIndex == remoteDeviceIndex);
2066b8e80941Smrg
2067b8e80941Smrg   *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
2068b8e80941Smrg                          VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
2069b8e80941Smrg                          VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
2070b8e80941Smrg                          VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
2071b8e80941Smrg}
2072