/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "tu_private.h"

#include <fcntl.h>
#include <libsync.h>
#include <stdbool.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/sysinfo.h>
#include <unistd.h>
#include <xf86drm.h>

#include "util/debug.h"
#include "util/disk_cache.h"
#include "util/strtod.h"
#include "vk_format.h"
#include "vk_util.h"

#include "drm/msm_drm.h"

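/* The pipeline-cache UUID is assembled from three pieces, mirroring the
 * memcpy/snprintf calls below:
 *
 *    bytes 0..3   Mesa build timestamp (disk_cache_get_function_timestamp)
 *    bytes 4..5   GPU family id (e.g. 630 for an a630)
 *    bytes 6..    the literal string "tu", NUL-terminated
 *
 * Everything else is left zeroed by the memset.
 */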
static int
tu_device_get_cache_uuid(uint16_t family, void *uuid)
{
   uint32_t mesa_timestamp;
   uint16_t f = family;
   memset(uuid, 0, VK_UUID_SIZE);
   if (!disk_cache_get_function_timestamp(tu_device_get_cache_uuid,
                                          &mesa_timestamp))
      return -1;

   memcpy(uuid, &mesa_timestamp, 4);
   memcpy((char *) uuid + 4, &f, 2);
   snprintf((char *) uuid + 6, VK_UUID_SIZE - 10, "tu");
   return 0;
}

static void
tu_get_driver_uuid(void *uuid)
{
   memset(uuid, 0, VK_UUID_SIZE);
   snprintf(uuid, VK_UUID_SIZE, "freedreno");
}

static void
tu_get_device_uuid(void *uuid)
{
   memset(uuid, 0, VK_UUID_SIZE);
}

static VkResult
tu_bo_init(struct tu_device *dev,
           struct tu_bo *bo,
           uint32_t gem_handle,
           uint64_t size)
{
   uint64_t iova = tu_gem_info_iova(dev, gem_handle);
   if (!iova)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   *bo = (struct tu_bo) {
      .gem_handle = gem_handle,
      .size = size,
      .iova = iova,
   };

   return VK_SUCCESS;
}

VkResult
tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
{
   /* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
    * always sets `flags = MSM_BO_WC`, and we copy that behavior here.
    */
   uint32_t gem_handle = tu_gem_new(dev, size, MSM_BO_WC);
   if (!gem_handle)
      return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);

   VkResult result = tu_bo_init(dev, bo, gem_handle, size);
   if (result != VK_SUCCESS) {
      tu_gem_close(dev, gem_handle);
      return vk_error(dev->instance, result);
   }

   return VK_SUCCESS;
}

VkResult
tu_bo_init_dmabuf(struct tu_device *dev,
                  struct tu_bo *bo,
                  uint64_t size,
                  int fd)
{
   uint32_t gem_handle = tu_gem_import_dmabuf(dev, fd, size);
   if (!gem_handle)
      return vk_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);

   VkResult result = tu_bo_init(dev, bo, gem_handle, size);
   if (result != VK_SUCCESS) {
      tu_gem_close(dev, gem_handle);
      return vk_error(dev->instance, result);
   }

   return VK_SUCCESS;
}

int
tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo)
{
   return tu_gem_export_dmabuf(dev, bo->gem_handle);
}

VkResult
tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
{
   if (bo->map)
      return VK_SUCCESS;

   uint64_t offset = tu_gem_info_offset(dev, bo->gem_handle);
   if (!offset)
      return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);

   /* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
   void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    dev->physical_device->local_fd, offset);
   if (map == MAP_FAILED)
      return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);

   bo->map = map;
   return VK_SUCCESS;
}

void
tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
{
   assert(bo->gem_handle);

   if (bo->map)
      munmap(bo->map, bo->size);

   tu_gem_close(dev, bo->gem_handle);
}
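
/* A minimal sketch of the BO lifecycle these helpers implement (assuming
 * `dev` is an initialized tu_device):
 *
 *    struct tu_bo bo;
 *    if (tu_bo_init_new(dev, &bo, 4096) == VK_SUCCESS) {
 *       if (tu_bo_map(dev, &bo) == VK_SUCCESS)
 *          memset(bo.map, 0, bo.size);   // CPU access through the mapping
 *       tu_bo_finish(dev, &bo);          // munmaps (if mapped), closes GEM
 *    }
 */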

static VkResult
tu_physical_device_init(struct tu_physical_device *device,
                        struct tu_instance *instance,
                        drmDevicePtr drm_device)
{
   const char *path = drm_device->nodes[DRM_NODE_RENDER];
   VkResult result = VK_SUCCESS;
   drmVersionPtr version;
   int fd;
   int master_fd = -1;

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0) {
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to open device %s", path);
   }

   /* Version 1.3 added MSM_INFO_IOVA. */
   const int min_version_major = 1;
   const int min_version_minor = 3;

   version = drmGetVersion(fd);
   if (!version) {
      close(fd);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to query kernel driver version for device %s",
                       path);
   }

   if (strcmp(version->name, "msm")) {
      drmFreeVersion(version);
      close(fd);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "device %s does not use the msm kernel driver", path);
   }

   if (version->version_major != min_version_major ||
       version->version_minor < min_version_minor) {
      result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                         "kernel driver for device %s has version %d.%d, "
                         "but Vulkan requires version >= %d.%d",
                         path, version->version_major, version->version_minor,
                         min_version_major, min_version_minor);
      drmFreeVersion(version);
      close(fd);
      return result;
   }

   drmFreeVersion(version);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found compatible device '%s'.", path);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = instance;
   assert(strlen(path) < ARRAY_SIZE(device->path));
   strncpy(device->path, path, ARRAY_SIZE(device->path));

   if (instance->enabled_extensions.KHR_display) {
      master_fd =
         open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
      if (master_fd >= 0) {
         /* TODO: free master_fd if accel is not working? */
      }
   }

   device->master_fd = master_fd;
   device->local_fd = fd;

   if (tu_drm_get_gpu_id(device, &device->gpu_id)) {
      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Could not query the GPU ID");
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not get GPU ID");
      goto fail;
   }

   if (tu_drm_get_gmem_size(device, &device->gmem_size)) {
      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Could not query the GMEM size");
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not get GMEM size");
      goto fail;
   }

   memset(device->name, 0, sizeof(device->name));
   sprintf(device->name, "FD%d", device->gpu_id);

   switch (device->gpu_id) {
   case 630:
      device->tile_align_w = 32;
      device->tile_align_h = 32;
      break;
   default:
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "device %s is unsupported", device->name);
      goto fail;
   }
   if (tu_device_get_cache_uuid(device->gpu_id, device->cache_uuid)) {
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "cannot generate UUID");
      goto fail;
   }

   /* The gpu id is already embedded in the uuid so we just pass "tu"
    * when creating the cache.
    */
   char buf[VK_UUID_SIZE * 2 + 1];
   disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
   device->disk_cache = disk_cache_create(device->name, buf, 0);

   fprintf(stderr, "WARNING: tu is not a conformant vulkan implementation, "
                   "testing use only.\n");

   tu_get_driver_uuid(&device->driver_uuid);
   tu_get_device_uuid(&device->device_uuid);

   tu_fill_device_extension_table(device, &device->supported_extensions);

   if (result != VK_SUCCESS) {
      vk_error(instance, result);
      goto fail;
   }

   result = tu_wsi_init(device);
   if (result != VK_SUCCESS) {
      vk_error(instance, result);
      goto fail;
   }

   return VK_SUCCESS;

fail:
   close(fd);
   if (master_fd != -1)
      close(master_fd);
   return result;
}

static void
tu_physical_device_finish(struct tu_physical_device *device)
{
   tu_wsi_finish(device);

   disk_cache_destroy(device->disk_cache);
   close(device->local_fd);
   if (device->master_fd != -1)
      close(device->master_fd);
}

static void *
default_alloc_func(void *pUserData,
                   size_t size,
                   size_t align,
                   VkSystemAllocationScope allocationScope)
{
   return malloc(size);
}

static void *
default_realloc_func(void *pUserData,
                     void *pOriginal,
                     size_t size,
                     size_t align,
                     VkSystemAllocationScope allocationScope)
{
   return realloc(pOriginal, size);
}

static void
default_free_func(void *pUserData, void *pMemory)
{
   free(pMemory);
}

static const VkAllocationCallbacks default_alloc = {
   .pUserData = NULL,
   .pfnAllocation = default_alloc_func,
   .pfnReallocation = default_realloc_func,
   .pfnFree = default_free_func,
};

static const struct debug_control tu_debug_options[] = {
   { "startup", TU_DEBUG_STARTUP },
   { "nir", TU_DEBUG_NIR },
   { "ir3", TU_DEBUG_IR3 },
   { NULL, 0 }
};
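
/* These options are consumed from the TU_DEBUG environment variable via
 * parse_debug_string() in tu_CreateInstance below. For example (assuming
 * the usual comma-separated syntax of util/debug.c):
 *
 *    TU_DEBUG=startup,ir3 ./my_vulkan_app
 *
 * would set both TU_DEBUG_STARTUP and TU_DEBUG_IR3 in debug_flags.
 */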

const char *
tu_get_debug_option_name(int id)
{
   assert(id < ARRAY_SIZE(tu_debug_options) - 1);
   return tu_debug_options[id].string;
}

static int
tu_get_instance_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_INSTANCE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_instance_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}

VkResult
tu_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkInstance *pInstance)
{
   struct tu_instance *instance;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   uint32_t client_version;
   if (pCreateInfo->pApplicationInfo &&
       pCreateInfo->pApplicationInfo->apiVersion != 0) {
      client_version = pCreateInfo->pApplicationInfo->apiVersion;
   } else {
      tu_EnumerateInstanceVersion(&client_version);
   }

   instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!instance)
      return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);

   instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;

   if (pAllocator)
      instance->alloc = *pAllocator;
   else
      instance->alloc = default_alloc;

   instance->api_version = client_version;
   instance->physical_device_count = -1;

   instance->debug_flags =
      parse_debug_string(getenv("TU_DEBUG"), tu_debug_options);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Created an instance");

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_instance_extension_index(ext_name);

      if (index < 0 || !tu_supported_instance_extensions.extensions[index]) {
         vk_free2(&default_alloc, pAllocator, instance);
         return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      instance->enabled_extensions.extensions[index] = true;
   }

   result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
   if (result != VK_SUCCESS) {
      vk_free2(&default_alloc, pAllocator, instance);
      return vk_error(instance, result);
   }

   _mesa_locale_init();

   VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));

   *pInstance = tu_instance_to_handle(instance);

   return VK_SUCCESS;
}

void
tu_DestroyInstance(VkInstance _instance,
                   const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);

   if (!instance)
      return;

   for (int i = 0; i < instance->physical_device_count; ++i) {
      tu_physical_device_finish(instance->physical_devices + i);
   }

   VG(VALGRIND_DESTROY_MEMPOOL(instance));

   _mesa_locale_fini();

   vk_debug_report_instance_destroy(&instance->debug_report_callbacks);

   vk_free(&instance->alloc, instance);
}

static VkResult
tu_enumerate_devices(struct tu_instance *instance)
{
   /* TODO: Check for more devices? */
   drmDevicePtr devices[8];
   VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
   int max_devices;

   instance->physical_device_count = 0;

   max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found %d drm nodes", max_devices);

   if (max_devices < 1)
      return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);

   for (unsigned i = 0; i < (unsigned) max_devices; i++) {
      if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
          devices[i]->bustype == DRM_BUS_PLATFORM) {

         result = tu_physical_device_init(
            instance->physical_devices + instance->physical_device_count,
            instance, devices[i]);
         if (result == VK_SUCCESS)
            ++instance->physical_device_count;
         else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
            break;
      }
   }
   drmFreeDevices(devices, max_devices);

   return result;
}

VkResult
tu_EnumeratePhysicalDevices(VkInstance _instance,
                            uint32_t *pPhysicalDeviceCount,
                            VkPhysicalDevice *pPhysicalDevices)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDevices, pPhysicalDeviceCount);

   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p)
      {
         *p = tu_physical_device_to_handle(instance->physical_devices + i);
      }
   }

   return vk_outarray_status(&out);
}

VkResult
tu_EnumeratePhysicalDeviceGroups(
   VkInstance _instance,
   uint32_t *pPhysicalDeviceGroupCount,
   VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   VK_OUTARRAY_MAKE(out, pPhysicalDeviceGroupProperties,
                    pPhysicalDeviceGroupCount);
   VkResult result;

   if (instance->physical_device_count < 0) {
      result = tu_enumerate_devices(instance);
      if (result != VK_SUCCESS && result != VK_ERROR_INCOMPATIBLE_DRIVER)
         return result;
   }

   for (uint32_t i = 0; i < instance->physical_device_count; ++i) {
      vk_outarray_append(&out, p)
      {
         p->physicalDeviceCount = 1;
         p->physicalDevices[0] =
            tu_physical_device_to_handle(instance->physical_devices + i);
         p->subsetAllocation = false;
      }
   }

   return vk_outarray_status(&out);
}

void
tu_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
                             VkPhysicalDeviceFeatures *pFeatures)
{
   memset(pFeatures, 0, sizeof(*pFeatures));

   *pFeatures = (VkPhysicalDeviceFeatures) {
      .robustBufferAccess = false,
      .fullDrawIndexUint32 = false,
      .imageCubeArray = false,
      .independentBlend = false,
      .geometryShader = false,
      .tessellationShader = false,
      .sampleRateShading = false,
      .dualSrcBlend = false,
      .logicOp = false,
      .multiDrawIndirect = false,
      .drawIndirectFirstInstance = false,
      .depthClamp = false,
      .depthBiasClamp = false,
      .fillModeNonSolid = false,
      .depthBounds = false,
      .wideLines = false,
      .largePoints = false,
      .alphaToOne = false,
      .multiViewport = false,
      .samplerAnisotropy = false,
      .textureCompressionETC2 = false,
      .textureCompressionASTC_LDR = false,
      .textureCompressionBC = false,
      .occlusionQueryPrecise = false,
      .pipelineStatisticsQuery = false,
      .vertexPipelineStoresAndAtomics = false,
      .fragmentStoresAndAtomics = false,
      .shaderTessellationAndGeometryPointSize = false,
      .shaderImageGatherExtended = false,
      .shaderStorageImageExtendedFormats = false,
      .shaderStorageImageMultisample = false,
      .shaderUniformBufferArrayDynamicIndexing = false,
      .shaderSampledImageArrayDynamicIndexing = false,
      .shaderStorageBufferArrayDynamicIndexing = false,
      .shaderStorageImageArrayDynamicIndexing = false,
      .shaderStorageImageReadWithoutFormat = false,
      .shaderStorageImageWriteWithoutFormat = false,
      .shaderClipDistance = false,
      .shaderCullDistance = false,
      .shaderFloat64 = false,
      .shaderInt64 = false,
      .shaderInt16 = false,
      .sparseBinding = false,
      .variableMultisampleRate = false,
      .inheritedQueries = false,
   };
}

void
tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
                              VkPhysicalDeviceFeatures2 *pFeatures)
{
   vk_foreach_struct(ext, pFeatures->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES: {
         VkPhysicalDeviceVariablePointersFeatures *features = (void *) ext;
         features->variablePointersStorageBuffer = false;
         features->variablePointers = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES: {
         VkPhysicalDeviceMultiviewFeatures *features =
            (VkPhysicalDeviceMultiviewFeatures *) ext;
         features->multiview = false;
         features->multiviewGeometryShader = false;
         features->multiviewTessellationShader = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES: {
         VkPhysicalDeviceShaderDrawParametersFeatures *features =
            (VkPhysicalDeviceShaderDrawParametersFeatures *) ext;
         features->shaderDrawParameters = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
         VkPhysicalDeviceProtectedMemoryFeatures *features =
            (VkPhysicalDeviceProtectedMemoryFeatures *) ext;
         features->protectedMemory = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
         VkPhysicalDevice16BitStorageFeatures *features =
            (VkPhysicalDevice16BitStorageFeatures *) ext;
         features->storageBuffer16BitAccess = false;
         features->uniformAndStorageBuffer16BitAccess = false;
         features->storagePushConstant16 = false;
         features->storageInputOutput16 = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
         VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
            (VkPhysicalDeviceSamplerYcbcrConversionFeatures *) ext;
         features->samplerYcbcrConversion = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
         VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
            (VkPhysicalDeviceDescriptorIndexingFeaturesEXT *) ext;
         features->shaderInputAttachmentArrayDynamicIndexing = false;
         features->shaderUniformTexelBufferArrayDynamicIndexing = false;
         features->shaderStorageTexelBufferArrayDynamicIndexing = false;
         features->shaderUniformBufferArrayNonUniformIndexing = false;
         features->shaderSampledImageArrayNonUniformIndexing = false;
         features->shaderStorageBufferArrayNonUniformIndexing = false;
         features->shaderStorageImageArrayNonUniformIndexing = false;
         features->shaderInputAttachmentArrayNonUniformIndexing = false;
         features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
         features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
         features->descriptorBindingUniformBufferUpdateAfterBind = false;
         features->descriptorBindingSampledImageUpdateAfterBind = false;
         features->descriptorBindingStorageImageUpdateAfterBind = false;
         features->descriptorBindingStorageBufferUpdateAfterBind = false;
         features->descriptorBindingUniformTexelBufferUpdateAfterBind = false;
         features->descriptorBindingStorageTexelBufferUpdateAfterBind = false;
         features->descriptorBindingUpdateUnusedWhilePending = false;
         features->descriptorBindingPartiallyBound = false;
         features->descriptorBindingVariableDescriptorCount = false;
         features->runtimeDescriptorArray = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
         VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
            (VkPhysicalDeviceConditionalRenderingFeaturesEXT *) ext;
         features->conditionalRendering = false;
         features->inheritedConditionalRendering = false;
         break;
      }
      default:
         break;
      }
   }
   return tu_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
}

void
tu_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
                               VkPhysicalDeviceProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   VkSampleCountFlags sample_counts = 0xf;

   /* Make sure that the entire descriptor set is addressable with a signed
    * 32-bit int, i.e. the sum of all limits scaled by descriptor size must
    * be at most 2 GiB. A combined image/sampler object counts as one of
    * each. This limit is for the pipeline layout, not for the set layout,
    * but there is no set limit, so we just set a pipeline limit. No app is
    * likely to hit this soon. */
   size_t max_descriptor_set_size =
      ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
      (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
       32 /* storage buffer, 32 due to potential space wasted on alignment */ +
       32 /* sampler, largest when combined with image */ +
       64 /* sampled image */ + 64 /* storage image */);
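   /* Worked example (illustrative; the exact value of MAX_DYNAMIC_BUFFERS
    * lives in tu_private.h): the denominator is 32 + 32 + 32 + 64 + 64 =
    * 224 bytes per descriptor in the worst case, so the limit comes out
    * near 2^31 / 224, i.e. roughly 9.5 million descriptors per stage.
    */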

   VkPhysicalDeviceLimits limits = {
      .maxImageDimension1D = (1 << 14),
      .maxImageDimension2D = (1 << 14),
      .maxImageDimension3D = (1 << 11),
      .maxImageDimensionCube = (1 << 14),
      .maxImageArrayLayers = (1 << 11),
      .maxTexelBufferElements = 128 * 1024 * 1024,
      .maxUniformBufferRange = UINT32_MAX,
      .maxStorageBufferRange = UINT32_MAX,
      .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
      .maxMemoryAllocationCount = UINT32_MAX,
      .maxSamplerAllocationCount = 64 * 1024,
      .bufferImageGranularity = 64,          /* A cache line */
      .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
      .maxBoundDescriptorSets = MAX_SETS,
      .maxPerStageDescriptorSamplers = max_descriptor_set_size,
      .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
      .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
      .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
      .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
      .maxPerStageResources = max_descriptor_set_size,
      .maxDescriptorSetSamplers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
      .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
      .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
      .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
      .maxDescriptorSetSampledImages = max_descriptor_set_size,
      .maxDescriptorSetStorageImages = max_descriptor_set_size,
      .maxDescriptorSetInputAttachments = max_descriptor_set_size,
      .maxVertexInputAttributes = 32,
      .maxVertexInputBindings = 32,
      .maxVertexInputAttributeOffset = 2047,
      .maxVertexInputBindingStride = 2048,
      .maxVertexOutputComponents = 128,
      .maxTessellationGenerationLevel = 64,
      .maxTessellationPatchSize = 32,
      .maxTessellationControlPerVertexInputComponents = 128,
      .maxTessellationControlPerVertexOutputComponents = 128,
      .maxTessellationControlPerPatchOutputComponents = 120,
      .maxTessellationControlTotalOutputComponents = 4096,
      .maxTessellationEvaluationInputComponents = 128,
      .maxTessellationEvaluationOutputComponents = 128,
      .maxGeometryShaderInvocations = 127,
      .maxGeometryInputComponents = 64,
      .maxGeometryOutputComponents = 128,
      .maxGeometryOutputVertices = 256,
      .maxGeometryTotalOutputComponents = 1024,
      .maxFragmentInputComponents = 128,
      .maxFragmentOutputAttachments = 8,
      .maxFragmentDualSrcAttachments = 1,
      .maxFragmentCombinedOutputResources = 8,
      .maxComputeSharedMemorySize = 32768,
      .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
      .maxComputeWorkGroupInvocations = 2048,
      .maxComputeWorkGroupSize = { 2048, 2048, 2048 },
      .subPixelPrecisionBits = 4 /* FIXME */,
      .subTexelPrecisionBits = 4 /* FIXME */,
      .mipmapPrecisionBits = 4 /* FIXME */,
      .maxDrawIndexedIndexValue = UINT32_MAX,
      .maxDrawIndirectCount = UINT32_MAX,
      .maxSamplerLodBias = 16,
      .maxSamplerAnisotropy = 16,
      .maxViewports = MAX_VIEWPORTS,
      .maxViewportDimensions = { (1 << 14), (1 << 14) },
      .viewportBoundsRange = { INT16_MIN, INT16_MAX },
      .viewportSubPixelBits = 8,
      .minMemoryMapAlignment = 4096, /* A page */
      .minTexelBufferOffsetAlignment = 1,
      .minUniformBufferOffsetAlignment = 4,
      .minStorageBufferOffsetAlignment = 4,
      .minTexelOffset = -32,
      .maxTexelOffset = 31,
      .minTexelGatherOffset = -32,
      .maxTexelGatherOffset = 31,
      .minInterpolationOffset = -2,
      .maxInterpolationOffset = 2,
      .subPixelInterpolationOffsetBits = 8,
      .maxFramebufferWidth = (1 << 14),
      .maxFramebufferHeight = (1 << 14),
      .maxFramebufferLayers = (1 << 10),
      .framebufferColorSampleCounts = sample_counts,
      .framebufferDepthSampleCounts = sample_counts,
      .framebufferStencilSampleCounts = sample_counts,
      .framebufferNoAttachmentsSampleCounts = sample_counts,
      .maxColorAttachments = MAX_RTS,
      .sampledImageColorSampleCounts = sample_counts,
      .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .sampledImageDepthSampleCounts = sample_counts,
      .sampledImageStencilSampleCounts = sample_counts,
      .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .maxSampleMaskWords = 1,
      .timestampComputeAndGraphics = true,
      .timestampPeriod = 1,
      .maxClipDistances = 8,
      .maxCullDistances = 8,
      .maxCombinedClipAndCullDistances = 8,
      .discreteQueuePriorities = 1,
      .pointSizeRange = { 0.125, 255.875 },
      .lineWidthRange = { 0.0, 7.9921875 },
      .pointSizeGranularity = (1.0 / 8.0),
      .lineWidthGranularity = (1.0 / 128.0),
      .strictLines = false, /* FINISHME */
      .standardSampleLocations = true,
      .optimalBufferCopyOffsetAlignment = 128,
      .optimalBufferCopyRowPitchAlignment = 128,
      .nonCoherentAtomSize = 64,
   };

   *pProperties = (VkPhysicalDeviceProperties) {
      .apiVersion = tu_physical_device_api_version(pdevice),
      .driverVersion = vk_get_driver_version(),
      .vendorID = 0, /* TODO */
      .deviceID = 0,
      .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
      .limits = limits,
      .sparseProperties = { 0 },
   };

   strcpy(pProperties->deviceName, pdevice->name);
   memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
}

void
tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
                                VkPhysicalDeviceProperties2 *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
   tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);

   vk_foreach_struct(ext, pProperties->pNext)
   {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
         VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
            (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
         properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES: {
         VkPhysicalDeviceIDProperties *properties =
            (VkPhysicalDeviceIDProperties *) ext;
         memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
         memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
         properties->deviceLUIDValid = false;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES: {
         VkPhysicalDeviceMultiviewProperties *properties =
            (VkPhysicalDeviceMultiviewProperties *) ext;
         properties->maxMultiviewViewCount = MAX_VIEWS;
         properties->maxMultiviewInstanceIndex = INT_MAX;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES: {
         VkPhysicalDevicePointClippingProperties *properties =
            (VkPhysicalDevicePointClippingProperties *) ext;
         properties->pointClippingBehavior =
            VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES;
         break;
      }
      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
         VkPhysicalDeviceMaintenance3Properties *properties =
            (VkPhysicalDeviceMaintenance3Properties *) ext;
         /* Make sure everything is addressable by a signed 32-bit int, and
          * our largest descriptors are 96 bytes. */
         properties->maxPerSetDescriptors = (1ull << 31) / 96;
         /* Our buffer size fields allow only this much */
         properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
         break;
      }
      default:
         break;
      }
   }
}

static const VkQueueFamilyProperties tu_queue_family_properties = {
   .queueFlags =
      VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
   .queueCount = 1,
   .timestampValidBits = 64,
   .minImageTransferGranularity = { 1, 1, 1 },
};

void
tu_GetPhysicalDeviceQueueFamilyProperties(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p) { *p = tu_queue_family_properties; }
}

void
tu_GetPhysicalDeviceQueueFamilyProperties2(
   VkPhysicalDevice physicalDevice,
   uint32_t *pQueueFamilyPropertyCount,
   VkQueueFamilyProperties2 *pQueueFamilyProperties)
{
   VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);

   vk_outarray_append(&out, p)
   {
      p->queueFamilyProperties = tu_queue_family_properties;
   }
}

static uint64_t
tu_get_system_heap_size(void)
{
   struct sysinfo info;
   sysinfo(&info);

   uint64_t total_ram = (uint64_t) info.totalram * (uint64_t) info.mem_unit;

   /* We don't want to burn too much ram with the GPU.  If the user has 4GiB
    * or less, we use at most half.  If they have more than 4GiB, we use 3/4.
    */
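   /* Worked examples (illustrative): 4 GiB of RAM yields a 2 GiB heap,
    * while 8 GiB of RAM yields a 6 GiB heap.
    */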
   uint64_t available_ram;
   if (total_ram <= 4ull * 1024ull * 1024ull * 1024ull)
      available_ram = total_ram / 2;
   else
      available_ram = total_ram * 3 / 4;

   return available_ram;
}

void
tu_GetPhysicalDeviceMemoryProperties(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties *pMemoryProperties)
{
   pMemoryProperties->memoryHeapCount = 1;
   pMemoryProperties->memoryHeaps[0].size = tu_get_system_heap_size();
   pMemoryProperties->memoryHeaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;

   pMemoryProperties->memoryTypeCount = 1;
   pMemoryProperties->memoryTypes[0].propertyFlags =
      VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
      VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
      VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
   pMemoryProperties->memoryTypes[0].heapIndex = 0;
}

void
tu_GetPhysicalDeviceMemoryProperties2(
   VkPhysicalDevice physicalDevice,
   VkPhysicalDeviceMemoryProperties2 *pMemoryProperties)
{
   return tu_GetPhysicalDeviceMemoryProperties(
      physicalDevice, &pMemoryProperties->memoryProperties);
}

static VkResult
tu_queue_init(struct tu_device *device,
              struct tu_queue *queue,
              uint32_t queue_family_index,
              int idx,
              VkDeviceQueueCreateFlags flags)
{
   queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   queue->device = device;
   queue->queue_family_index = queue_family_index;
   queue->queue_idx = idx;
   queue->flags = flags;

   int ret = tu_drm_submitqueue_new(device, 0, &queue->msm_queue_id);
   if (ret)
      return VK_ERROR_INITIALIZATION_FAILED;

   tu_fence_init(&queue->submit_fence, false);

   return VK_SUCCESS;
}

static void
tu_queue_finish(struct tu_queue *queue)
{
   tu_fence_finish(&queue->submit_fence);
   tu_drm_submitqueue_close(queue->device, queue->msm_queue_id);
}

static int
tu_get_device_extension_index(const char *name)
{
   for (unsigned i = 0; i < TU_DEVICE_EXTENSION_COUNT; ++i) {
      if (strcmp(name, tu_device_extensions[i].extensionName) == 0)
         return i;
   }
   return -1;
}

VkResult
tu_CreateDevice(VkPhysicalDevice physicalDevice,
                const VkDeviceCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDevice *pDevice)
{
   TU_FROM_HANDLE(tu_physical_device, physical_device, physicalDevice);
   VkResult result;
   struct tu_device *device;

   /* Check enabled features */
   if (pCreateInfo->pEnabledFeatures) {
      VkPhysicalDeviceFeatures supported_features;
      tu_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
      VkBool32 *supported_feature = (VkBool32 *) &supported_features;
      VkBool32 *enabled_feature = (VkBool32 *) pCreateInfo->pEnabledFeatures;
      unsigned num_features =
         sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
      for (uint32_t i = 0; i < num_features; i++) {
         if (enabled_feature[i] && !supported_feature[i])
            return vk_error(physical_device->instance,
                            VK_ERROR_FEATURE_NOT_PRESENT);
      }
   }

   device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
                       sizeof(*device), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!device)
      return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = physical_device->instance;
   device->physical_device = physical_device;

   if (pAllocator)
      device->alloc = *pAllocator;
   else
      device->alloc = physical_device->instance->alloc;

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
      int index = tu_get_device_extension_index(ext_name);
      if (index < 0 ||
          !physical_device->supported_extensions.extensions[index]) {
         vk_free(&device->alloc, device);
         return vk_error(physical_device->instance,
                         VK_ERROR_EXTENSION_NOT_PRESENT);
      }

      device->enabled_extensions.extensions[index] = true;
   }

   for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
      const VkDeviceQueueCreateInfo *queue_create =
         &pCreateInfo->pQueueCreateInfos[i];
      uint32_t qfi = queue_create->queueFamilyIndex;
      device->queues[qfi] = vk_alloc(
         &device->alloc, queue_create->queueCount * sizeof(struct tu_queue),
         8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
      if (!device->queues[qfi]) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         goto fail;
      }

      memset(device->queues[qfi], 0,
             queue_create->queueCount * sizeof(struct tu_queue));

      device->queue_count[qfi] = queue_create->queueCount;

      for (unsigned q = 0; q < queue_create->queueCount; q++) {
         result = tu_queue_init(device, &device->queues[qfi][q], qfi, q,
                                queue_create->flags);
         if (result != VK_SUCCESS)
            goto fail;
      }
   }

   device->compiler = ir3_compiler_create(NULL, physical_device->gpu_id);
   if (!device->compiler) {
      /* Without this, `result` would be uninitialized on the fail path. */
      result = VK_ERROR_INITIALIZATION_FAILED;
      goto fail;
   }

   VkPipelineCacheCreateInfo ci;
   ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
   ci.pNext = NULL;
   ci.flags = 0;
   ci.pInitialData = NULL;
   ci.initialDataSize = 0;
   VkPipelineCache pc;
   result =
      tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
   if (result != VK_SUCCESS)
      goto fail;

   device->mem_cache = tu_pipeline_cache_from_handle(pc);

   *pDevice = tu_device_to_handle(device);
   return VK_SUCCESS;

fail:
   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);
   }

   if (device->compiler)
      ralloc_free(device->compiler);

   vk_free(&device->alloc, device);
   return result;
}

void
tu_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   if (!device)
      return;

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++)
         tu_queue_finish(&device->queues[i][q]);
      if (device->queue_count[i])
         vk_free(&device->alloc, device->queues[i]);
   }

   /* the compiler does not use pAllocator */
   ralloc_free(device->compiler);

   VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
   tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);

   vk_free(&device->alloc, device);
}

VkResult
tu_EnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
                                    VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}

VkResult
tu_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
                                  uint32_t *pPropertyCount,
                                  VkLayerProperties *pProperties)
{
   *pPropertyCount = 0;
   return VK_SUCCESS;
}

void
tu_GetDeviceQueue2(VkDevice _device,
                   const VkDeviceQueueInfo2 *pQueueInfo,
                   VkQueue *pQueue)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_queue *queue;

   queue =
      &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
   if (pQueueInfo->flags != queue->flags) {
      /* From the Vulkan 1.1.70 spec:
       *
       * "The queue returned by vkGetDeviceQueue2 must have the same
       * flags value from this structure as that used at device
       * creation time in a VkDeviceQueueCreateInfo instance. If no
       * matching flags were specified at device creation time then
       * pQueue will return VK_NULL_HANDLE."
       */
      *pQueue = VK_NULL_HANDLE;
      return;
   }

   *pQueue = tu_queue_to_handle(queue);
}
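
/* For example (per the spec text quoted above): if the queue was created
 * with VkDeviceQueueCreateInfo::flags == 0, a vkGetDeviceQueue2 call whose
 * VkDeviceQueueInfo2::flags contains VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT
 * returns VK_NULL_HANDLE rather than the queue.
 */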

void
tu_GetDeviceQueue(VkDevice _device,
                  uint32_t queueFamilyIndex,
                  uint32_t queueIndex,
                  VkQueue *pQueue)
{
   const VkDeviceQueueInfo2 info =
      (VkDeviceQueueInfo2) { .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
                             .queueFamilyIndex = queueFamilyIndex,
                             .queueIndex = queueIndex };

   tu_GetDeviceQueue2(_device, &info, pQueue);
}

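/* tu_QueueSubmit translates each VkSubmitInfo into one DRM_MSM_GEM_SUBMIT
 * ioctl: every command-stream entry of every command buffer becomes a
 * drm_msm_gem_submit_cmd of type MSM_SUBMIT_CMD_BUF, roughly (sketch;
 * angle-bracketed fields are placeholders):
 *
 *    cmds[k] = (struct drm_msm_gem_submit_cmd) {
 *       .type = MSM_SUBMIT_CMD_BUF,
 *       .submit_idx = <index of the entry's BO in the submit's BO list>,
 *       .submit_offset = <offset of the IB within that BO>,
 *       .size = <IB size in bytes>,
 *    };
 *
 * The BO list is merged from all command buffers, and a fence fd is only
 * requested for the last submit, since queue execution is serialized.
 */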
VkResult
tu_QueueSubmit(VkQueue _queue,
               uint32_t submitCount,
               const VkSubmitInfo *pSubmits,
               VkFence _fence)
{
   TU_FROM_HANDLE(tu_queue, queue, _queue);

   for (uint32_t i = 0; i < submitCount; ++i) {
      const VkSubmitInfo *submit = pSubmits + i;
      const bool last_submit = (i == submitCount - 1);
      struct tu_bo_list bo_list;
      tu_bo_list_init(&bo_list);

      uint32_t entry_count = 0;
      for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
         TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
         entry_count += cmdbuf->cs.entry_count;
      }

      struct drm_msm_gem_submit_cmd cmds[entry_count];
      uint32_t entry_idx = 0;
      for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
         TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
         struct tu_cs *cs = &cmdbuf->cs;
         for (unsigned i = 0; i < cs->entry_count; ++i, ++entry_idx) {
            cmds[entry_idx].type = MSM_SUBMIT_CMD_BUF;
            cmds[entry_idx].submit_idx =
               tu_bo_list_add(&bo_list, cs->entries[i].bo,
                              MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
            cmds[entry_idx].submit_offset = cs->entries[i].offset;
            cmds[entry_idx].size = cs->entries[i].size;
            cmds[entry_idx].pad = 0;
            cmds[entry_idx].nr_relocs = 0;
            cmds[entry_idx].relocs = 0;
         }

         tu_bo_list_merge(&bo_list, &cmdbuf->bo_list);
      }

      uint32_t flags = MSM_PIPE_3D0;
      if (last_submit) {
         flags |= MSM_SUBMIT_FENCE_FD_OUT;
      }

      struct drm_msm_gem_submit req = {
         .flags = flags,
         .queueid = queue->msm_queue_id,
         .bos = (uint64_t)(uintptr_t) bo_list.bo_infos,
         .nr_bos = bo_list.count,
         .cmds = (uint64_t)(uintptr_t)cmds,
         .nr_cmds = entry_count,
      };

      int ret = drmCommandWriteRead(queue->device->physical_device->local_fd,
                                    DRM_MSM_GEM_SUBMIT,
                                    &req, sizeof(req));
      if (ret) {
         fprintf(stderr, "submit failed: %s\n", strerror(errno));
         abort();
      }

      tu_bo_list_destroy(&bo_list);

      if (last_submit) {
         /* no need to merge fences as queue execution is serialized */
         tu_fence_update_fd(&queue->submit_fence, req.fence_fd);
      }
   }

   if (_fence != VK_NULL_HANDLE) {
      TU_FROM_HANDLE(tu_fence, fence, _fence);
      tu_fence_copy(fence, &queue->submit_fence);
   }

   return VK_SUCCESS;
}

VkResult
tu_QueueWaitIdle(VkQueue _queue)
{
   TU_FROM_HANDLE(tu_queue, queue, _queue);

   tu_fence_wait_idle(&queue->submit_fence);

   return VK_SUCCESS;
}

VkResult
tu_DeviceWaitIdle(VkDevice _device)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
      for (unsigned q = 0; q < device->queue_count[i]; q++) {
         tu_QueueWaitIdle(tu_queue_to_handle(&device->queues[i][q]));
      }
   }
   return VK_SUCCESS;
}

VkResult
tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
                                        uint32_t *pPropertyCount,
                                        VkExtensionProperties *pProperties)
{
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers */
   if (pLayerName)
      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   for (int i = 0; i < TU_INSTANCE_EXTENSION_COUNT; i++) {
      if (tu_supported_instance_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_instance_extensions[i]; }
      }
   }

   return vk_outarray_status(&out);
}

VkResult
tu_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                      const char *pLayerName,
                                      uint32_t *pPropertyCount,
                                      VkExtensionProperties *pProperties)
{
   TU_FROM_HANDLE(tu_physical_device, device, physicalDevice);
   VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);

   /* We support no layers */
   if (pLayerName)
      return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);

   for (int i = 0; i < TU_DEVICE_EXTENSION_COUNT; i++) {
      if (device->supported_extensions.extensions[i]) {
         vk_outarray_append(&out, prop) { *prop = tu_device_extensions[i]; }
      }
   }

   return vk_outarray_status(&out);
}

PFN_vkVoidFunction
tu_GetInstanceProcAddr(VkInstance _instance, const char *pName)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);

   return tu_lookup_entrypoint_checked(
      pName, instance ? instance->api_version : 0,
      instance ? &instance->enabled_extensions : NULL, NULL);
}

/* The loader wants us to expose a second GetInstanceProcAddr function
 * to work around certain LD_PRELOAD issues seen in apps.
 */
PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName);

PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_icdGetInstanceProcAddr(VkInstance instance, const char *pName)
{
   return tu_GetInstanceProcAddr(instance, pName);
}

PFN_vkVoidFunction
tu_GetDeviceProcAddr(VkDevice _device, const char *pName)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   return tu_lookup_entrypoint_checked(pName, device->instance->api_version,
                                       &device->instance->enabled_extensions,
                                       &device->enabled_extensions);
}

static VkResult
tu_alloc_memory(struct tu_device *device,
                const VkMemoryAllocateInfo *pAllocateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkDeviceMemory *pMem)
{
   struct tu_device_memory *mem;
   VkResult result;

   assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);

   if (pAllocateInfo->allocationSize == 0) {
      /* Apparently, this is allowed */
      *pMem = VK_NULL_HANDLE;
      return VK_SUCCESS;
   }

   mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (mem == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   const VkImportMemoryFdInfoKHR *fd_info =
      vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
   if (fd_info && !fd_info->handleType)
      fd_info = NULL;

   if (fd_info) {
      assert(fd_info->handleType ==
                VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
             fd_info->handleType ==
                VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);

      /*
       * TODO Importing the same fd twice gives us the same handle without
       * reference counting.  We need to maintain a per-instance handle-to-bo
       * table and add reference count to tu_bo.
       */
      result = tu_bo_init_dmabuf(device, &mem->bo,
                                 pAllocateInfo->allocationSize, fd_info->fd);
      if (result == VK_SUCCESS) {
         /* take ownership and close the fd */
         close(fd_info->fd);
      }
   } else {
      result =
         tu_bo_init_new(device, &mem->bo, pAllocateInfo->allocationSize);
   }

   if (result != VK_SUCCESS) {
      vk_free2(&device->alloc, pAllocator, mem);
      return result;
   }

   mem->size = pAllocateInfo->allocationSize;
   mem->type_index = pAllocateInfo->memoryTypeIndex;

   mem->map = NULL;
   mem->user_ptr = NULL;

   *pMem = tu_device_memory_to_handle(mem);

   return VK_SUCCESS;
}

VkResult
tu_AllocateMemory(VkDevice _device,
                  const VkMemoryAllocateInfo *pAllocateInfo,
                  const VkAllocationCallbacks *pAllocator,
                  VkDeviceMemory *pMem)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   return tu_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
}

void
tu_FreeMemory(VkDevice _device,
              VkDeviceMemory _mem,
              const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _mem);

   if (mem == NULL)
      return;

   tu_bo_finish(device, &mem->bo);
   vk_free2(&device->alloc, pAllocator, mem);
}

VkResult
tu_MapMemory(VkDevice _device,
             VkDeviceMemory _memory,
             VkDeviceSize offset,
             VkDeviceSize size,
             VkMemoryMapFlags flags,
             void **ppData)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, mem, _memory);
   VkResult result;

   if (mem == NULL) {
      *ppData = NULL;
      return VK_SUCCESS;
   }

   if (mem->user_ptr) {
      *ppData = mem->user_ptr;
   } else if (!mem->map) {
      result = tu_bo_map(device, &mem->bo);
      if (result != VK_SUCCESS)
         return result;
      *ppData = mem->map = mem->bo.map;
   } else
      *ppData = mem->map;

   if (*ppData) {
      *ppData += offset;
      return VK_SUCCESS;
   }

   return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
}

void
tu_UnmapMemory(VkDevice _device, VkDeviceMemory _memory)
{
   /* I do not see any unmapping done by the freedreno Gallium driver. */
}

VkResult
tu_FlushMappedMemoryRanges(VkDevice _device,
                           uint32_t memoryRangeCount,
                           const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}

VkResult
tu_InvalidateMappedMemoryRanges(VkDevice _device,
                                uint32_t memoryRangeCount,
                                const VkMappedMemoryRange *pMemoryRanges)
{
   return VK_SUCCESS;
}
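
/* Flush/invalidate are intentionally no-ops: the single memory type we
 * advertise in tu_GetPhysicalDeviceMemoryProperties above includes
 * VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, so mapped ranges never need manual
 * cache maintenance.
 */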

void
tu_GetBufferMemoryRequirements(VkDevice _device,
                               VkBuffer _buffer,
                               VkMemoryRequirements *pMemoryRequirements)
{
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);

   pMemoryRequirements->memoryTypeBits = 1;
   pMemoryRequirements->alignment = 16;
   pMemoryRequirements->size =
      align64(buffer->size, pMemoryRequirements->alignment);
}
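
/* For example, a 100-byte buffer reports a required size of
 * align64(100, 16) == 112 bytes.
 */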
1544
1545void
1546tu_GetBufferMemoryRequirements2(
1547   VkDevice device,
1548   const VkBufferMemoryRequirementsInfo2 *pInfo,
1549   VkMemoryRequirements2 *pMemoryRequirements)
1550{
1551   tu_GetBufferMemoryRequirements(device, pInfo->buffer,
1552                                  &pMemoryRequirements->memoryRequirements);
1553}
1554
1555void
1556tu_GetImageMemoryRequirements(VkDevice _device,
1557                              VkImage _image,
1558                              VkMemoryRequirements *pMemoryRequirements)
1559{
1560   TU_FROM_HANDLE(tu_image, image, _image);
1561
1562   pMemoryRequirements->memoryTypeBits = 1;
1563   pMemoryRequirements->size = image->size;
1564   pMemoryRequirements->alignment = image->alignment;
1565}
1566
1567void
1568tu_GetImageMemoryRequirements2(VkDevice device,
1569                               const VkImageMemoryRequirementsInfo2 *pInfo,
1570                               VkMemoryRequirements2 *pMemoryRequirements)
1571{
1572   tu_GetImageMemoryRequirements(device, pInfo->image,
1573                                 &pMemoryRequirements->memoryRequirements);
1574}
1575
void
tu_GetImageSparseMemoryRequirements(
   VkDevice device,
   VkImage image,
   uint32_t *pSparseMemoryRequirementCount,
   VkSparseImageMemoryRequirements *pSparseMemoryRequirements)
{
   tu_stub();
}

void
tu_GetImageSparseMemoryRequirements2(
   VkDevice device,
   const VkImageSparseMemoryRequirementsInfo2 *pInfo,
   uint32_t *pSparseMemoryRequirementCount,
   VkSparseImageMemoryRequirements2 *pSparseMemoryRequirements)
{
   tu_stub();
}

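/* No lazily-allocated memory types are exposed, so the committed size is
 * always reported as zero.
 */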
void
tu_GetDeviceMemoryCommitment(VkDevice device,
                             VkDeviceMemory memory,
                             VkDeviceSize *pCommittedMemoryInBytes)
{
   *pCommittedMemoryInBytes = 0;
}

VkResult
tu_BindBufferMemory2(VkDevice device,
                     uint32_t bindInfoCount,
                     const VkBindBufferMemoryInfo *pBindInfos)
{
   for (uint32_t i = 0; i < bindInfoCount; ++i) {
      TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
      TU_FROM_HANDLE(tu_buffer, buffer, pBindInfos[i].buffer);

      if (mem) {
         buffer->bo = &mem->bo;
         buffer->bo_offset = pBindInfos[i].memoryOffset;
      } else {
         buffer->bo = NULL;
         buffer->bo_offset = 0;
      }
   }
   return VK_SUCCESS;
}

VkResult
tu_BindBufferMemory(VkDevice device,
                    VkBuffer buffer,
                    VkDeviceMemory memory,
                    VkDeviceSize memoryOffset)
{
   const VkBindBufferMemoryInfo info = {
      .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO,
      .buffer = buffer,
      .memory = memory,
      .memoryOffset = memoryOffset
   };

   return tu_BindBufferMemory2(device, 1, &info);
}

VkResult
tu_BindImageMemory2(VkDevice device,
                    uint32_t bindInfoCount,
                    const VkBindImageMemoryInfo *pBindInfos)
{
   for (uint32_t i = 0; i < bindInfoCount; ++i) {
      TU_FROM_HANDLE(tu_image, image, pBindInfos[i].image);
      TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);

      if (mem) {
         image->bo = &mem->bo;
         image->bo_offset = pBindInfos[i].memoryOffset;
      } else {
         image->bo = NULL;
         image->bo_offset = 0;
      }
   }

   return VK_SUCCESS;
}

VkResult
tu_BindImageMemory(VkDevice device,
                   VkImage image,
                   VkDeviceMemory memory,
                   VkDeviceSize memoryOffset)
{
   const VkBindImageMemoryInfo info = {
      .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO,
      .image = image,
      .memory = memory,
      .memoryOffset = memoryOffset
   };

   return tu_BindImageMemory2(device, 1, &info);
}

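/* Sparse binding is not supported; the request is accepted as a no-op.
 * Note that _fence is ignored, so it is never signaled here.
 */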
VkResult
tu_QueueBindSparse(VkQueue _queue,
                   uint32_t bindInfoCount,
                   const VkBindSparseInfo *pBindInfo,
                   VkFence _fence)
{
   return VK_SUCCESS;
}

// Queue semaphore functions

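/* Semaphores are host-side placeholders for now: creation only allocates
 * the tu_semaphore struct, and no kernel synchronization object backs it
 * yet.
 */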
VkResult
tu_CreateSemaphore(VkDevice _device,
                   const VkSemaphoreCreateInfo *pCreateInfo,
                   const VkAllocationCallbacks *pAllocator,
                   VkSemaphore *pSemaphore)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_semaphore *sem =
      vk_alloc2(&device->alloc, pAllocator, sizeof(*sem), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!sem)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pSemaphore = tu_semaphore_to_handle(sem);
   return VK_SUCCESS;
}

void
tu_DestroySemaphore(VkDevice _device,
                    VkSemaphore _semaphore,
                    const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);
   if (!_semaphore)
      return;

   vk_free2(&device->alloc, pAllocator, sem);
}

VkResult
tu_CreateEvent(VkDevice _device,
               const VkEventCreateInfo *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               VkEvent *pEvent)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_event *event =
      vk_alloc2(&device->alloc, pAllocator, sizeof(*event), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (!event)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   *pEvent = tu_event_to_handle(event);

   return VK_SUCCESS;
}

void
tu_DestroyEvent(VkDevice _device,
                VkEvent _event,
                const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_event, event, _event);

   if (!event)
      return;
   vk_free2(&device->alloc, pAllocator, event);
}

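/* Events are backed by a single word of mapped memory at event->map:
 * 1 means set, 0 means reset. Note that tu_CreateEvent() above never
 * points event->map at any storage, so the accessors below would
 * dereference an uninitialized pointer as written. A complete version
 * would allocate and map a small BO at creation time, roughly (a sketch,
 * assuming tu_event gains a `struct tu_bo bo` member):
 *
 *    VkResult result = tu_bo_init_new(device, &event->bo, 0x1000);
 *    if (result == VK_SUCCESS)
 *       result = tu_bo_map(device, &event->bo);
 *    if (result == VK_SUCCESS)
 *       event->map = event->bo.map;
 */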
VkResult
tu_GetEventStatus(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);

   if (*event->map == 1)
      return VK_EVENT_SET;
   return VK_EVENT_RESET;
}

VkResult
tu_SetEvent(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);
   *event->map = 1;

   return VK_SUCCESS;
}

VkResult
tu_ResetEvent(VkDevice _device, VkEvent _event)
{
   TU_FROM_HANDLE(tu_event, event, _event);
   *event->map = 0;

   return VK_SUCCESS;
}

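/* Buffer creation allocates only host-side metadata (size, usage, flags);
 * the backing storage is attached later through tu_BindBufferMemory2().
 */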
VkResult
tu_CreateBuffer(VkDevice _device,
                const VkBufferCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkBuffer *pBuffer)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_buffer *buffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);

   buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (buffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   buffer->size = pCreateInfo->size;
   buffer->usage = pCreateInfo->usage;
   buffer->flags = pCreateInfo->flags;

   *pBuffer = tu_buffer_to_handle(buffer);

   return VK_SUCCESS;
}

void
tu_DestroyBuffer(VkDevice _device,
                 VkBuffer _buffer,
                 const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);

   if (!buffer)
      return;

   vk_free2(&device->alloc, pAllocator, buffer);
}

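/* Framebuffer dimensions are clamped to the most restrictive attachment:
 * tu_CreateFramebuffer() starts from the requested width/height/layers
 * and shrinks each with MIN2() per attached image view, using the helper
 * below to bound the layer count (depth slices for 3D views, base layer
 * plus layer count otherwise).
 */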
static uint32_t
tu_surface_max_layer_count(struct tu_image_view *iview)
{
   return iview->type == VK_IMAGE_VIEW_TYPE_3D
             ? iview->extent.depth
             : (iview->base_layer + iview->layer_count);
}

VkResult
tu_CreateFramebuffer(VkDevice _device,
                     const VkFramebufferCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator,
                     VkFramebuffer *pFramebuffer)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_framebuffer *framebuffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);

   size_t size = sizeof(*framebuffer) + sizeof(struct tu_attachment_info) *
                                           pCreateInfo->attachmentCount;
   framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
                           VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (framebuffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   framebuffer->attachment_count = pCreateInfo->attachmentCount;
   framebuffer->width = pCreateInfo->width;
   framebuffer->height = pCreateInfo->height;
   framebuffer->layers = pCreateInfo->layers;
   for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
      VkImageView _iview = pCreateInfo->pAttachments[i];
      struct tu_image_view *iview = tu_image_view_from_handle(_iview);
      framebuffer->attachments[i].attachment = iview;

      framebuffer->width = MIN2(framebuffer->width, iview->extent.width);
      framebuffer->height = MIN2(framebuffer->height, iview->extent.height);
      framebuffer->layers =
         MIN2(framebuffer->layers, tu_surface_max_layer_count(iview));
   }

   *pFramebuffer = tu_framebuffer_to_handle(framebuffer);
   return VK_SUCCESS;
}

void
tu_DestroyFramebuffer(VkDevice _device,
                      VkFramebuffer _fb,
                      const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_framebuffer, fb, _fb);

   if (!fb)
      return;
   vk_free2(&device->alloc, pAllocator, fb);
}

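/* tu_init_sampler() is still a stub: no hardware sampler state is derived
 * from pCreateInfo yet.
 */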
static void
tu_init_sampler(struct tu_device *device,
                struct tu_sampler *sampler,
                const VkSamplerCreateInfo *pCreateInfo)
{
}

VkResult
tu_CreateSampler(VkDevice _device,
                 const VkSamplerCreateInfo *pCreateInfo,
                 const VkAllocationCallbacks *pAllocator,
                 VkSampler *pSampler)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_sampler *sampler;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);

   sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!sampler)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   tu_init_sampler(device, sampler, pCreateInfo);
   *pSampler = tu_sampler_to_handle(sampler);

   return VK_SUCCESS;
}

void
tu_DestroySampler(VkDevice _device,
                  VkSampler _sampler,
                  const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_sampler, sampler, _sampler);

   if (!sampler)
      return;
   vk_free2(&device->alloc, pAllocator, sampler);
}

/* vk_icd.h does not declare this function, so we declare it here to
 * suppress -Wmissing-prototypes.
 */
PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);

PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
{
   /* For the full details on loader interface versioning, see
    * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
    * What follows is a condensed summary, to help you navigate the large and
    * confusing official doc.
    *
    *   - Loader interface v0 is incompatible with later versions. We don't
    *     support it.
    *
    *   - In loader interface v1:
    *       - The first ICD entrypoint called by the loader is
    *         vk_icdGetInstanceProcAddr(). The ICD must statically expose this
    *         entrypoint.
    *       - The ICD must statically expose no other Vulkan symbol unless
    *         it is linked with -Bsymbolic.
    *       - Each dispatchable Vulkan handle created by the ICD must be
    *         a pointer to a struct whose first member is VK_LOADER_DATA. The
    *         ICD must initialize VK_LOADER_DATA.loadMagic to
    *         ICD_LOADER_MAGIC.
    *       - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
    *         vkDestroySurfaceKHR(). The ICD must be capable of working with
    *         such loader-managed surfaces.
    *
    *   - Loader interface v2 differs from v1 in:
    *       - The first ICD entrypoint called by the loader is
    *         vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
    *         statically expose this entrypoint.
    *
    *   - Loader interface v3 differs from v2 in:
    *       - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
    *         vkDestroySurfaceKHR(), and other APIs that use VkSurfaceKHR,
    *         because the loader no longer does so.
    */
   *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
   return VK_SUCCESS;
}

VkResult
tu_GetMemoryFdKHR(VkDevice _device,
                  const VkMemoryGetFdInfoKHR *pGetFdInfo,
                  int *pFd)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_device_memory, memory, pGetFdInfo->memory);

   assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);

   /* At the moment, we support only the handle types asserted below. */
   assert(pGetFdInfo->handleType ==
             VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
          pGetFdInfo->handleType ==
             VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);

   int prime_fd = tu_bo_export_dmabuf(device, &memory->bo);
   if (prime_fd < 0)
      return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);

   *pFd = prime_fd;
   return VK_SUCCESS;
}

VkResult
tu_GetMemoryFdPropertiesKHR(VkDevice _device,
                            VkExternalMemoryHandleTypeFlagBits handleType,
                            int fd,
                            VkMemoryFdPropertiesKHR *pMemoryFdProperties)
{
   assert(handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
   pMemoryFdProperties->memoryTypeBits = 1;
   return VK_SUCCESS;
}

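/* External semaphores and fences are not supported yet, so both queries
 * below report empty handle-type and feature masks.
 */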
void
tu_GetPhysicalDeviceExternalSemaphoreProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalSemaphoreInfo *pExternalSemaphoreInfo,
   VkExternalSemaphoreProperties *pExternalSemaphoreProperties)
{
   pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
   pExternalSemaphoreProperties->compatibleHandleTypes = 0;
   pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
}

void
tu_GetPhysicalDeviceExternalFenceProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalFenceInfo *pExternalFenceInfo,
   VkExternalFenceProperties *pExternalFenceProperties)
{
   pExternalFenceProperties->exportFromImportedHandleTypes = 0;
   pExternalFenceProperties->compatibleHandleTypes = 0;
   pExternalFenceProperties->externalFenceFeatures = 0;
}

VkResult
tu_CreateDebugReportCallbackEXT(
   VkInstance _instance,
   const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
   const VkAllocationCallbacks *pAllocator,
   VkDebugReportCallbackEXT *pCallback)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   return vk_create_debug_report_callback(&instance->debug_report_callbacks,
                                          pCreateInfo, pAllocator,
                                          &instance->alloc, pCallback);
}

void
tu_DestroyDebugReportCallbackEXT(VkInstance _instance,
                                 VkDebugReportCallbackEXT _callback,
                                 const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
                                    _callback, pAllocator, &instance->alloc);
}

void
tu_DebugReportMessageEXT(VkInstance _instance,
                         VkDebugReportFlagsEXT flags,
                         VkDebugReportObjectTypeEXT objectType,
                         uint64_t object,
                         size_t location,
                         int32_t messageCode,
                         const char *pLayerPrefix,
                         const char *pMessage)
{
   TU_FROM_HANDLE(tu_instance, instance, _instance);
   vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
                   object, location, messageCode, pLayerPrefix, pMessage);
}

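/* Device groups only ever contain a single physical device here, so peer
 * memory access is trivially the full feature set; the assert documents
 * that single-device assumption.
 */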
void
tu_GetDeviceGroupPeerMemoryFeatures(
   VkDevice device,
   uint32_t heapIndex,
   uint32_t localDeviceIndex,
   uint32_t remoteDeviceIndex,
   VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
{
   assert(localDeviceIndex == remoteDeviceIndex);

   *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
                          VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
}