anv_android.c revision 01e04c3f
/*
 * Copyright © 2017, Google Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <hardware/gralloc.h>
#include <hardware/hardware.h>
#include <hardware/hwvulkan.h>
#include <vulkan/vk_android_native_buffer.h>
#include <vulkan/vk_icd.h>
#include <sync/sync.h>

#include "anv_private.h"

static int anv_hal_open(const struct hw_module_t* mod, const char* id,
                        struct hw_device_t** dev);
static int anv_hal_close(struct hw_device_t *dev);

static void UNUSED
static_asserts(void)
{
   STATIC_ASSERT(HWVULKAN_DISPATCH_MAGIC == ICD_LOADER_MAGIC);
}

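/* Android HAL module descriptor. The Android HAL loader locates this module
 * description (HAL_MODULE_INFO_SYM) in the driver's shared object and calls
 * its open() method to obtain the hwvulkan device.
 */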
PUBLIC struct hwvulkan_module_t HAL_MODULE_INFO_SYM = {
   .common = {
      .tag = HARDWARE_MODULE_TAG,
      .module_api_version = HWVULKAN_MODULE_API_VERSION_0_1,
      .hal_api_version = HARDWARE_MAKE_API_VERSION(1, 0),
      .id = HWVULKAN_HARDWARE_MODULE_ID,
      .name = "Intel Vulkan HAL",
      .author = "Intel",
      .methods = &(hw_module_methods_t) {
         .open = anv_hal_open,
      },
   },
};

/* If any bits in test_mask are set, then unset them and return true. */
static inline bool
unmask32(uint32_t *inout_mask, uint32_t test_mask)
{
   uint32_t orig_mask = *inout_mask;
   *inout_mask &= ~test_mask;
   return *inout_mask != orig_mask;
}

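/* hw_module_methods_t::open() entry point. Allocates the hwvulkan_device_t
 * that exposes the instance-level entry points to the Android Vulkan loader.
 */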
static int
anv_hal_open(const struct hw_module_t* mod, const char* id,
             struct hw_device_t** dev)
{
   assert(mod == &HAL_MODULE_INFO_SYM.common);
   assert(strcmp(id, HWVULKAN_DEVICE_0) == 0);

   hwvulkan_device_t *hal_dev = malloc(sizeof(*hal_dev));
   if (!hal_dev)
      return -1;

   *hal_dev = (hwvulkan_device_t) {
      .common = {
         .tag = HARDWARE_DEVICE_TAG,
         .version = HWVULKAN_DEVICE_API_VERSION_0_1,
         .module = &HAL_MODULE_INFO_SYM.common,
         .close = anv_hal_close,
      },
      .EnumerateInstanceExtensionProperties = anv_EnumerateInstanceExtensionProperties,
      .CreateInstance = anv_CreateInstance,
      .GetInstanceProcAddr = anv_GetInstanceProcAddr,
   };

   *dev = &hal_dev->common;
   return 0;
}

static int
anv_hal_close(struct hw_device_t *dev)
{
   /* hwvulkan.h claims that hw_device_t::close() is never called. */
   return -1;
}

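/* Create a VkImage that aliases the dma-buf contained in a gralloc buffer's
 * native handle (VkNativeBufferANDROID). This is how VK_ANDROID_native_buffer
 * swapchain images get their backing memory.
 */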
VkResult
anv_image_from_gralloc(VkDevice device_h,
                       const VkImageCreateInfo *base_info,
                       const VkNativeBufferANDROID *gralloc_info,
                       const VkAllocationCallbacks *alloc,
                       VkImage *out_image_h)
{
   ANV_FROM_HANDLE(anv_device, device, device_h);
   VkImage image_h = VK_NULL_HANDLE;
   struct anv_image *image = NULL;
   struct anv_bo *bo = NULL;
   VkResult result;

   struct anv_image_create_info anv_info = {
      .vk_info = base_info,
      .isl_extra_usage_flags = ISL_SURF_USAGE_DISABLE_AUX_BIT,
   };

   if (gralloc_info->handle->numFds != 1) {
      return vk_errorf(device->instance, device,
                       VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR,
                       "VkNativeBufferANDROID::handle::numFds is %d, "
                       "expected 1", gralloc_info->handle->numFds);
   }

   /* Do not close the gralloc handle's dma_buf. The lifetime of the dma_buf
    * must exceed that of the gralloc handle, and we do not own the gralloc
    * handle.
    */
   int dma_buf = gralloc_info->handle->data[0];

   uint64_t bo_flags = ANV_BO_EXTERNAL;
   if (device->instance->physicalDevice.supports_48bit_addresses)
      bo_flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
   if (device->instance->physicalDevice.use_softpin)
      bo_flags |= EXEC_OBJECT_PINNED;

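   /* Import the gralloc buffer's dma-buf as a GEM BO. Going through the BO
    * cache means repeated imports of the same underlying buffer resolve to
    * the same anv_bo.
    */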
   result = anv_bo_cache_import(device, &device->bo_cache, dma_buf, bo_flags,
                                &bo);
   if (result != VK_SUCCESS) {
      return vk_errorf(device->instance, device, result,
                       "failed to import dma-buf from VkNativeBufferANDROID");
   }

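   /* Query the kernel for the BO's tiling so that the VkImage can be created
    * with a matching ISL tiling.
    */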
   int i915_tiling = anv_gem_get_tiling(device, bo->gem_handle);
   switch (i915_tiling) {
   case I915_TILING_NONE:
      anv_info.isl_tiling_flags = ISL_TILING_LINEAR_BIT;
      break;
   case I915_TILING_X:
      anv_info.isl_tiling_flags = ISL_TILING_X_BIT;
      break;
   case I915_TILING_Y:
      anv_info.isl_tiling_flags = ISL_TILING_Y0_BIT;
      break;
   case -1:
      result = vk_errorf(device->instance, device,
                         VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR,
                         "DRM_IOCTL_I915_GEM_GET_TILING failed for "
                         "VkNativeBufferANDROID");
      goto fail_tiling;
   default:
      result = vk_errorf(device->instance, device,
                         VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR,
                         "DRM_IOCTL_I915_GEM_GET_TILING returned unknown "
                         "tiling %d for VkNativeBufferANDROID", i915_tiling);
      goto fail_tiling;
   }

   enum isl_format format = anv_get_isl_format(&device->info,
                                               base_info->format,
                                               VK_IMAGE_ASPECT_COLOR_BIT,
                                               base_info->tiling);
   assert(format != ISL_FORMAT_UNSUPPORTED);

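   /* gralloc reports its stride in pixels, but anv_image_create() expects a
    * row pitch in bytes, so scale by the format's bytes per pixel.
    */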
   anv_info.stride = gralloc_info->stride *
                     (isl_format_get_layout(format)->bpb / 8);

   result = anv_image_create(device_h, &anv_info, alloc, &image_h);
   image = anv_image_from_handle(image_h);
   if (result != VK_SUCCESS)
      goto fail_create;

   if (bo->size < image->size) {
      result = vk_errorf(device->instance, device,
                         VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR,
                         "dma-buf from VkNativeBufferANDROID is too small for "
                         "VkImage: %"PRIu64"B < %"PRIu64"B",
                         bo->size, image->size);
      goto fail_size;
   }

   assert(image->n_planes == 1);
   assert(image->planes[0].address.offset == 0);

   image->planes[0].address.bo = bo;
   image->planes[0].bo_is_owned = true;

   /* We need to set the WRITE flag on window system buffers so that GEM will
    * know we're writing to them and synchronize uses on other rings (for
    * example, if the display server uses the blitter ring).
    *
    * If this function fails and if the imported bo was resident in the cache,
    * we should avoid updating the bo's flags. Therefore, we defer updating
    * the flags until success is certain.
    */
   bo->flags &= ~EXEC_OBJECT_ASYNC;
   bo->flags |= EXEC_OBJECT_WRITE;

   /* Don't clobber the out-parameter until success is certain. */
   *out_image_h = image_h;

   return VK_SUCCESS;

 fail_size:
   anv_DestroyImage(device_h, image_h, alloc);
 fail_create:
 fail_tiling:
   anv_bo_cache_release(device, &device->bo_cache, bo);

   return result;
}

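/* VK_ANDROID_native_buffer entry point. Translates the VkImageUsageFlags that
 * the application requested for its swapchain into gralloc usage bits.
 */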
VkResult anv_GetSwapchainGrallocUsageANDROID(
    VkDevice            device_h,
    VkFormat            format,
    VkImageUsageFlags   imageUsage,
    int*                grallocUsage)
{
   ANV_FROM_HANDLE(anv_device, device, device_h);
   struct anv_physical_device *phys_dev = &device->instance->physicalDevice;
   VkPhysicalDevice phys_dev_h = anv_physical_device_to_handle(phys_dev);
   VkResult result;

   *grallocUsage = 0;
   intel_logd("%s: format=%d, usage=0x%x", __func__, format, imageUsage);

   /* WARNING: Android's libvulkan.so hardcodes the VkImageUsageFlags
    * returned to applications via VkSurfaceCapabilitiesKHR::supportedUsageFlags.
    * The relevant code in libvulkan/swapchain.cpp contains this fun comment:
    *
    *     TODO(jessehall): I think these are right, but haven't thought hard
    *     about it. Do we need to query the driver for support of any of
    *     these?
    *
    * Any disagreement between this function and the hardcoded
    * VkSurfaceCapabilitiesKHR::supportedUsageFlags causes tests
    * dEQP-VK.wsi.android.swapchain.*.image_usage to fail.
    */

   const VkPhysicalDeviceImageFormatInfo2KHR image_format_info = {
      .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2_KHR,
      .format = format,
      .type = VK_IMAGE_TYPE_2D,
      .tiling = VK_IMAGE_TILING_OPTIMAL,
      .usage = imageUsage,
   };

   VkImageFormatProperties2KHR image_format_props = {
      .sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2_KHR,
   };

   /* Check that requested format and usage are supported. */
   result = anv_GetPhysicalDeviceImageFormatProperties2(phys_dev_h,
               &image_format_info, &image_format_props);
   if (result != VK_SUCCESS) {
      return vk_errorf(device->instance, device, result,
                       "anv_GetPhysicalDeviceImageFormatProperties2 failed "
                       "inside %s", __func__);
   }

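   /* Translate the requested usage into gralloc usage bits, consuming each
    * recognized flag from imageUsage as we go.
    */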
   if (unmask32(&imageUsage, VK_IMAGE_USAGE_TRANSFER_DST_BIT |
                             VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT))
      *grallocUsage |= GRALLOC_USAGE_HW_RENDER;

   if (unmask32(&imageUsage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                             VK_IMAGE_USAGE_SAMPLED_BIT |
                             VK_IMAGE_USAGE_STORAGE_BIT |
                             VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT))
      *grallocUsage |= GRALLOC_USAGE_HW_TEXTURE;

   /* All VkImageUsageFlags not explicitly checked here are unsupported for
    * gralloc swapchains.
    */
   if (imageUsage != 0) {
      return vk_errorf(device->instance, device, VK_ERROR_FORMAT_NOT_SUPPORTED,
                       "unsupported VkImageUsageFlags(0x%x) for gralloc "
                       "swapchain", imageUsage);
   }

   /* The formats below support GRALLOC_USAGE_HW_FB (that is, display
    * scanout). This short list of formats is universally supported on Intel
    * but is incomplete.  The full set of supported formats depends on the
    * kernel and hardware.
    *
    * FINISHME: Advertise all display-supported formats.
    */
   switch (format) {
   case VK_FORMAT_B8G8R8A8_UNORM:
   case VK_FORMAT_B5G6R5_UNORM_PACK16:
   case VK_FORMAT_R8G8B8A8_UNORM:
   case VK_FORMAT_R8G8B8A8_SRGB:
      *grallocUsage |= GRALLOC_USAGE_HW_FB |
                       GRALLOC_USAGE_HW_COMPOSER |
                       GRALLOC_USAGE_EXTERNAL_DISP;
      break;
   default:
      intel_logw("%s: unsupported format=%d", __func__, format);
   }

   if (*grallocUsage == 0)
      return VK_ERROR_FORMAT_NOT_SUPPORTED;

   return VK_SUCCESS;
}

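/* VK_ANDROID_native_buffer entry point. Called when the Android swapchain
 * hands a gralloc-backed image back to the application: wait on the incoming
 * native fence, then signal the provided semaphore and/or fence.
 */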
VkResult
anv_AcquireImageANDROID(
      VkDevice            device_h,
      VkImage             image_h,
      int                 nativeFenceFd,
      VkSemaphore         semaphore_h,
      VkFence             fence_h)
{
   ANV_FROM_HANDLE(anv_device, device, device_h);
   VkResult result = VK_SUCCESS;

   if (nativeFenceFd != -1) {
      /* As a simple, first-pass implementation of VK_ANDROID_native_buffer, we
       * block on the nativeFenceFd. This may introduce latency and is
       * definitely inefficient, yet it's correct.
       *
       * FINISHME(chadv): Import the nativeFenceFd into the VkSemaphore and
       * VkFence.
       */
      if (sync_wait(nativeFenceFd, /*timeout*/ -1) < 0) {
         result = vk_errorf(device->instance, device, VK_ERROR_DEVICE_LOST,
                            "%s: failed to wait on nativeFenceFd=%d",
                            __func__, nativeFenceFd);
      }

      /* From VK_ANDROID_native_buffer's pseudo spec
       * (https://source.android.com/devices/graphics/implement-vulkan):
       *
       *    The driver takes ownership of the fence fd and is responsible for
       *    closing it [...] even if vkAcquireImageANDROID fails and returns
       *    an error.
       */
      close(nativeFenceFd);

      if (result != VK_SUCCESS)
         return result;
   }

   if (semaphore_h || fence_h) {
      /* Thanks to implicit sync, the image is ready for GPU access.  But we
       * must still put the semaphore into the "submit" state; otherwise the
       * client may get unexpected behavior if the client later uses it as
       * a wait semaphore.
       *
       * Because we blocked above on the nativeFenceFd, the image is also
       * ready for foreign-device access (including CPU access). But we must
       * still signal the fence; otherwise the client may get unexpected
       * behavior if the client later waits on it.
       *
       * For some values of anv_semaphore_type, we must submit the semaphore
       * to execbuf in order to signal it.  Likewise for anv_fence_type.
       * Instead of open-coding here the signal operation for each
       * anv_semaphore_type and anv_fence_type, we piggy-back on
       * vkQueueSubmit.
       */
      const VkSubmitInfo submit = {
         .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
         .waitSemaphoreCount = 0,
         .commandBufferCount = 0,
         .signalSemaphoreCount = (semaphore_h ? 1 : 0),
         .pSignalSemaphores = &semaphore_h,
      };

      result = anv_QueueSubmit(anv_queue_to_handle(&device->queue), 1,
                               &submit, fence_h);
      if (result != VK_SUCCESS) {
         return vk_errorf(device->instance, device, result,
                          "anv_QueueSubmit failed inside %s", __func__);
      }
   }

   return VK_SUCCESS;
}

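/* VK_ANDROID_native_buffer entry point. Called at present time: flush the
 * present's wait semaphores to the kernel and return a native fence fd
 * (currently always -1, relying on implicit sync).
 */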
VkResult
anv_QueueSignalReleaseImageANDROID(
      VkQueue             queue,
      uint32_t            waitSemaphoreCount,
      const VkSemaphore*  pWaitSemaphores,
      VkImage             image,
      int*                pNativeFenceFd)
{
   VkResult result;

   if (waitSemaphoreCount == 0)
      goto done;

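   /* Flush all of the wait semaphores into the kernel with an empty
    * submission; see the implicit-sync note below.
    */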
   result = anv_QueueSubmit(queue, 1,
      &(VkSubmitInfo) {
            .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
            .waitSemaphoreCount = waitSemaphoreCount,
            .pWaitSemaphores = pWaitSemaphores,
      },
      (VkFence) VK_NULL_HANDLE);
   if (result != VK_SUCCESS)
      return result;

 done:
   if (pNativeFenceFd) {
      /* We can rely on implicit sync because above we submitted all of the
       * wait semaphores to the queue.
       */
      *pNativeFenceFd = -1;
   }

   return VK_SUCCESS;
}