/*
 * Copyright © 2019 Raspberry Pi
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "v3dv_private.h"

#include "drm-uapi/drm_fourcc.h"
#include "util/format/u_format.h"
#include "util/u_math.h"
#include "vk_format_info.h"
#include "vk_util.h"
#include "vulkan/wsi/wsi_common.h"

/**
 * Computes the HW's UIFblock padding for a given height/cpp.
 *
 * The goal of the padding is to keep pages of the same color (bank number) at
 * least half a page away from each other vertically when crossing between
 * columns of UIF blocks.
 */
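/* In outline (a sketch; the exact thresholds come from the PAGE_*_UB_ROWS
 * macros): no padding when the height is already page-cache aligned or the
 * whole level fits within the page cache; pad up to a 1.5-page offset when we
 * are short of it; round up to the page cache size and rely on the XOR mode
 * when we are close to it; otherwise leave the height alone, as it is already
 * far enough from a bank collision.
 */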
static uint32_t
v3d_get_ub_pad(uint32_t cpp, uint32_t height)
{
   uint32_t utile_h = v3d_utile_height(cpp);
   uint32_t uif_block_h = utile_h * 2;
   uint32_t height_ub = height / uif_block_h;

   uint32_t height_offset_in_pc = height_ub % PAGE_CACHE_UB_ROWS;

   /* For the perfectly-aligned-for-UIF-XOR case, don't add any pad. */
   if (height_offset_in_pc == 0)
      return 0;

   /* Try padding up to where we're offset by at least half a page. */
   if (height_offset_in_pc < PAGE_UB_ROWS_TIMES_1_5) {
      /* If we fit entirely in the page cache, don't pad. */
      if (height_ub < PAGE_CACHE_UB_ROWS)
         return 0;
      else
         return PAGE_UB_ROWS_TIMES_1_5 - height_offset_in_pc;
   }

   /* If we're close to being aligned to page cache size, then round up
    * and rely on XOR.
    */
   if (height_offset_in_pc > PAGE_CACHE_MINUS_1_5_UB_ROWS)
      return PAGE_CACHE_UB_ROWS - height_offset_in_pc;

   /* Otherwise, we're far enough away (top and bottom) to not need any
    * padding.
    */
   return 0;
}

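/* Computes the per-miplevel slice layout for an image: tiling mode, stride,
 * padded height, offset and size.  Mip levels are laid out in memory from the
 * smallest (last) level to level 0, so the loop below walks the levels in
 * reverse while accumulating offsets.
 */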
static void
v3d_setup_slices(struct v3dv_image *image)
{
   assert(image->cpp > 0);

   uint32_t width = image->vk.extent.width;
   uint32_t height = image->vk.extent.height;
   uint32_t depth = image->vk.extent.depth;

   /* Note that power-of-two padding is based on level 1.  These are not
    * equivalent to just util_next_power_of_two(dimension), because at a
    * level 0 dimension of 9, the level 1 power-of-two padded value is 4,
    * not 8.
    */
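   /* Worked example for width 9: u_minify(9, 1) = 4 and
    * util_next_power_of_two(4) = 4, so pot_width = 8, whose level 1 minify
    * is 4; the naive util_next_power_of_two(9) = 16 would have given 8.
    */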
   uint32_t pot_width = 2 * util_next_power_of_two(u_minify(width, 1));
   uint32_t pot_height = 2 * util_next_power_of_two(u_minify(height, 1));
   uint32_t pot_depth = 2 * util_next_power_of_two(u_minify(depth, 1));

   uint32_t utile_w = v3d_utile_width(image->cpp);
   uint32_t utile_h = v3d_utile_height(image->cpp);
   uint32_t uif_block_w = utile_w * 2;
   uint32_t uif_block_h = utile_h * 2;

   uint32_t block_width = vk_format_get_blockwidth(image->vk.format);
   uint32_t block_height = vk_format_get_blockheight(image->vk.format);

   assert(image->vk.samples == VK_SAMPLE_COUNT_1_BIT ||
          image->vk.samples == VK_SAMPLE_COUNT_4_BIT);
   bool msaa = image->vk.samples != VK_SAMPLE_COUNT_1_BIT;

   bool uif_top = msaa;

   assert(image->vk.array_layers > 0);
   assert(depth > 0);
   assert(image->vk.mip_levels >= 1);

   uint32_t offset = 0;
   for (int32_t i = image->vk.mip_levels - 1; i >= 0; i--) {
      struct v3d_resource_slice *slice = &image->slices[i];

      uint32_t level_width, level_height, level_depth;
      if (i < 2) {
         level_width = u_minify(width, i);
         level_height = u_minify(height, i);
      } else {
         level_width = u_minify(pot_width, i);
         level_height = u_minify(pot_height, i);
      }

      if (i < 1)
         level_depth = u_minify(depth, i);
      else
         level_depth = u_minify(pot_depth, i);

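      /* 4x multisampled levels are allocated at twice the width and height;
       * as far as this allocation code is concerned, each pixel's four
       * samples occupy a 2x2 footprint in the level.
       */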
      if (msaa) {
         level_width *= 2;
         level_height *= 2;
      }

      level_width = DIV_ROUND_UP(level_width, block_width);
      level_height = DIV_ROUND_UP(level_height, block_height);

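      /* Tiling mode selection, from smallest to largest layout: RASTER for
       * linear images, LINEARTILE when either dimension fits within a single
       * utile, UBLINEAR (one or two columns of UIF blocks) for narrow levels,
       * and full UIF (with or without XOR banking) for the rest.
       */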
      if (!image->tiled) {
         slice->tiling = V3D_TILING_RASTER;
         if (image->vk.image_type == VK_IMAGE_TYPE_1D)
            level_width = align(level_width, 64 / image->cpp);
      } else {
         if ((i != 0 || !uif_top) &&
             (level_width <= utile_w || level_height <= utile_h)) {
            slice->tiling = V3D_TILING_LINEARTILE;
            level_width = align(level_width, utile_w);
            level_height = align(level_height, utile_h);
         } else if ((i != 0 || !uif_top) && level_width <= uif_block_w) {
            slice->tiling = V3D_TILING_UBLINEAR_1_COLUMN;
            level_width = align(level_width, uif_block_w);
            level_height = align(level_height, uif_block_h);
         } else if ((i != 0 || !uif_top) && level_width <= 2 * uif_block_w) {
            slice->tiling = V3D_TILING_UBLINEAR_2_COLUMN;
            level_width = align(level_width, 2 * uif_block_w);
            level_height = align(level_height, uif_block_h);
         } else {
            /* We align the width to a 4-block column of UIF blocks, but we
             * only align height to UIF blocks.
             */
            level_width = align(level_width, 4 * uif_block_w);
            level_height = align(level_height, uif_block_h);

            slice->ub_pad = v3d_get_ub_pad(image->cpp, level_height);
            level_height += slice->ub_pad * uif_block_h;

            /* If the padding set us up to be aligned to the page cache size,
             * then the HW will use the XOR bit on odd columns to get us
             * perfectly misaligned.
             */
            if ((level_height / uif_block_h) %
                (V3D_PAGE_CACHE_SIZE / V3D_UIFBLOCK_ROW_SIZE) == 0) {
               slice->tiling = V3D_TILING_UIF_XOR;
            } else {
               slice->tiling = V3D_TILING_UIF_NO_XOR;
            }
         }
      }

      slice->offset = offset;
      slice->stride = level_width * image->cpp;
      slice->padded_height = level_height;
      if (slice->tiling == V3D_TILING_UIF_NO_XOR ||
          slice->tiling == V3D_TILING_UIF_XOR) {
         slice->padded_height_of_output_image_in_uif_blocks =
            slice->padded_height / (2 * v3d_utile_height(image->cpp));
      }

      slice->size = level_height * slice->stride;
      uint32_t slice_total_size = slice->size * level_depth;

      /* The HW aligns level 1's base to a page if any of level 1 or
       * below could be UIF XOR.  The lower levels then inherit the
       * alignment for as long as necessary, thanks to being power of
       * two aligned.
       */
      if (i == 1 &&
          level_width > 4 * uif_block_w &&
          level_height > PAGE_CACHE_MINUS_1_5_UB_ROWS * uif_block_h) {
         slice_total_size = align(slice_total_size, V3D_UIFCFG_PAGE_SIZE);
      }

      offset += slice_total_size;
   }

   image->size = offset;

   /* UIF/UBLINEAR levels need to be aligned to UIF-blocks, and LT only
    * needs to be aligned to utile boundaries.  Since tiles are laid out
    * from small to big in memory, we need to align the later UIF slices
    * to UIF blocks, if they were preceded by non-UIF-block-aligned LT
    * slices.
    *
    * We additionally align to 4k, which improves UIF XOR performance.
    */
   image->alignment = image->tiled ? 4096 : image->cpp;
   uint32_t align_offset =
      align(image->slices[0].offset, image->alignment) - image->slices[0].offset;
   if (align_offset) {
      image->size += align_offset;
      for (int i = 0; i < image->vk.mip_levels; i++)
         image->slices[i].offset += align_offset;
   }

   /* Arrays and cube textures have a stride which is the distance from
    * one full mipmap tree to the next (64-byte aligned).  For 3D textures,
    * we need to program the stride between slices of miplevel 0.
    */
   if (image->vk.image_type != VK_IMAGE_TYPE_3D) {
      image->cube_map_stride =
         align(image->slices[0].offset + image->slices[0].size, 64);
      image->size += image->cube_map_stride * (image->vk.array_layers - 1);
   } else {
      image->cube_map_stride = image->slices[0].size;
   }
}

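/* Returns the memory offset of the given layer within the given mip level.
 * For 3D images a "layer" here is a depth slice of the level, spaced
 * slice->size apart; for arrays and cubes it is a whole mipmap tree, spaced
 * cube_map_stride apart.
 */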
uint32_t
v3dv_layer_offset(const struct v3dv_image *image, uint32_t level, uint32_t layer)
{
   const struct v3d_resource_slice *slice = &image->slices[level];

   if (image->vk.image_type == VK_IMAGE_TYPE_3D)
      return image->mem_offset + slice->offset + layer * slice->size;
   else
      return image->mem_offset + slice->offset + layer * image->cube_map_stride;
}

static VkResult
create_image(struct v3dv_device *device,
             const VkImageCreateInfo *pCreateInfo,
             const VkAllocationCallbacks *pAllocator,
             VkImage *pImage)
{
   struct v3dv_image *image = NULL;

   image = vk_image_create(&device->vk, pCreateInfo, pAllocator, sizeof(*image));
   if (image == NULL)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   /* When using the simulator, the WSI common code will see that our
    * driver's WSI device doesn't match the display device, so it will not
    * attempt to present directly from the swapchain images.  Instead, it
    * will take the prime blit path (the use_prime_blit flag in struct
    * wsi_swapchain), copying the contents of the swapchain images to a
    * linear buffer with an appropriate row stride for presentation.  As a
    * result, on that path swapchain images have no special requirements and
    * are not created with the pNext structs below.
    */
   VkImageTiling tiling = pCreateInfo->tiling;
   uint64_t modifier = DRM_FORMAT_MOD_INVALID;
   if (tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
      const VkImageDrmFormatModifierListCreateInfoEXT *mod_info =
         vk_find_struct_const(pCreateInfo->pNext,
                              IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT);
      const VkImageDrmFormatModifierExplicitCreateInfoEXT *explicit_mod_info =
         vk_find_struct_const(pCreateInfo->pNext,
                              IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT);
      assert(mod_info || explicit_mod_info);

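      /* Prefer the UIF modifier whenever the list offers it; LINEAR is only
       * kept as a fallback when no better modifier has been seen.
       */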
      if (mod_info) {
         for (uint32_t i = 0; i < mod_info->drmFormatModifierCount; i++) {
            switch (mod_info->pDrmFormatModifiers[i]) {
            case DRM_FORMAT_MOD_LINEAR:
               if (modifier == DRM_FORMAT_MOD_INVALID)
                  modifier = DRM_FORMAT_MOD_LINEAR;
               break;
            case DRM_FORMAT_MOD_BROADCOM_UIF:
               modifier = DRM_FORMAT_MOD_BROADCOM_UIF;
               break;
            }
         }
      } else {
         modifier = explicit_mod_info->drmFormatModifier;
      }
      assert(modifier == DRM_FORMAT_MOD_LINEAR ||
             modifier == DRM_FORMAT_MOD_BROADCOM_UIF);
   } else if (pCreateInfo->imageType == VK_IMAGE_TYPE_1D ||
              image->vk.wsi_legacy_scanout) {
      tiling = VK_IMAGE_TILING_LINEAR;
   }

   const struct v3dv_format *format =
      v3dv_X(device, get_format)(pCreateInfo->format);
   v3dv_assert(format != NULL && format->supported);

   assert(pCreateInfo->samples == VK_SAMPLE_COUNT_1_BIT ||
          pCreateInfo->samples == VK_SAMPLE_COUNT_4_BIT);

   image->format = format;
   image->cpp = vk_format_get_blocksize(image->vk.format);
   image->tiled = tiling == VK_IMAGE_TILING_OPTIMAL ||
                  (tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT &&
                   modifier != DRM_FORMAT_MOD_LINEAR);

   image->vk.tiling = tiling;
   image->vk.drm_format_mod = modifier;

   /* Our meta paths can create image views with compatible formats for any
    * image, so always set this flag to keep the common Vulkan image code
    * happy.
    */
   image->vk.create_flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;

   v3d_setup_slices(image);

   *pImage = v3dv_image_to_handle(image);

   return VK_SUCCESS;
}

static VkResult
create_image_from_swapchain(struct v3dv_device *device,
                            const VkImageCreateInfo *pCreateInfo,
                            const VkImageSwapchainCreateInfoKHR *swapchain_info,
                            const VkAllocationCallbacks *pAllocator,
                            VkImage *pImage)
{
   struct v3dv_image *swapchain_image =
      v3dv_wsi_get_image_from_swapchain(swapchain_info->swapchain, 0);
   assert(swapchain_image);

   VkImageCreateInfo local_create_info = *pCreateInfo;
   local_create_info.pNext = NULL;

   /* Added by wsi code. */
   local_create_info.usage |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;

   /* The spec requires TILING_OPTIMAL as input, but the swapchain image may
    * privately use a different tiling.  See spec anchor
    * #swapchain-wsi-image-create-info .
    */
   assert(local_create_info.tiling == VK_IMAGE_TILING_OPTIMAL);
   local_create_info.tiling = swapchain_image->vk.tiling;

   VkImageDrmFormatModifierListCreateInfoEXT local_modifier_info = {
      .sType = VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT,
      .drmFormatModifierCount = 1,
      .pDrmFormatModifiers = &swapchain_image->vk.drm_format_mod,
   };

   if (swapchain_image->vk.drm_format_mod != DRM_FORMAT_MOD_INVALID)
      __vk_append_struct(&local_create_info, &local_modifier_info);

   assert(swapchain_image->vk.image_type == local_create_info.imageType);
   assert(swapchain_image->vk.format == local_create_info.format);
   assert(swapchain_image->vk.extent.width == local_create_info.extent.width);
   assert(swapchain_image->vk.extent.height == local_create_info.extent.height);
   assert(swapchain_image->vk.extent.depth == local_create_info.extent.depth);
   assert(swapchain_image->vk.array_layers == local_create_info.arrayLayers);
   assert(swapchain_image->vk.samples == local_create_info.samples);
   assert(swapchain_image->vk.tiling == local_create_info.tiling);
   assert((swapchain_image->vk.usage & local_create_info.usage) ==
          local_create_info.usage);

   return create_image(device, &local_create_info, pAllocator, pImage);
}

VKAPI_ATTR VkResult VKAPI_CALL
v3dv_CreateImage(VkDevice _device,
                 const VkImageCreateInfo *pCreateInfo,
                 const VkAllocationCallbacks *pAllocator,
                 VkImage *pImage)
{
   V3DV_FROM_HANDLE(v3dv_device, device, _device);

   const VkImageSwapchainCreateInfoKHR *swapchain_info =
      vk_find_struct_const(pCreateInfo->pNext, IMAGE_SWAPCHAIN_CREATE_INFO_KHR);
   if (swapchain_info && swapchain_info->swapchain != VK_NULL_HANDLE)
      return create_image_from_swapchain(device, pCreateInfo, swapchain_info,
                                         pAllocator, pImage);

   return create_image(device, pCreateInfo, pAllocator, pImage);
}

VKAPI_ATTR void VKAPI_CALL
v3dv_GetImageSubresourceLayout(VkDevice device,
                               VkImage _image,
                               const VkImageSubresource *subresource,
                               VkSubresourceLayout *layout)
{
   V3DV_FROM_HANDLE(v3dv_image, image, _image);

   const struct v3d_resource_slice *slice =
      &image->slices[subresource->mipLevel];
   layout->offset =
      v3dv_layer_offset(image, subresource->mipLevel, subresource->arrayLayer);
   layout->rowPitch = slice->stride;
   layout->depthPitch = image->cube_map_stride;
   layout->arrayPitch = image->cube_map_stride;

   if (image->vk.image_type != VK_IMAGE_TYPE_3D) {
      layout->size = slice->size;
   } else {
      /* For 3D images, the size of the slice represents the size of a 2D slice
       * in the 3D image, so we have to multiply by the depth extent of the
       * miplevel. For levels other than the first, we just compute the size
       * as the distance between consecutive levels (notice that mip levels are
       * arranged in memory from last to first).
       */
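      /* E.g. for level 1 this is slices[0].offset - slices[1].offset, since
       * level 0 is laid out last and therefore has the higher offset.
       */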
      if (subresource->mipLevel == 0) {
         layout->size = slice->size * image->vk.extent.depth;
      } else {
         const struct v3d_resource_slice *prev_slice =
            &image->slices[subresource->mipLevel - 1];
         layout->size = prev_slice->offset - slice->offset;
      }
   }
}

VKAPI_ATTR void VKAPI_CALL
v3dv_DestroyImage(VkDevice _device,
                  VkImage _image,
                  const VkAllocationCallbacks* pAllocator)
{
   V3DV_FROM_HANDLE(v3dv_device, device, _device);
   V3DV_FROM_HANDLE(v3dv_image, image, _image);

   if (image == NULL)
      return;

   vk_image_destroy(&device->vk, pAllocator, &image->vk);
}

VkImageViewType
v3dv_image_type_to_view_type(VkImageType type)
{
   switch (type) {
   case VK_IMAGE_TYPE_1D: return VK_IMAGE_VIEW_TYPE_1D;
   case VK_IMAGE_TYPE_2D: return VK_IMAGE_VIEW_TYPE_2D;
   case VK_IMAGE_TYPE_3D: return VK_IMAGE_VIEW_TYPE_3D;
   default:
      unreachable("Invalid image type");
   }
}

static enum pipe_swizzle
vk_component_mapping_to_pipe_swizzle(VkComponentSwizzle swz)
{
   assert(swz != VK_COMPONENT_SWIZZLE_IDENTITY);

   switch (swz) {
   case VK_COMPONENT_SWIZZLE_ZERO:
      return PIPE_SWIZZLE_0;
   case VK_COMPONENT_SWIZZLE_ONE:
      return PIPE_SWIZZLE_1;
   case VK_COMPONENT_SWIZZLE_R:
      return PIPE_SWIZZLE_X;
   case VK_COMPONENT_SWIZZLE_G:
      return PIPE_SWIZZLE_Y;
   case VK_COMPONENT_SWIZZLE_B:
      return PIPE_SWIZZLE_Z;
   case VK_COMPONENT_SWIZZLE_A:
      return PIPE_SWIZZLE_W;
   default:
      unreachable("Unknown VkComponentSwizzle");
   }
}

VKAPI_ATTR VkResult VKAPI_CALL
v3dv_CreateImageView(VkDevice _device,
                     const VkImageViewCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator,
                     VkImageView *pView)
{
   V3DV_FROM_HANDLE(v3dv_device, device, _device);
   V3DV_FROM_HANDLE(v3dv_image, image, pCreateInfo->image);
   struct v3dv_image_view *iview;

   iview = vk_image_view_create(&device->vk, pCreateInfo, pAllocator,
                                sizeof(*iview));
   if (iview == NULL)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   const VkImageSubresourceRange *range = &pCreateInfo->subresourceRange;

   iview->offset = v3dv_layer_offset(image, iview->vk.base_mip_level,
                                     iview->vk.base_array_layer);

   /* If we have a D24S8 format but the view only selects the stencil aspect,
    * we want to re-interpret the format as RGBA8_UINT, then map our stencil
    * data reads to the R component and ignore the GBA channels that contain
    * the depth aspect data.
    */
   VkFormat format;
   uint8_t image_view_swizzle[4];
   if (pCreateInfo->format == VK_FORMAT_D24_UNORM_S8_UINT &&
       range->aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT) {
      format = VK_FORMAT_R8G8B8A8_UINT;
      image_view_swizzle[0] = PIPE_SWIZZLE_X;
      image_view_swizzle[1] = PIPE_SWIZZLE_0;
      image_view_swizzle[2] = PIPE_SWIZZLE_0;
      image_view_swizzle[3] = PIPE_SWIZZLE_1;
   } else {
      format = pCreateInfo->format;

      /* FIXME: we are doing this vk-to-pipe swizzle mapping just to call
       * util_format_compose_swizzles.  It would be good to check whether it
       * would be better to reimplement the latter in terms of the Vulkan
       * component mapping directly.
       */
      image_view_swizzle[0] =
         vk_component_mapping_to_pipe_swizzle(iview->vk.swizzle.r);
      image_view_swizzle[1] =
         vk_component_mapping_to_pipe_swizzle(iview->vk.swizzle.g);
      image_view_swizzle[2] =
         vk_component_mapping_to_pipe_swizzle(iview->vk.swizzle.b);
      image_view_swizzle[3] =
         vk_component_mapping_to_pipe_swizzle(iview->vk.swizzle.a);
   }

   iview->vk.format = format;
   iview->format = v3dv_X(device, get_format)(format);
   assert(iview->format && iview->format->supported);

   if (vk_format_is_depth_or_stencil(iview->vk.format)) {
      iview->internal_type =
         v3dv_X(device, get_internal_depth_type)(iview->vk.format);
   } else {
      v3dv_X(device, get_internal_type_bpp_for_output_format)
         (iview->format->rt_type, &iview->internal_type, &iview->internal_bpp);
   }

   const uint8_t *format_swizzle = v3dv_get_format_swizzle(device, format);
   util_format_compose_swizzles(format_swizzle, image_view_swizzle,
                                iview->swizzle);
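   /* If the composed swizzle sources the first component from Z (blue), the
    * view is reading red/blue swapped relative to the HW format.
    */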
   iview->swap_rb = iview->swizzle[0] == PIPE_SWIZZLE_Z;

   v3dv_X(device, pack_texture_shader_state)(device, iview);

   *pView = v3dv_image_view_to_handle(iview);

   return VK_SUCCESS;
}

VKAPI_ATTR void VKAPI_CALL
v3dv_DestroyImageView(VkDevice _device,
                      VkImageView imageView,
                      const VkAllocationCallbacks* pAllocator)
{
   V3DV_FROM_HANDLE(v3dv_device, device, _device);
   V3DV_FROM_HANDLE(v3dv_image_view, image_view, imageView);

   if (image_view == NULL)
      return;

   vk_image_view_destroy(&device->vk, pAllocator, &image_view->vk);
}

VKAPI_ATTR VkResult VKAPI_CALL
v3dv_CreateBufferView(VkDevice _device,
                      const VkBufferViewCreateInfo *pCreateInfo,
                      const VkAllocationCallbacks *pAllocator,
                      VkBufferView *pView)
{
   V3DV_FROM_HANDLE(v3dv_device, device, _device);

   struct v3dv_buffer *buffer =
      v3dv_buffer_from_handle(pCreateInfo->buffer);

   struct v3dv_buffer_view *view =
      vk_object_zalloc(&device->vk, pAllocator, sizeof(*view),
                       VK_OBJECT_TYPE_BUFFER_VIEW);
   if (!view)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   uint32_t range;
   if (pCreateInfo->range == VK_WHOLE_SIZE)
      range = buffer->size - pCreateInfo->offset;
   else
      range = pCreateInfo->range;

   enum pipe_format pipe_format = vk_format_to_pipe_format(pCreateInfo->format);
   uint32_t num_elements = range / util_format_get_blocksize(pipe_format);

   view->buffer = buffer;
   view->offset = pCreateInfo->offset;
   view->size = view->offset + range;
   view->num_elements = num_elements;
   view->vk_format = pCreateInfo->format;
   view->format = v3dv_X(device, get_format)(view->vk_format);

   v3dv_X(device, get_internal_type_bpp_for_output_format)
      (view->format->rt_type, &view->internal_type, &view->internal_bpp);

   if (buffer->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT ||
       buffer->usage & VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT)
      v3dv_X(device, pack_texture_shader_state_from_buffer_view)(device, view);

   *pView = v3dv_buffer_view_to_handle(view);

   return VK_SUCCESS;
}

VKAPI_ATTR void VKAPI_CALL
v3dv_DestroyBufferView(VkDevice _device,
                       VkBufferView bufferView,
                       const VkAllocationCallbacks *pAllocator)
{
   V3DV_FROM_HANDLE(v3dv_device, device, _device);
   V3DV_FROM_HANDLE(v3dv_buffer_view, buffer_view, bufferView);

   if (buffer_view == NULL)
      return;

   vk_object_free(&device->vk, pAllocator, buffer_view);
}