/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
26b8e80941Smrg */ 27b8e80941Smrg 28b8e80941Smrg#include "radv_debug.h" 29b8e80941Smrg#include "radv_private.h" 30b8e80941Smrg#include "vk_format.h" 31b8e80941Smrg#include "vk_util.h" 32b8e80941Smrg#include "radv_radeon_winsys.h" 33b8e80941Smrg#include "sid.h" 34b8e80941Smrg#include "gfx9d.h" 35b8e80941Smrg#include "util/debug.h" 36b8e80941Smrg#include "util/u_atomic.h" 37b8e80941Smrgstatic unsigned 38b8e80941Smrgradv_choose_tiling(struct radv_device *device, 39b8e80941Smrg const struct radv_image_create_info *create_info) 40b8e80941Smrg{ 41b8e80941Smrg const VkImageCreateInfo *pCreateInfo = create_info->vk_info; 42b8e80941Smrg 43b8e80941Smrg if (pCreateInfo->tiling == VK_IMAGE_TILING_LINEAR) { 44b8e80941Smrg assert(pCreateInfo->samples <= 1); 45b8e80941Smrg return RADEON_SURF_MODE_LINEAR_ALIGNED; 46b8e80941Smrg } 47b8e80941Smrg 48b8e80941Smrg if (!vk_format_is_compressed(pCreateInfo->format) && 49b8e80941Smrg !vk_format_is_depth_or_stencil(pCreateInfo->format) 50b8e80941Smrg && device->physical_device->rad_info.chip_class <= VI) { 51b8e80941Smrg /* this causes hangs in some VK CTS tests on GFX9. */ 52b8e80941Smrg /* Textures with a very small height are recommended to be linear. */ 53b8e80941Smrg if (pCreateInfo->imageType == VK_IMAGE_TYPE_1D || 54b8e80941Smrg /* Only very thin and long 2D textures should benefit from 55b8e80941Smrg * linear_aligned. */ 56b8e80941Smrg (pCreateInfo->extent.width > 8 && pCreateInfo->extent.height <= 2)) 57b8e80941Smrg return RADEON_SURF_MODE_LINEAR_ALIGNED; 58b8e80941Smrg } 59b8e80941Smrg 60b8e80941Smrg /* MSAA resources must be 2D tiled. 
*/ 61b8e80941Smrg if (pCreateInfo->samples > 1) 62b8e80941Smrg return RADEON_SURF_MODE_2D; 63b8e80941Smrg 64b8e80941Smrg return RADEON_SURF_MODE_2D; 65b8e80941Smrg} 66b8e80941Smrg 67b8e80941Smrgstatic bool 68b8e80941Smrgradv_use_tc_compat_htile_for_image(struct radv_device *device, 69b8e80941Smrg const VkImageCreateInfo *pCreateInfo) 70b8e80941Smrg{ 71b8e80941Smrg /* TC-compat HTILE is only available for GFX8+. */ 72b8e80941Smrg if (device->physical_device->rad_info.chip_class < VI) 73b8e80941Smrg return false; 74b8e80941Smrg 75b8e80941Smrg if ((pCreateInfo->usage & VK_IMAGE_USAGE_STORAGE_BIT) || 76b8e80941Smrg (pCreateInfo->flags & VK_IMAGE_CREATE_EXTENDED_USAGE_BIT)) 77b8e80941Smrg return false; 78b8e80941Smrg 79b8e80941Smrg if (pCreateInfo->tiling == VK_IMAGE_TILING_LINEAR) 80b8e80941Smrg return false; 81b8e80941Smrg 82b8e80941Smrg if (pCreateInfo->mipLevels > 1) 83b8e80941Smrg return false; 84b8e80941Smrg 85b8e80941Smrg /* FIXME: for some reason TC compat with 2/4/8 samples breaks some cts 86b8e80941Smrg * tests - disable for now */ 87b8e80941Smrg if (pCreateInfo->samples >= 2 && 88b8e80941Smrg pCreateInfo->format == VK_FORMAT_D32_SFLOAT_S8_UINT) 89b8e80941Smrg return false; 90b8e80941Smrg 91b8e80941Smrg /* GFX9 supports both 32-bit and 16-bit depth surfaces, while GFX8 only 92b8e80941Smrg * supports 32-bit. Though, it's possible to enable TC-compat for 93b8e80941Smrg * 16-bit depth surfaces if no Z planes are compressed. 
94b8e80941Smrg */ 95b8e80941Smrg if (pCreateInfo->format != VK_FORMAT_D32_SFLOAT_S8_UINT && 96b8e80941Smrg pCreateInfo->format != VK_FORMAT_D32_SFLOAT && 97b8e80941Smrg pCreateInfo->format != VK_FORMAT_D16_UNORM) 98b8e80941Smrg return false; 99b8e80941Smrg 100b8e80941Smrg if (pCreateInfo->flags & VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT) { 101b8e80941Smrg const struct VkImageFormatListCreateInfoKHR *format_list = 102b8e80941Smrg (const struct VkImageFormatListCreateInfoKHR *) 103b8e80941Smrg vk_find_struct_const(pCreateInfo->pNext, 104b8e80941Smrg IMAGE_FORMAT_LIST_CREATE_INFO_KHR); 105b8e80941Smrg 106b8e80941Smrg /* We have to ignore the existence of the list if viewFormatCount = 0 */ 107b8e80941Smrg if (format_list && format_list->viewFormatCount) { 108b8e80941Smrg /* compatibility is transitive, so we only need to check 109b8e80941Smrg * one format with everything else. 110b8e80941Smrg */ 111b8e80941Smrg for (unsigned i = 0; i < format_list->viewFormatCount; ++i) { 112b8e80941Smrg if (pCreateInfo->format != format_list->pViewFormats[i]) 113b8e80941Smrg return false; 114b8e80941Smrg } 115b8e80941Smrg } else { 116b8e80941Smrg return false; 117b8e80941Smrg } 118b8e80941Smrg } 119b8e80941Smrg 120b8e80941Smrg return true; 121b8e80941Smrg} 122b8e80941Smrg 123b8e80941Smrgstatic bool 124b8e80941Smrgradv_use_dcc_for_image(struct radv_device *device, 125b8e80941Smrg const struct radv_image *image, 126b8e80941Smrg const struct radv_image_create_info *create_info, 127b8e80941Smrg const VkImageCreateInfo *pCreateInfo) 128b8e80941Smrg{ 129b8e80941Smrg bool dcc_compatible_formats; 130b8e80941Smrg bool blendable; 131b8e80941Smrg 132b8e80941Smrg /* DCC (Delta Color Compression) is only available for GFX8+. 
*/ 133b8e80941Smrg if (device->physical_device->rad_info.chip_class < VI) 134b8e80941Smrg return false; 135b8e80941Smrg 136b8e80941Smrg if (device->instance->debug_flags & RADV_DEBUG_NO_DCC) 137b8e80941Smrg return false; 138b8e80941Smrg 139b8e80941Smrg /* FIXME: DCC is broken for shareable images starting with GFX9 */ 140b8e80941Smrg if (device->physical_device->rad_info.chip_class >= GFX9 && 141b8e80941Smrg image->shareable) 142b8e80941Smrg return false; 143b8e80941Smrg 144b8e80941Smrg /* TODO: Enable DCC for storage images. */ 145b8e80941Smrg if ((pCreateInfo->usage & VK_IMAGE_USAGE_STORAGE_BIT) || 146b8e80941Smrg (pCreateInfo->flags & VK_IMAGE_CREATE_EXTENDED_USAGE_BIT)) 147b8e80941Smrg return false; 148b8e80941Smrg 149b8e80941Smrg if (pCreateInfo->tiling == VK_IMAGE_TILING_LINEAR) 150b8e80941Smrg return false; 151b8e80941Smrg 152b8e80941Smrg if (vk_format_is_subsampled(pCreateInfo->format) || 153b8e80941Smrg vk_format_get_plane_count(pCreateInfo->format) > 1) 154b8e80941Smrg return false; 155b8e80941Smrg 156b8e80941Smrg /* TODO: Enable DCC for mipmaps and array layers. */ 157b8e80941Smrg if (pCreateInfo->mipLevels > 1 || pCreateInfo->arrayLayers > 1) 158b8e80941Smrg return false; 159b8e80941Smrg 160b8e80941Smrg if (create_info->scanout) 161b8e80941Smrg return false; 162b8e80941Smrg 163b8e80941Smrg /* FIXME: DCC for MSAA with 4x and 8x samples doesn't work yet, while 164b8e80941Smrg * 2x can be enabled with an option. 165b8e80941Smrg */ 166b8e80941Smrg if (pCreateInfo->samples > 2 || 167b8e80941Smrg (pCreateInfo->samples == 2 && 168b8e80941Smrg !device->physical_device->dcc_msaa_allowed)) 169b8e80941Smrg return false; 170b8e80941Smrg 171b8e80941Smrg /* Determine if the formats are DCC compatible. 
*/ 172b8e80941Smrg dcc_compatible_formats = 173b8e80941Smrg radv_is_colorbuffer_format_supported(pCreateInfo->format, 174b8e80941Smrg &blendable); 175b8e80941Smrg 176b8e80941Smrg if (pCreateInfo->flags & VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT) { 177b8e80941Smrg const struct VkImageFormatListCreateInfoKHR *format_list = 178b8e80941Smrg (const struct VkImageFormatListCreateInfoKHR *) 179b8e80941Smrg vk_find_struct_const(pCreateInfo->pNext, 180b8e80941Smrg IMAGE_FORMAT_LIST_CREATE_INFO_KHR); 181b8e80941Smrg 182b8e80941Smrg /* We have to ignore the existence of the list if viewFormatCount = 0 */ 183b8e80941Smrg if (format_list && format_list->viewFormatCount) { 184b8e80941Smrg /* compatibility is transitive, so we only need to check 185b8e80941Smrg * one format with everything else. */ 186b8e80941Smrg for (unsigned i = 0; i < format_list->viewFormatCount; ++i) { 187b8e80941Smrg if (!radv_dcc_formats_compatible(pCreateInfo->format, 188b8e80941Smrg format_list->pViewFormats[i])) 189b8e80941Smrg dcc_compatible_formats = false; 190b8e80941Smrg } 191b8e80941Smrg } else { 192b8e80941Smrg dcc_compatible_formats = false; 193b8e80941Smrg } 194b8e80941Smrg } 195b8e80941Smrg 196b8e80941Smrg if (!dcc_compatible_formats) 197b8e80941Smrg return false; 198b8e80941Smrg 199b8e80941Smrg return true; 200b8e80941Smrg} 201b8e80941Smrg 202b8e80941Smrgstatic int 203b8e80941Smrgradv_init_surface(struct radv_device *device, 204b8e80941Smrg const struct radv_image *image, 205b8e80941Smrg struct radeon_surf *surface, 206b8e80941Smrg unsigned plane_id, 207b8e80941Smrg const struct radv_image_create_info *create_info) 208b8e80941Smrg{ 209b8e80941Smrg const VkImageCreateInfo *pCreateInfo = create_info->vk_info; 210b8e80941Smrg unsigned array_mode = radv_choose_tiling(device, create_info); 211b8e80941Smrg VkFormat format = vk_format_get_plane_format(pCreateInfo->format, plane_id); 212b8e80941Smrg const struct vk_format_description *desc = vk_format_description(format); 213b8e80941Smrg bool is_depth, 
is_stencil; 214b8e80941Smrg 215b8e80941Smrg is_depth = vk_format_has_depth(desc); 216b8e80941Smrg is_stencil = vk_format_has_stencil(desc); 217b8e80941Smrg 218b8e80941Smrg surface->blk_w = vk_format_get_blockwidth(format); 219b8e80941Smrg surface->blk_h = vk_format_get_blockheight(format); 220b8e80941Smrg 221b8e80941Smrg surface->bpe = vk_format_get_blocksize(vk_format_depth_only(format)); 222b8e80941Smrg /* align byte per element on dword */ 223b8e80941Smrg if (surface->bpe == 3) { 224b8e80941Smrg surface->bpe = 4; 225b8e80941Smrg } 226b8e80941Smrg surface->flags = RADEON_SURF_SET(array_mode, MODE); 227b8e80941Smrg 228b8e80941Smrg switch (pCreateInfo->imageType){ 229b8e80941Smrg case VK_IMAGE_TYPE_1D: 230b8e80941Smrg if (pCreateInfo->arrayLayers > 1) 231b8e80941Smrg surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_1D_ARRAY, TYPE); 232b8e80941Smrg else 233b8e80941Smrg surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_1D, TYPE); 234b8e80941Smrg break; 235b8e80941Smrg case VK_IMAGE_TYPE_2D: 236b8e80941Smrg if (pCreateInfo->arrayLayers > 1) 237b8e80941Smrg surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_2D_ARRAY, TYPE); 238b8e80941Smrg else 239b8e80941Smrg surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_2D, TYPE); 240b8e80941Smrg break; 241b8e80941Smrg case VK_IMAGE_TYPE_3D: 242b8e80941Smrg surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_3D, TYPE); 243b8e80941Smrg break; 244b8e80941Smrg default: 245b8e80941Smrg unreachable("unhandled image type"); 246b8e80941Smrg } 247b8e80941Smrg 248b8e80941Smrg if (is_depth) { 249b8e80941Smrg surface->flags |= RADEON_SURF_ZBUFFER; 250b8e80941Smrg if (radv_use_tc_compat_htile_for_image(device, pCreateInfo)) 251b8e80941Smrg surface->flags |= RADEON_SURF_TC_COMPATIBLE_HTILE; 252b8e80941Smrg } 253b8e80941Smrg 254b8e80941Smrg if (is_stencil) 255b8e80941Smrg surface->flags |= RADEON_SURF_SBUFFER; 256b8e80941Smrg 257b8e80941Smrg if (device->physical_device->rad_info.chip_class >= GFX9 && 258b8e80941Smrg pCreateInfo->imageType 
== VK_IMAGE_TYPE_3D && 259b8e80941Smrg vk_format_get_blocksizebits(pCreateInfo->format) == 128 && 260b8e80941Smrg vk_format_is_compressed(pCreateInfo->format)) 261b8e80941Smrg surface->flags |= RADEON_SURF_NO_RENDER_TARGET; 262b8e80941Smrg 263b8e80941Smrg surface->flags |= RADEON_SURF_OPTIMIZE_FOR_SPACE; 264b8e80941Smrg 265b8e80941Smrg if (!radv_use_dcc_for_image(device, image, create_info, pCreateInfo)) 266b8e80941Smrg surface->flags |= RADEON_SURF_DISABLE_DCC; 267b8e80941Smrg 268b8e80941Smrg if (create_info->scanout) 269b8e80941Smrg surface->flags |= RADEON_SURF_SCANOUT; 270b8e80941Smrg return 0; 271b8e80941Smrg} 272b8e80941Smrg 273b8e80941Smrgstatic uint32_t si_get_bo_metadata_word1(struct radv_device *device) 274b8e80941Smrg{ 275b8e80941Smrg return (ATI_VENDOR_ID << 16) | device->physical_device->rad_info.pci_id; 276b8e80941Smrg} 277b8e80941Smrg 278b8e80941Smrgstatic inline unsigned 279b8e80941Smrgsi_tile_mode_index(const struct radv_image_plane *plane, unsigned level, bool stencil) 280b8e80941Smrg{ 281b8e80941Smrg if (stencil) 282b8e80941Smrg return plane->surface.u.legacy.stencil_tiling_index[level]; 283b8e80941Smrg else 284b8e80941Smrg return plane->surface.u.legacy.tiling_index[level]; 285b8e80941Smrg} 286b8e80941Smrg 287b8e80941Smrgstatic unsigned radv_map_swizzle(unsigned swizzle) 288b8e80941Smrg{ 289b8e80941Smrg switch (swizzle) { 290b8e80941Smrg case VK_SWIZZLE_Y: 291b8e80941Smrg return V_008F0C_SQ_SEL_Y; 292b8e80941Smrg case VK_SWIZZLE_Z: 293b8e80941Smrg return V_008F0C_SQ_SEL_Z; 294b8e80941Smrg case VK_SWIZZLE_W: 295b8e80941Smrg return V_008F0C_SQ_SEL_W; 296b8e80941Smrg case VK_SWIZZLE_0: 297b8e80941Smrg return V_008F0C_SQ_SEL_0; 298b8e80941Smrg case VK_SWIZZLE_1: 299b8e80941Smrg return V_008F0C_SQ_SEL_1; 300b8e80941Smrg default: /* VK_SWIZZLE_X */ 301b8e80941Smrg return V_008F0C_SQ_SEL_X; 302b8e80941Smrg } 303b8e80941Smrg} 304b8e80941Smrg 305b8e80941Smrgstatic void 306b8e80941Smrgradv_make_buffer_descriptor(struct radv_device *device, 307b8e80941Smrg 
struct radv_buffer *buffer, 308b8e80941Smrg VkFormat vk_format, 309b8e80941Smrg unsigned offset, 310b8e80941Smrg unsigned range, 311b8e80941Smrg uint32_t *state) 312b8e80941Smrg{ 313b8e80941Smrg const struct vk_format_description *desc; 314b8e80941Smrg unsigned stride; 315b8e80941Smrg uint64_t gpu_address = radv_buffer_get_va(buffer->bo); 316b8e80941Smrg uint64_t va = gpu_address + buffer->offset; 317b8e80941Smrg unsigned num_format, data_format; 318b8e80941Smrg int first_non_void; 319b8e80941Smrg desc = vk_format_description(vk_format); 320b8e80941Smrg first_non_void = vk_format_get_first_non_void_channel(vk_format); 321b8e80941Smrg stride = desc->block.bits / 8; 322b8e80941Smrg 323b8e80941Smrg num_format = radv_translate_buffer_numformat(desc, first_non_void); 324b8e80941Smrg data_format = radv_translate_buffer_dataformat(desc, first_non_void); 325b8e80941Smrg 326b8e80941Smrg va += offset; 327b8e80941Smrg state[0] = va; 328b8e80941Smrg state[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) | 329b8e80941Smrg S_008F04_STRIDE(stride); 330b8e80941Smrg 331b8e80941Smrg if (device->physical_device->rad_info.chip_class != VI && stride) { 332b8e80941Smrg range /= stride; 333b8e80941Smrg } 334b8e80941Smrg 335b8e80941Smrg state[2] = range; 336b8e80941Smrg state[3] = S_008F0C_DST_SEL_X(radv_map_swizzle(desc->swizzle[0])) | 337b8e80941Smrg S_008F0C_DST_SEL_Y(radv_map_swizzle(desc->swizzle[1])) | 338b8e80941Smrg S_008F0C_DST_SEL_Z(radv_map_swizzle(desc->swizzle[2])) | 339b8e80941Smrg S_008F0C_DST_SEL_W(radv_map_swizzle(desc->swizzle[3])) | 340b8e80941Smrg S_008F0C_NUM_FORMAT(num_format) | 341b8e80941Smrg S_008F0C_DATA_FORMAT(data_format); 342b8e80941Smrg} 343b8e80941Smrg 344b8e80941Smrgstatic void 345b8e80941Smrgsi_set_mutable_tex_desc_fields(struct radv_device *device, 346b8e80941Smrg struct radv_image *image, 347b8e80941Smrg const struct legacy_surf_level *base_level_info, 348b8e80941Smrg unsigned plane_id, 349b8e80941Smrg unsigned base_level, unsigned first_level, 350b8e80941Smrg 
unsigned block_width, bool is_stencil, 351b8e80941Smrg bool is_storage_image, uint32_t *state) 352b8e80941Smrg{ 353b8e80941Smrg struct radv_image_plane *plane = &image->planes[plane_id]; 354b8e80941Smrg uint64_t gpu_address = image->bo ? radv_buffer_get_va(image->bo) + image->offset : 0; 355b8e80941Smrg uint64_t va = gpu_address + plane->offset; 356b8e80941Smrg enum chip_class chip_class = device->physical_device->rad_info.chip_class; 357b8e80941Smrg uint64_t meta_va = 0; 358b8e80941Smrg if (chip_class >= GFX9) { 359b8e80941Smrg if (is_stencil) 360b8e80941Smrg va += plane->surface.u.gfx9.stencil_offset; 361b8e80941Smrg else 362b8e80941Smrg va += plane->surface.u.gfx9.surf_offset; 363b8e80941Smrg } else 364b8e80941Smrg va += base_level_info->offset; 365b8e80941Smrg 366b8e80941Smrg state[0] = va >> 8; 367b8e80941Smrg if (chip_class >= GFX9 || 368b8e80941Smrg base_level_info->mode == RADEON_SURF_MODE_2D) 369b8e80941Smrg state[0] |= plane->surface.tile_swizzle; 370b8e80941Smrg state[1] &= C_008F14_BASE_ADDRESS_HI; 371b8e80941Smrg state[1] |= S_008F14_BASE_ADDRESS_HI(va >> 40); 372b8e80941Smrg 373b8e80941Smrg if (chip_class >= VI) { 374b8e80941Smrg state[6] &= C_008F28_COMPRESSION_EN; 375b8e80941Smrg state[7] = 0; 376b8e80941Smrg if (!is_storage_image && radv_dcc_enabled(image, first_level)) { 377b8e80941Smrg meta_va = gpu_address + image->dcc_offset; 378b8e80941Smrg if (chip_class <= VI) 379b8e80941Smrg meta_va += base_level_info->dcc_offset; 380b8e80941Smrg } else if (!is_storage_image && 381b8e80941Smrg radv_image_is_tc_compat_htile(image)) { 382b8e80941Smrg meta_va = gpu_address + image->htile_offset; 383b8e80941Smrg } 384b8e80941Smrg 385b8e80941Smrg if (meta_va) { 386b8e80941Smrg state[6] |= S_008F28_COMPRESSION_EN(1); 387b8e80941Smrg state[7] = meta_va >> 8; 388b8e80941Smrg state[7] |= plane->surface.tile_swizzle; 389b8e80941Smrg } 390b8e80941Smrg } 391b8e80941Smrg 392b8e80941Smrg if (chip_class >= GFX9) { 393b8e80941Smrg state[3] &= C_008F1C_SW_MODE; 
394b8e80941Smrg state[4] &= C_008F20_PITCH_GFX9; 395b8e80941Smrg 396b8e80941Smrg if (is_stencil) { 397b8e80941Smrg state[3] |= S_008F1C_SW_MODE(plane->surface.u.gfx9.stencil.swizzle_mode); 398b8e80941Smrg state[4] |= S_008F20_PITCH_GFX9(plane->surface.u.gfx9.stencil.epitch); 399b8e80941Smrg } else { 400b8e80941Smrg state[3] |= S_008F1C_SW_MODE(plane->surface.u.gfx9.surf.swizzle_mode); 401b8e80941Smrg state[4] |= S_008F20_PITCH_GFX9(plane->surface.u.gfx9.surf.epitch); 402b8e80941Smrg } 403b8e80941Smrg 404b8e80941Smrg state[5] &= C_008F24_META_DATA_ADDRESS & 405b8e80941Smrg C_008F24_META_PIPE_ALIGNED & 406b8e80941Smrg C_008F24_META_RB_ALIGNED; 407b8e80941Smrg if (meta_va) { 408b8e80941Smrg struct gfx9_surf_meta_flags meta; 409b8e80941Smrg 410b8e80941Smrg if (image->dcc_offset) 411b8e80941Smrg meta = plane->surface.u.gfx9.dcc; 412b8e80941Smrg else 413b8e80941Smrg meta = plane->surface.u.gfx9.htile; 414b8e80941Smrg 415b8e80941Smrg state[5] |= S_008F24_META_DATA_ADDRESS(meta_va >> 40) | 416b8e80941Smrg S_008F24_META_PIPE_ALIGNED(meta.pipe_aligned) | 417b8e80941Smrg S_008F24_META_RB_ALIGNED(meta.rb_aligned); 418b8e80941Smrg } 419b8e80941Smrg } else { 420b8e80941Smrg /* SI-CI-VI */ 421b8e80941Smrg unsigned pitch = base_level_info->nblk_x * block_width; 422b8e80941Smrg unsigned index = si_tile_mode_index(plane, base_level, is_stencil); 423b8e80941Smrg 424b8e80941Smrg state[3] &= C_008F1C_TILING_INDEX; 425b8e80941Smrg state[3] |= S_008F1C_TILING_INDEX(index); 426b8e80941Smrg state[4] &= C_008F20_PITCH_GFX6; 427b8e80941Smrg state[4] |= S_008F20_PITCH_GFX6(pitch - 1); 428b8e80941Smrg } 429b8e80941Smrg} 430b8e80941Smrg 431b8e80941Smrgstatic unsigned radv_tex_dim(VkImageType image_type, VkImageViewType view_type, 432b8e80941Smrg unsigned nr_layers, unsigned nr_samples, bool is_storage_image, bool gfx9) 433b8e80941Smrg{ 434b8e80941Smrg if (view_type == VK_IMAGE_VIEW_TYPE_CUBE || view_type == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) 435b8e80941Smrg return is_storage_image ? 
V_008F1C_SQ_RSRC_IMG_2D_ARRAY : V_008F1C_SQ_RSRC_IMG_CUBE; 436b8e80941Smrg 437b8e80941Smrg /* GFX9 allocates 1D textures as 2D. */ 438b8e80941Smrg if (gfx9 && image_type == VK_IMAGE_TYPE_1D) 439b8e80941Smrg image_type = VK_IMAGE_TYPE_2D; 440b8e80941Smrg switch (image_type) { 441b8e80941Smrg case VK_IMAGE_TYPE_1D: 442b8e80941Smrg return nr_layers > 1 ? V_008F1C_SQ_RSRC_IMG_1D_ARRAY : V_008F1C_SQ_RSRC_IMG_1D; 443b8e80941Smrg case VK_IMAGE_TYPE_2D: 444b8e80941Smrg if (nr_samples > 1) 445b8e80941Smrg return nr_layers > 1 ? V_008F1C_SQ_RSRC_IMG_2D_MSAA_ARRAY : V_008F1C_SQ_RSRC_IMG_2D_MSAA; 446b8e80941Smrg else 447b8e80941Smrg return nr_layers > 1 ? V_008F1C_SQ_RSRC_IMG_2D_ARRAY : V_008F1C_SQ_RSRC_IMG_2D; 448b8e80941Smrg case VK_IMAGE_TYPE_3D: 449b8e80941Smrg if (view_type == VK_IMAGE_VIEW_TYPE_3D) 450b8e80941Smrg return V_008F1C_SQ_RSRC_IMG_3D; 451b8e80941Smrg else 452b8e80941Smrg return V_008F1C_SQ_RSRC_IMG_2D_ARRAY; 453b8e80941Smrg default: 454b8e80941Smrg unreachable("illegal image type"); 455b8e80941Smrg } 456b8e80941Smrg} 457b8e80941Smrg 458b8e80941Smrgstatic unsigned gfx9_border_color_swizzle(const enum vk_swizzle swizzle[4]) 459b8e80941Smrg{ 460b8e80941Smrg unsigned bc_swizzle = V_008F20_BC_SWIZZLE_XYZW; 461b8e80941Smrg 462b8e80941Smrg if (swizzle[3] == VK_SWIZZLE_X) { 463b8e80941Smrg /* For the pre-defined border color values (white, opaque 464b8e80941Smrg * black, transparent black), the only thing that matters is 465b8e80941Smrg * that the alpha channel winds up in the correct place 466b8e80941Smrg * (because the RGB channels are all the same) so either of 467b8e80941Smrg * these enumerations will work. 
468b8e80941Smrg */ 469b8e80941Smrg if (swizzle[2] == VK_SWIZZLE_Y) 470b8e80941Smrg bc_swizzle = V_008F20_BC_SWIZZLE_WZYX; 471b8e80941Smrg else 472b8e80941Smrg bc_swizzle = V_008F20_BC_SWIZZLE_WXYZ; 473b8e80941Smrg } else if (swizzle[0] == VK_SWIZZLE_X) { 474b8e80941Smrg if (swizzle[1] == VK_SWIZZLE_Y) 475b8e80941Smrg bc_swizzle = V_008F20_BC_SWIZZLE_XYZW; 476b8e80941Smrg else 477b8e80941Smrg bc_swizzle = V_008F20_BC_SWIZZLE_XWYZ; 478b8e80941Smrg } else if (swizzle[1] == VK_SWIZZLE_X) { 479b8e80941Smrg bc_swizzle = V_008F20_BC_SWIZZLE_YXWZ; 480b8e80941Smrg } else if (swizzle[2] == VK_SWIZZLE_X) { 481b8e80941Smrg bc_swizzle = V_008F20_BC_SWIZZLE_ZYXW; 482b8e80941Smrg } 483b8e80941Smrg 484b8e80941Smrg return bc_swizzle; 485b8e80941Smrg} 486b8e80941Smrg 487b8e80941Smrg/** 488b8e80941Smrg * Build the sampler view descriptor for a texture. 489b8e80941Smrg */ 490b8e80941Smrgstatic void 491b8e80941Smrgsi_make_texture_descriptor(struct radv_device *device, 492b8e80941Smrg struct radv_image *image, 493b8e80941Smrg bool is_storage_image, 494b8e80941Smrg VkImageViewType view_type, 495b8e80941Smrg VkFormat vk_format, 496b8e80941Smrg const VkComponentMapping *mapping, 497b8e80941Smrg unsigned first_level, unsigned last_level, 498b8e80941Smrg unsigned first_layer, unsigned last_layer, 499b8e80941Smrg unsigned width, unsigned height, unsigned depth, 500b8e80941Smrg uint32_t *state, 501b8e80941Smrg uint32_t *fmask_state) 502b8e80941Smrg{ 503b8e80941Smrg const struct vk_format_description *desc; 504b8e80941Smrg enum vk_swizzle swizzle[4]; 505b8e80941Smrg int first_non_void; 506b8e80941Smrg unsigned num_format, data_format, type; 507b8e80941Smrg 508b8e80941Smrg desc = vk_format_description(vk_format); 509b8e80941Smrg 510b8e80941Smrg if (desc->colorspace == VK_FORMAT_COLORSPACE_ZS) { 511b8e80941Smrg const unsigned char swizzle_xxxx[4] = {0, 0, 0, 0}; 512b8e80941Smrg vk_format_compose_swizzles(mapping, swizzle_xxxx, swizzle); 513b8e80941Smrg } else { 514b8e80941Smrg 
vk_format_compose_swizzles(mapping, desc->swizzle, swizzle); 515b8e80941Smrg } 516b8e80941Smrg 517b8e80941Smrg first_non_void = vk_format_get_first_non_void_channel(vk_format); 518b8e80941Smrg 519b8e80941Smrg num_format = radv_translate_tex_numformat(vk_format, desc, first_non_void); 520b8e80941Smrg if (num_format == ~0) { 521b8e80941Smrg num_format = 0; 522b8e80941Smrg } 523b8e80941Smrg 524b8e80941Smrg data_format = radv_translate_tex_dataformat(vk_format, desc, first_non_void); 525b8e80941Smrg if (data_format == ~0) { 526b8e80941Smrg data_format = 0; 527b8e80941Smrg } 528b8e80941Smrg 529b8e80941Smrg /* S8 with either Z16 or Z32 HTILE need a special format. */ 530b8e80941Smrg if (device->physical_device->rad_info.chip_class >= GFX9 && 531b8e80941Smrg vk_format == VK_FORMAT_S8_UINT && 532b8e80941Smrg radv_image_is_tc_compat_htile(image)) { 533b8e80941Smrg if (image->vk_format == VK_FORMAT_D32_SFLOAT_S8_UINT) 534b8e80941Smrg data_format = V_008F14_IMG_DATA_FORMAT_S8_32; 535b8e80941Smrg else if (image->vk_format == VK_FORMAT_D16_UNORM_S8_UINT) 536b8e80941Smrg data_format = V_008F14_IMG_DATA_FORMAT_S8_16; 537b8e80941Smrg } 538b8e80941Smrg type = radv_tex_dim(image->type, view_type, image->info.array_size, image->info.samples, 539b8e80941Smrg is_storage_image, device->physical_device->rad_info.chip_class >= GFX9); 540b8e80941Smrg if (type == V_008F1C_SQ_RSRC_IMG_1D_ARRAY) { 541b8e80941Smrg height = 1; 542b8e80941Smrg depth = image->info.array_size; 543b8e80941Smrg } else if (type == V_008F1C_SQ_RSRC_IMG_2D_ARRAY || 544b8e80941Smrg type == V_008F1C_SQ_RSRC_IMG_2D_MSAA_ARRAY) { 545b8e80941Smrg if (view_type != VK_IMAGE_VIEW_TYPE_3D) 546b8e80941Smrg depth = image->info.array_size; 547b8e80941Smrg } else if (type == V_008F1C_SQ_RSRC_IMG_CUBE) 548b8e80941Smrg depth = image->info.array_size / 6; 549b8e80941Smrg 550b8e80941Smrg state[0] = 0; 551b8e80941Smrg state[1] = (S_008F14_DATA_FORMAT_GFX6(data_format) | 552b8e80941Smrg S_008F14_NUM_FORMAT_GFX6(num_format)); 
553b8e80941Smrg state[2] = (S_008F18_WIDTH(width - 1) | 554b8e80941Smrg S_008F18_HEIGHT(height - 1) | 555b8e80941Smrg S_008F18_PERF_MOD(4)); 556b8e80941Smrg state[3] = (S_008F1C_DST_SEL_X(radv_map_swizzle(swizzle[0])) | 557b8e80941Smrg S_008F1C_DST_SEL_Y(radv_map_swizzle(swizzle[1])) | 558b8e80941Smrg S_008F1C_DST_SEL_Z(radv_map_swizzle(swizzle[2])) | 559b8e80941Smrg S_008F1C_DST_SEL_W(radv_map_swizzle(swizzle[3])) | 560b8e80941Smrg S_008F1C_BASE_LEVEL(image->info.samples > 1 ? 561b8e80941Smrg 0 : first_level) | 562b8e80941Smrg S_008F1C_LAST_LEVEL(image->info.samples > 1 ? 563b8e80941Smrg util_logbase2(image->info.samples) : 564b8e80941Smrg last_level) | 565b8e80941Smrg S_008F1C_TYPE(type)); 566b8e80941Smrg state[4] = 0; 567b8e80941Smrg state[5] = S_008F24_BASE_ARRAY(first_layer); 568b8e80941Smrg state[6] = 0; 569b8e80941Smrg state[7] = 0; 570b8e80941Smrg 571b8e80941Smrg if (device->physical_device->rad_info.chip_class >= GFX9) { 572b8e80941Smrg unsigned bc_swizzle = gfx9_border_color_swizzle(swizzle); 573b8e80941Smrg 574b8e80941Smrg /* Depth is the last accessible layer on Gfx9. 575b8e80941Smrg * The hw doesn't need to know the total number of layers. 576b8e80941Smrg */ 577b8e80941Smrg if (type == V_008F1C_SQ_RSRC_IMG_3D) 578b8e80941Smrg state[4] |= S_008F20_DEPTH(depth - 1); 579b8e80941Smrg else 580b8e80941Smrg state[4] |= S_008F20_DEPTH(last_layer); 581b8e80941Smrg 582b8e80941Smrg state[4] |= S_008F20_BC_SWIZZLE(bc_swizzle); 583b8e80941Smrg state[5] |= S_008F24_MAX_MIP(image->info.samples > 1 ? 
584b8e80941Smrg util_logbase2(image->info.samples) : 585b8e80941Smrg image->info.levels - 1); 586b8e80941Smrg } else { 587b8e80941Smrg state[3] |= S_008F1C_POW2_PAD(image->info.levels > 1); 588b8e80941Smrg state[4] |= S_008F20_DEPTH(depth - 1); 589b8e80941Smrg state[5] |= S_008F24_LAST_ARRAY(last_layer); 590b8e80941Smrg } 591b8e80941Smrg if (image->dcc_offset) { 592b8e80941Smrg unsigned swap = radv_translate_colorswap(vk_format, FALSE); 593b8e80941Smrg 594b8e80941Smrg state[6] = S_008F28_ALPHA_IS_ON_MSB(swap <= 1); 595b8e80941Smrg } else { 596b8e80941Smrg /* The last dword is unused by hw. The shader uses it to clear 597b8e80941Smrg * bits in the first dword of sampler state. 598b8e80941Smrg */ 599b8e80941Smrg if (device->physical_device->rad_info.chip_class <= CIK && image->info.samples <= 1) { 600b8e80941Smrg if (first_level == last_level) 601b8e80941Smrg state[7] = C_008F30_MAX_ANISO_RATIO; 602b8e80941Smrg else 603b8e80941Smrg state[7] = 0xffffffff; 604b8e80941Smrg } 605b8e80941Smrg } 606b8e80941Smrg 607b8e80941Smrg /* Initialize the sampler view for FMASK. 
*/ 608b8e80941Smrg if (radv_image_has_fmask(image)) { 609b8e80941Smrg uint32_t fmask_format, num_format; 610b8e80941Smrg uint64_t gpu_address = radv_buffer_get_va(image->bo); 611b8e80941Smrg uint64_t va; 612b8e80941Smrg 613b8e80941Smrg assert(image->plane_count == 1); 614b8e80941Smrg 615b8e80941Smrg va = gpu_address + image->offset + image->fmask.offset; 616b8e80941Smrg 617b8e80941Smrg if (device->physical_device->rad_info.chip_class >= GFX9) { 618b8e80941Smrg fmask_format = V_008F14_IMG_DATA_FORMAT_FMASK; 619b8e80941Smrg switch (image->info.samples) { 620b8e80941Smrg case 2: 621b8e80941Smrg num_format = V_008F14_IMG_FMASK_8_2_2; 622b8e80941Smrg break; 623b8e80941Smrg case 4: 624b8e80941Smrg num_format = V_008F14_IMG_FMASK_8_4_4; 625b8e80941Smrg break; 626b8e80941Smrg case 8: 627b8e80941Smrg num_format = V_008F14_IMG_FMASK_32_8_8; 628b8e80941Smrg break; 629b8e80941Smrg default: 630b8e80941Smrg unreachable("invalid nr_samples"); 631b8e80941Smrg } 632b8e80941Smrg } else { 633b8e80941Smrg switch (image->info.samples) { 634b8e80941Smrg case 2: 635b8e80941Smrg fmask_format = V_008F14_IMG_DATA_FORMAT_FMASK8_S2_F2; 636b8e80941Smrg break; 637b8e80941Smrg case 4: 638b8e80941Smrg fmask_format = V_008F14_IMG_DATA_FORMAT_FMASK8_S4_F4; 639b8e80941Smrg break; 640b8e80941Smrg case 8: 641b8e80941Smrg fmask_format = V_008F14_IMG_DATA_FORMAT_FMASK32_S8_F8; 642b8e80941Smrg break; 643b8e80941Smrg default: 644b8e80941Smrg assert(0); 645b8e80941Smrg fmask_format = V_008F14_IMG_DATA_FORMAT_INVALID; 646b8e80941Smrg } 647b8e80941Smrg num_format = V_008F14_IMG_NUM_FORMAT_UINT; 648b8e80941Smrg } 649b8e80941Smrg 650b8e80941Smrg fmask_state[0] = va >> 8; 651b8e80941Smrg fmask_state[0] |= image->fmask.tile_swizzle; 652b8e80941Smrg fmask_state[1] = S_008F14_BASE_ADDRESS_HI(va >> 40) | 653b8e80941Smrg S_008F14_DATA_FORMAT_GFX6(fmask_format) | 654b8e80941Smrg S_008F14_NUM_FORMAT_GFX6(num_format); 655b8e80941Smrg fmask_state[2] = S_008F18_WIDTH(width - 1) | 656b8e80941Smrg S_008F18_HEIGHT(height - 
1); 657b8e80941Smrg fmask_state[3] = S_008F1C_DST_SEL_X(V_008F1C_SQ_SEL_X) | 658b8e80941Smrg S_008F1C_DST_SEL_Y(V_008F1C_SQ_SEL_X) | 659b8e80941Smrg S_008F1C_DST_SEL_Z(V_008F1C_SQ_SEL_X) | 660b8e80941Smrg S_008F1C_DST_SEL_W(V_008F1C_SQ_SEL_X) | 661b8e80941Smrg S_008F1C_TYPE(radv_tex_dim(image->type, view_type, image->info.array_size, 0, false, false)); 662b8e80941Smrg fmask_state[4] = 0; 663b8e80941Smrg fmask_state[5] = S_008F24_BASE_ARRAY(first_layer); 664b8e80941Smrg fmask_state[6] = 0; 665b8e80941Smrg fmask_state[7] = 0; 666b8e80941Smrg 667b8e80941Smrg if (device->physical_device->rad_info.chip_class >= GFX9) { 668b8e80941Smrg fmask_state[3] |= S_008F1C_SW_MODE(image->planes[0].surface.u.gfx9.fmask.swizzle_mode); 669b8e80941Smrg fmask_state[4] |= S_008F20_DEPTH(last_layer) | 670b8e80941Smrg S_008F20_PITCH_GFX9(image->planes[0].surface.u.gfx9.fmask.epitch); 671b8e80941Smrg fmask_state[5] |= S_008F24_META_PIPE_ALIGNED(image->planes[0].surface.u.gfx9.cmask.pipe_aligned) | 672b8e80941Smrg S_008F24_META_RB_ALIGNED(image->planes[0].surface.u.gfx9.cmask.rb_aligned); 673b8e80941Smrg } else { 674b8e80941Smrg fmask_state[3] |= S_008F1C_TILING_INDEX(image->fmask.tile_mode_index); 675b8e80941Smrg fmask_state[4] |= S_008F20_DEPTH(depth - 1) | 676b8e80941Smrg S_008F20_PITCH_GFX6(image->fmask.pitch_in_pixels - 1); 677b8e80941Smrg fmask_state[5] |= S_008F24_LAST_ARRAY(last_layer); 678b8e80941Smrg } 679b8e80941Smrg } else if (fmask_state) 680b8e80941Smrg memset(fmask_state, 0, 8 * 4); 681b8e80941Smrg} 682b8e80941Smrg 683b8e80941Smrgstatic void 684b8e80941Smrgradv_query_opaque_metadata(struct radv_device *device, 685b8e80941Smrg struct radv_image *image, 686b8e80941Smrg struct radeon_bo_metadata *md) 687b8e80941Smrg{ 688b8e80941Smrg static const VkComponentMapping fixedmapping; 689b8e80941Smrg uint32_t desc[8], i; 690b8e80941Smrg 691b8e80941Smrg assert(image->plane_count == 1); 692b8e80941Smrg 693b8e80941Smrg /* Metadata image format format version 1: 694b8e80941Smrg * [0] = 1 
(metadata format identifier) 695b8e80941Smrg * [1] = (VENDOR_ID << 16) | PCI_ID 696b8e80941Smrg * [2:9] = image descriptor for the whole resource 697b8e80941Smrg * [2] is always 0, because the base address is cleared 698b8e80941Smrg * [9] is the DCC offset bits [39:8] from the beginning of 699b8e80941Smrg * the buffer 700b8e80941Smrg * [10:10+LAST_LEVEL] = mipmap level offset bits [39:8] for each level 701b8e80941Smrg */ 702b8e80941Smrg md->metadata[0] = 1; /* metadata image format version 1 */ 703b8e80941Smrg 704b8e80941Smrg /* TILE_MODE_INDEX is ambiguous without a PCI ID. */ 705b8e80941Smrg md->metadata[1] = si_get_bo_metadata_word1(device); 706b8e80941Smrg 707b8e80941Smrg 708b8e80941Smrg si_make_texture_descriptor(device, image, false, 709b8e80941Smrg (VkImageViewType)image->type, image->vk_format, 710b8e80941Smrg &fixedmapping, 0, image->info.levels - 1, 0, 711b8e80941Smrg image->info.array_size - 1, 712b8e80941Smrg image->info.width, image->info.height, 713b8e80941Smrg image->info.depth, 714b8e80941Smrg desc, NULL); 715b8e80941Smrg 716b8e80941Smrg si_set_mutable_tex_desc_fields(device, image, &image->planes[0].surface.u.legacy.level[0], 0, 0, 0, 717b8e80941Smrg image->planes[0].surface.blk_w, false, false, desc); 718b8e80941Smrg 719b8e80941Smrg /* Clear the base address and set the relative DCC offset. */ 720b8e80941Smrg desc[0] = 0; 721b8e80941Smrg desc[1] &= C_008F14_BASE_ADDRESS_HI; 722b8e80941Smrg desc[7] = image->dcc_offset >> 8; 723b8e80941Smrg 724b8e80941Smrg /* Dwords [2:9] contain the image descriptor. */ 725b8e80941Smrg memcpy(&md->metadata[2], desc, sizeof(desc)); 726b8e80941Smrg 727b8e80941Smrg /* Dwords [10:..] contain the mipmap level offsets. 
*/ 728b8e80941Smrg if (device->physical_device->rad_info.chip_class <= VI) { 729b8e80941Smrg for (i = 0; i <= image->info.levels - 1; i++) 730b8e80941Smrg md->metadata[10+i] = image->planes[0].surface.u.legacy.level[i].offset >> 8; 731b8e80941Smrg md->size_metadata = (11 + image->info.levels - 1) * 4; 732b8e80941Smrg } else 733b8e80941Smrg md->size_metadata = 10 * 4; 734b8e80941Smrg} 735b8e80941Smrg 736b8e80941Smrgvoid 737b8e80941Smrgradv_init_metadata(struct radv_device *device, 738b8e80941Smrg struct radv_image *image, 739b8e80941Smrg struct radeon_bo_metadata *metadata) 740b8e80941Smrg{ 741b8e80941Smrg struct radeon_surf *surface = &image->planes[0].surface; 742b8e80941Smrg 743b8e80941Smrg memset(metadata, 0, sizeof(*metadata)); 744b8e80941Smrg 745b8e80941Smrg if (device->physical_device->rad_info.chip_class >= GFX9) { 746b8e80941Smrg metadata->u.gfx9.swizzle_mode = surface->u.gfx9.surf.swizzle_mode; 747b8e80941Smrg } else { 748b8e80941Smrg metadata->u.legacy.microtile = surface->u.legacy.level[0].mode >= RADEON_SURF_MODE_1D ? 749b8e80941Smrg RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR; 750b8e80941Smrg metadata->u.legacy.macrotile = surface->u.legacy.level[0].mode >= RADEON_SURF_MODE_2D ? 
751b8e80941Smrg RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR; 752b8e80941Smrg metadata->u.legacy.pipe_config = surface->u.legacy.pipe_config; 753b8e80941Smrg metadata->u.legacy.bankw = surface->u.legacy.bankw; 754b8e80941Smrg metadata->u.legacy.bankh = surface->u.legacy.bankh; 755b8e80941Smrg metadata->u.legacy.tile_split = surface->u.legacy.tile_split; 756b8e80941Smrg metadata->u.legacy.mtilea = surface->u.legacy.mtilea; 757b8e80941Smrg metadata->u.legacy.num_banks = surface->u.legacy.num_banks; 758b8e80941Smrg metadata->u.legacy.stride = surface->u.legacy.level[0].nblk_x * surface->bpe; 759b8e80941Smrg metadata->u.legacy.scanout = (surface->flags & RADEON_SURF_SCANOUT) != 0; 760b8e80941Smrg } 761b8e80941Smrg radv_query_opaque_metadata(device, image, metadata); 762b8e80941Smrg} 763b8e80941Smrg 764b8e80941Smrg/* The number of samples can be specified independently of the texture. */ 765b8e80941Smrgstatic void 766b8e80941Smrgradv_image_get_fmask_info(struct radv_device *device, 767b8e80941Smrg struct radv_image *image, 768b8e80941Smrg unsigned nr_samples, 769b8e80941Smrg struct radv_fmask_info *out) 770b8e80941Smrg{ 771b8e80941Smrg if (device->physical_device->rad_info.chip_class >= GFX9) { 772b8e80941Smrg out->alignment = image->planes[0].surface.fmask_alignment; 773b8e80941Smrg out->size = image->planes[0].surface.fmask_size; 774b8e80941Smrg out->tile_swizzle = image->planes[0].surface.fmask_tile_swizzle; 775b8e80941Smrg return; 776b8e80941Smrg } 777b8e80941Smrg 778b8e80941Smrg out->slice_tile_max = image->planes[0].surface.u.legacy.fmask.slice_tile_max; 779b8e80941Smrg out->tile_mode_index = image->planes[0].surface.u.legacy.fmask.tiling_index; 780b8e80941Smrg out->pitch_in_pixels = image->planes[0].surface.u.legacy.fmask.pitch_in_pixels; 781b8e80941Smrg out->bank_height = image->planes[0].surface.u.legacy.fmask.bankh; 782b8e80941Smrg out->tile_swizzle = image->planes[0].surface.fmask_tile_swizzle; 783b8e80941Smrg out->alignment = 
image->planes[0].surface.fmask_alignment; 784b8e80941Smrg out->size = image->planes[0].surface.fmask_size; 785b8e80941Smrg 786b8e80941Smrg assert(!out->tile_swizzle || !image->shareable); 787b8e80941Smrg} 788b8e80941Smrg 789b8e80941Smrgstatic void 790b8e80941Smrgradv_image_alloc_fmask(struct radv_device *device, 791b8e80941Smrg struct radv_image *image) 792b8e80941Smrg{ 793b8e80941Smrg radv_image_get_fmask_info(device, image, image->info.samples, &image->fmask); 794b8e80941Smrg 795b8e80941Smrg image->fmask.offset = align64(image->size, image->fmask.alignment); 796b8e80941Smrg image->size = image->fmask.offset + image->fmask.size; 797b8e80941Smrg image->alignment = MAX2(image->alignment, image->fmask.alignment); 798b8e80941Smrg} 799b8e80941Smrg 800b8e80941Smrgstatic void 801b8e80941Smrgradv_image_get_cmask_info(struct radv_device *device, 802b8e80941Smrg struct radv_image *image, 803b8e80941Smrg struct radv_cmask_info *out) 804b8e80941Smrg{ 805b8e80941Smrg unsigned pipe_interleave_bytes = device->physical_device->rad_info.pipe_interleave_bytes; 806b8e80941Smrg unsigned num_pipes = device->physical_device->rad_info.num_tile_pipes; 807b8e80941Smrg unsigned cl_width, cl_height; 808b8e80941Smrg 809b8e80941Smrg assert(image->plane_count == 1); 810b8e80941Smrg 811b8e80941Smrg if (device->physical_device->rad_info.chip_class >= GFX9) { 812b8e80941Smrg out->alignment = image->planes[0].surface.cmask_alignment; 813b8e80941Smrg out->size = image->planes[0].surface.cmask_size; 814b8e80941Smrg return; 815b8e80941Smrg } 816b8e80941Smrg 817b8e80941Smrg switch (num_pipes) { 818b8e80941Smrg case 2: 819b8e80941Smrg cl_width = 32; 820b8e80941Smrg cl_height = 16; 821b8e80941Smrg break; 822b8e80941Smrg case 4: 823b8e80941Smrg cl_width = 32; 824b8e80941Smrg cl_height = 32; 825b8e80941Smrg break; 826b8e80941Smrg case 8: 827b8e80941Smrg cl_width = 64; 828b8e80941Smrg cl_height = 32; 829b8e80941Smrg break; 830b8e80941Smrg case 16: /* Hawaii */ 831b8e80941Smrg cl_width = 64; 832b8e80941Smrg 
cl_height = 64; 833b8e80941Smrg break; 834b8e80941Smrg default: 835b8e80941Smrg assert(0); 836b8e80941Smrg return; 837b8e80941Smrg } 838b8e80941Smrg 839b8e80941Smrg unsigned base_align = num_pipes * pipe_interleave_bytes; 840b8e80941Smrg 841b8e80941Smrg unsigned width = align(image->planes[0].surface.u.legacy.level[0].nblk_x, cl_width*8); 842b8e80941Smrg unsigned height = align(image->planes[0].surface.u.legacy.level[0].nblk_y, cl_height*8); 843b8e80941Smrg unsigned slice_elements = (width * height) / (8*8); 844b8e80941Smrg 845b8e80941Smrg /* Each element of CMASK is a nibble. */ 846b8e80941Smrg unsigned slice_bytes = slice_elements / 2; 847b8e80941Smrg 848b8e80941Smrg out->slice_tile_max = (width * height) / (128*128); 849b8e80941Smrg if (out->slice_tile_max) 850b8e80941Smrg out->slice_tile_max -= 1; 851b8e80941Smrg 852b8e80941Smrg out->alignment = MAX2(256, base_align); 853b8e80941Smrg out->size = (image->type == VK_IMAGE_TYPE_3D ? image->info.depth : image->info.array_size) * 854b8e80941Smrg align(slice_bytes, base_align); 855b8e80941Smrg} 856b8e80941Smrg 857b8e80941Smrgstatic void 858b8e80941Smrgradv_image_alloc_cmask(struct radv_device *device, 859b8e80941Smrg struct radv_image *image) 860b8e80941Smrg{ 861b8e80941Smrg uint32_t clear_value_size = 0; 862b8e80941Smrg radv_image_get_cmask_info(device, image, &image->cmask); 863b8e80941Smrg 864b8e80941Smrg if (!image->cmask.size) 865b8e80941Smrg return; 866b8e80941Smrg 867b8e80941Smrg assert(image->cmask.alignment); 868b8e80941Smrg 869b8e80941Smrg image->cmask.offset = align64(image->size, image->cmask.alignment); 870b8e80941Smrg /* + 8 for storing the clear values */ 871b8e80941Smrg if (!image->clear_value_offset) { 872b8e80941Smrg image->clear_value_offset = image->cmask.offset + image->cmask.size; 873b8e80941Smrg clear_value_size = 8; 874b8e80941Smrg } 875b8e80941Smrg image->size = image->cmask.offset + image->cmask.size + clear_value_size; 876b8e80941Smrg image->alignment = MAX2(image->alignment, 
image->cmask.alignment); 877b8e80941Smrg} 878b8e80941Smrg 879b8e80941Smrgstatic void 880b8e80941Smrgradv_image_alloc_dcc(struct radv_image *image) 881b8e80941Smrg{ 882b8e80941Smrg assert(image->plane_count == 1); 883b8e80941Smrg 884b8e80941Smrg image->dcc_offset = align64(image->size, image->planes[0].surface.dcc_alignment); 885b8e80941Smrg /* + 16 for storing the clear values + dcc pred */ 886b8e80941Smrg image->clear_value_offset = image->dcc_offset + image->planes[0].surface.dcc_size; 887b8e80941Smrg image->fce_pred_offset = image->clear_value_offset + 8; 888b8e80941Smrg image->dcc_pred_offset = image->clear_value_offset + 16; 889b8e80941Smrg image->size = image->dcc_offset + image->planes[0].surface.dcc_size + 24; 890b8e80941Smrg image->alignment = MAX2(image->alignment, image->planes[0].surface.dcc_alignment); 891b8e80941Smrg} 892b8e80941Smrg 893b8e80941Smrgstatic void 894b8e80941Smrgradv_image_alloc_htile(struct radv_image *image) 895b8e80941Smrg{ 896b8e80941Smrg image->htile_offset = align64(image->size, image->planes[0].surface.htile_alignment); 897b8e80941Smrg 898b8e80941Smrg /* + 8 for storing the clear values */ 899b8e80941Smrg image->clear_value_offset = image->htile_offset + image->planes[0].surface.htile_size; 900b8e80941Smrg image->size = image->clear_value_offset + 8; 901b8e80941Smrg if (radv_image_is_tc_compat_htile(image)) { 902b8e80941Smrg /* Metadata for the TC-compatible HTILE hardware bug which 903b8e80941Smrg * have to be fixed by updating ZRANGE_PRECISION when doing 904b8e80941Smrg * fast depth clears to 0.0f. 
905b8e80941Smrg */ 906b8e80941Smrg image->tc_compat_zrange_offset = image->clear_value_offset + 8; 907b8e80941Smrg image->size = image->clear_value_offset + 16; 908b8e80941Smrg } 909b8e80941Smrg image->alignment = align64(image->alignment, image->planes[0].surface.htile_alignment); 910b8e80941Smrg} 911b8e80941Smrg 912b8e80941Smrgstatic inline bool 913b8e80941Smrgradv_image_can_enable_dcc_or_cmask(struct radv_image *image) 914b8e80941Smrg{ 915b8e80941Smrg if (image->info.samples <= 1 && 916b8e80941Smrg image->info.width * image->info.height <= 512 * 512) { 917b8e80941Smrg /* Do not enable CMASK or DCC for small surfaces where the cost 918b8e80941Smrg * of the eliminate pass can be higher than the benefit of fast 919b8e80941Smrg * clear. RadeonSI does this, but the image threshold is 920b8e80941Smrg * different. 921b8e80941Smrg */ 922b8e80941Smrg return false; 923b8e80941Smrg } 924b8e80941Smrg 925b8e80941Smrg return image->usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT && 926b8e80941Smrg (image->exclusive || image->queue_family_mask == 1); 927b8e80941Smrg} 928b8e80941Smrg 929b8e80941Smrgstatic inline bool 930b8e80941Smrgradv_image_can_enable_dcc(struct radv_image *image) 931b8e80941Smrg{ 932b8e80941Smrg return radv_image_can_enable_dcc_or_cmask(image) && 933b8e80941Smrg radv_image_has_dcc(image); 934b8e80941Smrg} 935b8e80941Smrg 936b8e80941Smrgstatic inline bool 937b8e80941Smrgradv_image_can_enable_cmask(struct radv_image *image) 938b8e80941Smrg{ 939b8e80941Smrg if (image->planes[0].surface.bpe > 8 && image->info.samples == 1) { 940b8e80941Smrg /* Do not enable CMASK for non-MSAA images (fast color clear) 941b8e80941Smrg * because 128 bit formats are not supported, but FMASK might 942b8e80941Smrg * still be used. 
943b8e80941Smrg */ 944b8e80941Smrg return false; 945b8e80941Smrg } 946b8e80941Smrg 947b8e80941Smrg return radv_image_can_enable_dcc_or_cmask(image) && 948b8e80941Smrg image->info.levels == 1 && 949b8e80941Smrg image->info.depth == 1 && 950b8e80941Smrg !image->planes[0].surface.is_linear; 951b8e80941Smrg} 952b8e80941Smrg 953b8e80941Smrgstatic inline bool 954b8e80941Smrgradv_image_can_enable_fmask(struct radv_image *image) 955b8e80941Smrg{ 956b8e80941Smrg return image->info.samples > 1 && vk_format_is_color(image->vk_format); 957b8e80941Smrg} 958b8e80941Smrg 959b8e80941Smrgstatic inline bool 960b8e80941Smrgradv_image_can_enable_htile(struct radv_image *image) 961b8e80941Smrg{ 962b8e80941Smrg return radv_image_has_htile(image) && 963b8e80941Smrg image->info.levels == 1 && 964b8e80941Smrg image->info.width * image->info.height >= 8 * 8; 965b8e80941Smrg} 966b8e80941Smrg 967b8e80941Smrgstatic void radv_image_disable_dcc(struct radv_image *image) 968b8e80941Smrg{ 969b8e80941Smrg for (unsigned i = 0; i < image->plane_count; ++i) 970b8e80941Smrg image->planes[i].surface.dcc_size = 0; 971b8e80941Smrg} 972b8e80941Smrg 973b8e80941Smrgstatic void radv_image_disable_htile(struct radv_image *image) 974b8e80941Smrg{ 975b8e80941Smrg for (unsigned i = 0; i < image->plane_count; ++i) 976b8e80941Smrg image->planes[i].surface.htile_size = 0; 977b8e80941Smrg} 978b8e80941Smrg 979b8e80941SmrgVkResult 980b8e80941Smrgradv_image_create(VkDevice _device, 981b8e80941Smrg const struct radv_image_create_info *create_info, 982b8e80941Smrg const VkAllocationCallbacks* alloc, 983b8e80941Smrg VkImage *pImage) 984b8e80941Smrg{ 985b8e80941Smrg RADV_FROM_HANDLE(radv_device, device, _device); 986b8e80941Smrg const VkImageCreateInfo *pCreateInfo = create_info->vk_info; 987b8e80941Smrg struct radv_image *image = NULL; 988b8e80941Smrg assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO); 989b8e80941Smrg 990b8e80941Smrg const unsigned plane_count = 
vk_format_get_plane_count(pCreateInfo->format); 991b8e80941Smrg const size_t image_struct_size = sizeof(*image) + sizeof(struct radv_image_plane) * plane_count; 992b8e80941Smrg 993b8e80941Smrg radv_assert(pCreateInfo->mipLevels > 0); 994b8e80941Smrg radv_assert(pCreateInfo->arrayLayers > 0); 995b8e80941Smrg radv_assert(pCreateInfo->samples > 0); 996b8e80941Smrg radv_assert(pCreateInfo->extent.width > 0); 997b8e80941Smrg radv_assert(pCreateInfo->extent.height > 0); 998b8e80941Smrg radv_assert(pCreateInfo->extent.depth > 0); 999b8e80941Smrg 1000b8e80941Smrg image = vk_zalloc2(&device->alloc, alloc, image_struct_size, 8, 1001b8e80941Smrg VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); 1002b8e80941Smrg if (!image) 1003b8e80941Smrg return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY); 1004b8e80941Smrg 1005b8e80941Smrg image->type = pCreateInfo->imageType; 1006b8e80941Smrg image->info.width = pCreateInfo->extent.width; 1007b8e80941Smrg image->info.height = pCreateInfo->extent.height; 1008b8e80941Smrg image->info.depth = pCreateInfo->extent.depth; 1009b8e80941Smrg image->info.samples = pCreateInfo->samples; 1010b8e80941Smrg image->info.storage_samples = pCreateInfo->samples; 1011b8e80941Smrg image->info.array_size = pCreateInfo->arrayLayers; 1012b8e80941Smrg image->info.levels = pCreateInfo->mipLevels; 1013b8e80941Smrg image->info.num_channels = vk_format_get_nr_components(pCreateInfo->format); 1014b8e80941Smrg 1015b8e80941Smrg image->vk_format = pCreateInfo->format; 1016b8e80941Smrg image->tiling = pCreateInfo->tiling; 1017b8e80941Smrg image->usage = pCreateInfo->usage; 1018b8e80941Smrg image->flags = pCreateInfo->flags; 1019b8e80941Smrg 1020b8e80941Smrg image->exclusive = pCreateInfo->sharingMode == VK_SHARING_MODE_EXCLUSIVE; 1021b8e80941Smrg if (pCreateInfo->sharingMode == VK_SHARING_MODE_CONCURRENT) { 1022b8e80941Smrg for (uint32_t i = 0; i < pCreateInfo->queueFamilyIndexCount; ++i) 1023b8e80941Smrg if (pCreateInfo->pQueueFamilyIndices[i] == VK_QUEUE_FAMILY_EXTERNAL) 
1024b8e80941Smrg image->queue_family_mask |= (1u << RADV_MAX_QUEUE_FAMILIES) - 1u; 1025b8e80941Smrg else 1026b8e80941Smrg image->queue_family_mask |= 1u << pCreateInfo->pQueueFamilyIndices[i]; 1027b8e80941Smrg } 1028b8e80941Smrg 1029b8e80941Smrg image->shareable = vk_find_struct_const(pCreateInfo->pNext, 1030b8e80941Smrg EXTERNAL_MEMORY_IMAGE_CREATE_INFO) != NULL; 1031b8e80941Smrg if (!vk_format_is_depth_or_stencil(pCreateInfo->format) && !create_info->scanout && !image->shareable) { 1032b8e80941Smrg image->info.surf_index = &device->image_mrt_offset_counter; 1033b8e80941Smrg } 1034b8e80941Smrg 1035b8e80941Smrg image->plane_count = plane_count; 1036b8e80941Smrg image->size = 0; 1037b8e80941Smrg image->alignment = 1; 1038b8e80941Smrg for (unsigned plane = 0; plane < plane_count; ++plane) { 1039b8e80941Smrg struct ac_surf_info info = image->info; 1040b8e80941Smrg radv_init_surface(device, image, &image->planes[plane].surface, plane, create_info); 1041b8e80941Smrg 1042b8e80941Smrg if (plane) { 1043b8e80941Smrg const struct vk_format_description *desc = vk_format_description(pCreateInfo->format); 1044b8e80941Smrg assert(info.width % desc->width_divisor == 0); 1045b8e80941Smrg assert(info.height % desc->height_divisor == 0); 1046b8e80941Smrg 1047b8e80941Smrg info.width /= desc->width_divisor; 1048b8e80941Smrg info.height /= desc->height_divisor; 1049b8e80941Smrg } 1050b8e80941Smrg 1051b8e80941Smrg device->ws->surface_init(device->ws, &info, &image->planes[plane].surface); 1052b8e80941Smrg 1053b8e80941Smrg image->planes[plane].offset = align(image->size, image->planes[plane].surface.surf_alignment); 1054b8e80941Smrg image->size = image->planes[plane].offset + image->planes[plane].surface.surf_size; 1055b8e80941Smrg image->alignment = image->planes[plane].surface.surf_alignment; 1056b8e80941Smrg 1057b8e80941Smrg image->planes[plane].format = vk_format_get_plane_format(image->vk_format, plane); 1058b8e80941Smrg } 1059b8e80941Smrg 1060b8e80941Smrg if 
(!create_info->no_metadata_planes) { 1061b8e80941Smrg /* Try to enable DCC first. */ 1062b8e80941Smrg if (radv_image_can_enable_dcc(image)) { 1063b8e80941Smrg radv_image_alloc_dcc(image); 1064b8e80941Smrg if (image->info.samples > 1) { 1065b8e80941Smrg /* CMASK should be enabled because DCC fast 1066b8e80941Smrg * clear with MSAA needs it. 1067b8e80941Smrg */ 1068b8e80941Smrg assert(radv_image_can_enable_cmask(image)); 1069b8e80941Smrg radv_image_alloc_cmask(device, image); 1070b8e80941Smrg } 1071b8e80941Smrg } else { 1072b8e80941Smrg /* When DCC cannot be enabled, try CMASK. */ 1073b8e80941Smrg radv_image_disable_dcc(image); 1074b8e80941Smrg if (radv_image_can_enable_cmask(image)) { 1075b8e80941Smrg radv_image_alloc_cmask(device, image); 1076b8e80941Smrg } 1077b8e80941Smrg } 1078b8e80941Smrg 1079b8e80941Smrg /* Try to enable FMASK for multisampled images. */ 1080b8e80941Smrg if (radv_image_can_enable_fmask(image)) { 1081b8e80941Smrg radv_image_alloc_fmask(device, image); 1082b8e80941Smrg } else { 1083b8e80941Smrg /* Otherwise, try to enable HTILE for depth surfaces. 
*/ 1084b8e80941Smrg if (radv_image_can_enable_htile(image) && 1085b8e80941Smrg !(device->instance->debug_flags & RADV_DEBUG_NO_HIZ)) { 1086b8e80941Smrg image->tc_compatible_htile = image->planes[0].surface.flags & RADEON_SURF_TC_COMPATIBLE_HTILE; 1087b8e80941Smrg radv_image_alloc_htile(image); 1088b8e80941Smrg } else { 1089b8e80941Smrg radv_image_disable_htile(image); 1090b8e80941Smrg } 1091b8e80941Smrg } 1092b8e80941Smrg } else { 1093b8e80941Smrg radv_image_disable_dcc(image); 1094b8e80941Smrg radv_image_disable_htile(image); 1095b8e80941Smrg } 1096b8e80941Smrg 1097b8e80941Smrg if (pCreateInfo->flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT) { 1098b8e80941Smrg image->alignment = MAX2(image->alignment, 4096); 1099b8e80941Smrg image->size = align64(image->size, image->alignment); 1100b8e80941Smrg image->offset = 0; 1101b8e80941Smrg 1102b8e80941Smrg image->bo = device->ws->buffer_create(device->ws, image->size, image->alignment, 1103b8e80941Smrg 0, RADEON_FLAG_VIRTUAL, RADV_BO_PRIORITY_VIRTUAL); 1104b8e80941Smrg if (!image->bo) { 1105b8e80941Smrg vk_free2(&device->alloc, alloc, image); 1106b8e80941Smrg return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY); 1107b8e80941Smrg } 1108b8e80941Smrg } 1109b8e80941Smrg 1110b8e80941Smrg *pImage = radv_image_to_handle(image); 1111b8e80941Smrg 1112b8e80941Smrg return VK_SUCCESS; 1113b8e80941Smrg} 1114b8e80941Smrg 1115b8e80941Smrgstatic void 1116b8e80941Smrgradv_image_view_make_descriptor(struct radv_image_view *iview, 1117b8e80941Smrg struct radv_device *device, 1118b8e80941Smrg VkFormat vk_format, 1119b8e80941Smrg const VkComponentMapping *components, 1120b8e80941Smrg bool is_storage_image, unsigned plane_id, 1121b8e80941Smrg unsigned descriptor_plane_id) 1122b8e80941Smrg{ 1123b8e80941Smrg struct radv_image *image = iview->image; 1124b8e80941Smrg struct radv_image_plane *plane = &image->planes[plane_id]; 1125b8e80941Smrg const struct vk_format_description *format_desc = vk_format_description(image->vk_format); 
1126b8e80941Smrg bool is_stencil = iview->aspect_mask == VK_IMAGE_ASPECT_STENCIL_BIT; 1127b8e80941Smrg uint32_t blk_w; 1128b8e80941Smrg union radv_descriptor *descriptor; 1129b8e80941Smrg uint32_t hw_level = 0; 1130b8e80941Smrg 1131b8e80941Smrg if (is_storage_image) { 1132b8e80941Smrg descriptor = &iview->storage_descriptor; 1133b8e80941Smrg } else { 1134b8e80941Smrg descriptor = &iview->descriptor; 1135b8e80941Smrg } 1136b8e80941Smrg 1137b8e80941Smrg assert(vk_format_get_plane_count(vk_format) == 1); 1138b8e80941Smrg assert(plane->surface.blk_w % vk_format_get_blockwidth(plane->format) == 0); 1139b8e80941Smrg blk_w = plane->surface.blk_w / vk_format_get_blockwidth(plane->format) * vk_format_get_blockwidth(vk_format); 1140b8e80941Smrg 1141b8e80941Smrg if (device->physical_device->rad_info.chip_class >= GFX9) 1142b8e80941Smrg hw_level = iview->base_mip; 1143b8e80941Smrg si_make_texture_descriptor(device, image, is_storage_image, 1144b8e80941Smrg iview->type, 1145b8e80941Smrg vk_format, 1146b8e80941Smrg components, 1147b8e80941Smrg hw_level, hw_level + iview->level_count - 1, 1148b8e80941Smrg iview->base_layer, 1149b8e80941Smrg iview->base_layer + iview->layer_count - 1, 1150b8e80941Smrg iview->extent.width / (plane_id ? format_desc->width_divisor : 1), 1151b8e80941Smrg iview->extent.height / (plane_id ? format_desc->height_divisor : 1), 1152b8e80941Smrg iview->extent.depth, 1153b8e80941Smrg descriptor->plane_descriptors[descriptor_plane_id], 1154b8e80941Smrg descriptor_plane_id ? 
NULL : descriptor->fmask_descriptor); 1155b8e80941Smrg 1156b8e80941Smrg const struct legacy_surf_level *base_level_info = NULL; 1157b8e80941Smrg if (device->physical_device->rad_info.chip_class <= GFX9) { 1158b8e80941Smrg if (is_stencil) 1159b8e80941Smrg base_level_info = &plane->surface.u.legacy.stencil_level[iview->base_mip]; 1160b8e80941Smrg else 1161b8e80941Smrg base_level_info = &plane->surface.u.legacy.level[iview->base_mip]; 1162b8e80941Smrg } 1163b8e80941Smrg si_set_mutable_tex_desc_fields(device, image, 1164b8e80941Smrg base_level_info, 1165b8e80941Smrg plane_id, 1166b8e80941Smrg iview->base_mip, 1167b8e80941Smrg iview->base_mip, 1168b8e80941Smrg blk_w, is_stencil, is_storage_image, descriptor->plane_descriptors[descriptor_plane_id]); 1169b8e80941Smrg} 1170b8e80941Smrg 1171b8e80941Smrgstatic unsigned 1172b8e80941Smrgradv_plane_from_aspect(VkImageAspectFlags mask) 1173b8e80941Smrg{ 1174b8e80941Smrg switch(mask) { 1175b8e80941Smrg case VK_IMAGE_ASPECT_PLANE_1_BIT: 1176b8e80941Smrg return 1; 1177b8e80941Smrg case VK_IMAGE_ASPECT_PLANE_2_BIT: 1178b8e80941Smrg return 2; 1179b8e80941Smrg default: 1180b8e80941Smrg return 0; 1181b8e80941Smrg } 1182b8e80941Smrg} 1183b8e80941Smrg 1184b8e80941SmrgVkFormat 1185b8e80941Smrgradv_get_aspect_format(struct radv_image *image, VkImageAspectFlags mask) 1186b8e80941Smrg{ 1187b8e80941Smrg switch(mask) { 1188b8e80941Smrg case VK_IMAGE_ASPECT_PLANE_0_BIT: 1189b8e80941Smrg return image->planes[0].format; 1190b8e80941Smrg case VK_IMAGE_ASPECT_PLANE_1_BIT: 1191b8e80941Smrg return image->planes[1].format; 1192b8e80941Smrg case VK_IMAGE_ASPECT_PLANE_2_BIT: 1193b8e80941Smrg return image->planes[2].format; 1194b8e80941Smrg case VK_IMAGE_ASPECT_STENCIL_BIT: 1195b8e80941Smrg return vk_format_stencil_only(image->vk_format); 1196b8e80941Smrg case VK_IMAGE_ASPECT_DEPTH_BIT: 1197b8e80941Smrg return vk_format_depth_only(image->vk_format); 1198b8e80941Smrg case VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT: 1199b8e80941Smrg return 
vk_format_depth_only(image->vk_format); 1200b8e80941Smrg default: 1201b8e80941Smrg return image->vk_format; 1202b8e80941Smrg } 1203b8e80941Smrg} 1204b8e80941Smrg 1205b8e80941Smrgvoid 1206b8e80941Smrgradv_image_view_init(struct radv_image_view *iview, 1207b8e80941Smrg struct radv_device *device, 1208b8e80941Smrg const VkImageViewCreateInfo* pCreateInfo) 1209b8e80941Smrg{ 1210b8e80941Smrg RADV_FROM_HANDLE(radv_image, image, pCreateInfo->image); 1211b8e80941Smrg const VkImageSubresourceRange *range = &pCreateInfo->subresourceRange; 1212b8e80941Smrg 1213b8e80941Smrg switch (image->type) { 1214b8e80941Smrg case VK_IMAGE_TYPE_1D: 1215b8e80941Smrg case VK_IMAGE_TYPE_2D: 1216b8e80941Smrg assert(range->baseArrayLayer + radv_get_layerCount(image, range) - 1 <= image->info.array_size); 1217b8e80941Smrg break; 1218b8e80941Smrg case VK_IMAGE_TYPE_3D: 1219b8e80941Smrg assert(range->baseArrayLayer + radv_get_layerCount(image, range) - 1 1220b8e80941Smrg <= radv_minify(image->info.depth, range->baseMipLevel)); 1221b8e80941Smrg break; 1222b8e80941Smrg default: 1223b8e80941Smrg unreachable("bad VkImageType"); 1224b8e80941Smrg } 1225b8e80941Smrg iview->image = image; 1226b8e80941Smrg iview->bo = image->bo; 1227b8e80941Smrg iview->type = pCreateInfo->viewType; 1228b8e80941Smrg iview->plane_id = radv_plane_from_aspect(pCreateInfo->subresourceRange.aspectMask); 1229b8e80941Smrg iview->aspect_mask = pCreateInfo->subresourceRange.aspectMask; 1230b8e80941Smrg iview->multiple_planes = vk_format_get_plane_count(image->vk_format) > 1 && iview->aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT; 1231b8e80941Smrg iview->vk_format = pCreateInfo->format; 1232b8e80941Smrg 1233b8e80941Smrg if (iview->aspect_mask == VK_IMAGE_ASPECT_STENCIL_BIT) { 1234b8e80941Smrg iview->vk_format = vk_format_stencil_only(iview->vk_format); 1235b8e80941Smrg } else if (iview->aspect_mask == VK_IMAGE_ASPECT_DEPTH_BIT) { 1236b8e80941Smrg iview->vk_format = vk_format_depth_only(iview->vk_format); 1237b8e80941Smrg } 
1238b8e80941Smrg 1239b8e80941Smrg if (device->physical_device->rad_info.chip_class >= GFX9) { 1240b8e80941Smrg iview->extent = (VkExtent3D) { 1241b8e80941Smrg .width = image->info.width, 1242b8e80941Smrg .height = image->info.height, 1243b8e80941Smrg .depth = image->info.depth, 1244b8e80941Smrg }; 1245b8e80941Smrg } else { 1246b8e80941Smrg iview->extent = (VkExtent3D) { 1247b8e80941Smrg .width = radv_minify(image->info.width , range->baseMipLevel), 1248b8e80941Smrg .height = radv_minify(image->info.height, range->baseMipLevel), 1249b8e80941Smrg .depth = radv_minify(image->info.depth , range->baseMipLevel), 1250b8e80941Smrg }; 1251b8e80941Smrg } 1252b8e80941Smrg 1253b8e80941Smrg if (iview->vk_format != image->planes[iview->plane_id].format) { 1254b8e80941Smrg unsigned view_bw = vk_format_get_blockwidth(iview->vk_format); 1255b8e80941Smrg unsigned view_bh = vk_format_get_blockheight(iview->vk_format); 1256b8e80941Smrg unsigned img_bw = vk_format_get_blockwidth(image->vk_format); 1257b8e80941Smrg unsigned img_bh = vk_format_get_blockheight(image->vk_format); 1258b8e80941Smrg 1259b8e80941Smrg iview->extent.width = round_up_u32(iview->extent.width * view_bw, img_bw); 1260b8e80941Smrg iview->extent.height = round_up_u32(iview->extent.height * view_bh, img_bh); 1261b8e80941Smrg 1262b8e80941Smrg /* Comment ported from amdvlk - 1263b8e80941Smrg * If we have the following image: 1264b8e80941Smrg * Uncompressed pixels Compressed block sizes (4x4) 1265b8e80941Smrg * mip0: 22 x 22 6 x 6 1266b8e80941Smrg * mip1: 11 x 11 3 x 3 1267b8e80941Smrg * mip2: 5 x 5 2 x 2 1268b8e80941Smrg * mip3: 2 x 2 1 x 1 1269b8e80941Smrg * mip4: 1 x 1 1 x 1 1270b8e80941Smrg * 1271b8e80941Smrg * On GFX9 the descriptor is always programmed with the WIDTH and HEIGHT of the base level and the HW is 1272b8e80941Smrg * calculating the degradation of the block sizes down the mip-chain as follows (straight-up 1273b8e80941Smrg * divide-by-two integer math): 1274b8e80941Smrg * mip0: 6x6 1275b8e80941Smrg * mip1: 
3x3 1276b8e80941Smrg * mip2: 1x1 1277b8e80941Smrg * mip3: 1x1 1278b8e80941Smrg * 1279b8e80941Smrg * This means that mip2 will be missing texels. 1280b8e80941Smrg * 1281b8e80941Smrg * Fix this by calculating the base mip's width and height, then convert that, and round it 1282b8e80941Smrg * back up to get the level 0 size. 1283b8e80941Smrg * Clamp the converted size between the original values, and next power of two, which 1284b8e80941Smrg * means we don't oversize the image. 1285b8e80941Smrg */ 1286b8e80941Smrg if (device->physical_device->rad_info.chip_class >= GFX9 && 1287b8e80941Smrg vk_format_is_compressed(image->vk_format) && 1288b8e80941Smrg !vk_format_is_compressed(iview->vk_format)) { 1289b8e80941Smrg unsigned lvl_width = radv_minify(image->info.width , range->baseMipLevel); 1290b8e80941Smrg unsigned lvl_height = radv_minify(image->info.height, range->baseMipLevel); 1291b8e80941Smrg 1292b8e80941Smrg lvl_width = round_up_u32(lvl_width * view_bw, img_bw); 1293b8e80941Smrg lvl_height = round_up_u32(lvl_height * view_bh, img_bh); 1294b8e80941Smrg 1295b8e80941Smrg lvl_width <<= range->baseMipLevel; 1296b8e80941Smrg lvl_height <<= range->baseMipLevel; 1297b8e80941Smrg 1298b8e80941Smrg iview->extent.width = CLAMP(lvl_width, iview->extent.width, iview->image->planes[0].surface.u.gfx9.surf_pitch); 1299b8e80941Smrg iview->extent.height = CLAMP(lvl_height, iview->extent.height, iview->image->planes[0].surface.u.gfx9.surf_height); 1300b8e80941Smrg } 1301b8e80941Smrg } 1302b8e80941Smrg 1303b8e80941Smrg iview->base_layer = range->baseArrayLayer; 1304b8e80941Smrg iview->layer_count = radv_get_layerCount(image, range); 1305b8e80941Smrg iview->base_mip = range->baseMipLevel; 1306b8e80941Smrg iview->level_count = radv_get_levelCount(image, range); 1307b8e80941Smrg 1308b8e80941Smrg for (unsigned i = 0; i < (iview->multiple_planes ? 
vk_format_get_plane_count(image->vk_format) : 1); ++i) { 1309b8e80941Smrg VkFormat format = vk_format_get_plane_format(iview->vk_format, i); 1310b8e80941Smrg radv_image_view_make_descriptor(iview, device, format, &pCreateInfo->components, false, iview->plane_id + i, i); 1311b8e80941Smrg radv_image_view_make_descriptor(iview, device, format, &pCreateInfo->components, true, iview->plane_id + i, i); 1312b8e80941Smrg } 1313b8e80941Smrg} 1314b8e80941Smrg 1315b8e80941Smrgbool radv_layout_has_htile(const struct radv_image *image, 1316b8e80941Smrg VkImageLayout layout, 1317b8e80941Smrg unsigned queue_mask) 1318b8e80941Smrg{ 1319b8e80941Smrg if (radv_image_is_tc_compat_htile(image)) 1320b8e80941Smrg return layout != VK_IMAGE_LAYOUT_GENERAL; 1321b8e80941Smrg 1322b8e80941Smrg return radv_image_has_htile(image) && 1323b8e80941Smrg (layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL || 1324b8e80941Smrg (layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL && 1325b8e80941Smrg queue_mask == (1u << RADV_QUEUE_GENERAL))); 1326b8e80941Smrg} 1327b8e80941Smrg 1328b8e80941Smrgbool radv_layout_is_htile_compressed(const struct radv_image *image, 1329b8e80941Smrg VkImageLayout layout, 1330b8e80941Smrg unsigned queue_mask) 1331b8e80941Smrg{ 1332b8e80941Smrg if (radv_image_is_tc_compat_htile(image)) 1333b8e80941Smrg return layout != VK_IMAGE_LAYOUT_GENERAL; 1334b8e80941Smrg 1335b8e80941Smrg return radv_image_has_htile(image) && 1336b8e80941Smrg (layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL || 1337b8e80941Smrg (layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL && 1338b8e80941Smrg queue_mask == (1u << RADV_QUEUE_GENERAL))); 1339b8e80941Smrg} 1340b8e80941Smrg 1341b8e80941Smrgbool radv_layout_can_fast_clear(const struct radv_image *image, 1342b8e80941Smrg VkImageLayout layout, 1343b8e80941Smrg unsigned queue_mask) 1344b8e80941Smrg{ 1345b8e80941Smrg return layout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; 1346b8e80941Smrg} 1347b8e80941Smrg 1348b8e80941Smrgbool 
radv_layout_dcc_compressed(const struct radv_image *image, 1349b8e80941Smrg VkImageLayout layout, 1350b8e80941Smrg unsigned queue_mask) 1351b8e80941Smrg{ 1352b8e80941Smrg /* Don't compress compute transfer dst, as image stores are not supported. */ 1353b8e80941Smrg if (layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL && 1354b8e80941Smrg (queue_mask & (1u << RADV_QUEUE_COMPUTE))) 1355b8e80941Smrg return false; 1356b8e80941Smrg 1357b8e80941Smrg return radv_image_has_dcc(image) && layout != VK_IMAGE_LAYOUT_GENERAL; 1358b8e80941Smrg} 1359b8e80941Smrg 1360b8e80941Smrg 1361b8e80941Smrgunsigned radv_image_queue_family_mask(const struct radv_image *image, uint32_t family, uint32_t queue_family) 1362b8e80941Smrg{ 1363b8e80941Smrg if (!image->exclusive) 1364b8e80941Smrg return image->queue_family_mask; 1365b8e80941Smrg if (family == VK_QUEUE_FAMILY_EXTERNAL) 1366b8e80941Smrg return (1u << RADV_MAX_QUEUE_FAMILIES) - 1u; 1367b8e80941Smrg if (family == VK_QUEUE_FAMILY_IGNORED) 1368b8e80941Smrg return 1u << queue_family; 1369b8e80941Smrg return 1u << family; 1370b8e80941Smrg} 1371b8e80941Smrg 1372b8e80941SmrgVkResult 1373b8e80941Smrgradv_CreateImage(VkDevice device, 1374b8e80941Smrg const VkImageCreateInfo *pCreateInfo, 1375b8e80941Smrg const VkAllocationCallbacks *pAllocator, 1376b8e80941Smrg VkImage *pImage) 1377b8e80941Smrg{ 1378b8e80941Smrg#ifdef ANDROID 1379b8e80941Smrg const VkNativeBufferANDROID *gralloc_info = 1380b8e80941Smrg vk_find_struct_const(pCreateInfo->pNext, NATIVE_BUFFER_ANDROID); 1381b8e80941Smrg 1382b8e80941Smrg if (gralloc_info) 1383b8e80941Smrg return radv_image_from_gralloc(device, pCreateInfo, gralloc_info, 1384b8e80941Smrg pAllocator, pImage); 1385b8e80941Smrg#endif 1386b8e80941Smrg 1387b8e80941Smrg const struct wsi_image_create_info *wsi_info = 1388b8e80941Smrg vk_find_struct_const(pCreateInfo->pNext, WSI_IMAGE_CREATE_INFO_MESA); 1389b8e80941Smrg bool scanout = wsi_info && wsi_info->scanout; 1390b8e80941Smrg 1391b8e80941Smrg return 
radv_image_create(device, 1392b8e80941Smrg &(struct radv_image_create_info) { 1393b8e80941Smrg .vk_info = pCreateInfo, 1394b8e80941Smrg .scanout = scanout, 1395b8e80941Smrg }, 1396b8e80941Smrg pAllocator, 1397b8e80941Smrg pImage); 1398b8e80941Smrg} 1399b8e80941Smrg 1400b8e80941Smrgvoid 1401b8e80941Smrgradv_DestroyImage(VkDevice _device, VkImage _image, 1402b8e80941Smrg const VkAllocationCallbacks *pAllocator) 1403b8e80941Smrg{ 1404b8e80941Smrg RADV_FROM_HANDLE(radv_device, device, _device); 1405b8e80941Smrg RADV_FROM_HANDLE(radv_image, image, _image); 1406b8e80941Smrg 1407b8e80941Smrg if (!image) 1408b8e80941Smrg return; 1409b8e80941Smrg 1410b8e80941Smrg if (image->flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT) 1411b8e80941Smrg device->ws->buffer_destroy(image->bo); 1412b8e80941Smrg 1413b8e80941Smrg if (image->owned_memory != VK_NULL_HANDLE) 1414b8e80941Smrg radv_FreeMemory(_device, image->owned_memory, pAllocator); 1415b8e80941Smrg 1416b8e80941Smrg vk_free2(&device->alloc, pAllocator, image); 1417b8e80941Smrg} 1418b8e80941Smrg 1419b8e80941Smrgvoid radv_GetImageSubresourceLayout( 1420b8e80941Smrg VkDevice _device, 1421b8e80941Smrg VkImage _image, 1422b8e80941Smrg const VkImageSubresource* pSubresource, 1423b8e80941Smrg VkSubresourceLayout* pLayout) 1424b8e80941Smrg{ 1425b8e80941Smrg RADV_FROM_HANDLE(radv_image, image, _image); 1426b8e80941Smrg RADV_FROM_HANDLE(radv_device, device, _device); 1427b8e80941Smrg int level = pSubresource->mipLevel; 1428b8e80941Smrg int layer = pSubresource->arrayLayer; 1429b8e80941Smrg 1430b8e80941Smrg unsigned plane_id = radv_plane_from_aspect(pSubresource->aspectMask); 1431b8e80941Smrg 1432b8e80941Smrg struct radv_image_plane *plane = &image->planes[plane_id]; 1433b8e80941Smrg struct radeon_surf *surface = &plane->surface; 1434b8e80941Smrg 1435b8e80941Smrg if (device->physical_device->rad_info.chip_class >= GFX9) { 1436b8e80941Smrg pLayout->offset = plane->offset + surface->u.gfx9.offset[level] + surface->u.gfx9.surf_slice_size * layer; 
1437b8e80941Smrg pLayout->rowPitch = surface->u.gfx9.surf_pitch * surface->bpe; 1438b8e80941Smrg pLayout->arrayPitch = surface->u.gfx9.surf_slice_size; 1439b8e80941Smrg pLayout->depthPitch = surface->u.gfx9.surf_slice_size; 1440b8e80941Smrg pLayout->size = surface->u.gfx9.surf_slice_size; 1441b8e80941Smrg if (image->type == VK_IMAGE_TYPE_3D) 1442b8e80941Smrg pLayout->size *= u_minify(image->info.depth, level); 1443b8e80941Smrg } else { 1444b8e80941Smrg pLayout->offset = plane->offset + surface->u.legacy.level[level].offset + (uint64_t)surface->u.legacy.level[level].slice_size_dw * 4 * layer; 1445b8e80941Smrg pLayout->rowPitch = surface->u.legacy.level[level].nblk_x * surface->bpe; 1446b8e80941Smrg pLayout->arrayPitch = (uint64_t)surface->u.legacy.level[level].slice_size_dw * 4; 1447b8e80941Smrg pLayout->depthPitch = (uint64_t)surface->u.legacy.level[level].slice_size_dw * 4; 1448b8e80941Smrg pLayout->size = (uint64_t)surface->u.legacy.level[level].slice_size_dw * 4; 1449b8e80941Smrg if (image->type == VK_IMAGE_TYPE_3D) 1450b8e80941Smrg pLayout->size *= u_minify(image->info.depth, level); 1451b8e80941Smrg } 1452b8e80941Smrg} 1453b8e80941Smrg 1454b8e80941Smrg 1455b8e80941SmrgVkResult 1456b8e80941Smrgradv_CreateImageView(VkDevice _device, 1457b8e80941Smrg const VkImageViewCreateInfo *pCreateInfo, 1458b8e80941Smrg const VkAllocationCallbacks *pAllocator, 1459b8e80941Smrg VkImageView *pView) 1460b8e80941Smrg{ 1461b8e80941Smrg RADV_FROM_HANDLE(radv_device, device, _device); 1462b8e80941Smrg struct radv_image_view *view; 1463b8e80941Smrg 1464b8e80941Smrg view = vk_alloc2(&device->alloc, pAllocator, sizeof(*view), 8, 1465b8e80941Smrg VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); 1466b8e80941Smrg if (view == NULL) 1467b8e80941Smrg return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY); 1468b8e80941Smrg 1469b8e80941Smrg radv_image_view_init(view, device, pCreateInfo); 1470b8e80941Smrg 1471b8e80941Smrg *pView = radv_image_view_to_handle(view); 1472b8e80941Smrg 1473b8e80941Smrg 
return VK_SUCCESS; 1474b8e80941Smrg} 1475b8e80941Smrg 1476b8e80941Smrgvoid 1477b8e80941Smrgradv_DestroyImageView(VkDevice _device, VkImageView _iview, 1478b8e80941Smrg const VkAllocationCallbacks *pAllocator) 1479b8e80941Smrg{ 1480b8e80941Smrg RADV_FROM_HANDLE(radv_device, device, _device); 1481b8e80941Smrg RADV_FROM_HANDLE(radv_image_view, iview, _iview); 1482b8e80941Smrg 1483b8e80941Smrg if (!iview) 1484b8e80941Smrg return; 1485b8e80941Smrg vk_free2(&device->alloc, pAllocator, iview); 1486b8e80941Smrg} 1487b8e80941Smrg 1488b8e80941Smrgvoid radv_buffer_view_init(struct radv_buffer_view *view, 1489b8e80941Smrg struct radv_device *device, 1490b8e80941Smrg const VkBufferViewCreateInfo* pCreateInfo) 1491b8e80941Smrg{ 1492b8e80941Smrg RADV_FROM_HANDLE(radv_buffer, buffer, pCreateInfo->buffer); 1493b8e80941Smrg 1494b8e80941Smrg view->bo = buffer->bo; 1495b8e80941Smrg view->range = pCreateInfo->range == VK_WHOLE_SIZE ? 1496b8e80941Smrg buffer->size - pCreateInfo->offset : pCreateInfo->range; 1497b8e80941Smrg view->vk_format = pCreateInfo->format; 1498b8e80941Smrg 1499b8e80941Smrg radv_make_buffer_descriptor(device, buffer, view->vk_format, 1500b8e80941Smrg pCreateInfo->offset, view->range, view->state); 1501b8e80941Smrg} 1502b8e80941Smrg 1503b8e80941SmrgVkResult 1504b8e80941Smrgradv_CreateBufferView(VkDevice _device, 1505b8e80941Smrg const VkBufferViewCreateInfo *pCreateInfo, 1506b8e80941Smrg const VkAllocationCallbacks *pAllocator, 1507b8e80941Smrg VkBufferView *pView) 1508b8e80941Smrg{ 1509b8e80941Smrg RADV_FROM_HANDLE(radv_device, device, _device); 1510b8e80941Smrg struct radv_buffer_view *view; 1511b8e80941Smrg 1512b8e80941Smrg view = vk_alloc2(&device->alloc, pAllocator, sizeof(*view), 8, 1513b8e80941Smrg VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); 1514b8e80941Smrg if (!view) 1515b8e80941Smrg return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY); 1516b8e80941Smrg 1517b8e80941Smrg radv_buffer_view_init(view, device, pCreateInfo); 1518b8e80941Smrg 1519b8e80941Smrg 
*pView = radv_buffer_view_to_handle(view); 1520b8e80941Smrg 1521b8e80941Smrg return VK_SUCCESS; 1522b8e80941Smrg} 1523b8e80941Smrg 1524b8e80941Smrgvoid 1525b8e80941Smrgradv_DestroyBufferView(VkDevice _device, VkBufferView bufferView, 1526b8e80941Smrg const VkAllocationCallbacks *pAllocator) 1527b8e80941Smrg{ 1528b8e80941Smrg RADV_FROM_HANDLE(radv_device, device, _device); 1529b8e80941Smrg RADV_FROM_HANDLE(radv_buffer_view, view, bufferView); 1530b8e80941Smrg 1531b8e80941Smrg if (!view) 1532b8e80941Smrg return; 1533b8e80941Smrg 1534b8e80941Smrg vk_free2(&device->alloc, pAllocator, view); 1535b8e80941Smrg} 1536