zink_resource.c revision 7ec681f3
/*
 * Copyright 2018 Collabora Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "zink_resource.h"

#include "zink_batch.h"
#include "zink_context.h"
#include "zink_fence.h"
#include "zink_program.h"
#include "zink_screen.h"

#ifdef VK_USE_PLATFORM_METAL_EXT
#include "QuartzCore/CAMetalLayer.h"
#endif
#include "vulkan/wsi/wsi_common.h"

#include "util/slab.h"
#include "util/u_blitter.h"
#include "util/u_debug.h"
#include "util/format/u_format.h"
#include "util/u_transfer_helper.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "util/os_file.h"
#include "frontend/sw_winsys.h"

#ifndef _WIN32
#define ZINK_USE_DMABUF
#endif

#ifdef ZINK_USE_DMABUF
#include <xf86drm.h>
#include "drm-uapi/drm_fourcc.h"
#else
/* these won't actually be used */
#define DRM_FORMAT_MOD_INVALID 0
#define DRM_FORMAT_MOD_LINEAR 0
#endif


static bool
equals_ivci(const void *a, const void *b)
{
   return memcmp(a, b, sizeof(VkImageViewCreateInfo)) == 0;
}

static bool
equals_bvci(const void *a, const void *b)
{
   return memcmp(a, b, sizeof(VkBufferViewCreateInfo)) == 0;
}

static void
zink_transfer_flush_region(struct pipe_context *pctx,
                           struct pipe_transfer *ptrans,
                           const struct pipe_box *box);

void
debug_describe_zink_resource_object(char *buf, const struct zink_resource_object *ptr)
{
   sprintf(buf, "zink_resource_object");
}

void
zink_destroy_resource_object(struct zink_screen *screen, struct zink_resource_object *obj)
{
   if (obj->is_buffer) {
      util_dynarray_foreach(&obj->tmp, VkBuffer, buffer)
         VKSCR(DestroyBuffer)(screen->dev, *buffer, NULL);
      VKSCR(DestroyBuffer)(screen->dev, obj->buffer, NULL);
   } else {
      VKSCR(DestroyImage)(screen->dev, obj->image, NULL);
   }

   util_dynarray_fini(&obj->tmp);
   zink_descriptor_set_refs_clear(&obj->desc_set_refs, obj);
   zink_bo_unref(screen, obj->bo);
   FREE(obj);
}

static void
zink_resource_destroy(struct pipe_screen *pscreen,
                      struct pipe_resource *pres)
{
   struct zink_screen *screen = zink_screen(pscreen);
   struct zink_resource *res = zink_resource(pres);
   if (pres->target == PIPE_BUFFER) {
      util_range_destroy(&res->valid_buffer_range);
      util_idalloc_mt_free(&screen->buffer_ids, res->base.buffer_id_unique);
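      /* the view caches must already be empty here: views hold resource
       * references, so any remaining entries would be leaked views */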
      assert(!_mesa_hash_table_num_entries(&res->bufferview_cache));
      simple_mtx_destroy(&res->bufferview_mtx);
   } else {
      assert(!_mesa_hash_table_num_entries(&res->surface_cache));
      simple_mtx_destroy(&res->surface_mtx);
   }
   /* no need to do anything for the caches, these objects own the resource lifetimes */

   zink_resource_object_reference(screen, &res->obj, NULL);
   zink_resource_object_reference(screen, &res->scanout_obj, NULL);
   threaded_resource_deinit(pres);
   ralloc_free(res);
}

static VkImageAspectFlags
aspect_from_format(enum pipe_format fmt)
{
   if (util_format_is_depth_or_stencil(fmt)) {
      VkImageAspectFlags aspect = 0;
      const struct util_format_description *desc = util_format_description(fmt);
      if (util_format_has_depth(desc))
         aspect |= VK_IMAGE_ASPECT_DEPTH_BIT;
      if (util_format_has_stencil(desc))
         aspect |= VK_IMAGE_ASPECT_STENCIL_BIT;
      return aspect;
   } else
      return VK_IMAGE_ASPECT_COLOR_BIT;
}

static VkBufferCreateInfo
create_bci(struct zink_screen *screen, const struct pipe_resource *templ, unsigned bind)
{
   VkBufferCreateInfo bci;
   bci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
   bci.pNext = NULL;
   bci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
   bci.queueFamilyIndexCount = 0;
   bci.pQueueFamilyIndices = NULL;
   bci.size = templ->width0;
   bci.flags = 0;
   assert(bci.size > 0);

   bci.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
               VK_BUFFER_USAGE_TRANSFER_DST_BIT |
               VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;

   bci.usage |= VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT |
                VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT |
                VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
                VK_BUFFER_USAGE_INDEX_BUFFER_BIT |
                VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT |
                VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT |
                VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT;

   if (bind & PIPE_BIND_SHADER_IMAGE)
      bci.usage |= VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;

   if (bind & PIPE_BIND_QUERY_BUFFER)
      bci.usage |= VK_BUFFER_USAGE_CONDITIONAL_RENDERING_BIT_EXT;

   if (templ->flags & PIPE_RESOURCE_FLAG_SPARSE)
      bci.flags |= VK_BUFFER_CREATE_SPARSE_BINDING_BIT;
   return bci;
}

static bool
check_ici(struct zink_screen *screen, VkImageCreateInfo *ici, uint64_t modifier)
{
   VkImageFormatProperties image_props;
   VkResult ret;
   assert(modifier == DRM_FORMAT_MOD_INVALID ||
          (VKSCR(GetPhysicalDeviceImageFormatProperties2) && screen->info.have_EXT_image_drm_format_modifier));
   if (VKSCR(GetPhysicalDeviceImageFormatProperties2)) {
      VkImageFormatProperties2 props2;
      props2.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2;
      props2.pNext = NULL;
      VkPhysicalDeviceImageFormatInfo2 info;
      info.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2;
      info.format = ici->format;
      info.type = ici->imageType;
      info.tiling = ici->tiling;
      info.usage = ici->usage;
      info.flags = ici->flags;

      VkPhysicalDeviceImageDrmFormatModifierInfoEXT mod_info;
      if (modifier != DRM_FORMAT_MOD_INVALID) {
         mod_info.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT;
         mod_info.pNext = NULL;
         mod_info.drmFormatModifier = modifier;
         mod_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
         mod_info.queueFamilyIndexCount = 0;
         info.pNext = &mod_info;
      } else
         info.pNext = NULL;

      ret = VKSCR(GetPhysicalDeviceImageFormatProperties2)(screen->pdev, &info, &props2);
      image_props = props2.imageFormatProperties;
   } else
      ret = VKSCR(GetPhysicalDeviceImageFormatProperties)(screen->pdev, ici->format, ici->imageType,
                                                          ici->tiling, ici->usage, ici->flags, &image_props);
   return ret == VK_SUCCESS;
}

static VkImageUsageFlags
get_image_usage_for_feats(struct zink_screen *screen, VkFormatFeatureFlags feats, const struct pipe_resource *templ, unsigned bind)
{
   VkImageUsageFlags usage = 0;
   if (bind & ZINK_BIND_TRANSIENT)
      usage |= VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT;
   else {
      /* sadly, gallium doesn't let us know if it'll ever need this, so we have to assume */
      if (feats & VK_FORMAT_FEATURE_TRANSFER_SRC_BIT)
         usage |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
      if (feats & VK_FORMAT_FEATURE_TRANSFER_DST_BIT)
         usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
      if (feats & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT && (bind & (PIPE_BIND_LINEAR | PIPE_BIND_SHARED)) != (PIPE_BIND_LINEAR | PIPE_BIND_SHARED))
         usage |= VK_IMAGE_USAGE_SAMPLED_BIT;

      if ((feats & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT) && (bind & PIPE_BIND_SHADER_IMAGE)) {
         assert(templ->nr_samples <= 1 || screen->info.feats.features.shaderStorageImageMultisample);
         usage |= VK_IMAGE_USAGE_STORAGE_BIT;
      }
   }

   if (bind & PIPE_BIND_RENDER_TARGET) {
      if (feats & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT) {
         usage |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
         if ((bind & (PIPE_BIND_LINEAR | PIPE_BIND_SHARED)) != (PIPE_BIND_LINEAR | PIPE_BIND_SHARED))
            usage |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
      } else
         return 0;
   }

   if (bind & PIPE_BIND_DEPTH_STENCIL) {
      if (feats & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)
         usage |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
      else
         return 0;
   /* this is unlikely to occur and has been included for completeness */
   } else if (bind & PIPE_BIND_SAMPLER_VIEW && !(usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT)) {
      if (feats & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)
         usage |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
      else
         return 0;
   }

   if (templ->flags & PIPE_RESOURCE_FLAG_SPARSE)
      usage |= VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT;

   if (bind & PIPE_BIND_STREAM_OUTPUT)
      usage |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;

   return usage;
}

static VkFormatFeatureFlags
find_modifier_feats(const struct zink_modifier_prop *prop, uint64_t modifier, uint64_t *mod)
{
   for (unsigned j = 0; j < prop->drmFormatModifierCount; j++) {
      if (prop->pDrmFormatModifierProperties[j].drmFormatModifier == modifier) {
         *mod = modifier;
         return prop->pDrmFormatModifierProperties[j].drmFormatModifierTilingFeatures;
      }
   }
   return 0;
}

static VkImageUsageFlags
get_image_usage(struct zink_screen *screen, VkImageCreateInfo *ici, const struct pipe_resource *templ, unsigned bind, unsigned modifiers_count, const uint64_t *modifiers, uint64_t *mod)
{
   VkImageTiling tiling = ici->tiling;
   *mod = DRM_FORMAT_MOD_INVALID;
   if (modifiers_count) {
      bool have_linear = false;
      const struct zink_modifier_prop *prop = &screen->modifier_props[templ->format];
      assert(tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT);
      for (unsigned i = 0; i < modifiers_count; i++) {
         if (modifiers[i] == DRM_FORMAT_MOD_LINEAR) {
            have_linear = true;
            continue;
         }
         VkFormatFeatureFlags feats = find_modifier_feats(prop, modifiers[i], mod);
         if (feats) {
            VkImageUsageFlags usage = get_image_usage_for_feats(screen, feats, templ, bind);
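            /* the usage derived from the format features is only a candidate
             * until check_ici() confirms the implementation supports the full
             * create-info combination */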
            if (usage) {
               ici->usage = usage;
               if (check_ici(screen, ici, *mod))
                  return usage;
            }
         }
      }
      /* only try linear if no other options available */
      if (have_linear) {
         VkFormatFeatureFlags feats = find_modifier_feats(prop, DRM_FORMAT_MOD_LINEAR, mod);
         if (feats) {
            VkImageUsageFlags usage = get_image_usage_for_feats(screen, feats, templ, bind);
            if (usage) {
               ici->usage = usage;
               if (check_ici(screen, ici, *mod))
                  return usage;
            }
         }
      }
   } else {
      VkFormatProperties props = screen->format_props[templ->format];
      VkFormatFeatureFlags feats = tiling == VK_IMAGE_TILING_LINEAR ? props.linearTilingFeatures : props.optimalTilingFeatures;
      VkImageUsageFlags usage = get_image_usage_for_feats(screen, feats, templ, bind);
      if (usage) {
         ici->usage = usage;
         if (check_ici(screen, ici, *mod))
            return usage;
      }
   }
   *mod = DRM_FORMAT_MOD_INVALID;
   return 0;
}

static uint64_t
create_ici(struct zink_screen *screen, VkImageCreateInfo *ici, const struct pipe_resource *templ, bool dmabuf, unsigned bind, unsigned modifiers_count, const uint64_t *modifiers, bool *success)
{
   ici->sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
   ici->pNext = NULL;
   ici->flags = modifiers_count || dmabuf || bind & (PIPE_BIND_SCANOUT | PIPE_BIND_DEPTH_STENCIL) ? 0 : VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
   ici->usage = 0;
   ici->queueFamilyIndexCount = 0;

   switch (templ->target) {
   case PIPE_TEXTURE_1D:
   case PIPE_TEXTURE_1D_ARRAY:
      ici->imageType = VK_IMAGE_TYPE_1D;
      break;

   case PIPE_TEXTURE_CUBE:
   case PIPE_TEXTURE_CUBE_ARRAY:
   case PIPE_TEXTURE_2D:
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_RECT:
      ici->imageType = VK_IMAGE_TYPE_2D;
      break;

   case PIPE_TEXTURE_3D:
      ici->imageType = VK_IMAGE_TYPE_3D;
      ici->flags |= VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT;
      break;

   case PIPE_BUFFER:
      unreachable("PIPE_BUFFER should already be handled");

   default:
      unreachable("Unknown target");
   }

   if (screen->info.have_EXT_sample_locations &&
       bind & PIPE_BIND_DEPTH_STENCIL &&
       util_format_has_depth(util_format_description(templ->format)))
      ici->flags |= VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT;

   ici->format = zink_get_format(screen, templ->format);
   ici->extent.width = templ->width0;
   ici->extent.height = templ->height0;
   ici->extent.depth = templ->depth0;
   ici->mipLevels = templ->last_level + 1;
   ici->arrayLayers = MAX2(templ->array_size, 1);
   ici->samples = templ->nr_samples ? templ->nr_samples : VK_SAMPLE_COUNT_1_BIT;
   ici->tiling = modifiers_count ? VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT : bind & PIPE_BIND_LINEAR ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
   ici->sharingMode = VK_SHARING_MODE_EXCLUSIVE;
   ici->initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;

   /* sampleCounts will be set to VK_SAMPLE_COUNT_1_BIT if at least one of the following conditions is true:
    * - flags contains VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT
    *
    * 44.1.1. Supported Sample Counts
    */
   bool want_cube = ici->samples == 1 &&
                    (templ->target == PIPE_TEXTURE_CUBE ||
                     templ->target == PIPE_TEXTURE_CUBE_ARRAY ||
                     (templ->target == PIPE_TEXTURE_2D_ARRAY && ici->extent.width == ici->extent.height && ici->arrayLayers >= 6));

   if (templ->target == PIPE_TEXTURE_CUBE)
      ici->arrayLayers *= 6;

   if (templ->usage == PIPE_USAGE_STAGING &&
       templ->format != PIPE_FORMAT_B4G4R4A4_UNORM &&
       templ->format != PIPE_FORMAT_B4G4R4A4_UINT)
      ici->tiling = VK_IMAGE_TILING_LINEAR;

   bool first = true;
   bool tried[2] = {0};
   uint64_t mod = DRM_FORMAT_MOD_INVALID;
   while (!ici->usage) {
      if (!first) {
         switch (ici->tiling) {
         case VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT:
            ici->tiling = VK_IMAGE_TILING_OPTIMAL;
            modifiers_count = 0;
            break;
         case VK_IMAGE_TILING_OPTIMAL:
            ici->tiling = VK_IMAGE_TILING_LINEAR;
            break;
         case VK_IMAGE_TILING_LINEAR:
            if (bind & PIPE_BIND_LINEAR) {
               *success = false;
               return DRM_FORMAT_MOD_INVALID;
            }
            ici->tiling = VK_IMAGE_TILING_OPTIMAL;
            break;
         default:
            unreachable("unhandled tiling mode");
         }
         if (tried[ici->tiling]) {
            *success = false;
            return DRM_FORMAT_MOD_INVALID;
         }
      }
      ici->usage = get_image_usage(screen, ici, templ, bind, modifiers_count, modifiers, &mod);
      first = false;
      if (ici->tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT)
         tried[ici->tiling] = true;
   }
   if (want_cube) {
      ici->flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
      if (get_image_usage(screen, ici, templ, bind, modifiers_count, modifiers, &mod) != ici->usage)
         ici->flags &= ~VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
   }

   *success = true;
   return mod;
}

static struct zink_resource_object *
resource_object_create(struct zink_screen *screen, const struct pipe_resource *templ, struct winsys_handle *whandle, bool *optimal_tiling,
                       const uint64_t *modifiers, int modifiers_count)
{
   struct zink_resource_object *obj = CALLOC_STRUCT(zink_resource_object);
   if (!obj)
      return NULL;

   VkMemoryRequirements reqs;
   VkMemoryPropertyFlags flags;
   bool need_dedicated = false;
   bool shared = templ->bind & PIPE_BIND_SHARED;
   VkExternalMemoryHandleTypeFlags export_types = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;

   VkExternalMemoryHandleTypeFlags external = 0;
   if (whandle) {
      if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
         external = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
         export_types |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
      } else
         unreachable("unknown handle type");
   }

   /* TODO: remove linear for wsi */
   bool scanout = templ->bind & PIPE_BIND_SCANOUT;

   pipe_reference_init(&obj->reference, 1);
   util_dynarray_init(&obj->tmp, NULL);
   util_dynarray_init(&obj->desc_set_refs.refs, NULL);
   if (templ->target == PIPE_BUFFER) {
      VkBufferCreateInfo bci = create_bci(screen, templ, templ->bind);

      if (VKSCR(CreateBuffer)(screen->dev, &bci, NULL, &obj->buffer) != VK_SUCCESS) {
         debug_printf("vkCreateBuffer failed\n");
         goto fail1;
      }

      VKSCR(GetBufferMemoryRequirements)(screen->dev, obj->buffer, &reqs);
      if (templ->usage == PIPE_USAGE_STAGING)
         flags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
      else if (templ->usage == PIPE_USAGE_STREAM)
         flags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
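      /* IMMUTABLE buffers are written once and then only read by the GPU,
       * so pure device-local memory is the best fit */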
      else if (templ->usage == PIPE_USAGE_IMMUTABLE)
         flags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
      else
         flags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
      obj->is_buffer = true;
      obj->transfer_dst = true;
   } else {
      bool winsys_modifier = shared && whandle && whandle->modifier != DRM_FORMAT_MOD_INVALID;
      const uint64_t *ici_modifiers = winsys_modifier ? &whandle->modifier : modifiers;
      unsigned ici_modifier_count = winsys_modifier ? 1 : modifiers_count;
      bool success = false;
      VkImageCreateInfo ici;
      uint64_t mod = create_ici(screen, &ici, templ, !!external, templ->bind, ici_modifier_count, ici_modifiers, &success);
      VkExternalMemoryImageCreateInfo emici;
      VkImageDrmFormatModifierExplicitCreateInfoEXT idfmeci;
      VkImageDrmFormatModifierListCreateInfoEXT idfmlci;
      if (!success)
         goto fail1;

      if (shared || external) {
         emici.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
         emici.pNext = NULL;
         emici.handleTypes = export_types;
         ici.pNext = &emici;

         assert(ici.tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT || mod != DRM_FORMAT_MOD_INVALID);
         if (winsys_modifier && ici.tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
            assert(mod == whandle->modifier);
            idfmeci.sType = VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT;
            idfmeci.pNext = ici.pNext;
            idfmeci.drmFormatModifier = mod;

            /* TODO: store these values from other planes in their
             * respective zink_resource, and walk the next-pointers to
             * build up the planar array here instead.
             */
            assert(util_format_get_num_planes(templ->format) == 1);
            idfmeci.drmFormatModifierPlaneCount = 1;
            VkSubresourceLayout plane_layout = {
               .offset = whandle->offset,
               .size = 0,
               .rowPitch = whandle->stride,
               .arrayPitch = 0,
               .depthPitch = 0,
            };
            idfmeci.pPlaneLayouts = &plane_layout;

            ici.pNext = &idfmeci;
         } else if (ici.tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
            idfmlci.sType = VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT;
            idfmlci.pNext = ici.pNext;
            idfmlci.drmFormatModifierCount = modifiers_count;
            idfmlci.pDrmFormatModifiers = modifiers;
            ici.pNext = &idfmlci;
         } else if (ici.tiling == VK_IMAGE_TILING_OPTIMAL) {
            // TODO: remove for wsi
            if (!external)
               ici.pNext = NULL;
            scanout = false;
            shared = false;
         }
      }

      if (optimal_tiling)
         *optimal_tiling = ici.tiling == VK_IMAGE_TILING_OPTIMAL;

      if (ici.usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT)
         obj->transfer_dst = true;

      if (ici.tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT)
         obj->modifier_aspect = VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT;

      struct wsi_image_create_info image_wsi_info = {
         VK_STRUCTURE_TYPE_WSI_IMAGE_CREATE_INFO_MESA,
         NULL,
         .scanout = true,
      };

      if ((screen->needs_mesa_wsi || screen->needs_mesa_flush_wsi) && scanout &&
          ici.tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
         image_wsi_info.pNext = ici.pNext;
         ici.pNext = &image_wsi_info;
      }

      VkResult result = VKSCR(CreateImage)(screen->dev, &ici, NULL, &obj->image);
      if (result != VK_SUCCESS) {
         debug_printf("vkCreateImage failed\n");
         goto fail1;
      }

      if (VKSCR(GetImageMemoryRequirements2)) {
         VkMemoryRequirements2 req2;
         req2.sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2;
         VkImageMemoryRequirementsInfo2 info2;
         info2.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2;
         info2.pNext = NULL;
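         /* VkMemoryDedicatedRequirements is chained below to learn whether the
          * driver prefers or requires a dedicated allocation for this image */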
         info2.image = obj->image;
         VkMemoryDedicatedRequirements ded;
         ded.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS;
         ded.pNext = NULL;
         req2.pNext = &ded;
         VKSCR(GetImageMemoryRequirements2)(screen->dev, &info2, &req2);
         memcpy(&reqs, &req2.memoryRequirements, sizeof(VkMemoryRequirements));
         need_dedicated = ded.prefersDedicatedAllocation || ded.requiresDedicatedAllocation;
      } else {
         VKSCR(GetImageMemoryRequirements)(screen->dev, obj->image, &reqs);
      }
      if (templ->usage == PIPE_USAGE_STAGING && ici.tiling == VK_IMAGE_TILING_LINEAR)
         flags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
      else
         flags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

      obj->vkflags = ici.flags;
      obj->vkusage = ici.usage;
   }
   obj->alignment = reqs.alignment;

   if (templ->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT || templ->usage == PIPE_USAGE_DYNAMIC)
      flags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
   else if (!(flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) &&
            templ->usage == PIPE_USAGE_STAGING)
      flags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;

   if (templ->bind & ZINK_BIND_TRANSIENT)
      flags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;

   VkMemoryAllocateInfo mai;
   enum zink_alloc_flag aflags = templ->flags & PIPE_RESOURCE_FLAG_SPARSE ? ZINK_ALLOC_SPARSE : 0;
   mai.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
   mai.pNext = NULL;
   mai.allocationSize = reqs.size;
   enum zink_heap heap = zink_heap_from_domain_flags(flags, aflags);
   mai.memoryTypeIndex = screen->heap_map[heap];
   if (unlikely(!(reqs.memoryTypeBits & BITFIELD_BIT(mai.memoryTypeIndex)))) {
      /* not valid based on reqs; demote to more compatible type */
      switch (heap) {
      case ZINK_HEAP_DEVICE_LOCAL_VISIBLE:
         heap = ZINK_HEAP_DEVICE_LOCAL;
         break;
      case ZINK_HEAP_HOST_VISIBLE_CACHED:
         heap = ZINK_HEAP_HOST_VISIBLE_COHERENT;
         break;
      default:
         break;
      }
      mai.memoryTypeIndex = screen->heap_map[heap];
      assert(reqs.memoryTypeBits & BITFIELD_BIT(mai.memoryTypeIndex));
   }

   VkMemoryType mem_type = screen->info.mem_props.memoryTypes[mai.memoryTypeIndex];
   obj->coherent = mem_type.propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
   if (!(templ->flags & PIPE_RESOURCE_FLAG_SPARSE))
      obj->host_visible = mem_type.propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;

   VkMemoryDedicatedAllocateInfo ded_alloc_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO,
      .pNext = mai.pNext,
      .image = obj->image,
      .buffer = VK_NULL_HANDLE,
   };

   if (screen->info.have_KHR_dedicated_allocation && need_dedicated) {
      ded_alloc_info.pNext = mai.pNext;
      mai.pNext = &ded_alloc_info;
   }

   VkExportMemoryAllocateInfo emai;
   if (templ->bind & PIPE_BIND_SHARED && shared) {
      emai.sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO;
      emai.handleTypes = export_types;

      emai.pNext = mai.pNext;
      mai.pNext = &emai;
   }

   VkImportMemoryFdInfoKHR imfi = {
      VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR,
      NULL,
   };

   if (whandle) {
      imfi.pNext = NULL;
      imfi.handleType = external;
      imfi.fd = os_dupfd_cloexec(whandle->handle);
      if (imfi.fd < 0) {
         mesa_loge("ZINK: failed to dup dmabuf fd: %s\n", strerror(errno));
         goto fail1;
      }

      imfi.pNext = mai.pNext;
      mai.pNext = &imfi;
   }

   struct wsi_memory_allocate_info memory_wsi_info = {
      VK_STRUCTURE_TYPE_WSI_MEMORY_ALLOCATE_INFO_MESA,
      NULL,
   };

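   /* drivers relying on Mesa's WSI want implicit synchronization enabled on
    * memory that will be scanned out */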
   if (screen->needs_mesa_wsi && scanout) {
      memory_wsi_info.implicit_sync = true;

      memory_wsi_info.pNext = mai.pNext;
      mai.pNext = &memory_wsi_info;
   }

   unsigned alignment = MAX2(reqs.alignment, 256);
   if (templ->usage == PIPE_USAGE_STAGING && obj->is_buffer)
      alignment = MAX2(alignment, screen->info.props.limits.minMemoryMapAlignment);
   obj->alignment = alignment;
   obj->bo = zink_bo(zink_bo_create(screen, reqs.size, alignment, heap, mai.pNext ? ZINK_ALLOC_NO_SUBALLOC : 0, mai.pNext));
   if (!obj->bo)
      goto fail2;
   if (aflags == ZINK_ALLOC_SPARSE) {
      obj->size = templ->width0;
   } else {
      obj->offset = zink_bo_get_offset(obj->bo);
      obj->size = zink_bo_get_size(obj->bo);
   }

   if (templ->target == PIPE_BUFFER) {
      if (!(templ->flags & PIPE_RESOURCE_FLAG_SPARSE))
         if (VKSCR(BindBufferMemory)(screen->dev, obj->buffer, zink_bo_get_mem(obj->bo), obj->offset) != VK_SUCCESS)
            goto fail3;
   } else {
      if (VKSCR(BindImageMemory)(screen->dev, obj->image, zink_bo_get_mem(obj->bo), obj->offset) != VK_SUCCESS)
         goto fail3;
   }
   return obj;

fail3:
   zink_bo_unref(screen, obj->bo);

fail2:
   if (templ->target == PIPE_BUFFER)
      VKSCR(DestroyBuffer)(screen->dev, obj->buffer, NULL);
   else
      VKSCR(DestroyImage)(screen->dev, obj->image, NULL);
fail1:
   FREE(obj);
   return NULL;
}

static struct pipe_resource *
resource_create(struct pipe_screen *pscreen,
                const struct pipe_resource *templ,
                struct winsys_handle *whandle,
                unsigned external_usage,
                const uint64_t *modifiers, int modifiers_count)
{
   struct zink_screen *screen = zink_screen(pscreen);
   struct zink_resource *res = rzalloc(NULL, struct zink_resource);

   if (modifiers_count > 0) {
      /* for rebinds */
      res->modifiers_count = modifiers_count;
      res->modifiers = mem_dup(modifiers, modifiers_count * sizeof(uint64_t));
      if (!res->modifiers) {
         ralloc_free(res);
         return NULL;
      }
      /* TODO: remove this when multi-plane modifiers are supported */
      const struct zink_modifier_prop *prop = &screen->modifier_props[templ->format];
      for (unsigned i = 0; i < modifiers_count; i++) {
         for (unsigned j = 0; j < prop->drmFormatModifierCount; j++) {
            if (prop->pDrmFormatModifierProperties[j].drmFormatModifier == modifiers[i]) {
               if (prop->pDrmFormatModifierProperties[j].drmFormatModifierPlaneCount != 1)
                  res->modifiers[i] = DRM_FORMAT_MOD_INVALID;
               break;
            }
         }
      }
   }

   res->base.b = *templ;

   threaded_resource_init(&res->base.b);
   pipe_reference_init(&res->base.b.reference, 1);
   res->base.b.screen = pscreen;

   bool optimal_tiling = false;
   struct pipe_resource templ2 = *templ;
   unsigned scanout_flags = templ->bind & (PIPE_BIND_SCANOUT | PIPE_BIND_SHARED);
   if (!(templ->bind & PIPE_BIND_LINEAR))
      templ2.bind &= ~scanout_flags;
   res->obj = resource_object_create(screen, &templ2, whandle, &optimal_tiling, NULL, 0);
   if (!res->obj) {
      free(res->modifiers);
      ralloc_free(res);
      return NULL;
   }

   res->internal_format = templ->format;
   if (templ->target == PIPE_BUFFER) {
      util_range_init(&res->valid_buffer_range);
      if (!screen->resizable_bar && templ->width0 >= 8196) {
         /* We don't want to evict buffers from VRAM by mapping them for CPU access,
          * because they might never be moved back again. If a buffer is large enough,
          * upload data by copying from a temporary GTT buffer.
          * 8K might not seem much, but there can be 100000 buffers.
          *
          * This tweak improves performance for viewperf.
          */
         res->base.b.flags |= PIPE_RESOURCE_FLAG_DONT_MAP_DIRECTLY;
      }
   } else {
      res->format = zink_get_format(screen, templ->format);
      res->dmabuf_acquire = whandle && whandle->type == WINSYS_HANDLE_TYPE_FD;
      res->layout = res->dmabuf_acquire ? VK_IMAGE_LAYOUT_PREINITIALIZED : VK_IMAGE_LAYOUT_UNDEFINED;
      res->optimal_tiling = optimal_tiling;
      res->aspect = aspect_from_format(templ->format);
      if (scanout_flags && optimal_tiling) {
         // TODO: remove for wsi
         templ2 = res->base.b;
         templ2.bind = scanout_flags | PIPE_BIND_LINEAR;
         res->scanout_obj = resource_object_create(screen, &templ2, whandle, &optimal_tiling, res->modifiers, res->modifiers_count);
         assert(!optimal_tiling);
      }
   }

   if (screen->winsys && (templ->bind & PIPE_BIND_DISPLAY_TARGET)) {
      struct sw_winsys *winsys = screen->winsys;
      res->dt = winsys->displaytarget_create(screen->winsys,
                                             res->base.b.bind,
                                             res->base.b.format,
                                             templ->width0,
                                             templ->height0,
                                             64, NULL,
                                             &res->dt_stride);
   }
   if (res->obj->is_buffer) {
      res->base.buffer_id_unique = util_idalloc_mt_alloc(&screen->buffer_ids);
      _mesa_hash_table_init(&res->bufferview_cache, res, NULL, equals_bvci);
      simple_mtx_init(&res->bufferview_mtx, mtx_plain);
   } else {
      _mesa_hash_table_init(&res->surface_cache, res, NULL, equals_ivci);
      simple_mtx_init(&res->surface_mtx, mtx_plain);
   }
   return &res->base.b;
}

static struct pipe_resource *
zink_resource_create(struct pipe_screen *pscreen,
                     const struct pipe_resource *templ)
{
   return resource_create(pscreen, templ, NULL, 0, NULL, 0);
}

static struct pipe_resource *
zink_resource_create_with_modifiers(struct pipe_screen *pscreen, const struct pipe_resource *templ,
                                    const uint64_t *modifiers, int modifiers_count)
{
   return resource_create(pscreen, templ, NULL, 0, modifiers, modifiers_count);
}

static bool
zink_resource_get_param(struct pipe_screen *pscreen, struct pipe_context *pctx,
                        struct pipe_resource *pres,
                        unsigned plane,
                        unsigned layer,
                        unsigned level,
                        enum pipe_resource_param param,
                        unsigned handle_usage,
                        uint64_t *value)
{
   struct zink_screen *screen = zink_screen(pscreen);
   struct zink_resource *res = zink_resource(pres);
   //TODO: remove for wsi
   struct zink_resource_object *obj = res->scanout_obj ? res->scanout_obj : res->obj;
   VkImageAspectFlags aspect = obj->modifier_aspect ? obj->modifier_aspect : res->aspect;
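   /* the layout queries below must use the memory-plane aspect for images
    * created with DRM format modifiers */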
   struct winsys_handle whandle;
   switch (param) {
   case PIPE_RESOURCE_PARAM_NPLANES:
      /* not yet implemented */
      *value = 1;
      break;

   case PIPE_RESOURCE_PARAM_STRIDE: {
      VkImageSubresource sub_res = {0};
      VkSubresourceLayout sub_res_layout = {0};

      sub_res.aspectMask = aspect;

      VKSCR(GetImageSubresourceLayout)(screen->dev, obj->image, &sub_res, &sub_res_layout);

      *value = sub_res_layout.rowPitch;
      break;
   }

   case PIPE_RESOURCE_PARAM_OFFSET: {
      VkImageSubresource isr = {
         aspect,
         level,
         layer
      };
      VkSubresourceLayout srl;
      VKSCR(GetImageSubresourceLayout)(screen->dev, obj->image, &isr, &srl);
      *value = srl.offset;
      break;
   }

   case PIPE_RESOURCE_PARAM_MODIFIER: {
      *value = DRM_FORMAT_MOD_INVALID;
      if (!screen->info.have_EXT_image_drm_format_modifier)
         return false;
      if (!res->modifiers)
         return false;
      VkImageDrmFormatModifierPropertiesEXT prop;
      prop.sType = VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT;
      prop.pNext = NULL;
      if (VKSCR(GetImageDrmFormatModifierPropertiesEXT)(screen->dev, obj->image, &prop) == VK_SUCCESS)
         *value = prop.drmFormatModifier;
      break;
   }

   case PIPE_RESOURCE_PARAM_LAYER_STRIDE: {
      VkImageSubresource isr = {
         aspect,
         level,
         layer
      };
      VkSubresourceLayout srl;
      VKSCR(GetImageSubresourceLayout)(screen->dev, obj->image, &isr, &srl);
      if (res->base.b.target == PIPE_TEXTURE_3D)
         *value = srl.depthPitch;
      else
         *value = srl.arrayPitch;
      break;
   }

   case PIPE_RESOURCE_PARAM_HANDLE_TYPE_SHARED:
   case PIPE_RESOURCE_PARAM_HANDLE_TYPE_KMS:
   case PIPE_RESOURCE_PARAM_HANDLE_TYPE_FD: {
      memset(&whandle, 0, sizeof(whandle));
      if (param == PIPE_RESOURCE_PARAM_HANDLE_TYPE_SHARED)
         whandle.type = WINSYS_HANDLE_TYPE_SHARED;
      else if (param == PIPE_RESOURCE_PARAM_HANDLE_TYPE_KMS)
         whandle.type = WINSYS_HANDLE_TYPE_KMS;
      else if (param == PIPE_RESOURCE_PARAM_HANDLE_TYPE_FD)
         whandle.type = WINSYS_HANDLE_TYPE_FD;

      if (!pscreen->resource_get_handle(pscreen, pctx, pres, &whandle, handle_usage))
         return false;

      *value = whandle.handle;
      break;
   }
   }
   return true;
}

static bool
zink_resource_get_handle(struct pipe_screen *pscreen,
                         struct pipe_context *context,
                         struct pipe_resource *tex,
                         struct winsys_handle *whandle,
                         unsigned usage)
{
   if (whandle->type == WINSYS_HANDLE_TYPE_FD || whandle->type == WINSYS_HANDLE_TYPE_KMS) {
#ifdef ZINK_USE_DMABUF
      struct zink_resource *res = zink_resource(tex);
      struct zink_screen *screen = zink_screen(pscreen);
      //TODO: remove for wsi
      struct zink_resource_object *obj = res->scanout_obj ? res->scanout_obj : res->obj;
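      /* when a separate linear scanout image exists, that is what other
       * processes will read, so its memory is what gets exported */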

      VkMemoryGetFdInfoKHR fd_info = {0};
      int fd;
      fd_info.sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR;
      //TODO: remove for wsi
      fd_info.memory = zink_bo_get_mem(obj->bo);
      if (whandle->type == WINSYS_HANDLE_TYPE_FD)
         fd_info.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
      else
         fd_info.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
      VkResult result = VKSCR(GetMemoryFdKHR)(screen->dev, &fd_info, &fd);
      if (result != VK_SUCCESS)
         return false;
      if (whandle->type == WINSYS_HANDLE_TYPE_KMS) {
         uint32_t h;
         bool success = drmPrimeFDToHandle(screen->drm_fd, fd, &h) == 0;
         close(fd);
         if (!success)
            return false;
         fd = h;
      }
      whandle->handle = fd;
      uint64_t value;
      zink_resource_get_param(pscreen, context, tex, 0, 0, 0, PIPE_RESOURCE_PARAM_MODIFIER, 0, &value);
      whandle->modifier = value;
      zink_resource_get_param(pscreen, context, tex, 0, 0, 0, PIPE_RESOURCE_PARAM_OFFSET, 0, &value);
      whandle->offset = value;
      zink_resource_get_param(pscreen, context, tex, 0, 0, 0, PIPE_RESOURCE_PARAM_STRIDE, 0, &value);
      whandle->stride = value;
#else
      return false;
#endif
   }
   return true;
}

static struct pipe_resource *
zink_resource_from_handle(struct pipe_screen *pscreen,
                          const struct pipe_resource *templ,
                          struct winsys_handle *whandle,
                          unsigned usage)
{
#ifdef ZINK_USE_DMABUF
   if (whandle->modifier != DRM_FORMAT_MOD_INVALID &&
       !zink_screen(pscreen)->info.have_EXT_image_drm_format_modifier)
      return NULL;

   /* ignore any AUX planes, as well as planar formats */
   if (templ->format == PIPE_FORMAT_NONE ||
       util_format_get_num_planes(templ->format) != 1)
      return NULL;

   uint64_t modifier = DRM_FORMAT_MOD_INVALID;
   int modifier_count = 0;
   if (whandle->modifier != DRM_FORMAT_MOD_INVALID) {
      modifier = whandle->modifier;
      modifier_count = 1;
   }
   return resource_create(pscreen, templ, whandle, usage, &modifier, modifier_count);
#else
   return NULL;
#endif
}

static bool
invalidate_buffer(struct zink_context *ctx, struct zink_resource *res)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);

   assert(res->base.b.target == PIPE_BUFFER);

   if (res->base.b.flags & PIPE_RESOURCE_FLAG_SPARSE)
      return false;

   if (res->valid_buffer_range.start > res->valid_buffer_range.end)
      return false;

   if (res->so_valid)
      ctx->dirty_so_targets = true;
   /* force counter buffer reset */
   res->so_valid = false;

   util_range_set_empty(&res->valid_buffer_range);
   if (!zink_resource_has_usage(res))
      return false;

   struct zink_resource_object *old_obj = res->obj;
   struct zink_resource_object *new_obj = resource_object_create(screen, &res->base.b, NULL, NULL, NULL, 0);
   if (!new_obj) {
      debug_printf("new backing resource alloc failed!");
      return false;
   }
   /* this ref must be transferred before rebind or else BOOM */
   zink_batch_reference_resource_move(&ctx->batch, res);
   res->obj = new_obj;
   zink_resource_rebind(ctx, res);
   zink_descriptor_set_refs_clear(&old_obj->desc_set_refs, old_obj);
   return true;
}


static void
zink_resource_invalidate(struct pipe_context *pctx, struct pipe_resource *pres)
{
   if (pres->target == PIPE_BUFFER)
      invalidate_buffer(zink_context(pctx), zink_resource(pres));
}

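/* copies between an image and a staging buffer (in either direction); falls
 * back to u_blitter when the destination image was created without
 * VK_IMAGE_USAGE_TRANSFER_DST_BIT */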
static void
zink_transfer_copy_bufimage(struct zink_context *ctx,
                            struct zink_resource *dst,
                            struct zink_resource *src,
                            struct zink_transfer *trans)
{
   assert((trans->base.b.usage & (PIPE_MAP_DEPTH_ONLY | PIPE_MAP_STENCIL_ONLY)) !=
          (PIPE_MAP_DEPTH_ONLY | PIPE_MAP_STENCIL_ONLY));

   bool buf2img = src->base.b.target == PIPE_BUFFER;

   struct pipe_box box = trans->base.b.box;
   int x = box.x;
   if (buf2img)
      box.x = trans->offset;

   if (dst->obj->transfer_dst)
      zink_copy_image_buffer(ctx, dst, src, trans->base.b.level, buf2img ? x : 0,
                             box.y, box.z, trans->base.b.level, &box, trans->base.b.usage);
   else
      util_blitter_copy_texture(ctx->blitter, &dst->base.b, trans->base.b.level,
                                x, box.y, box.z, &src->base.b,
                                0, &box);
}

ALWAYS_INLINE static void
align_offset_size(const VkDeviceSize alignment, VkDeviceSize *offset, VkDeviceSize *size, VkDeviceSize obj_size)
{
   VkDeviceSize align = *offset % alignment;
   if (alignment - 1 > *offset)
      *offset = 0;
   else
      *offset -= align, *size += align;
   align = alignment - (*size % alignment);
   if (*offset + *size + align > obj_size)
      *size = obj_size - *offset;
   else
      *size += align;
}

VkMappedMemoryRange
zink_resource_init_mem_range(struct zink_screen *screen, struct zink_resource_object *obj, VkDeviceSize offset, VkDeviceSize size)
{
   assert(obj->size);
   align_offset_size(screen->info.props.limits.nonCoherentAtomSize, &offset, &size, obj->size);
   VkMappedMemoryRange range = {
      VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
      NULL,
      zink_bo_get_mem(obj->bo),
      offset,
      size
   };
   assert(range.size);
   return range;
}

static void *
map_resource(struct zink_screen *screen, struct zink_resource *res)
{
   assert(res->obj->host_visible);
   return zink_bo_map(screen, res->obj->bo);
}

static void
unmap_resource(struct zink_screen *screen, struct zink_resource *res)
{
   zink_bo_unmap(screen, res->obj->bo);
}

static struct zink_transfer *
create_transfer(struct zink_context *ctx, struct pipe_resource *pres, unsigned usage, const struct pipe_box *box)
{
   struct zink_transfer *trans;

   if (usage & PIPE_MAP_THREAD_SAFE)
      trans = malloc(sizeof(*trans));
   else if (usage & TC_TRANSFER_MAP_THREADED_UNSYNC)
      trans = slab_alloc(&ctx->transfer_pool_unsync);
   else
      trans = slab_alloc(&ctx->transfer_pool);
   if (!trans)
      return NULL;

   memset(trans, 0, sizeof(*trans));
   pipe_resource_reference(&trans->base.b.resource, pres);

   trans->base.b.usage = usage;
   trans->base.b.box = *box;
   return trans;
}

static void
destroy_transfer(struct zink_context *ctx, struct zink_transfer *trans)
{
   if (trans->base.b.usage & PIPE_MAP_THREAD_SAFE) {
      free(trans);
   } else {
      /* Don't use pool_transfers_unsync. We are always in the driver
       * thread. Freeing an object into a different pool is allowed.
       */
      slab_free(&ctx->transfer_pool, trans);
   }
}

static void *
zink_buffer_map(struct pipe_context *pctx,
                struct pipe_resource *pres,
                unsigned level,
                unsigned usage,
                const struct pipe_box *box,
                struct pipe_transfer **transfer)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_screen *screen = zink_screen(pctx->screen);
   struct zink_resource *res = zink_resource(pres);
   struct zink_transfer *trans = create_transfer(ctx, pres, usage, box);
   if (!trans)
      return NULL;

   void *ptr = NULL;

   if (res->base.is_user_ptr)
      usage |= PIPE_MAP_PERSISTENT;

   /* See if the buffer range being mapped has never been initialized,
    * in which case it can be mapped unsynchronized. */
   if (!(usage & (PIPE_MAP_UNSYNCHRONIZED | TC_TRANSFER_MAP_NO_INFER_UNSYNCHRONIZED)) &&
       usage & PIPE_MAP_WRITE && !res->base.is_shared &&
       !util_ranges_intersect(&res->valid_buffer_range, box->x, box->x + box->width)) {
      usage |= PIPE_MAP_UNSYNCHRONIZED;
   }

   /* If discarding the entire range, discard the whole resource instead. */
   if (usage & PIPE_MAP_DISCARD_RANGE && box->x == 0 && box->width == res->base.b.width0) {
      usage |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
   }

   /* If a buffer in VRAM is too large and the range is discarded, don't
    * map it directly. This makes sure that the buffer stays in VRAM.
    */
   bool force_discard_range = false;
   if (usage & (PIPE_MAP_DISCARD_WHOLE_RESOURCE | PIPE_MAP_DISCARD_RANGE) &&
       !(usage & PIPE_MAP_PERSISTENT) &&
       res->base.b.flags & PIPE_RESOURCE_FLAG_DONT_MAP_DIRECTLY) {
      usage &= ~(PIPE_MAP_DISCARD_WHOLE_RESOURCE | PIPE_MAP_UNSYNCHRONIZED);
      usage |= PIPE_MAP_DISCARD_RANGE;
      force_discard_range = true;
   }

   if (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE &&
       !(usage & (PIPE_MAP_UNSYNCHRONIZED | TC_TRANSFER_MAP_NO_INVALIDATE))) {
      assert(usage & PIPE_MAP_WRITE);

      if (invalidate_buffer(ctx, res)) {
         /* At this point, the buffer is always idle. */
         usage |= PIPE_MAP_UNSYNCHRONIZED;
      } else {
         /* Fall back to a temporary buffer. */
         usage |= PIPE_MAP_DISCARD_RANGE;
      }
   }

   if (usage & PIPE_MAP_DISCARD_RANGE &&
       (!res->obj->host_visible ||
        !(usage & (PIPE_MAP_UNSYNCHRONIZED | PIPE_MAP_PERSISTENT)))) {

      /* Check if mapping this buffer would cause waiting for the GPU.
       */

      if (!res->obj->host_visible || force_discard_range ||
          !zink_resource_usage_check_completion(screen, res, ZINK_RESOURCE_ACCESS_RW)) {
         /* Do a wait-free write-only transfer using a temporary buffer. */
         unsigned offset;

         /* If we are not called from the driver thread, we have
          * to use the uploader from u_threaded_context, which is
          * local to the calling thread.
          */
         struct u_upload_mgr *mgr;
         if (usage & TC_TRANSFER_MAP_THREADED_UNSYNC)
            mgr = ctx->tc->base.stream_uploader;
         else
            mgr = ctx->base.stream_uploader;
         u_upload_alloc(mgr, 0, box->width,
                        screen->info.props.limits.minMemoryMapAlignment, &offset,
                        (struct pipe_resource **)&trans->staging_res, (void **)&ptr);
         res = zink_resource(trans->staging_res);
         trans->offset = offset;
         usage |= PIPE_MAP_UNSYNCHRONIZED;
         ptr = ((uint8_t *)ptr);
      } else {
         /* At this point, the buffer is always idle (we checked it above). */
         usage |= PIPE_MAP_UNSYNCHRONIZED;
      }
   } else if (usage & PIPE_MAP_DONTBLOCK) {
      /* sparse/device-local will always need to wait since it has to copy */
      if (!res->obj->host_visible)
         goto success;
      if (!zink_resource_usage_check_completion(screen, res, ZINK_RESOURCE_ACCESS_WRITE))
         goto success;
      usage |= PIPE_MAP_UNSYNCHRONIZED;
   } else if (!(usage & PIPE_MAP_UNSYNCHRONIZED) &&
              (((usage & PIPE_MAP_READ) && !(usage & PIPE_MAP_PERSISTENT) && res->base.b.usage != PIPE_USAGE_STAGING) || !res->obj->host_visible)) {
      assert(!(usage & (TC_TRANSFER_MAP_THREADED_UNSYNC | PIPE_MAP_THREAD_SAFE)));
      if (!res->obj->host_visible || !(usage & PIPE_MAP_ONCE)) {
         trans->offset = box->x % screen->info.props.limits.minMemoryMapAlignment;
         trans->staging_res = pipe_buffer_create(&screen->base, PIPE_BIND_LINEAR, PIPE_USAGE_STAGING, box->width + trans->offset);
         if (!trans->staging_res)
            goto fail;
         struct zink_resource *staging_res = zink_resource(trans->staging_res);
         zink_copy_buffer(ctx, staging_res, res, trans->offset, box->x, box->width);
         res = staging_res;
         usage &= ~PIPE_MAP_UNSYNCHRONIZED;
         ptr = map_resource(screen, res);
         ptr = ((uint8_t *)ptr) + trans->offset;
      }
   }

   if (!(usage & PIPE_MAP_UNSYNCHRONIZED)) {
      if (usage & PIPE_MAP_WRITE)
         zink_resource_usage_wait(ctx, res, ZINK_RESOURCE_ACCESS_RW);
      else
         zink_resource_usage_wait(ctx, res, ZINK_RESOURCE_ACCESS_WRITE);
      res->obj->access = 0;
      res->obj->access_stage = 0;
   }

   if (!ptr) {
      /* if writing to a streamout buffer, ensure synchronization next time it's used */
      if (usage & PIPE_MAP_WRITE && res->so_valid) {
         ctx->dirty_so_targets = true;
         /* force counter buffer reset */
         res->so_valid = false;
      }
      ptr = map_resource(screen, res);
      if (!ptr)
         goto fail;
      ptr = ((uint8_t *)ptr) + box->x;
   }

   if (!res->obj->coherent
#if defined(MVK_VERSION)
      // Workaround for a MoltenVK limitation specifically on coherent memory:
      // MoltenVK returns blank memory ranges when there should be data present.
      // This is a known limitation of MoltenVK.
      // See https://github.com/KhronosGroup/MoltenVK/blob/master/Docs/MoltenVK_Runtime_UserGuide.md#known-moltenvk-limitations
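      // hence the check below also forces manual invalidation whenever MoltenVK is in use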

       || screen->instance_info.have_MVK_moltenvk
#endif
      ) {
      VkDeviceSize size = box->width;
      VkDeviceSize offset = res->obj->offset + trans->offset;
      VkMappedMemoryRange range = zink_resource_init_mem_range(screen, res->obj, offset, size);
      if (VKSCR(InvalidateMappedMemoryRanges)(screen->dev, 1, &range) != VK_SUCCESS) {
         zink_bo_unmap(screen, res->obj->bo);
         goto fail;
      }
   }
   trans->base.b.usage = usage;
   if (usage & PIPE_MAP_WRITE)
      util_range_add(&res->base.b, &res->valid_buffer_range, box->x, box->x + box->width);
   if ((usage & PIPE_MAP_PERSISTENT) && !(usage & PIPE_MAP_COHERENT))
      res->obj->persistent_maps++;

success:
   *transfer = &trans->base.b;
   return ptr;

fail:
   destroy_transfer(ctx, trans);
   return NULL;
}

static void *
zink_image_map(struct pipe_context *pctx,
               struct pipe_resource *pres,
               unsigned level,
               unsigned usage,
               const struct pipe_box *box,
               struct pipe_transfer **transfer)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_screen *screen = zink_screen(pctx->screen);
   struct zink_resource *res = zink_resource(pres);
   struct zink_transfer *trans = create_transfer(ctx, pres, usage, box);
   if (!trans)
      return NULL;

   trans->base.b.level = level;

   void *ptr;
   if (usage & PIPE_MAP_WRITE && !(usage & PIPE_MAP_READ))
      /* this is like a blit, so we can potentially dump some clears or maybe we have to */
      zink_fb_clears_apply_or_discard(ctx, pres, zink_rect_from_box(box), false);
   else if (usage & PIPE_MAP_READ)
      /* if the map region intersects with any clears then we have to apply them */
      zink_fb_clears_apply_region(ctx, pres, zink_rect_from_box(box));
   if (res->optimal_tiling || !res->obj->host_visible) {
      enum pipe_format format = pres->format;
      if (usage & PIPE_MAP_DEPTH_ONLY)
         format = util_format_get_depth_only(pres->format);
      else if (usage & PIPE_MAP_STENCIL_ONLY)
         format = PIPE_FORMAT_S8_UINT;
      trans->base.b.stride = util_format_get_stride(format, box->width);
      trans->base.b.layer_stride = util_format_get_2d_size(format,
                                                           trans->base.b.stride,
                                                           box->height);

      struct pipe_resource templ = *pres;
      templ.format = format;
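      /* readbacks want cached host memory (PIPE_USAGE_STAGING); write-only
       * uploads can use write-combined stream memory instead */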
      templ.usage = usage & PIPE_MAP_READ ? PIPE_USAGE_STAGING : PIPE_USAGE_STREAM;
      templ.target = PIPE_BUFFER;
      templ.bind = PIPE_BIND_LINEAR;
      templ.width0 = trans->base.b.layer_stride * box->depth;
      templ.height0 = templ.depth0 = 0;
      templ.last_level = 0;
      templ.array_size = 1;
      templ.flags = 0;

      trans->staging_res = zink_resource_create(pctx->screen, &templ);
      if (!trans->staging_res)
         goto fail;

      struct zink_resource *staging_res = zink_resource(trans->staging_res);

      if (usage & PIPE_MAP_READ) {
         /* force multi-context sync */
         if (zink_resource_usage_is_unflushed_write(res))
            zink_resource_usage_wait(ctx, res, ZINK_RESOURCE_ACCESS_WRITE);
         zink_transfer_copy_bufimage(ctx, staging_res, res, trans);
         /* need to wait for rendering to finish */
         zink_fence_wait(pctx);
      }

      ptr = map_resource(screen, staging_res);
   } else {
      assert(!res->optimal_tiling);
      ptr = map_resource(screen, res);
      if (!ptr)
         goto fail;
      if (zink_resource_has_usage(res)) {
         if (usage & PIPE_MAP_WRITE)
            zink_fence_wait(pctx);
         else
            zink_resource_usage_wait(ctx, res, ZINK_RESOURCE_ACCESS_WRITE);
      }
      VkImageSubresource isr = {
         res->obj->modifier_aspect ? res->obj->modifier_aspect : res->aspect,
         level,
         0
      };
      VkSubresourceLayout srl;
      VKSCR(GetImageSubresourceLayout)(screen->dev, res->obj->image, &isr, &srl);
      trans->base.b.stride = srl.rowPitch;
      if (res->base.b.target == PIPE_TEXTURE_3D)
         trans->base.b.layer_stride = srl.depthPitch;
      else
         trans->base.b.layer_stride = srl.arrayPitch;
      trans->offset = srl.offset;
      trans->depthPitch = srl.depthPitch;
      const struct util_format_description *desc = util_format_description(res->base.b.format);
      unsigned offset = srl.offset +
                        box->z * srl.depthPitch +
                        (box->y / desc->block.height) * srl.rowPitch +
                        (box->x / desc->block.width) * (desc->block.bits / 8);
      if (!res->obj->coherent) {
         VkDeviceSize size = (VkDeviceSize)box->width * box->height * desc->block.bits / 8;
         VkMappedMemoryRange range = zink_resource_init_mem_range(screen, res->obj, res->obj->offset + offset, size);
         VKSCR(FlushMappedMemoryRanges)(screen->dev, 1, &range);
      }
      ptr = ((uint8_t *)ptr) + offset;
   }
   if (!ptr)
      goto fail;

   if (sizeof(void*) == 4)
      trans->base.b.usage |= ZINK_MAP_TEMPORARY;
   if ((usage & PIPE_MAP_PERSISTENT) && !(usage & PIPE_MAP_COHERENT))
      res->obj->persistent_maps++;

   *transfer = &trans->base.b;
   return ptr;

fail:
   destroy_transfer(ctx, trans);
   return NULL;
}

static void
zink_transfer_flush_region(struct pipe_context *pctx,
                           struct pipe_transfer *ptrans,
                           const struct pipe_box *box)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_resource *res = zink_resource(ptrans->resource);
   struct zink_transfer *trans = (struct zink_transfer *)ptrans;

   if (trans->base.b.usage & PIPE_MAP_WRITE) {
      struct zink_screen *screen = zink_screen(pctx->screen);
      struct zink_resource *m = trans->staging_res ? zink_resource(trans->staging_res) : res;
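      /* flush against whichever object was actually mapped: the staging
       * resource when one was used, otherwise the resource itself */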
      ASSERTED VkDeviceSize size, offset;
      if (m->obj->is_buffer) {
         size = box->width;
         offset = trans->offset;
      } else {
         size = (VkDeviceSize)box->width * box->height * util_format_get_blocksize(m->base.b.format);
         offset = trans->offset +
                  box->z * trans->depthPitch +
                  util_format_get_2d_size(m->base.b.format, trans->base.b.stride, box->y) +
                  util_format_get_stride(m->base.b.format, box->x);
         assert(offset + size <= res->obj->size);
      }
      if (!m->obj->coherent) {
         VkMappedMemoryRange range = zink_resource_init_mem_range(screen, m->obj, m->obj->offset, m->obj->size);
         VKSCR(FlushMappedMemoryRanges)(screen->dev, 1, &range);
      }
      if (trans->staging_res) {
         struct zink_resource *staging_res = zink_resource(trans->staging_res);

         if (ptrans->resource->target == PIPE_BUFFER)
            zink_copy_buffer(ctx, res, staging_res, box->x, offset, box->width);
         else
            zink_transfer_copy_bufimage(ctx, res, staging_res, trans);
      }
   }
}

static void
transfer_unmap(struct pipe_context *pctx, struct pipe_transfer *ptrans)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_resource *res = zink_resource(ptrans->resource);
   struct zink_transfer *trans = (struct zink_transfer *)ptrans;

   if (!(trans->base.b.usage & (PIPE_MAP_FLUSH_EXPLICIT | PIPE_MAP_COHERENT))) {
      zink_transfer_flush_region(pctx, ptrans, &ptrans->box);
   }

   if ((trans->base.b.usage & PIPE_MAP_PERSISTENT) && !(trans->base.b.usage & PIPE_MAP_COHERENT))
      res->obj->persistent_maps--;

   if (trans->staging_res)
      pipe_resource_reference(&trans->staging_res, NULL);
   pipe_resource_reference(&trans->base.b.resource, NULL);

   destroy_transfer(ctx, trans);
}

static void
do_transfer_unmap(struct zink_screen *screen, struct zink_transfer *trans)
{
   struct zink_resource *res = zink_resource(trans->staging_res);
   if (!res)
      res = zink_resource(trans->base.b.resource);
   unmap_resource(screen, res);
}

static void
zink_buffer_unmap(struct pipe_context *pctx, struct pipe_transfer *ptrans)
{
   struct zink_screen *screen = zink_screen(pctx->screen);
   struct zink_transfer *trans = (struct zink_transfer *)ptrans;
   if (trans->base.b.usage & PIPE_MAP_ONCE && !trans->staging_res)
      do_transfer_unmap(screen, trans);
   transfer_unmap(pctx, ptrans);
}

static void
zink_image_unmap(struct pipe_context *pctx, struct pipe_transfer *ptrans)
{
   struct zink_screen *screen = zink_screen(pctx->screen);
   struct zink_transfer *trans = (struct zink_transfer *)ptrans;
   if (sizeof(void*) == 4)
      do_transfer_unmap(screen, trans);
   transfer_unmap(pctx, ptrans);
}

static void
zink_buffer_subdata(struct pipe_context *ctx, struct pipe_resource *buffer,
                    unsigned usage, unsigned offset, unsigned size, const void *data)
{
   struct pipe_transfer *transfer = NULL;
   struct pipe_box box;
   uint8_t *map = NULL;

   usage |= PIPE_MAP_WRITE;

   if (!(usage & PIPE_MAP_DIRECTLY))
      usage |= PIPE_MAP_DISCARD_RANGE;

   u_box_1d(offset, size, &box);
   map = zink_buffer_map(ctx, buffer, 0, usage, &box, &transfer);
   if (!map)
      return;

   memcpy(map, data, size);
   zink_buffer_unmap(ctx, transfer);
}

static struct pipe_resource *
zink_resource_get_separate_stencil(struct pipe_resource *pres)
{
   /* For packed depth-stencil, we treat depth as the primary resource
    * and store S8 as the "second plane" resource.
    */
   if (pres->next && pres->next->format == PIPE_FORMAT_S8_UINT)
      return pres->next;

   return NULL;
}

VkBuffer
zink_resource_tmp_buffer(struct zink_screen *screen, struct zink_resource *res, unsigned offset_add, unsigned add_binds, unsigned *offset_out)
{
   VkBufferCreateInfo bci = create_bci(screen, &res->base.b, res->base.b.bind | add_binds);
   VkDeviceSize size = bci.size - offset_add;
   VkDeviceSize offset = offset_add;
   if (offset_add) {
      assert(bci.size > offset_add);

      align_offset_size(res->obj->alignment, &offset, &size, bci.size);
   }
   bci.size = size;

   VkBuffer buffer;
   if (VKSCR(CreateBuffer)(screen->dev, &bci, NULL, &buffer) != VK_SUCCESS)
      return VK_NULL_HANDLE;
   VKSCR(BindBufferMemory)(screen->dev, buffer, zink_bo_get_mem(res->obj->bo), res->obj->offset + offset);
   if (offset_out)
      *offset_out = offset_add - offset;
   return buffer;
}

bool
zink_resource_object_init_storage(struct zink_context *ctx, struct zink_resource *res)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   /* base resource already has the cap */
   if (res->base.b.bind & PIPE_BIND_SHADER_IMAGE)
      return true;
   if (res->obj->is_buffer) {
      if (res->base.b.bind & PIPE_BIND_SHADER_IMAGE)
         return true;

      VkBuffer buffer = zink_resource_tmp_buffer(screen, res, 0, PIPE_BIND_SHADER_IMAGE, NULL);
      if (!buffer)
         return false;
      util_dynarray_append(&res->obj->tmp, VkBuffer, res->obj->buffer);
      res->obj->buffer = buffer;
      res->base.b.bind |= PIPE_BIND_SHADER_IMAGE;
   } else {
      zink_fb_clears_apply_region(ctx, &res->base.b, (struct u_rect){0, res->base.b.width0, 0, res->base.b.height0});
      zink_resource_image_barrier(ctx, res, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, 0, 0);
      res->base.b.bind |= PIPE_BIND_SHADER_IMAGE;
      struct zink_resource_object *old_obj = res->obj;
      struct zink_resource_object *new_obj = resource_object_create(screen, &res->base.b, NULL, &res->optimal_tiling, res->modifiers, res->modifiers_count);
      if (!new_obj) {
         debug_printf("new backing resource alloc failed!");
         res->base.b.bind &= ~PIPE_BIND_SHADER_IMAGE;
         return false;
      }
      struct zink_resource staging = *res;
      staging.obj = old_obj;
      bool needs_unref = true;
      if (zink_resource_has_usage(res)) {
         zink_batch_reference_resource_move(&ctx->batch, res);
         needs_unref = false;
      }
      res->obj = new_obj;
      zink_descriptor_set_refs_clear(&old_obj->desc_set_refs, old_obj);
      for (unsigned i = 0; i <= res->base.b.last_level; i++) {
         struct pipe_box box = {0, 0, 0,
                                u_minify(res->base.b.width0, i),
                                u_minify(res->base.b.height0, i), res->base.b.array_size};
         box.depth = util_num_layers(&res->base.b, i);
         ctx->base.resource_copy_region(&ctx->base, &res->base.b, i, 0, 0, 0, &staging.base.b, i, &box);
      }
      if (needs_unref)
         zink_resource_object_reference(screen, &old_obj, NULL);
   }

   zink_resource_rebind(ctx, res);

   return true;
}

void
zink_resource_setup_transfer_layouts(struct zink_context *ctx, struct zink_resource *src, struct zink_resource *dst)
{
   if (src == dst) {
      /* The Vulkan 1.1 specification says the following about valid usage
       * of vkCmdBlitImage:
       *
       * "srcImageLayout must be VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR,
       * VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL or VK_IMAGE_LAYOUT_GENERAL"
       *
       * and:
       *
       * "dstImageLayout must be VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR,
       * VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL or VK_IMAGE_LAYOUT_GENERAL"
       *
       * Since we can't have the same image in two states at the same time,
       * we're effectively left with VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR or
       * VK_IMAGE_LAYOUT_GENERAL. And since this isn't a present-related
       * operation, VK_IMAGE_LAYOUT_GENERAL seems most appropriate.
       */
      zink_resource_image_barrier(ctx, src,
                                  VK_IMAGE_LAYOUT_GENERAL,
                                  VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT,
                                  VK_PIPELINE_STAGE_TRANSFER_BIT);
   } else {
      zink_resource_image_barrier(ctx, src,
                                  VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                  VK_ACCESS_TRANSFER_READ_BIT,
                                  VK_PIPELINE_STAGE_TRANSFER_BIT);

      zink_resource_image_barrier(ctx, dst,
                                  VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                  VK_ACCESS_TRANSFER_WRITE_BIT,
                                  VK_PIPELINE_STAGE_TRANSFER_BIT);
   }
}

void
zink_get_depth_stencil_resources(struct pipe_resource *res,
                                 struct zink_resource **out_z,
                                 struct zink_resource **out_s)
{
   if (!res) {
      if (out_z) *out_z = NULL;
      if (out_s) *out_s = NULL;
      return;
   }

   if (res->format != PIPE_FORMAT_S8_UINT) {
      if (out_z) *out_z = zink_resource(res);
      if (out_s) *out_s = zink_resource(zink_resource_get_separate_stencil(res));
   } else {
      if (out_z) *out_z = NULL;
      if (out_s) *out_s = zink_resource(res);
   }
}

static void
zink_resource_set_separate_stencil(struct pipe_resource *pres,
                                   struct pipe_resource *stencil)
{
   assert(util_format_has_depth(util_format_description(pres->format)));
   pipe_resource_reference(&pres->next, stencil);
}

static enum pipe_format
zink_resource_get_internal_format(struct pipe_resource *pres)
{
   struct zink_resource *res = zink_resource(pres);
   return res->internal_format;
}

static const struct u_transfer_vtbl transfer_vtbl = {
   .resource_create = zink_resource_create,
   .resource_destroy = zink_resource_destroy,
   .transfer_map = zink_image_map,
   .transfer_unmap = zink_image_unmap,
   .transfer_flush_region = zink_transfer_flush_region,
   .get_internal_format = zink_resource_get_internal_format,
   .set_stencil = zink_resource_set_separate_stencil,
   .get_stencil = zink_resource_get_separate_stencil,
};

bool
zink_screen_resource_init(struct pipe_screen *pscreen)
{
   struct zink_screen *screen = zink_screen(pscreen);
   pscreen->resource_create = zink_resource_create;
   pscreen->resource_create_with_modifiers = zink_resource_create_with_modifiers;
   pscreen->resource_destroy = zink_resource_destroy;
   pscreen->transfer_helper = u_transfer_helper_create(&transfer_vtbl, true, true, false, false);

   if (screen->info.have_KHR_external_memory_fd) {
      pscreen->resource_get_handle = zink_resource_get_handle;
      pscreen->resource_from_handle = zink_resource_from_handle;
   }
   pscreen->resource_get_param = zink_resource_get_param;
   return true;
}

void
zink_context_resource_init(struct pipe_context *pctx)
{
   pctx->buffer_map = zink_buffer_map;
   pctx->buffer_unmap = zink_buffer_unmap;
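   /* image maps go through u_transfer_helper so that packed depth/stencil
    * formats can be split into the separate depth and stencil resources
    * zink uses internally */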
   pctx->texture_map = u_transfer_helper_deinterleave_transfer_map;
   pctx->texture_unmap = u_transfer_helper_deinterleave_transfer_unmap;

   pctx->transfer_flush_region = u_transfer_helper_transfer_flush_region;
   pctx->buffer_subdata = zink_buffer_subdata;
   pctx->texture_subdata = u_default_texture_subdata;
   pctx->invalidate_resource = zink_resource_invalidate;
}