17ec681f3Smrg/* 27ec681f3Smrg * Copyright 2018 Collabora Ltd. 37ec681f3Smrg * 47ec681f3Smrg * Permission is hereby granted, free of charge, to any person obtaining a 57ec681f3Smrg * copy of this software and associated documentation files (the "Software"), 67ec681f3Smrg * to deal in the Software without restriction, including without limitation 77ec681f3Smrg * on the rights to use, copy, modify, merge, publish, distribute, sub 87ec681f3Smrg * license, and/or sell copies of the Software, and to permit persons to whom 97ec681f3Smrg * the Software is furnished to do so, subject to the following conditions: 107ec681f3Smrg * 117ec681f3Smrg * The above copyright notice and this permission notice (including the next 127ec681f3Smrg * paragraph) shall be included in all copies or substantial portions of the 137ec681f3Smrg * Software. 147ec681f3Smrg * 157ec681f3Smrg * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 167ec681f3Smrg * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 177ec681f3Smrg * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 187ec681f3Smrg * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, 197ec681f3Smrg * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 207ec681f3Smrg * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 217ec681f3Smrg * USE OR OTHER DEALINGS IN THE SOFTWARE. 
227ec681f3Smrg */ 237ec681f3Smrg 247ec681f3Smrg#include "zink_context.h" 257ec681f3Smrg 267ec681f3Smrg#include "zink_batch.h" 277ec681f3Smrg#include "zink_compiler.h" 287ec681f3Smrg#include "zink_fence.h" 297ec681f3Smrg#include "zink_format.h" 307ec681f3Smrg#include "zink_framebuffer.h" 317ec681f3Smrg#include "zink_helpers.h" 327ec681f3Smrg#include "zink_program.h" 337ec681f3Smrg#include "zink_pipeline.h" 347ec681f3Smrg#include "zink_query.h" 357ec681f3Smrg#include "zink_render_pass.h" 367ec681f3Smrg#include "zink_resource.h" 377ec681f3Smrg#include "zink_screen.h" 387ec681f3Smrg#include "zink_state.h" 397ec681f3Smrg#include "zink_surface.h" 407ec681f3Smrg#include "zink_inlines.h" 417ec681f3Smrg 427ec681f3Smrg#include "util/u_blitter.h" 437ec681f3Smrg#include "util/u_debug.h" 447ec681f3Smrg#include "util/format_srgb.h" 457ec681f3Smrg#include "util/format/u_format.h" 467ec681f3Smrg#include "util/u_helpers.h" 477ec681f3Smrg#include "util/u_inlines.h" 487ec681f3Smrg#include "util/u_thread.h" 497ec681f3Smrg#include "util/u_cpu_detect.h" 507ec681f3Smrg#include "util/strndup.h" 517ec681f3Smrg#include "nir.h" 527ec681f3Smrg 537ec681f3Smrg#include "util/u_memory.h" 547ec681f3Smrg#include "util/u_upload_mgr.h" 557ec681f3Smrg 567ec681f3Smrg#define XXH_INLINE_ALL 577ec681f3Smrg#include "util/xxhash.h" 587ec681f3Smrg 597ec681f3Smrgstatic void 607ec681f3Smrgcalc_descriptor_hash_sampler_state(struct zink_sampler_state *sampler_state) 617ec681f3Smrg{ 627ec681f3Smrg void *hash_data = &sampler_state->sampler; 637ec681f3Smrg size_t data_size = sizeof(VkSampler); 647ec681f3Smrg sampler_state->hash = XXH32(hash_data, data_size, 0); 657ec681f3Smrg} 667ec681f3Smrg 677ec681f3Smrgvoid 687ec681f3Smrgdebug_describe_zink_buffer_view(char *buf, const struct zink_buffer_view *ptr) 697ec681f3Smrg{ 707ec681f3Smrg sprintf(buf, "zink_buffer_view"); 717ec681f3Smrg} 727ec681f3Smrg 737ec681f3SmrgALWAYS_INLINE static void 747ec681f3Smrgcheck_resource_for_batch_ref(struct zink_context *ctx, struct 
zink_resource *res) 757ec681f3Smrg{ 767ec681f3Smrg if (!zink_resource_has_binds(res)) 777ec681f3Smrg zink_batch_reference_resource(&ctx->batch, res); 787ec681f3Smrg} 797ec681f3Smrg 807ec681f3Smrgstatic void 817ec681f3Smrgzink_context_destroy(struct pipe_context *pctx) 827ec681f3Smrg{ 837ec681f3Smrg struct zink_context *ctx = zink_context(pctx); 847ec681f3Smrg struct zink_screen *screen = zink_screen(pctx->screen); 857ec681f3Smrg 867ec681f3Smrg if (util_queue_is_initialized(&screen->flush_queue)) 877ec681f3Smrg util_queue_finish(&screen->flush_queue); 887ec681f3Smrg if (screen->queue && !screen->device_lost && VKSCR(QueueWaitIdle)(screen->queue) != VK_SUCCESS) 897ec681f3Smrg debug_printf("vkQueueWaitIdle failed\n"); 907ec681f3Smrg 917ec681f3Smrg util_blitter_destroy(ctx->blitter); 927ec681f3Smrg for (unsigned i = 0; i < ctx->fb_state.nr_cbufs; i++) 937ec681f3Smrg pipe_surface_release(&ctx->base, &ctx->fb_state.cbufs[i]); 947ec681f3Smrg pipe_surface_release(&ctx->base, &ctx->fb_state.zsbuf); 957ec681f3Smrg 967ec681f3Smrg pipe_resource_reference(&ctx->dummy_vertex_buffer, NULL); 977ec681f3Smrg pipe_resource_reference(&ctx->dummy_xfb_buffer, NULL); 987ec681f3Smrg 997ec681f3Smrg for (unsigned i = 0; i < ARRAY_SIZE(ctx->dummy_surface); i++) 1007ec681f3Smrg pipe_surface_release(&ctx->base, &ctx->dummy_surface[i]); 1017ec681f3Smrg zink_buffer_view_reference(screen, &ctx->dummy_bufferview, NULL); 1027ec681f3Smrg 1037ec681f3Smrg if (ctx->dd) 1047ec681f3Smrg zink_descriptors_deinit_bindless(ctx); 1057ec681f3Smrg 1067ec681f3Smrg simple_mtx_destroy(&ctx->batch_mtx); 1077ec681f3Smrg if (ctx->batch.state) { 1087ec681f3Smrg zink_clear_batch_state(ctx, ctx->batch.state); 1097ec681f3Smrg zink_batch_state_destroy(screen, ctx->batch.state); 1107ec681f3Smrg } 1117ec681f3Smrg struct zink_batch_state *bs = ctx->batch_states; 1127ec681f3Smrg while (bs) { 1137ec681f3Smrg struct zink_batch_state *bs_next = bs->next; 1147ec681f3Smrg zink_clear_batch_state(ctx, bs); 1157ec681f3Smrg 
zink_batch_state_destroy(screen, bs); 1167ec681f3Smrg bs = bs_next; 1177ec681f3Smrg } 1187ec681f3Smrg util_dynarray_foreach(&ctx->free_batch_states, struct zink_batch_state*, bs) { 1197ec681f3Smrg zink_clear_batch_state(ctx, *bs); 1207ec681f3Smrg zink_batch_state_destroy(screen, *bs); 1217ec681f3Smrg } 1227ec681f3Smrg 1237ec681f3Smrg for (unsigned i = 0; i < 2; i++) { 1247ec681f3Smrg util_idalloc_fini(&ctx->di.bindless[i].tex_slots); 1257ec681f3Smrg util_idalloc_fini(&ctx->di.bindless[i].img_slots); 1267ec681f3Smrg free(ctx->di.bindless[i].buffer_infos); 1277ec681f3Smrg free(ctx->di.bindless[i].img_infos); 1287ec681f3Smrg util_dynarray_fini(&ctx->di.bindless[i].updates); 1297ec681f3Smrg util_dynarray_fini(&ctx->di.bindless[i].resident); 1307ec681f3Smrg } 1317ec681f3Smrg 1327ec681f3Smrg if (screen->info.have_KHR_imageless_framebuffer) { 1337ec681f3Smrg hash_table_foreach(&ctx->framebuffer_cache, he) 1347ec681f3Smrg zink_destroy_framebuffer(screen, he->data); 1357ec681f3Smrg } else if (ctx->framebuffer) { 1367ec681f3Smrg simple_mtx_lock(&screen->framebuffer_mtx); 1377ec681f3Smrg struct hash_entry *entry = _mesa_hash_table_search(&screen->framebuffer_cache, &ctx->framebuffer->state); 1387ec681f3Smrg if (zink_framebuffer_reference(screen, &ctx->framebuffer, NULL)) 1397ec681f3Smrg _mesa_hash_table_remove(&screen->framebuffer_cache, entry); 1407ec681f3Smrg simple_mtx_unlock(&screen->framebuffer_mtx); 1417ec681f3Smrg } 1427ec681f3Smrg 1437ec681f3Smrg hash_table_foreach(ctx->render_pass_cache, he) 1447ec681f3Smrg zink_destroy_render_pass(screen, he->data); 1457ec681f3Smrg 1467ec681f3Smrg u_upload_destroy(pctx->stream_uploader); 1477ec681f3Smrg u_upload_destroy(pctx->const_uploader); 1487ec681f3Smrg slab_destroy_child(&ctx->transfer_pool); 1497ec681f3Smrg for (unsigned i = 0; i < ARRAY_SIZE(ctx->program_cache); i++) 1507ec681f3Smrg _mesa_hash_table_clear(&ctx->program_cache[i], NULL); 1517ec681f3Smrg _mesa_hash_table_clear(&ctx->compute_program_cache, NULL); 1527ec681f3Smrg 
_mesa_hash_table_destroy(ctx->render_pass_cache, NULL); 1537ec681f3Smrg slab_destroy_child(&ctx->transfer_pool_unsync); 1547ec681f3Smrg 1557ec681f3Smrg if (ctx->dd) 1567ec681f3Smrg screen->descriptors_deinit(ctx); 1577ec681f3Smrg 1587ec681f3Smrg zink_descriptor_layouts_deinit(ctx); 1597ec681f3Smrg 1607ec681f3Smrg p_atomic_dec(&screen->base.num_contexts); 1617ec681f3Smrg 1627ec681f3Smrg ralloc_free(ctx); 1637ec681f3Smrg} 1647ec681f3Smrg 1657ec681f3Smrgstatic void 1667ec681f3Smrgcheck_device_lost(struct zink_context *ctx) 1677ec681f3Smrg{ 1687ec681f3Smrg if (!zink_screen(ctx->base.screen)->device_lost || ctx->is_device_lost) 1697ec681f3Smrg return; 1707ec681f3Smrg debug_printf("ZINK: device lost detected!\n"); 1717ec681f3Smrg if (ctx->reset.reset) 1727ec681f3Smrg ctx->reset.reset(ctx->reset.data, PIPE_GUILTY_CONTEXT_RESET); 1737ec681f3Smrg ctx->is_device_lost = true; 1747ec681f3Smrg} 1757ec681f3Smrg 1767ec681f3Smrgstatic enum pipe_reset_status 1777ec681f3Smrgzink_get_device_reset_status(struct pipe_context *pctx) 1787ec681f3Smrg{ 1797ec681f3Smrg struct zink_context *ctx = zink_context(pctx); 1807ec681f3Smrg 1817ec681f3Smrg enum pipe_reset_status status = PIPE_NO_RESET; 1827ec681f3Smrg 1837ec681f3Smrg if (ctx->is_device_lost) { 1847ec681f3Smrg // Since we don't know what really happened to the hardware, just 1857ec681f3Smrg // assume that we are in the wrong 1867ec681f3Smrg status = PIPE_GUILTY_CONTEXT_RESET; 1877ec681f3Smrg 1887ec681f3Smrg debug_printf("ZINK: device lost detected!\n"); 1897ec681f3Smrg 1907ec681f3Smrg if (ctx->reset.reset) 1917ec681f3Smrg ctx->reset.reset(ctx->reset.data, status); 1927ec681f3Smrg } 1937ec681f3Smrg 1947ec681f3Smrg return status; 1957ec681f3Smrg} 1967ec681f3Smrg 1977ec681f3Smrgstatic void 1987ec681f3Smrgzink_set_device_reset_callback(struct pipe_context *pctx, 1997ec681f3Smrg const struct pipe_device_reset_callback *cb) 2007ec681f3Smrg{ 2017ec681f3Smrg struct zink_context *ctx = zink_context(pctx); 2027ec681f3Smrg 2037ec681f3Smrg if 
(cb) 2047ec681f3Smrg ctx->reset = *cb; 2057ec681f3Smrg else 2067ec681f3Smrg memset(&ctx->reset, 0, sizeof(ctx->reset)); 2077ec681f3Smrg} 2087ec681f3Smrg 2097ec681f3Smrgstatic void 2107ec681f3Smrgzink_set_context_param(struct pipe_context *pctx, enum pipe_context_param param, 2117ec681f3Smrg unsigned value) 2127ec681f3Smrg{ 2137ec681f3Smrg struct zink_context *ctx = zink_context(pctx); 2147ec681f3Smrg 2157ec681f3Smrg switch (param) { 2167ec681f3Smrg case PIPE_CONTEXT_PARAM_PIN_THREADS_TO_L3_CACHE: 2177ec681f3Smrg util_set_thread_affinity(zink_screen(ctx->base.screen)->flush_queue.threads[0], 2187ec681f3Smrg util_get_cpu_caps()->L3_affinity_mask[value], 2197ec681f3Smrg NULL, util_get_cpu_caps()->num_cpu_mask_bits); 2207ec681f3Smrg break; 2217ec681f3Smrg default: 2227ec681f3Smrg break; 2237ec681f3Smrg } 2247ec681f3Smrg} 2257ec681f3Smrg 2267ec681f3Smrgstatic VkSamplerMipmapMode 2277ec681f3Smrgsampler_mipmap_mode(enum pipe_tex_mipfilter filter) 2287ec681f3Smrg{ 2297ec681f3Smrg switch (filter) { 2307ec681f3Smrg case PIPE_TEX_MIPFILTER_NEAREST: return VK_SAMPLER_MIPMAP_MODE_NEAREST; 2317ec681f3Smrg case PIPE_TEX_MIPFILTER_LINEAR: return VK_SAMPLER_MIPMAP_MODE_LINEAR; 2327ec681f3Smrg case PIPE_TEX_MIPFILTER_NONE: 2337ec681f3Smrg unreachable("PIPE_TEX_MIPFILTER_NONE should be dealt with earlier"); 2347ec681f3Smrg } 2357ec681f3Smrg unreachable("unexpected filter"); 2367ec681f3Smrg} 2377ec681f3Smrg 2387ec681f3Smrgstatic VkSamplerAddressMode 2397ec681f3Smrgsampler_address_mode(enum pipe_tex_wrap filter) 2407ec681f3Smrg{ 2417ec681f3Smrg switch (filter) { 2427ec681f3Smrg case PIPE_TEX_WRAP_REPEAT: return VK_SAMPLER_ADDRESS_MODE_REPEAT; 2437ec681f3Smrg case PIPE_TEX_WRAP_CLAMP_TO_EDGE: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; 2447ec681f3Smrg case PIPE_TEX_WRAP_CLAMP_TO_BORDER: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER; 2457ec681f3Smrg case PIPE_TEX_WRAP_MIRROR_REPEAT: return VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT; 2467ec681f3Smrg case 
PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE; 2477ec681f3Smrg case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE; /* not technically correct, but kinda works */ 2487ec681f3Smrg default: break; 2497ec681f3Smrg } 2507ec681f3Smrg unreachable("unexpected wrap"); 2517ec681f3Smrg} 2527ec681f3Smrg 2537ec681f3Smrgstatic VkCompareOp 2547ec681f3Smrgcompare_op(enum pipe_compare_func op) 2557ec681f3Smrg{ 2567ec681f3Smrg switch (op) { 2577ec681f3Smrg case PIPE_FUNC_NEVER: return VK_COMPARE_OP_NEVER; 2587ec681f3Smrg case PIPE_FUNC_LESS: return VK_COMPARE_OP_LESS; 2597ec681f3Smrg case PIPE_FUNC_EQUAL: return VK_COMPARE_OP_EQUAL; 2607ec681f3Smrg case PIPE_FUNC_LEQUAL: return VK_COMPARE_OP_LESS_OR_EQUAL; 2617ec681f3Smrg case PIPE_FUNC_GREATER: return VK_COMPARE_OP_GREATER; 2627ec681f3Smrg case PIPE_FUNC_NOTEQUAL: return VK_COMPARE_OP_NOT_EQUAL; 2637ec681f3Smrg case PIPE_FUNC_GEQUAL: return VK_COMPARE_OP_GREATER_OR_EQUAL; 2647ec681f3Smrg case PIPE_FUNC_ALWAYS: return VK_COMPARE_OP_ALWAYS; 2657ec681f3Smrg } 2667ec681f3Smrg unreachable("unexpected compare"); 2677ec681f3Smrg} 2687ec681f3Smrg 2697ec681f3Smrgstatic inline bool 2707ec681f3Smrgwrap_needs_border_color(unsigned wrap) 2717ec681f3Smrg{ 2727ec681f3Smrg return wrap == PIPE_TEX_WRAP_CLAMP || wrap == PIPE_TEX_WRAP_CLAMP_TO_BORDER || 2737ec681f3Smrg wrap == PIPE_TEX_WRAP_MIRROR_CLAMP || wrap == PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER; 2747ec681f3Smrg} 2757ec681f3Smrg 2767ec681f3Smrgstatic VkBorderColor 2777ec681f3Smrgget_border_color(const union pipe_color_union *color, bool is_integer, bool need_custom) 2787ec681f3Smrg{ 2797ec681f3Smrg if (is_integer) { 2807ec681f3Smrg if (color->ui[0] == 0 && color->ui[1] == 0 && color->ui[2] == 0 && color->ui[3] == 0) 2817ec681f3Smrg return VK_BORDER_COLOR_INT_TRANSPARENT_BLACK; 2827ec681f3Smrg if (color->ui[0] == 0 && color->ui[1] == 0 && color->ui[2] == 0 && color->ui[3] == 1) 2837ec681f3Smrg return 
VK_BORDER_COLOR_INT_OPAQUE_BLACK; 2847ec681f3Smrg if (color->ui[0] == 1 && color->ui[1] == 1 && color->ui[2] == 1 && color->ui[3] == 1) 2857ec681f3Smrg return VK_BORDER_COLOR_INT_OPAQUE_WHITE; 2867ec681f3Smrg return need_custom ? VK_BORDER_COLOR_INT_CUSTOM_EXT : VK_BORDER_COLOR_INT_TRANSPARENT_BLACK; 2877ec681f3Smrg } 2887ec681f3Smrg 2897ec681f3Smrg if (color->f[0] == 0 && color->f[1] == 0 && color->f[2] == 0 && color->f[3] == 0) 2907ec681f3Smrg return VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK; 2917ec681f3Smrg if (color->f[0] == 0 && color->f[1] == 0 && color->f[2] == 0 && color->f[3] == 1) 2927ec681f3Smrg return VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK; 2937ec681f3Smrg if (color->f[0] == 1 && color->f[1] == 1 && color->f[2] == 1 && color->f[3] == 1) 2947ec681f3Smrg return VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE; 2957ec681f3Smrg return need_custom ? VK_BORDER_COLOR_FLOAT_CUSTOM_EXT : VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK; 2967ec681f3Smrg} 2977ec681f3Smrg 2987ec681f3Smrgstatic void * 2997ec681f3Smrgzink_create_sampler_state(struct pipe_context *pctx, 3007ec681f3Smrg const struct pipe_sampler_state *state) 3017ec681f3Smrg{ 3027ec681f3Smrg struct zink_screen *screen = zink_screen(pctx->screen); 3037ec681f3Smrg bool need_custom = false; 3047ec681f3Smrg 3057ec681f3Smrg VkSamplerCreateInfo sci = {0}; 3067ec681f3Smrg VkSamplerCustomBorderColorCreateInfoEXT cbci = {0}; 3077ec681f3Smrg sci.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO; 3087ec681f3Smrg sci.magFilter = zink_filter(state->mag_img_filter); 3097ec681f3Smrg sci.minFilter = zink_filter(state->min_img_filter); 3107ec681f3Smrg 3117ec681f3Smrg VkSamplerReductionModeCreateInfo rci; 3127ec681f3Smrg rci.sType = VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO; 3137ec681f3Smrg rci.pNext = NULL; 3147ec681f3Smrg switch (state->reduction_mode) { 3157ec681f3Smrg case PIPE_TEX_REDUCTION_MIN: 3167ec681f3Smrg rci.reductionMode = VK_SAMPLER_REDUCTION_MODE_MIN; 3177ec681f3Smrg break; 3187ec681f3Smrg case PIPE_TEX_REDUCTION_MAX: 
3197ec681f3Smrg rci.reductionMode = VK_SAMPLER_REDUCTION_MODE_MAX; 3207ec681f3Smrg break; 3217ec681f3Smrg default: 3227ec681f3Smrg rci.reductionMode = VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE; 3237ec681f3Smrg break; 3247ec681f3Smrg } 3257ec681f3Smrg if (state->reduction_mode) 3267ec681f3Smrg sci.pNext = &rci; 3277ec681f3Smrg 3287ec681f3Smrg if (state->min_mip_filter != PIPE_TEX_MIPFILTER_NONE) { 3297ec681f3Smrg sci.mipmapMode = sampler_mipmap_mode(state->min_mip_filter); 3307ec681f3Smrg sci.minLod = state->min_lod; 3317ec681f3Smrg sci.maxLod = state->max_lod; 3327ec681f3Smrg } else { 3337ec681f3Smrg sci.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST; 3347ec681f3Smrg sci.minLod = 0; 3357ec681f3Smrg sci.maxLod = 0.25f; 3367ec681f3Smrg } 3377ec681f3Smrg 3387ec681f3Smrg sci.addressModeU = sampler_address_mode(state->wrap_s); 3397ec681f3Smrg sci.addressModeV = sampler_address_mode(state->wrap_t); 3407ec681f3Smrg sci.addressModeW = sampler_address_mode(state->wrap_r); 3417ec681f3Smrg sci.mipLodBias = state->lod_bias; 3427ec681f3Smrg 3437ec681f3Smrg need_custom |= wrap_needs_border_color(state->wrap_s); 3447ec681f3Smrg need_custom |= wrap_needs_border_color(state->wrap_t); 3457ec681f3Smrg need_custom |= wrap_needs_border_color(state->wrap_r); 3467ec681f3Smrg 3477ec681f3Smrg if (state->compare_mode == PIPE_TEX_COMPARE_NONE) 3487ec681f3Smrg sci.compareOp = VK_COMPARE_OP_NEVER; 3497ec681f3Smrg else { 3507ec681f3Smrg sci.compareOp = compare_op(state->compare_func); 3517ec681f3Smrg sci.compareEnable = VK_TRUE; 3527ec681f3Smrg } 3537ec681f3Smrg 3547ec681f3Smrg bool is_integer = state->border_color_is_integer; 3557ec681f3Smrg 3567ec681f3Smrg sci.borderColor = get_border_color(&state->border_color, is_integer, need_custom); 3577ec681f3Smrg if (sci.borderColor > VK_BORDER_COLOR_INT_OPAQUE_WHITE && need_custom) { 3587ec681f3Smrg if (screen->info.have_EXT_custom_border_color && 3597ec681f3Smrg screen->info.border_color_feats.customBorderColorWithoutFormat) { 3607ec681f3Smrg 
cbci.sType = VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT; 3617ec681f3Smrg cbci.format = VK_FORMAT_UNDEFINED; 3627ec681f3Smrg /* these are identical unions */ 3637ec681f3Smrg memcpy(&cbci.customBorderColor, &state->border_color, sizeof(union pipe_color_union)); 3647ec681f3Smrg cbci.pNext = sci.pNext; 3657ec681f3Smrg sci.pNext = &cbci; 3667ec681f3Smrg UNUSED uint32_t check = p_atomic_inc_return(&screen->cur_custom_border_color_samplers); 3677ec681f3Smrg assert(check <= screen->info.border_color_props.maxCustomBorderColorSamplers); 3687ec681f3Smrg } else 3697ec681f3Smrg sci.borderColor = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK; // TODO with custom shader if we're super interested? 3707ec681f3Smrg } 3717ec681f3Smrg 3727ec681f3Smrg sci.unnormalizedCoordinates = !state->normalized_coords; 3737ec681f3Smrg 3747ec681f3Smrg if (state->max_anisotropy > 1) { 3757ec681f3Smrg sci.maxAnisotropy = state->max_anisotropy; 3767ec681f3Smrg sci.anisotropyEnable = VK_TRUE; 3777ec681f3Smrg } 3787ec681f3Smrg 3797ec681f3Smrg struct zink_sampler_state *sampler = CALLOC_STRUCT(zink_sampler_state); 3807ec681f3Smrg if (!sampler) 3817ec681f3Smrg return NULL; 3827ec681f3Smrg 3837ec681f3Smrg if (VKSCR(CreateSampler)(screen->dev, &sci, NULL, &sampler->sampler) != VK_SUCCESS) { 3847ec681f3Smrg FREE(sampler); 3857ec681f3Smrg return NULL; 3867ec681f3Smrg } 3877ec681f3Smrg util_dynarray_init(&sampler->desc_set_refs.refs, NULL); 3887ec681f3Smrg calc_descriptor_hash_sampler_state(sampler); 3897ec681f3Smrg sampler->custom_border_color = need_custom; 3907ec681f3Smrg 3917ec681f3Smrg return sampler; 3927ec681f3Smrg} 3937ec681f3Smrg 3947ec681f3SmrgALWAYS_INLINE static VkImageLayout 3957ec681f3Smrgget_layout_for_binding(struct zink_resource *res, enum zink_descriptor_type type, bool is_compute) 3967ec681f3Smrg{ 3977ec681f3Smrg if (res->obj->is_buffer) 3987ec681f3Smrg return 0; 3997ec681f3Smrg switch (type) { 4007ec681f3Smrg case ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW: 4017ec681f3Smrg return 
res->image_bind_count[is_compute] ? 4027ec681f3Smrg VK_IMAGE_LAYOUT_GENERAL : 4037ec681f3Smrg res->aspect & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT) ? 4047ec681f3Smrg //Vulkan-Docs#1490 4057ec681f3Smrg //(res->aspect == VK_IMAGE_ASPECT_DEPTH_BIT ? VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL : 4067ec681f3Smrg //res->aspect == VK_IMAGE_ASPECT_STENCIL_BIT ? VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL : 4077ec681f3Smrg (res->aspect == VK_IMAGE_ASPECT_DEPTH_BIT ? VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL : 4087ec681f3Smrg res->aspect == VK_IMAGE_ASPECT_STENCIL_BIT ? VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL : 4097ec681f3Smrg VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) : 4107ec681f3Smrg VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; 4117ec681f3Smrg case ZINK_DESCRIPTOR_TYPE_IMAGE: 4127ec681f3Smrg return VK_IMAGE_LAYOUT_GENERAL; 4137ec681f3Smrg default: 4147ec681f3Smrg break; 4157ec681f3Smrg } 4167ec681f3Smrg return 0; 4177ec681f3Smrg} 4187ec681f3Smrg 4197ec681f3SmrgALWAYS_INLINE static struct zink_surface * 4207ec681f3Smrgget_imageview_for_binding(struct zink_context *ctx, enum pipe_shader_type stage, enum zink_descriptor_type type, unsigned idx) 4217ec681f3Smrg{ 4227ec681f3Smrg switch (type) { 4237ec681f3Smrg case ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW: { 4247ec681f3Smrg struct zink_sampler_view *sampler_view = zink_sampler_view(ctx->sampler_views[stage][idx]); 4257ec681f3Smrg return sampler_view->base.texture ? sampler_view->image_view : NULL; 4267ec681f3Smrg } 4277ec681f3Smrg case ZINK_DESCRIPTOR_TYPE_IMAGE: { 4287ec681f3Smrg struct zink_image_view *image_view = &ctx->image_views[stage][idx]; 4297ec681f3Smrg return image_view->base.resource ? 
image_view->surface : NULL; 4307ec681f3Smrg } 4317ec681f3Smrg default: 4327ec681f3Smrg break; 4337ec681f3Smrg } 4347ec681f3Smrg unreachable("ACK"); 4357ec681f3Smrg return VK_NULL_HANDLE; 4367ec681f3Smrg} 4377ec681f3Smrg 4387ec681f3SmrgALWAYS_INLINE static struct zink_buffer_view * 4397ec681f3Smrgget_bufferview_for_binding(struct zink_context *ctx, enum pipe_shader_type stage, enum zink_descriptor_type type, unsigned idx) 4407ec681f3Smrg{ 4417ec681f3Smrg switch (type) { 4427ec681f3Smrg case ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW: { 4437ec681f3Smrg struct zink_sampler_view *sampler_view = zink_sampler_view(ctx->sampler_views[stage][idx]); 4447ec681f3Smrg return sampler_view->base.texture ? sampler_view->buffer_view : NULL; 4457ec681f3Smrg } 4467ec681f3Smrg case ZINK_DESCRIPTOR_TYPE_IMAGE: { 4477ec681f3Smrg struct zink_image_view *image_view = &ctx->image_views[stage][idx]; 4487ec681f3Smrg return image_view->base.resource ? image_view->buffer_view : NULL; 4497ec681f3Smrg } 4507ec681f3Smrg default: 4517ec681f3Smrg break; 4527ec681f3Smrg } 4537ec681f3Smrg unreachable("ACK"); 4547ec681f3Smrg return VK_NULL_HANDLE; 4557ec681f3Smrg} 4567ec681f3Smrg 4577ec681f3SmrgALWAYS_INLINE static struct zink_resource * 4587ec681f3Smrgupdate_descriptor_state_ubo(struct zink_context *ctx, enum pipe_shader_type shader, unsigned slot, struct zink_resource *res) 4597ec681f3Smrg{ 4607ec681f3Smrg struct zink_screen *screen = zink_screen(ctx->base.screen); 4617ec681f3Smrg bool have_null_descriptors = screen->info.rb2_feats.nullDescriptor; 4627ec681f3Smrg const enum zink_descriptor_type type = ZINK_DESCRIPTOR_TYPE_UBO; 4637ec681f3Smrg ctx->di.descriptor_res[type][shader][slot] = res; 4647ec681f3Smrg ctx->di.ubos[shader][slot].offset = ctx->ubos[shader][slot].buffer_offset; 4657ec681f3Smrg if (res) { 4667ec681f3Smrg ctx->di.ubos[shader][slot].buffer = res->obj->buffer; 4677ec681f3Smrg ctx->di.ubos[shader][slot].range = ctx->ubos[shader][slot].buffer_size; 4687ec681f3Smrg 
assert(ctx->di.ubos[shader][slot].range <= screen->info.props.limits.maxUniformBufferRange); 4697ec681f3Smrg } else { 4707ec681f3Smrg VkBuffer null_buffer = zink_resource(ctx->dummy_vertex_buffer)->obj->buffer; 4717ec681f3Smrg ctx->di.ubos[shader][slot].buffer = have_null_descriptors ? VK_NULL_HANDLE : null_buffer; 4727ec681f3Smrg ctx->di.ubos[shader][slot].range = VK_WHOLE_SIZE; 4737ec681f3Smrg } 4747ec681f3Smrg if (!slot) { 4757ec681f3Smrg if (res) 4767ec681f3Smrg ctx->di.push_valid |= BITFIELD64_BIT(shader); 4777ec681f3Smrg else 4787ec681f3Smrg ctx->di.push_valid &= ~BITFIELD64_BIT(shader); 4797ec681f3Smrg } 4807ec681f3Smrg return res; 4817ec681f3Smrg} 4827ec681f3Smrg 4837ec681f3SmrgALWAYS_INLINE static struct zink_resource * 4847ec681f3Smrgupdate_descriptor_state_ssbo(struct zink_context *ctx, enum pipe_shader_type shader, unsigned slot, struct zink_resource *res) 4857ec681f3Smrg{ 4867ec681f3Smrg struct zink_screen *screen = zink_screen(ctx->base.screen); 4877ec681f3Smrg bool have_null_descriptors = screen->info.rb2_feats.nullDescriptor; 4887ec681f3Smrg const enum zink_descriptor_type type = ZINK_DESCRIPTOR_TYPE_SSBO; 4897ec681f3Smrg ctx->di.descriptor_res[type][shader][slot] = res; 4907ec681f3Smrg ctx->di.ssbos[shader][slot].offset = ctx->ssbos[shader][slot].buffer_offset; 4917ec681f3Smrg if (res) { 4927ec681f3Smrg ctx->di.ssbos[shader][slot].buffer = res->obj->buffer; 4937ec681f3Smrg ctx->di.ssbos[shader][slot].range = ctx->ssbos[shader][slot].buffer_size; 4947ec681f3Smrg } else { 4957ec681f3Smrg VkBuffer null_buffer = zink_resource(ctx->dummy_vertex_buffer)->obj->buffer; 4967ec681f3Smrg ctx->di.ssbos[shader][slot].buffer = have_null_descriptors ? 
VK_NULL_HANDLE : null_buffer; 4977ec681f3Smrg ctx->di.ssbos[shader][slot].range = VK_WHOLE_SIZE; 4987ec681f3Smrg } 4997ec681f3Smrg return res; 5007ec681f3Smrg} 5017ec681f3Smrg 5027ec681f3SmrgALWAYS_INLINE static struct zink_resource * 5037ec681f3Smrgupdate_descriptor_state_sampler(struct zink_context *ctx, enum pipe_shader_type shader, unsigned slot, struct zink_resource *res) 5047ec681f3Smrg{ 5057ec681f3Smrg struct zink_screen *screen = zink_screen(ctx->base.screen); 5067ec681f3Smrg bool have_null_descriptors = screen->info.rb2_feats.nullDescriptor; 5077ec681f3Smrg const enum zink_descriptor_type type = ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW; 5087ec681f3Smrg ctx->di.descriptor_res[type][shader][slot] = res; 5097ec681f3Smrg if (res) { 5107ec681f3Smrg if (res->obj->is_buffer) { 5117ec681f3Smrg struct zink_buffer_view *bv = get_bufferview_for_binding(ctx, shader, type, slot); 5127ec681f3Smrg ctx->di.tbos[shader][slot] = bv->buffer_view; 5137ec681f3Smrg ctx->di.sampler_surfaces[shader][slot].bufferview = bv; 5147ec681f3Smrg ctx->di.sampler_surfaces[shader][slot].is_buffer = true; 5157ec681f3Smrg } else { 5167ec681f3Smrg struct zink_surface *surface = get_imageview_for_binding(ctx, shader, type, slot); 5177ec681f3Smrg ctx->di.textures[shader][slot].imageLayout = get_layout_for_binding(res, type, shader == PIPE_SHADER_COMPUTE); 5187ec681f3Smrg ctx->di.textures[shader][slot].imageView = surface->image_view; 5197ec681f3Smrg ctx->di.sampler_surfaces[shader][slot].surface = surface; 5207ec681f3Smrg ctx->di.sampler_surfaces[shader][slot].is_buffer = false; 5217ec681f3Smrg } 5227ec681f3Smrg } else { 5237ec681f3Smrg if (likely(have_null_descriptors)) { 5247ec681f3Smrg ctx->di.textures[shader][slot].imageView = VK_NULL_HANDLE; 5257ec681f3Smrg ctx->di.textures[shader][slot].imageLayout = VK_IMAGE_LAYOUT_UNDEFINED; 5267ec681f3Smrg ctx->di.tbos[shader][slot] = VK_NULL_HANDLE; 5277ec681f3Smrg } else { 5287ec681f3Smrg struct zink_surface *null_surface = 
zink_csurface(ctx->dummy_surface[0]); 5297ec681f3Smrg struct zink_buffer_view *null_bufferview = ctx->dummy_bufferview; 5307ec681f3Smrg ctx->di.textures[shader][slot].imageView = null_surface->image_view; 5317ec681f3Smrg ctx->di.textures[shader][slot].imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; 5327ec681f3Smrg ctx->di.tbos[shader][slot] = null_bufferview->buffer_view; 5337ec681f3Smrg } 5347ec681f3Smrg memset(&ctx->di.sampler_surfaces[shader][slot], 0, sizeof(ctx->di.sampler_surfaces[shader][slot])); 5357ec681f3Smrg } 5367ec681f3Smrg return res; 5377ec681f3Smrg} 5387ec681f3Smrg 5397ec681f3SmrgALWAYS_INLINE static struct zink_resource * 5407ec681f3Smrgupdate_descriptor_state_image(struct zink_context *ctx, enum pipe_shader_type shader, unsigned slot, struct zink_resource *res) 5417ec681f3Smrg{ 5427ec681f3Smrg struct zink_screen *screen = zink_screen(ctx->base.screen); 5437ec681f3Smrg bool have_null_descriptors = screen->info.rb2_feats.nullDescriptor; 5447ec681f3Smrg const enum zink_descriptor_type type = ZINK_DESCRIPTOR_TYPE_IMAGE; 5457ec681f3Smrg ctx->di.descriptor_res[type][shader][slot] = res; 5467ec681f3Smrg if (res) { 5477ec681f3Smrg if (res->obj->is_buffer) { 5487ec681f3Smrg struct zink_buffer_view *bv = get_bufferview_for_binding(ctx, shader, type, slot); 5497ec681f3Smrg ctx->di.texel_images[shader][slot] = bv->buffer_view; 5507ec681f3Smrg ctx->di.image_surfaces[shader][slot].bufferview = bv; 5517ec681f3Smrg ctx->di.image_surfaces[shader][slot].is_buffer = true; 5527ec681f3Smrg } else { 5537ec681f3Smrg struct zink_surface *surface = get_imageview_for_binding(ctx, shader, type, slot); 5547ec681f3Smrg ctx->di.images[shader][slot].imageLayout = VK_IMAGE_LAYOUT_GENERAL; 5557ec681f3Smrg ctx->di.images[shader][slot].imageView = surface->image_view; 5567ec681f3Smrg ctx->di.image_surfaces[shader][slot].surface = surface; 5577ec681f3Smrg ctx->di.image_surfaces[shader][slot].is_buffer = false; 5587ec681f3Smrg } 5597ec681f3Smrg } else { 5607ec681f3Smrg if 
(likely(have_null_descriptors)) { 5617ec681f3Smrg memset(&ctx->di.images[shader][slot], 0, sizeof(ctx->di.images[shader][slot])); 5627ec681f3Smrg ctx->di.texel_images[shader][slot] = VK_NULL_HANDLE; 5637ec681f3Smrg } else { 5647ec681f3Smrg struct zink_surface *null_surface = zink_csurface(ctx->dummy_surface[0]); 5657ec681f3Smrg struct zink_buffer_view *null_bufferview = ctx->dummy_bufferview; 5667ec681f3Smrg ctx->di.images[shader][slot].imageView = null_surface->image_view; 5677ec681f3Smrg ctx->di.images[shader][slot].imageLayout = VK_IMAGE_LAYOUT_GENERAL; 5687ec681f3Smrg ctx->di.texel_images[shader][slot] = null_bufferview->buffer_view; 5697ec681f3Smrg } 5707ec681f3Smrg memset(&ctx->di.image_surfaces[shader][slot], 0, sizeof(ctx->di.image_surfaces[shader][slot])); 5717ec681f3Smrg } 5727ec681f3Smrg return res; 5737ec681f3Smrg} 5747ec681f3Smrg 5757ec681f3Smrgstatic void 5767ec681f3Smrgzink_bind_sampler_states(struct pipe_context *pctx, 5777ec681f3Smrg enum pipe_shader_type shader, 5787ec681f3Smrg unsigned start_slot, 5797ec681f3Smrg unsigned num_samplers, 5807ec681f3Smrg void **samplers) 5817ec681f3Smrg{ 5827ec681f3Smrg struct zink_context *ctx = zink_context(pctx); 5837ec681f3Smrg for (unsigned i = 0; i < num_samplers; ++i) { 5847ec681f3Smrg struct zink_sampler_state *state = samplers[i]; 5857ec681f3Smrg if (ctx->sampler_states[shader][start_slot + i] != state) 5867ec681f3Smrg zink_screen(pctx->screen)->context_invalidate_descriptor_state(ctx, shader, ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW, start_slot, 1); 5877ec681f3Smrg ctx->sampler_states[shader][start_slot + i] = state; 5887ec681f3Smrg ctx->di.textures[shader][start_slot + i].sampler = state ? 
state->sampler : VK_NULL_HANDLE; 5897ec681f3Smrg if (state) 5907ec681f3Smrg zink_batch_usage_set(&state->batch_uses, ctx->batch.state); 5917ec681f3Smrg } 5927ec681f3Smrg ctx->di.num_samplers[shader] = start_slot + num_samplers; 5937ec681f3Smrg} 5947ec681f3Smrg 5957ec681f3Smrgstatic void 5967ec681f3Smrgzink_delete_sampler_state(struct pipe_context *pctx, 5977ec681f3Smrg void *sampler_state) 5987ec681f3Smrg{ 5997ec681f3Smrg struct zink_sampler_state *sampler = sampler_state; 6007ec681f3Smrg struct zink_batch *batch = &zink_context(pctx)->batch; 6017ec681f3Smrg zink_descriptor_set_refs_clear(&sampler->desc_set_refs, sampler_state); 6027ec681f3Smrg /* may be called if context_create fails */ 6037ec681f3Smrg if (batch->state) 6047ec681f3Smrg util_dynarray_append(&batch->state->zombie_samplers, VkSampler, 6057ec681f3Smrg sampler->sampler); 6067ec681f3Smrg if (sampler->custom_border_color) 6077ec681f3Smrg p_atomic_dec(&zink_screen(pctx->screen)->cur_custom_border_color_samplers); 6087ec681f3Smrg FREE(sampler); 6097ec681f3Smrg} 6107ec681f3Smrg 6117ec681f3Smrgstatic VkImageAspectFlags 6127ec681f3Smrgsampler_aspect_from_format(enum pipe_format fmt) 6137ec681f3Smrg{ 6147ec681f3Smrg if (util_format_is_depth_or_stencil(fmt)) { 6157ec681f3Smrg const struct util_format_description *desc = util_format_description(fmt); 6167ec681f3Smrg if (util_format_has_depth(desc)) 6177ec681f3Smrg return VK_IMAGE_ASPECT_DEPTH_BIT; 6187ec681f3Smrg assert(util_format_has_stencil(desc)); 6197ec681f3Smrg return VK_IMAGE_ASPECT_STENCIL_BIT; 6207ec681f3Smrg } else 6217ec681f3Smrg return VK_IMAGE_ASPECT_COLOR_BIT; 6227ec681f3Smrg} 6237ec681f3Smrg 6247ec681f3Smrgstatic uint32_t 6257ec681f3Smrghash_bufferview(void *bvci) 6267ec681f3Smrg{ 6277ec681f3Smrg size_t offset = offsetof(VkBufferViewCreateInfo, flags); 6287ec681f3Smrg return _mesa_hash_data((char*)bvci + offset, sizeof(VkBufferViewCreateInfo) - offset); 6297ec681f3Smrg} 6307ec681f3Smrg 6317ec681f3Smrgstatic VkBufferViewCreateInfo 
6327ec681f3Smrgcreate_bvci(struct zink_context *ctx, struct zink_resource *res, enum pipe_format format, uint32_t offset, uint32_t range) 6337ec681f3Smrg{ 6347ec681f3Smrg struct zink_screen *screen = zink_screen(ctx->base.screen); 6357ec681f3Smrg VkBufferViewCreateInfo bvci; 6367ec681f3Smrg // Zero whole struct (including alignment holes), so hash_bufferview 6377ec681f3Smrg // does not access potentially uninitialized data. 6387ec681f3Smrg memset(&bvci, 0, sizeof(bvci)); 6397ec681f3Smrg bvci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO; 6407ec681f3Smrg bvci.pNext = NULL; 6417ec681f3Smrg bvci.buffer = res->obj->buffer; 6427ec681f3Smrg bvci.format = zink_get_format(screen, format); 6437ec681f3Smrg assert(bvci.format); 6447ec681f3Smrg bvci.offset = offset; 6457ec681f3Smrg bvci.range = !offset && range == res->base.b.width0 ? VK_WHOLE_SIZE : range; 6467ec681f3Smrg uint32_t clamp = util_format_get_blocksize(format) * screen->info.props.limits.maxTexelBufferElements; 6477ec681f3Smrg if (bvci.range == VK_WHOLE_SIZE && res->base.b.width0 > clamp) 6487ec681f3Smrg bvci.range = clamp; 6497ec681f3Smrg bvci.flags = 0; 6507ec681f3Smrg return bvci; 6517ec681f3Smrg} 6527ec681f3Smrg 6537ec681f3Smrgstatic struct zink_buffer_view * 6547ec681f3Smrgget_buffer_view(struct zink_context *ctx, struct zink_resource *res, VkBufferViewCreateInfo *bvci) 6557ec681f3Smrg{ 6567ec681f3Smrg struct zink_screen *screen = zink_screen(ctx->base.screen); 6577ec681f3Smrg struct zink_buffer_view *buffer_view = NULL; 6587ec681f3Smrg 6597ec681f3Smrg uint32_t hash = hash_bufferview(bvci); 6607ec681f3Smrg simple_mtx_lock(&res->bufferview_mtx); 6617ec681f3Smrg struct hash_entry *he = _mesa_hash_table_search_pre_hashed(&res->bufferview_cache, hash, bvci); 6627ec681f3Smrg if (he) { 6637ec681f3Smrg buffer_view = he->data; 6647ec681f3Smrg p_atomic_inc(&buffer_view->reference.count); 6657ec681f3Smrg } else { 6667ec681f3Smrg VkBufferView view; 6677ec681f3Smrg if (VKSCR(CreateBufferView)(screen->dev, bvci, NULL, 
&view) != VK_SUCCESS) 6687ec681f3Smrg goto out; 6697ec681f3Smrg buffer_view = CALLOC_STRUCT(zink_buffer_view); 6707ec681f3Smrg if (!buffer_view) { 6717ec681f3Smrg VKSCR(DestroyBufferView)(screen->dev, view, NULL); 6727ec681f3Smrg goto out; 6737ec681f3Smrg } 6747ec681f3Smrg pipe_reference_init(&buffer_view->reference, 1); 6757ec681f3Smrg pipe_resource_reference(&buffer_view->pres, &res->base.b); 6767ec681f3Smrg util_dynarray_init(&buffer_view->desc_set_refs.refs, NULL); 6777ec681f3Smrg buffer_view->bvci = *bvci; 6787ec681f3Smrg buffer_view->buffer_view = view; 6797ec681f3Smrg buffer_view->hash = hash; 6807ec681f3Smrg _mesa_hash_table_insert_pre_hashed(&res->bufferview_cache, hash, &buffer_view->bvci, buffer_view); 6817ec681f3Smrg } 6827ec681f3Smrgout: 6837ec681f3Smrg simple_mtx_unlock(&res->bufferview_mtx); 6847ec681f3Smrg return buffer_view; 6857ec681f3Smrg} 6867ec681f3Smrg 6877ec681f3Smrgenum pipe_swizzle 6887ec681f3Smrgzink_clamp_void_swizzle(const struct util_format_description *desc, enum pipe_swizzle swizzle) 6897ec681f3Smrg{ 6907ec681f3Smrg switch (swizzle) { 6917ec681f3Smrg case PIPE_SWIZZLE_X: 6927ec681f3Smrg case PIPE_SWIZZLE_Y: 6937ec681f3Smrg case PIPE_SWIZZLE_Z: 6947ec681f3Smrg case PIPE_SWIZZLE_W: 6957ec681f3Smrg return desc->channel[swizzle].type == UTIL_FORMAT_TYPE_VOID ? 
PIPE_SWIZZLE_1 : swizzle; 6967ec681f3Smrg default: 6977ec681f3Smrg break; 6987ec681f3Smrg } 6997ec681f3Smrg return swizzle; 7007ec681f3Smrg} 7017ec681f3Smrg 7027ec681f3SmrgALWAYS_INLINE static enum pipe_swizzle 7037ec681f3Smrgclamp_zs_swizzle(enum pipe_swizzle swizzle) 7047ec681f3Smrg{ 7057ec681f3Smrg switch (swizzle) { 7067ec681f3Smrg case PIPE_SWIZZLE_X: 7077ec681f3Smrg case PIPE_SWIZZLE_Y: 7087ec681f3Smrg case PIPE_SWIZZLE_Z: 7097ec681f3Smrg case PIPE_SWIZZLE_W: 7107ec681f3Smrg return PIPE_SWIZZLE_X; 7117ec681f3Smrg default: 7127ec681f3Smrg break; 7137ec681f3Smrg } 7147ec681f3Smrg return swizzle; 7157ec681f3Smrg} 7167ec681f3Smrg 7177ec681f3Smrgstatic struct pipe_sampler_view * 7187ec681f3Smrgzink_create_sampler_view(struct pipe_context *pctx, struct pipe_resource *pres, 7197ec681f3Smrg const struct pipe_sampler_view *state) 7207ec681f3Smrg{ 7217ec681f3Smrg struct zink_screen *screen = zink_screen(pctx->screen); 7227ec681f3Smrg struct zink_resource *res = zink_resource(pres); 7237ec681f3Smrg struct zink_sampler_view *sampler_view = CALLOC_STRUCT(zink_sampler_view); 7247ec681f3Smrg bool err; 7257ec681f3Smrg 7267ec681f3Smrg sampler_view->base = *state; 7277ec681f3Smrg sampler_view->base.texture = NULL; 7287ec681f3Smrg pipe_resource_reference(&sampler_view->base.texture, pres); 7297ec681f3Smrg sampler_view->base.reference.count = 1; 7307ec681f3Smrg sampler_view->base.context = pctx; 7317ec681f3Smrg 7327ec681f3Smrg if (state->target != PIPE_BUFFER) { 7337ec681f3Smrg VkImageViewCreateInfo ivci; 7347ec681f3Smrg 7357ec681f3Smrg struct pipe_surface templ = {0}; 7367ec681f3Smrg templ.u.tex.level = state->u.tex.first_level; 7377ec681f3Smrg templ.format = state->format; 7387ec681f3Smrg if (state->target != PIPE_TEXTURE_3D) { 7397ec681f3Smrg templ.u.tex.first_layer = state->u.tex.first_layer; 7407ec681f3Smrg templ.u.tex.last_layer = state->u.tex.last_layer; 7417ec681f3Smrg } 7427ec681f3Smrg 7437ec681f3Smrg ivci = create_ivci(screen, res, &templ, state->target); 
7447ec681f3Smrg ivci.subresourceRange.levelCount = state->u.tex.last_level - state->u.tex.first_level + 1; 7457ec681f3Smrg ivci.subresourceRange.aspectMask = sampler_aspect_from_format(state->format); 7467ec681f3Smrg /* samplers for stencil aspects of packed formats need to always use stencil swizzle */ 7477ec681f3Smrg if (ivci.subresourceRange.aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) { 7487ec681f3Smrg ivci.components.r = zink_component_mapping(clamp_zs_swizzle(sampler_view->base.swizzle_r)); 7497ec681f3Smrg ivci.components.g = zink_component_mapping(clamp_zs_swizzle(sampler_view->base.swizzle_g)); 7507ec681f3Smrg ivci.components.b = zink_component_mapping(clamp_zs_swizzle(sampler_view->base.swizzle_b)); 7517ec681f3Smrg ivci.components.a = zink_component_mapping(clamp_zs_swizzle(sampler_view->base.swizzle_a)); 7527ec681f3Smrg } else { 7537ec681f3Smrg /* if we have e.g., R8G8B8X8, then we have to ignore alpha since we're just emulating 7547ec681f3Smrg * these formats 7557ec681f3Smrg */ 7567ec681f3Smrg if (zink_format_is_voidable_rgba_variant(state->format)) { 7577ec681f3Smrg const struct util_format_description *desc = util_format_description(state->format); 7587ec681f3Smrg sampler_view->base.swizzle_r = zink_clamp_void_swizzle(desc, sampler_view->base.swizzle_r); 7597ec681f3Smrg sampler_view->base.swizzle_g = zink_clamp_void_swizzle(desc, sampler_view->base.swizzle_g); 7607ec681f3Smrg sampler_view->base.swizzle_b = zink_clamp_void_swizzle(desc, sampler_view->base.swizzle_b); 7617ec681f3Smrg sampler_view->base.swizzle_a = zink_clamp_void_swizzle(desc, sampler_view->base.swizzle_a); 7627ec681f3Smrg } 7637ec681f3Smrg ivci.components.r = zink_component_mapping(sampler_view->base.swizzle_r); 7647ec681f3Smrg ivci.components.g = zink_component_mapping(sampler_view->base.swizzle_g); 7657ec681f3Smrg ivci.components.b = zink_component_mapping(sampler_view->base.swizzle_b); 7667ec681f3Smrg ivci.components.a = 
zink_component_mapping(sampler_view->base.swizzle_a); 7677ec681f3Smrg } 7687ec681f3Smrg assert(ivci.format); 7697ec681f3Smrg 7707ec681f3Smrg sampler_view->image_view = (struct zink_surface*)zink_get_surface(zink_context(pctx), pres, &templ, &ivci); 7717ec681f3Smrg err = !sampler_view->image_view; 7727ec681f3Smrg } else { 7737ec681f3Smrg VkBufferViewCreateInfo bvci = create_bvci(zink_context(pctx), res, state->format, state->u.buf.offset, state->u.buf.size); 7747ec681f3Smrg sampler_view->buffer_view = get_buffer_view(zink_context(pctx), res, &bvci); 7757ec681f3Smrg err = !sampler_view->buffer_view; 7767ec681f3Smrg } 7777ec681f3Smrg if (err) { 7787ec681f3Smrg FREE(sampler_view); 7797ec681f3Smrg return NULL; 7807ec681f3Smrg } 7817ec681f3Smrg return &sampler_view->base; 7827ec681f3Smrg} 7837ec681f3Smrg 7847ec681f3Smrgvoid 7857ec681f3Smrgzink_destroy_buffer_view(struct zink_screen *screen, struct zink_buffer_view *buffer_view) 7867ec681f3Smrg{ 7877ec681f3Smrg struct zink_resource *res = zink_resource(buffer_view->pres); 7887ec681f3Smrg simple_mtx_lock(&res->bufferview_mtx); 7897ec681f3Smrg if (buffer_view->reference.count) { 7907ec681f3Smrg /* got a cache hit during deletion */ 7917ec681f3Smrg simple_mtx_unlock(&res->bufferview_mtx); 7927ec681f3Smrg return; 7937ec681f3Smrg } 7947ec681f3Smrg struct hash_entry *he = _mesa_hash_table_search_pre_hashed(&res->bufferview_cache, buffer_view->hash, &buffer_view->bvci); 7957ec681f3Smrg assert(he); 7967ec681f3Smrg _mesa_hash_table_remove(&res->bufferview_cache, he); 7977ec681f3Smrg simple_mtx_unlock(&res->bufferview_mtx); 7987ec681f3Smrg pipe_resource_reference(&buffer_view->pres, NULL); 7997ec681f3Smrg VKSCR(DestroyBufferView)(screen->dev, buffer_view->buffer_view, NULL); 8007ec681f3Smrg zink_descriptor_set_refs_clear(&buffer_view->desc_set_refs, buffer_view); 8017ec681f3Smrg FREE(buffer_view); 8027ec681f3Smrg} 8037ec681f3Smrg 8047ec681f3Smrgstatic void 8057ec681f3Smrgzink_sampler_view_destroy(struct pipe_context *pctx, 
8067ec681f3Smrg struct pipe_sampler_view *pview) 8077ec681f3Smrg{ 8087ec681f3Smrg struct zink_sampler_view *view = zink_sampler_view(pview); 8097ec681f3Smrg if (pview->texture->target == PIPE_BUFFER) 8107ec681f3Smrg zink_buffer_view_reference(zink_screen(pctx->screen), &view->buffer_view, NULL); 8117ec681f3Smrg else { 8127ec681f3Smrg zink_surface_reference(zink_screen(pctx->screen), &view->image_view, NULL); 8137ec681f3Smrg } 8147ec681f3Smrg pipe_resource_reference(&pview->texture, NULL); 8157ec681f3Smrg FREE(view); 8167ec681f3Smrg} 8177ec681f3Smrg 8187ec681f3Smrgstatic void 8197ec681f3Smrgzink_get_sample_position(struct pipe_context *ctx, 8207ec681f3Smrg unsigned sample_count, 8217ec681f3Smrg unsigned sample_index, 8227ec681f3Smrg float *out_value) 8237ec681f3Smrg{ 8247ec681f3Smrg /* TODO: handle this I guess */ 8257ec681f3Smrg assert(zink_screen(ctx->screen)->info.props.limits.standardSampleLocations); 8267ec681f3Smrg /* from 26.4. Multisampling */ 8277ec681f3Smrg switch (sample_count) { 8287ec681f3Smrg case 0: 8297ec681f3Smrg case 1: { 8307ec681f3Smrg float pos[][2] = { {0.5,0.5}, }; 8317ec681f3Smrg out_value[0] = pos[sample_index][0]; 8327ec681f3Smrg out_value[1] = pos[sample_index][1]; 8337ec681f3Smrg break; 8347ec681f3Smrg } 8357ec681f3Smrg case 2: { 8367ec681f3Smrg float pos[][2] = { {0.75,0.75}, 8377ec681f3Smrg {0.25,0.25}, }; 8387ec681f3Smrg out_value[0] = pos[sample_index][0]; 8397ec681f3Smrg out_value[1] = pos[sample_index][1]; 8407ec681f3Smrg break; 8417ec681f3Smrg } 8427ec681f3Smrg case 4: { 8437ec681f3Smrg float pos[][2] = { {0.375, 0.125}, 8447ec681f3Smrg {0.875, 0.375}, 8457ec681f3Smrg {0.125, 0.625}, 8467ec681f3Smrg {0.625, 0.875}, }; 8477ec681f3Smrg out_value[0] = pos[sample_index][0]; 8487ec681f3Smrg out_value[1] = pos[sample_index][1]; 8497ec681f3Smrg break; 8507ec681f3Smrg } 8517ec681f3Smrg case 8: { 8527ec681f3Smrg float pos[][2] = { {0.5625, 0.3125}, 8537ec681f3Smrg {0.4375, 0.6875}, 8547ec681f3Smrg {0.8125, 0.5625}, 8557ec681f3Smrg {0.3125, 
0.1875}, 8567ec681f3Smrg {0.1875, 0.8125}, 8577ec681f3Smrg {0.0625, 0.4375}, 8587ec681f3Smrg {0.6875, 0.9375}, 8597ec681f3Smrg {0.9375, 0.0625}, }; 8607ec681f3Smrg out_value[0] = pos[sample_index][0]; 8617ec681f3Smrg out_value[1] = pos[sample_index][1]; 8627ec681f3Smrg break; 8637ec681f3Smrg } 8647ec681f3Smrg case 16: { 8657ec681f3Smrg float pos[][2] = { {0.5625, 0.5625}, 8667ec681f3Smrg {0.4375, 0.3125}, 8677ec681f3Smrg {0.3125, 0.625}, 8687ec681f3Smrg {0.75, 0.4375}, 8697ec681f3Smrg {0.1875, 0.375}, 8707ec681f3Smrg {0.625, 0.8125}, 8717ec681f3Smrg {0.8125, 0.6875}, 8727ec681f3Smrg {0.6875, 0.1875}, 8737ec681f3Smrg {0.375, 0.875}, 8747ec681f3Smrg {0.5, 0.0625}, 8757ec681f3Smrg {0.25, 0.125}, 8767ec681f3Smrg {0.125, 0.75}, 8777ec681f3Smrg {0.0, 0.5}, 8787ec681f3Smrg {0.9375, 0.25}, 8797ec681f3Smrg {0.875, 0.9375}, 8807ec681f3Smrg {0.0625, 0.0}, }; 8817ec681f3Smrg out_value[0] = pos[sample_index][0]; 8827ec681f3Smrg out_value[1] = pos[sample_index][1]; 8837ec681f3Smrg break; 8847ec681f3Smrg } 8857ec681f3Smrg default: 8867ec681f3Smrg unreachable("unhandled sample count!"); 8877ec681f3Smrg } 8887ec681f3Smrg} 8897ec681f3Smrg 8907ec681f3Smrgstatic void 8917ec681f3Smrgzink_set_polygon_stipple(struct pipe_context *pctx, 8927ec681f3Smrg const struct pipe_poly_stipple *ps) 8937ec681f3Smrg{ 8947ec681f3Smrg} 8957ec681f3Smrg 8967ec681f3SmrgALWAYS_INLINE static void 8977ec681f3Smrgupdate_res_bind_count(struct zink_context *ctx, struct zink_resource *res, bool is_compute, bool decrement) 8987ec681f3Smrg{ 8997ec681f3Smrg if (decrement) { 9007ec681f3Smrg assert(res->bind_count[is_compute]); 9017ec681f3Smrg if (!--res->bind_count[is_compute]) 9027ec681f3Smrg _mesa_set_remove_key(ctx->need_barriers[is_compute], res); 9037ec681f3Smrg check_resource_for_batch_ref(ctx, res); 9047ec681f3Smrg } else 9057ec681f3Smrg res->bind_count[is_compute]++; 9067ec681f3Smrg} 9077ec681f3Smrg 9087ec681f3SmrgALWAYS_INLINE static void 9097ec681f3Smrgupdate_existing_vbo(struct zink_context *ctx, unsigned 
slot) 9107ec681f3Smrg{ 9117ec681f3Smrg if (!ctx->vertex_buffers[slot].buffer.resource) 9127ec681f3Smrg return; 9137ec681f3Smrg struct zink_resource *res = zink_resource(ctx->vertex_buffers[slot].buffer.resource); 9147ec681f3Smrg res->vbo_bind_mask &= ~BITFIELD_BIT(slot); 9157ec681f3Smrg update_res_bind_count(ctx, res, false, true); 9167ec681f3Smrg} 9177ec681f3Smrg 9187ec681f3Smrgstatic void 9197ec681f3Smrgzink_set_vertex_buffers(struct pipe_context *pctx, 9207ec681f3Smrg unsigned start_slot, 9217ec681f3Smrg unsigned num_buffers, 9227ec681f3Smrg unsigned unbind_num_trailing_slots, 9237ec681f3Smrg bool take_ownership, 9247ec681f3Smrg const struct pipe_vertex_buffer *buffers) 9257ec681f3Smrg{ 9267ec681f3Smrg struct zink_context *ctx = zink_context(pctx); 9277ec681f3Smrg const bool need_state_change = !zink_screen(pctx->screen)->info.have_EXT_extended_dynamic_state && 9287ec681f3Smrg !zink_screen(pctx->screen)->info.have_EXT_vertex_input_dynamic_state; 9297ec681f3Smrg uint32_t enabled_buffers = ctx->gfx_pipeline_state.vertex_buffers_enabled_mask; 9307ec681f3Smrg enabled_buffers |= u_bit_consecutive(start_slot, num_buffers); 9317ec681f3Smrg enabled_buffers &= ~u_bit_consecutive(start_slot + num_buffers, unbind_num_trailing_slots); 9327ec681f3Smrg 9337ec681f3Smrg if (buffers) { 9347ec681f3Smrg if (need_state_change) 9357ec681f3Smrg ctx->vertex_state_changed = true; 9367ec681f3Smrg for (unsigned i = 0; i < num_buffers; ++i) { 9377ec681f3Smrg const struct pipe_vertex_buffer *vb = buffers + i; 9387ec681f3Smrg struct pipe_vertex_buffer *ctx_vb = &ctx->vertex_buffers[start_slot + i]; 9397ec681f3Smrg update_existing_vbo(ctx, start_slot + i); 9407ec681f3Smrg if (!take_ownership) 9417ec681f3Smrg pipe_resource_reference(&ctx_vb->buffer.resource, vb->buffer.resource); 9427ec681f3Smrg else { 9437ec681f3Smrg pipe_resource_reference(&ctx_vb->buffer.resource, NULL); 9447ec681f3Smrg ctx_vb->buffer.resource = vb->buffer.resource; 9457ec681f3Smrg } 9467ec681f3Smrg if 
(vb->buffer.resource) { 9477ec681f3Smrg struct zink_resource *res = zink_resource(vb->buffer.resource); 9487ec681f3Smrg res->vbo_bind_mask |= BITFIELD_BIT(start_slot + i); 9497ec681f3Smrg update_res_bind_count(ctx, res, false, false); 9507ec681f3Smrg ctx_vb->stride = vb->stride; 9517ec681f3Smrg ctx_vb->buffer_offset = vb->buffer_offset; 9527ec681f3Smrg /* always barrier before possible rebind */ 9537ec681f3Smrg zink_resource_buffer_barrier(ctx, res, VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT, 9547ec681f3Smrg VK_PIPELINE_STAGE_VERTEX_INPUT_BIT); 9557ec681f3Smrg } else { 9567ec681f3Smrg enabled_buffers &= ~BITFIELD_BIT(start_slot + i); 9577ec681f3Smrg } 9587ec681f3Smrg } 9597ec681f3Smrg } else { 9607ec681f3Smrg if (need_state_change) 9617ec681f3Smrg ctx->vertex_state_changed = true; 9627ec681f3Smrg for (unsigned i = 0; i < num_buffers; ++i) { 9637ec681f3Smrg update_existing_vbo(ctx, start_slot + i); 9647ec681f3Smrg pipe_resource_reference(&ctx->vertex_buffers[start_slot + i].buffer.resource, NULL); 9657ec681f3Smrg } 9667ec681f3Smrg } 9677ec681f3Smrg for (unsigned i = 0; i < unbind_num_trailing_slots; i++) { 9687ec681f3Smrg update_existing_vbo(ctx, start_slot + i); 9697ec681f3Smrg pipe_resource_reference(&ctx->vertex_buffers[start_slot + i].buffer.resource, NULL); 9707ec681f3Smrg } 9717ec681f3Smrg ctx->gfx_pipeline_state.vertex_buffers_enabled_mask = enabled_buffers; 9727ec681f3Smrg ctx->vertex_buffers_dirty = num_buffers > 0; 9737ec681f3Smrg#ifndef NDEBUG 9747ec681f3Smrg u_foreach_bit(b, enabled_buffers) 9757ec681f3Smrg assert(ctx->vertex_buffers[b].buffer.resource); 9767ec681f3Smrg#endif 9777ec681f3Smrg} 9787ec681f3Smrg 9797ec681f3Smrgstatic void 9807ec681f3Smrgzink_set_viewport_states(struct pipe_context *pctx, 9817ec681f3Smrg unsigned start_slot, 9827ec681f3Smrg unsigned num_viewports, 9837ec681f3Smrg const struct pipe_viewport_state *state) 9847ec681f3Smrg{ 9857ec681f3Smrg struct zink_context *ctx = zink_context(pctx); 9867ec681f3Smrg 9877ec681f3Smrg for (unsigned i = 
0; i < num_viewports; ++i) 9887ec681f3Smrg ctx->vp_state.viewport_states[start_slot + i] = state[i]; 9897ec681f3Smrg ctx->vp_state.num_viewports = start_slot + num_viewports; 9907ec681f3Smrg 9917ec681f3Smrg if (!zink_screen(pctx->screen)->info.have_EXT_extended_dynamic_state) { 9927ec681f3Smrg if (ctx->gfx_pipeline_state.dyn_state1.num_viewports != ctx->vp_state.num_viewports) 9937ec681f3Smrg ctx->gfx_pipeline_state.dirty = true; 9947ec681f3Smrg ctx->gfx_pipeline_state.dyn_state1.num_viewports = ctx->vp_state.num_viewports; 9957ec681f3Smrg } 9967ec681f3Smrg ctx->vp_state_changed = true; 9977ec681f3Smrg} 9987ec681f3Smrg 9997ec681f3Smrgstatic void 10007ec681f3Smrgzink_set_scissor_states(struct pipe_context *pctx, 10017ec681f3Smrg unsigned start_slot, unsigned num_scissors, 10027ec681f3Smrg const struct pipe_scissor_state *states) 10037ec681f3Smrg{ 10047ec681f3Smrg struct zink_context *ctx = zink_context(pctx); 10057ec681f3Smrg 10067ec681f3Smrg for (unsigned i = 0; i < num_scissors; i++) 10077ec681f3Smrg ctx->vp_state.scissor_states[start_slot + i] = states[i]; 10087ec681f3Smrg ctx->scissor_changed = true; 10097ec681f3Smrg} 10107ec681f3Smrg 10117ec681f3Smrgstatic void 10127ec681f3Smrgzink_set_inlinable_constants(struct pipe_context *pctx, 10137ec681f3Smrg enum pipe_shader_type shader, 10147ec681f3Smrg uint num_values, uint32_t *values) 10157ec681f3Smrg{ 10167ec681f3Smrg struct zink_context *ctx = (struct zink_context *)pctx; 10177ec681f3Smrg const uint32_t bit = BITFIELD_BIT(shader); 10187ec681f3Smrg uint32_t *inlinable_uniforms; 10197ec681f3Smrg struct zink_shader_key *key = NULL; 10207ec681f3Smrg 10217ec681f3Smrg if (shader == PIPE_SHADER_COMPUTE) { 10227ec681f3Smrg inlinable_uniforms = ctx->compute_inlinable_uniforms; 10237ec681f3Smrg } else { 10247ec681f3Smrg key = &ctx->gfx_pipeline_state.shader_keys.key[shader]; 10257ec681f3Smrg inlinable_uniforms = key->base.inlined_uniform_values; 10267ec681f3Smrg } 10277ec681f3Smrg if (!(ctx->inlinable_uniforms_valid_mask & 
bit) || 10287ec681f3Smrg memcmp(inlinable_uniforms, values, num_values * 4)) { 10297ec681f3Smrg memcpy(inlinable_uniforms, values, num_values * 4); 10307ec681f3Smrg ctx->dirty_shader_stages |= bit; 10317ec681f3Smrg ctx->inlinable_uniforms_valid_mask |= bit; 10327ec681f3Smrg if (key) 10337ec681f3Smrg key->inline_uniforms = true; 10347ec681f3Smrg } 10357ec681f3Smrg} 10367ec681f3Smrg 10377ec681f3SmrgALWAYS_INLINE static void 10387ec681f3Smrgunbind_ubo(struct zink_context *ctx, struct zink_resource *res, enum pipe_shader_type pstage, unsigned slot) 10397ec681f3Smrg{ 10407ec681f3Smrg if (!res) 10417ec681f3Smrg return; 10427ec681f3Smrg res->ubo_bind_mask[pstage] &= ~BITFIELD_BIT(slot); 10437ec681f3Smrg res->ubo_bind_count[pstage == PIPE_SHADER_COMPUTE]--; 10447ec681f3Smrg update_res_bind_count(ctx, res, pstage == PIPE_SHADER_COMPUTE, true); 10457ec681f3Smrg} 10467ec681f3Smrg 10477ec681f3Smrgstatic void 10487ec681f3Smrginvalidate_inlined_uniforms(struct zink_context *ctx, enum pipe_shader_type pstage) 10497ec681f3Smrg{ 10507ec681f3Smrg unsigned bit = BITFIELD_BIT(pstage); 10517ec681f3Smrg if (!(ctx->inlinable_uniforms_valid_mask & bit)) 10527ec681f3Smrg return; 10537ec681f3Smrg ctx->inlinable_uniforms_valid_mask &= ~bit; 10547ec681f3Smrg ctx->dirty_shader_stages |= bit; 10557ec681f3Smrg if (pstage == PIPE_SHADER_COMPUTE) 10567ec681f3Smrg return; 10577ec681f3Smrg 10587ec681f3Smrg struct zink_shader_key *key = &ctx->gfx_pipeline_state.shader_keys.key[pstage]; 10597ec681f3Smrg key->inline_uniforms = false; 10607ec681f3Smrg} 10617ec681f3Smrg 10627ec681f3Smrgstatic void 10637ec681f3Smrgzink_set_constant_buffer(struct pipe_context *pctx, 10647ec681f3Smrg enum pipe_shader_type shader, uint index, 10657ec681f3Smrg bool take_ownership, 10667ec681f3Smrg const struct pipe_constant_buffer *cb) 10677ec681f3Smrg{ 10687ec681f3Smrg struct zink_context *ctx = zink_context(pctx); 10697ec681f3Smrg bool update = false; 10707ec681f3Smrg 10717ec681f3Smrg struct zink_resource *res = 
zink_resource(ctx->ubos[shader][index].buffer); 10727ec681f3Smrg if (cb) { 10737ec681f3Smrg struct pipe_resource *buffer = cb->buffer; 10747ec681f3Smrg unsigned offset = cb->buffer_offset; 10757ec681f3Smrg struct zink_screen *screen = zink_screen(pctx->screen); 10767ec681f3Smrg if (cb->user_buffer) { 10777ec681f3Smrg u_upload_data(ctx->base.const_uploader, 0, cb->buffer_size, 10787ec681f3Smrg screen->info.props.limits.minUniformBufferOffsetAlignment, 10797ec681f3Smrg cb->user_buffer, &offset, &buffer); 10807ec681f3Smrg } 10817ec681f3Smrg struct zink_resource *new_res = zink_resource(buffer); 10827ec681f3Smrg if (new_res) { 10837ec681f3Smrg if (new_res != res) { 10847ec681f3Smrg unbind_ubo(ctx, res, shader, index); 10857ec681f3Smrg new_res->ubo_bind_count[shader == PIPE_SHADER_COMPUTE]++; 10867ec681f3Smrg new_res->ubo_bind_mask[shader] |= BITFIELD_BIT(index); 10877ec681f3Smrg update_res_bind_count(ctx, new_res, shader == PIPE_SHADER_COMPUTE, false); 10887ec681f3Smrg } 10897ec681f3Smrg zink_batch_resource_usage_set(&ctx->batch, new_res, false); 10907ec681f3Smrg zink_resource_buffer_barrier(ctx, new_res, VK_ACCESS_UNIFORM_READ_BIT, 10917ec681f3Smrg zink_pipeline_flags_from_pipe_stage(shader)); 10927ec681f3Smrg } 10937ec681f3Smrg update |= ((index || screen->descriptor_mode == ZINK_DESCRIPTOR_MODE_LAZY) && ctx->ubos[shader][index].buffer_offset != offset) || 10947ec681f3Smrg !!res != !!buffer || (res && res->obj->buffer != new_res->obj->buffer) || 10957ec681f3Smrg ctx->ubos[shader][index].buffer_size != cb->buffer_size; 10967ec681f3Smrg 10977ec681f3Smrg if (take_ownership) { 10987ec681f3Smrg pipe_resource_reference(&ctx->ubos[shader][index].buffer, NULL); 10997ec681f3Smrg ctx->ubos[shader][index].buffer = buffer; 11007ec681f3Smrg } else { 11017ec681f3Smrg pipe_resource_reference(&ctx->ubos[shader][index].buffer, buffer); 11027ec681f3Smrg } 11037ec681f3Smrg ctx->ubos[shader][index].buffer_offset = offset; 11047ec681f3Smrg ctx->ubos[shader][index].buffer_size = 
cb->buffer_size; 11057ec681f3Smrg ctx->ubos[shader][index].user_buffer = NULL; 11067ec681f3Smrg 11077ec681f3Smrg if (cb->user_buffer) 11087ec681f3Smrg pipe_resource_reference(&buffer, NULL); 11097ec681f3Smrg 11107ec681f3Smrg if (index + 1 >= ctx->di.num_ubos[shader]) 11117ec681f3Smrg ctx->di.num_ubos[shader] = index + 1; 11127ec681f3Smrg update_descriptor_state_ubo(ctx, shader, index, new_res); 11137ec681f3Smrg } else { 11147ec681f3Smrg ctx->ubos[shader][index].buffer_offset = 0; 11157ec681f3Smrg ctx->ubos[shader][index].buffer_size = 0; 11167ec681f3Smrg ctx->ubos[shader][index].user_buffer = NULL; 11177ec681f3Smrg if (res) { 11187ec681f3Smrg unbind_ubo(ctx, res, shader, index); 11197ec681f3Smrg update_descriptor_state_ubo(ctx, shader, index, NULL); 11207ec681f3Smrg } 11217ec681f3Smrg update = !!ctx->ubos[shader][index].buffer; 11227ec681f3Smrg 11237ec681f3Smrg pipe_resource_reference(&ctx->ubos[shader][index].buffer, NULL); 11247ec681f3Smrg if (ctx->di.num_ubos[shader] == index + 1) 11257ec681f3Smrg ctx->di.num_ubos[shader]--; 11267ec681f3Smrg } 11277ec681f3Smrg if (index == 0) { 11287ec681f3Smrg /* Invalidate current inlinable uniforms. 
*/ 11297ec681f3Smrg invalidate_inlined_uniforms(ctx, shader); 11307ec681f3Smrg } 11317ec681f3Smrg 11327ec681f3Smrg if (update) 11337ec681f3Smrg zink_screen(pctx->screen)->context_invalidate_descriptor_state(ctx, shader, ZINK_DESCRIPTOR_TYPE_UBO, index, 1); 11347ec681f3Smrg} 11357ec681f3Smrg 11367ec681f3SmrgALWAYS_INLINE static void 11377ec681f3Smrgunbind_ssbo(struct zink_context *ctx, struct zink_resource *res, enum pipe_shader_type pstage, unsigned slot, bool writable) 11387ec681f3Smrg{ 11397ec681f3Smrg if (!res) 11407ec681f3Smrg return; 11417ec681f3Smrg res->ssbo_bind_mask[pstage] &= ~BITFIELD_BIT(slot); 11427ec681f3Smrg update_res_bind_count(ctx, res, pstage == PIPE_SHADER_COMPUTE, true); 11437ec681f3Smrg if (writable) 11447ec681f3Smrg res->write_bind_count[pstage == PIPE_SHADER_COMPUTE]--; 11457ec681f3Smrg} 11467ec681f3Smrg 11477ec681f3Smrgstatic void 11487ec681f3Smrgzink_set_shader_buffers(struct pipe_context *pctx, 11497ec681f3Smrg enum pipe_shader_type p_stage, 11507ec681f3Smrg unsigned start_slot, unsigned count, 11517ec681f3Smrg const struct pipe_shader_buffer *buffers, 11527ec681f3Smrg unsigned writable_bitmask) 11537ec681f3Smrg{ 11547ec681f3Smrg struct zink_context *ctx = zink_context(pctx); 11557ec681f3Smrg bool update = false; 11567ec681f3Smrg unsigned max_slot = 0; 11577ec681f3Smrg 11587ec681f3Smrg unsigned modified_bits = u_bit_consecutive(start_slot, count); 11597ec681f3Smrg unsigned old_writable_mask = ctx->writable_ssbos[p_stage]; 11607ec681f3Smrg ctx->writable_ssbos[p_stage] &= ~modified_bits; 11617ec681f3Smrg ctx->writable_ssbos[p_stage] |= writable_bitmask << start_slot; 11627ec681f3Smrg 11637ec681f3Smrg for (unsigned i = 0; i < count; i++) { 11647ec681f3Smrg struct pipe_shader_buffer *ssbo = &ctx->ssbos[p_stage][start_slot + i]; 11657ec681f3Smrg struct zink_resource *res = ssbo->buffer ? 
zink_resource(ssbo->buffer) : NULL; 11667ec681f3Smrg bool was_writable = old_writable_mask & BITFIELD64_BIT(start_slot + i); 11677ec681f3Smrg if (buffers && buffers[i].buffer) { 11687ec681f3Smrg struct zink_resource *new_res = zink_resource(buffers[i].buffer); 11697ec681f3Smrg if (new_res != res) { 11707ec681f3Smrg unbind_ssbo(ctx, res, p_stage, i, was_writable); 11717ec681f3Smrg new_res->ssbo_bind_mask[p_stage] |= BITFIELD_BIT(i); 11727ec681f3Smrg update_res_bind_count(ctx, new_res, p_stage == PIPE_SHADER_COMPUTE, false); 11737ec681f3Smrg } 11747ec681f3Smrg VkAccessFlags access = VK_ACCESS_SHADER_READ_BIT; 11757ec681f3Smrg if (ctx->writable_ssbos[p_stage] & BITFIELD64_BIT(start_slot + i)) { 11767ec681f3Smrg new_res->write_bind_count[p_stage == PIPE_SHADER_COMPUTE]++; 11777ec681f3Smrg access |= VK_ACCESS_SHADER_WRITE_BIT; 11787ec681f3Smrg } 11797ec681f3Smrg pipe_resource_reference(&ssbo->buffer, &new_res->base.b); 11807ec681f3Smrg zink_batch_resource_usage_set(&ctx->batch, new_res, access & VK_ACCESS_SHADER_WRITE_BIT); 11817ec681f3Smrg ssbo->buffer_offset = buffers[i].buffer_offset; 11827ec681f3Smrg ssbo->buffer_size = MIN2(buffers[i].buffer_size, new_res->base.b.width0 - ssbo->buffer_offset); 11837ec681f3Smrg util_range_add(&new_res->base.b, &new_res->valid_buffer_range, ssbo->buffer_offset, 11847ec681f3Smrg ssbo->buffer_offset + ssbo->buffer_size); 11857ec681f3Smrg zink_resource_buffer_barrier(ctx, new_res, access, 11867ec681f3Smrg zink_pipeline_flags_from_pipe_stage(p_stage)); 11877ec681f3Smrg update = true; 11887ec681f3Smrg max_slot = MAX2(max_slot, start_slot + i); 11897ec681f3Smrg update_descriptor_state_ssbo(ctx, p_stage, start_slot + i, new_res); 11907ec681f3Smrg } else { 11917ec681f3Smrg update = !!res; 11927ec681f3Smrg ssbo->buffer_offset = 0; 11937ec681f3Smrg ssbo->buffer_size = 0; 11947ec681f3Smrg if (res) { 11957ec681f3Smrg unbind_ssbo(ctx, res, p_stage, i, was_writable); 11967ec681f3Smrg update_descriptor_state_ssbo(ctx, p_stage, start_slot + i, 
NULL); 11977ec681f3Smrg } 11987ec681f3Smrg pipe_resource_reference(&ssbo->buffer, NULL); 11997ec681f3Smrg } 12007ec681f3Smrg } 12017ec681f3Smrg if (start_slot + count >= ctx->di.num_ssbos[p_stage]) 12027ec681f3Smrg ctx->di.num_ssbos[p_stage] = max_slot + 1; 12037ec681f3Smrg if (update) 12047ec681f3Smrg zink_screen(pctx->screen)->context_invalidate_descriptor_state(ctx, p_stage, ZINK_DESCRIPTOR_TYPE_SSBO, start_slot, count); 12057ec681f3Smrg} 12067ec681f3Smrg 12077ec681f3Smrgstatic void 12087ec681f3Smrgupdate_binds_for_samplerviews(struct zink_context *ctx, struct zink_resource *res, bool is_compute) 12097ec681f3Smrg{ 12107ec681f3Smrg VkImageLayout layout = get_layout_for_binding(res, ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW, is_compute); 12117ec681f3Smrg if (is_compute) { 12127ec681f3Smrg u_foreach_bit(slot, res->sampler_binds[PIPE_SHADER_COMPUTE]) { 12137ec681f3Smrg if (ctx->di.textures[PIPE_SHADER_COMPUTE][slot].imageLayout != layout) { 12147ec681f3Smrg update_descriptor_state_sampler(ctx, PIPE_SHADER_COMPUTE, slot, res); 12157ec681f3Smrg zink_screen(ctx->base.screen)->context_invalidate_descriptor_state(ctx, PIPE_SHADER_COMPUTE, ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW, slot, 1); 12167ec681f3Smrg } 12177ec681f3Smrg } 12187ec681f3Smrg } else { 12197ec681f3Smrg for (unsigned i = 0; i < ZINK_SHADER_COUNT; i++) { 12207ec681f3Smrg u_foreach_bit(slot, res->sampler_binds[i]) { 12217ec681f3Smrg if (ctx->di.textures[i][slot].imageLayout != layout) { 12227ec681f3Smrg update_descriptor_state_sampler(ctx, i, slot, res); 12237ec681f3Smrg zink_screen(ctx->base.screen)->context_invalidate_descriptor_state(ctx, i, ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW, slot, 1); 12247ec681f3Smrg } 12257ec681f3Smrg } 12267ec681f3Smrg } 12277ec681f3Smrg } 12287ec681f3Smrg} 12297ec681f3Smrg 12307ec681f3Smrgstatic void 12317ec681f3Smrgflush_pending_clears(struct zink_context *ctx, struct zink_resource *res) 12327ec681f3Smrg{ 12337ec681f3Smrg if (res->fb_binds && ctx->clears_enabled) 12347ec681f3Smrg 
zink_fb_clears_apply(ctx, &res->base.b); 12357ec681f3Smrg} 12367ec681f3Smrg 12377ec681f3Smrgstatic inline void 12387ec681f3Smrgunbind_shader_image_counts(struct zink_context *ctx, struct zink_resource *res, bool is_compute, bool writable) 12397ec681f3Smrg{ 12407ec681f3Smrg update_res_bind_count(ctx, res, is_compute, true); 12417ec681f3Smrg if (writable) 12427ec681f3Smrg res->write_bind_count[is_compute]--; 12437ec681f3Smrg res->image_bind_count[is_compute]--; 12447ec681f3Smrg /* if this was the last image bind, the sampler bind layouts must be updated */ 12457ec681f3Smrg if (!res->obj->is_buffer && !res->image_bind_count[is_compute] && res->bind_count[is_compute]) 12467ec681f3Smrg update_binds_for_samplerviews(ctx, res, is_compute); 12477ec681f3Smrg} 12487ec681f3Smrg 12497ec681f3SmrgALWAYS_INLINE static void 12507ec681f3Smrgcheck_for_layout_update(struct zink_context *ctx, struct zink_resource *res, bool is_compute) 12517ec681f3Smrg{ 12527ec681f3Smrg VkImageLayout layout = res->bind_count[is_compute] ? zink_descriptor_util_image_layout_eval(res, is_compute) : VK_IMAGE_LAYOUT_UNDEFINED; 12537ec681f3Smrg VkImageLayout other_layout = res->bind_count[!is_compute] ? 
zink_descriptor_util_image_layout_eval(res, !is_compute) : VK_IMAGE_LAYOUT_UNDEFINED; 12547ec681f3Smrg if (res->bind_count[is_compute] && layout && res->layout != layout) 12557ec681f3Smrg _mesa_set_add(ctx->need_barriers[is_compute], res); 12567ec681f3Smrg if (res->bind_count[!is_compute] && other_layout && (layout != other_layout || res->layout != other_layout)) 12577ec681f3Smrg _mesa_set_add(ctx->need_barriers[!is_compute], res); 12587ec681f3Smrg} 12597ec681f3Smrg 12607ec681f3Smrgstatic void 12617ec681f3Smrgunbind_shader_image(struct zink_context *ctx, enum pipe_shader_type stage, unsigned slot) 12627ec681f3Smrg{ 12637ec681f3Smrg struct zink_image_view *image_view = &ctx->image_views[stage][slot]; 12647ec681f3Smrg bool is_compute = stage == PIPE_SHADER_COMPUTE; 12657ec681f3Smrg if (!image_view->base.resource) 12667ec681f3Smrg return; 12677ec681f3Smrg 12687ec681f3Smrg struct zink_resource *res = zink_resource(image_view->base.resource); 12697ec681f3Smrg unbind_shader_image_counts(ctx, res, is_compute, image_view->base.access & PIPE_IMAGE_ACCESS_WRITE); 12707ec681f3Smrg 12717ec681f3Smrg if (image_view->base.resource->target == PIPE_BUFFER) { 12727ec681f3Smrg if (zink_batch_usage_exists(image_view->buffer_view->batch_uses)) 12737ec681f3Smrg zink_batch_reference_bufferview(&ctx->batch, image_view->buffer_view); 12747ec681f3Smrg zink_buffer_view_reference(zink_screen(ctx->base.screen), &image_view->buffer_view, NULL); 12757ec681f3Smrg } else { 12767ec681f3Smrg if (!res->image_bind_count[is_compute]) 12777ec681f3Smrg check_for_layout_update(ctx, res, is_compute); 12787ec681f3Smrg if (zink_batch_usage_exists(image_view->surface->batch_uses)) 12797ec681f3Smrg zink_batch_reference_surface(&ctx->batch, image_view->surface); 12807ec681f3Smrg zink_surface_reference(zink_screen(ctx->base.screen), &image_view->surface, NULL); 12817ec681f3Smrg } 12827ec681f3Smrg pipe_resource_reference(&image_view->base.resource, NULL); 12837ec681f3Smrg image_view->base.resource = NULL; 
12847ec681f3Smrg image_view->surface = NULL; 12857ec681f3Smrg} 12867ec681f3Smrg 12877ec681f3Smrgstatic struct zink_buffer_view * 12887ec681f3Smrgcreate_image_bufferview(struct zink_context *ctx, const struct pipe_image_view *view) 12897ec681f3Smrg{ 12907ec681f3Smrg struct zink_resource *res = zink_resource(view->resource); 12917ec681f3Smrg VkBufferViewCreateInfo bvci = create_bvci(ctx, res, view->format, view->u.buf.offset, view->u.buf.size); 12927ec681f3Smrg struct zink_buffer_view *buffer_view = get_buffer_view(ctx, res, &bvci); 12937ec681f3Smrg if (!buffer_view) 12947ec681f3Smrg return NULL; 12957ec681f3Smrg util_range_add(&res->base.b, &res->valid_buffer_range, view->u.buf.offset, 12967ec681f3Smrg view->u.buf.offset + view->u.buf.size); 12977ec681f3Smrg return buffer_view; 12987ec681f3Smrg} 12997ec681f3Smrg 13007ec681f3Smrgstatic void 13017ec681f3Smrgfinalize_image_bind(struct zink_context *ctx, struct zink_resource *res, bool is_compute) 13027ec681f3Smrg{ 13037ec681f3Smrg /* if this is the first image bind and there are sampler binds, the image's sampler layout 13047ec681f3Smrg * must be updated to GENERAL 13057ec681f3Smrg */ 13067ec681f3Smrg if (res->image_bind_count[is_compute] == 1 && 13077ec681f3Smrg res->bind_count[is_compute] > 1) 13087ec681f3Smrg update_binds_for_samplerviews(ctx, res, is_compute); 13097ec681f3Smrg check_for_layout_update(ctx, res, is_compute); 13107ec681f3Smrg} 13117ec681f3Smrg 13127ec681f3Smrgstatic struct zink_surface * 13137ec681f3Smrgcreate_image_surface(struct zink_context *ctx, const struct pipe_image_view *view, bool is_compute) 13147ec681f3Smrg{ 13157ec681f3Smrg struct zink_resource *res = zink_resource(view->resource); 13167ec681f3Smrg struct pipe_surface tmpl = {0}; 13177ec681f3Smrg tmpl.format = view->format; 13187ec681f3Smrg tmpl.u.tex.level = view->u.tex.level; 13197ec681f3Smrg tmpl.u.tex.first_layer = view->u.tex.first_layer; 13207ec681f3Smrg tmpl.u.tex.last_layer = view->u.tex.last_layer; 13217ec681f3Smrg struct 
pipe_surface *psurf = ctx->base.create_surface(&ctx->base, &res->base.b, &tmpl); 13227ec681f3Smrg if (!psurf) 13237ec681f3Smrg return NULL; 13247ec681f3Smrg /* this is actually a zink_ctx_surface, but we just want the inner surface */ 13257ec681f3Smrg struct zink_surface *surface = zink_csurface(psurf); 13267ec681f3Smrg FREE(psurf); 13277ec681f3Smrg flush_pending_clears(ctx, res); 13287ec681f3Smrg return surface; 13297ec681f3Smrg} 13307ec681f3Smrg 13317ec681f3Smrgstatic void 13327ec681f3Smrgzink_set_shader_images(struct pipe_context *pctx, 13337ec681f3Smrg enum pipe_shader_type p_stage, 13347ec681f3Smrg unsigned start_slot, unsigned count, 13357ec681f3Smrg unsigned unbind_num_trailing_slots, 13367ec681f3Smrg const struct pipe_image_view *images) 13377ec681f3Smrg{ 13387ec681f3Smrg struct zink_context *ctx = zink_context(pctx); 13397ec681f3Smrg bool update = false; 13407ec681f3Smrg for (unsigned i = 0; i < count; i++) { 13417ec681f3Smrg struct zink_image_view *image_view = &ctx->image_views[p_stage][start_slot + i]; 13427ec681f3Smrg if (images && images[i].resource) { 13437ec681f3Smrg struct zink_resource *res = zink_resource(images[i].resource); 13447ec681f3Smrg struct zink_resource *old_res = zink_resource(image_view->base.resource); 13457ec681f3Smrg if (!zink_resource_object_init_storage(ctx, res)) { 13467ec681f3Smrg debug_printf("couldn't create storage image!"); 13477ec681f3Smrg continue; 13487ec681f3Smrg } 13497ec681f3Smrg if (res != old_res) { 13507ec681f3Smrg if (old_res) { 13517ec681f3Smrg unbind_shader_image_counts(ctx, old_res, p_stage == PIPE_SHADER_COMPUTE, image_view->base.access & PIPE_IMAGE_ACCESS_WRITE); 13527ec681f3Smrg if (!old_res->obj->is_buffer && !old_res->image_bind_count[p_stage == PIPE_SHADER_COMPUTE]) 13537ec681f3Smrg check_for_layout_update(ctx, old_res, p_stage == PIPE_SHADER_COMPUTE); 13547ec681f3Smrg } 13557ec681f3Smrg update_res_bind_count(ctx, res, p_stage == PIPE_SHADER_COMPUTE, false); 13567ec681f3Smrg } 13577ec681f3Smrg 
util_copy_image_view(&image_view->base, images + i); 13587ec681f3Smrg VkAccessFlags access = 0; 13597ec681f3Smrg if (image_view->base.access & PIPE_IMAGE_ACCESS_WRITE) { 13607ec681f3Smrg zink_resource(image_view->base.resource)->write_bind_count[p_stage == PIPE_SHADER_COMPUTE]++; 13617ec681f3Smrg access |= VK_ACCESS_SHADER_WRITE_BIT; 13627ec681f3Smrg } 13637ec681f3Smrg if (image_view->base.access & PIPE_IMAGE_ACCESS_READ) { 13647ec681f3Smrg access |= VK_ACCESS_SHADER_READ_BIT; 13657ec681f3Smrg } 13667ec681f3Smrg res->image_bind_count[p_stage == PIPE_SHADER_COMPUTE]++; 13677ec681f3Smrg if (images[i].resource->target == PIPE_BUFFER) { 13687ec681f3Smrg image_view->buffer_view = create_image_bufferview(ctx, &images[i]); 13697ec681f3Smrg assert(image_view->buffer_view); 13707ec681f3Smrg zink_batch_usage_set(&image_view->buffer_view->batch_uses, ctx->batch.state); 13717ec681f3Smrg zink_resource_buffer_barrier(ctx, res, access, 13727ec681f3Smrg zink_pipeline_flags_from_pipe_stage(p_stage)); 13737ec681f3Smrg } else { 13747ec681f3Smrg image_view->surface = create_image_surface(ctx, &images[i], p_stage == PIPE_SHADER_COMPUTE); 13757ec681f3Smrg assert(image_view->surface); 13767ec681f3Smrg finalize_image_bind(ctx, res, p_stage == PIPE_SHADER_COMPUTE); 13777ec681f3Smrg zink_batch_usage_set(&image_view->surface->batch_uses, ctx->batch.state); 13787ec681f3Smrg } 13797ec681f3Smrg zink_batch_resource_usage_set(&ctx->batch, zink_resource(image_view->base.resource), 13807ec681f3Smrg zink_resource_access_is_write(access)); 13817ec681f3Smrg update = true; 13827ec681f3Smrg update_descriptor_state_image(ctx, p_stage, start_slot + i, res); 13837ec681f3Smrg } else if (image_view->base.resource) { 13847ec681f3Smrg update |= !!image_view->base.resource; 13857ec681f3Smrg 13867ec681f3Smrg unbind_shader_image(ctx, p_stage, start_slot + i); 13877ec681f3Smrg update_descriptor_state_image(ctx, p_stage, start_slot + i, NULL); 13887ec681f3Smrg } 13897ec681f3Smrg } 13907ec681f3Smrg for (unsigned i = 
0; i < unbind_num_trailing_slots; i++) { 13917ec681f3Smrg update |= !!ctx->image_views[p_stage][start_slot + count + i].base.resource; 13927ec681f3Smrg unbind_shader_image(ctx, p_stage, start_slot + count + i); 13937ec681f3Smrg update_descriptor_state_image(ctx, p_stage, start_slot + count + i, NULL); 13947ec681f3Smrg } 13957ec681f3Smrg ctx->di.num_images[p_stage] = start_slot + count; 13967ec681f3Smrg if (update) 13977ec681f3Smrg zink_screen(pctx->screen)->context_invalidate_descriptor_state(ctx, p_stage, ZINK_DESCRIPTOR_TYPE_IMAGE, start_slot, count); 13987ec681f3Smrg} 13997ec681f3Smrg 14007ec681f3SmrgALWAYS_INLINE static void 14017ec681f3Smrgcheck_samplerview_for_batch_ref(struct zink_context *ctx, struct zink_sampler_view *sv) 14027ec681f3Smrg{ 14037ec681f3Smrg const struct zink_resource *res = zink_resource(sv->base.texture); 14047ec681f3Smrg if ((res->obj->is_buffer && zink_batch_usage_exists(sv->buffer_view->batch_uses)) || 14057ec681f3Smrg (!res->obj->is_buffer && zink_batch_usage_exists(sv->image_view->batch_uses))) 14067ec681f3Smrg zink_batch_reference_sampler_view(&ctx->batch, sv); 14077ec681f3Smrg} 14087ec681f3Smrg 14097ec681f3SmrgALWAYS_INLINE static void 14107ec681f3Smrgunbind_samplerview(struct zink_context *ctx, enum pipe_shader_type stage, unsigned slot) 14117ec681f3Smrg{ 14127ec681f3Smrg struct zink_sampler_view *sv = zink_sampler_view(ctx->sampler_views[stage][slot]); 14137ec681f3Smrg if (!sv || !sv->base.texture) 14147ec681f3Smrg return; 14157ec681f3Smrg struct zink_resource *res = zink_resource(sv->base.texture); 14167ec681f3Smrg check_samplerview_for_batch_ref(ctx, sv); 14177ec681f3Smrg update_res_bind_count(ctx, res, stage == PIPE_SHADER_COMPUTE, true); 14187ec681f3Smrg res->sampler_binds[stage] &= ~BITFIELD_BIT(slot); 14197ec681f3Smrg} 14207ec681f3Smrg 14217ec681f3Smrgstatic void 14227ec681f3Smrgzink_set_sampler_views(struct pipe_context *pctx, 14237ec681f3Smrg enum pipe_shader_type shader_type, 14247ec681f3Smrg unsigned start_slot, 
14257ec681f3Smrg unsigned num_views, 14267ec681f3Smrg unsigned unbind_num_trailing_slots, 14277ec681f3Smrg bool take_ownership, 14287ec681f3Smrg struct pipe_sampler_view **views) 14297ec681f3Smrg{ 14307ec681f3Smrg struct zink_context *ctx = zink_context(pctx); 14317ec681f3Smrg unsigned i; 14327ec681f3Smrg 14337ec681f3Smrg bool update = false; 14347ec681f3Smrg for (i = 0; i < num_views; ++i) { 14357ec681f3Smrg struct pipe_sampler_view *pview = views ? views[i] : NULL; 14367ec681f3Smrg struct zink_sampler_view *a = zink_sampler_view(ctx->sampler_views[shader_type][start_slot + i]); 14377ec681f3Smrg struct zink_sampler_view *b = zink_sampler_view(pview); 14387ec681f3Smrg struct zink_resource *res = b ? zink_resource(b->base.texture) : NULL; 14397ec681f3Smrg if (b && b->base.texture) { 14407ec681f3Smrg if (!a || zink_resource(a->base.texture) != res) { 14417ec681f3Smrg if (a) 14427ec681f3Smrg unbind_samplerview(ctx, shader_type, start_slot + i); 14437ec681f3Smrg update_res_bind_count(ctx, res, shader_type == PIPE_SHADER_COMPUTE, false); 14447ec681f3Smrg } else if (a != b) { 14457ec681f3Smrg check_samplerview_for_batch_ref(ctx, a); 14467ec681f3Smrg } 14477ec681f3Smrg if (res->base.b.target == PIPE_BUFFER) { 14487ec681f3Smrg if (b->buffer_view->bvci.buffer != res->obj->buffer) { 14497ec681f3Smrg /* if this resource has been rebound while it wasn't set here, 14507ec681f3Smrg * its backing resource will have changed and thus we need to update 14517ec681f3Smrg * the bufferview 14527ec681f3Smrg */ 14537ec681f3Smrg VkBufferViewCreateInfo bvci = b->buffer_view->bvci; 14547ec681f3Smrg bvci.buffer = res->obj->buffer; 14557ec681f3Smrg struct zink_buffer_view *buffer_view = get_buffer_view(ctx, res, &bvci); 14567ec681f3Smrg assert(buffer_view != b->buffer_view); 14577ec681f3Smrg if (zink_batch_usage_exists(b->buffer_view->batch_uses)) 14587ec681f3Smrg zink_batch_reference_bufferview(&ctx->batch, b->buffer_view); 14597ec681f3Smrg 
zink_buffer_view_reference(zink_screen(ctx->base.screen), &b->buffer_view, NULL); 14607ec681f3Smrg b->buffer_view = buffer_view; 14617ec681f3Smrg update = true; 14627ec681f3Smrg } 14637ec681f3Smrg zink_batch_usage_set(&b->buffer_view->batch_uses, ctx->batch.state); 14647ec681f3Smrg zink_resource_buffer_barrier(ctx, res, VK_ACCESS_SHADER_READ_BIT, 14657ec681f3Smrg zink_pipeline_flags_from_pipe_stage(shader_type)); 14667ec681f3Smrg if (!a || a->buffer_view->buffer_view != b->buffer_view->buffer_view) 14677ec681f3Smrg update = true; 14687ec681f3Smrg } else if (!res->obj->is_buffer) { 14697ec681f3Smrg if (res->obj != b->image_view->obj) { 14707ec681f3Smrg struct pipe_surface *psurf = &b->image_view->base; 14717ec681f3Smrg VkImageView iv = b->image_view->image_view; 14727ec681f3Smrg zink_rebind_surface(ctx, &psurf); 14737ec681f3Smrg b->image_view = zink_surface(psurf); 14747ec681f3Smrg update |= iv != b->image_view->image_view; 14757ec681f3Smrg } else if (a != b) 14767ec681f3Smrg update = true; 14777ec681f3Smrg flush_pending_clears(ctx, res); 14787ec681f3Smrg check_for_layout_update(ctx, res, shader_type == PIPE_SHADER_COMPUTE); 14797ec681f3Smrg zink_batch_usage_set(&b->image_view->batch_uses, ctx->batch.state); 14807ec681f3Smrg if (!a) 14817ec681f3Smrg update = true; 14827ec681f3Smrg } 14837ec681f3Smrg res->sampler_binds[shader_type] |= BITFIELD_BIT(start_slot + i); 14847ec681f3Smrg zink_batch_resource_usage_set(&ctx->batch, res, false); 14857ec681f3Smrg } else if (a) { 14867ec681f3Smrg unbind_samplerview(ctx, shader_type, start_slot + i); 14877ec681f3Smrg update = true; 14887ec681f3Smrg } 14897ec681f3Smrg if (take_ownership) { 14907ec681f3Smrg pipe_sampler_view_reference(&ctx->sampler_views[shader_type][start_slot + i], NULL); 14917ec681f3Smrg ctx->sampler_views[shader_type][start_slot + i] = pview; 14927ec681f3Smrg } else { 14937ec681f3Smrg pipe_sampler_view_reference(&ctx->sampler_views[shader_type][start_slot + i], pview); 14947ec681f3Smrg } 14957ec681f3Smrg 
update_descriptor_state_sampler(ctx, shader_type, start_slot + i, res); 14967ec681f3Smrg } 14977ec681f3Smrg for (; i < num_views + unbind_num_trailing_slots; ++i) { 14987ec681f3Smrg update |= !!ctx->sampler_views[shader_type][start_slot + i]; 14997ec681f3Smrg unbind_samplerview(ctx, shader_type, start_slot + i); 15007ec681f3Smrg pipe_sampler_view_reference( 15017ec681f3Smrg &ctx->sampler_views[shader_type][start_slot + i], 15027ec681f3Smrg NULL); 15037ec681f3Smrg update_descriptor_state_sampler(ctx, shader_type, start_slot + i, NULL); 15047ec681f3Smrg } 15057ec681f3Smrg ctx->di.num_sampler_views[shader_type] = start_slot + num_views; 15067ec681f3Smrg if (update) 15077ec681f3Smrg zink_screen(pctx->screen)->context_invalidate_descriptor_state(ctx, shader_type, ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW, start_slot, num_views); 15087ec681f3Smrg} 15097ec681f3Smrg 15107ec681f3Smrgstatic uint64_t 15117ec681f3Smrgzink_create_texture_handle(struct pipe_context *pctx, struct pipe_sampler_view *view, const struct pipe_sampler_state *state) 15127ec681f3Smrg{ 15137ec681f3Smrg struct zink_context *ctx = zink_context(pctx); 15147ec681f3Smrg struct zink_resource *res = zink_resource(view->texture); 15157ec681f3Smrg struct zink_sampler_view *sv = zink_sampler_view(view); 15167ec681f3Smrg struct zink_bindless_descriptor *bd; 15177ec681f3Smrg bd = calloc(1, sizeof(struct zink_bindless_descriptor)); 15187ec681f3Smrg if (!bd) 15197ec681f3Smrg return 0; 15207ec681f3Smrg 15217ec681f3Smrg bd->sampler = pctx->create_sampler_state(pctx, state); 15227ec681f3Smrg if (!bd->sampler) { 15237ec681f3Smrg free(bd); 15247ec681f3Smrg return 0; 15257ec681f3Smrg } 15267ec681f3Smrg 15277ec681f3Smrg bd->ds.is_buffer = res->base.b.target == PIPE_BUFFER; 15287ec681f3Smrg if (res->base.b.target == PIPE_BUFFER) 15297ec681f3Smrg zink_buffer_view_reference(zink_screen(pctx->screen), &bd->ds.bufferview, sv->buffer_view); 15307ec681f3Smrg else 15317ec681f3Smrg zink_surface_reference(zink_screen(pctx->screen), 
&bd->ds.surface, sv->image_view); 15327ec681f3Smrg uint64_t handle = util_idalloc_alloc(&ctx->di.bindless[bd->ds.is_buffer].tex_slots); 15337ec681f3Smrg if (bd->ds.is_buffer) 15347ec681f3Smrg handle += ZINK_MAX_BINDLESS_HANDLES; 15357ec681f3Smrg bd->handle = handle; 15367ec681f3Smrg _mesa_hash_table_insert(&ctx->di.bindless[bd->ds.is_buffer].tex_handles, (void*)(uintptr_t)handle, bd); 15377ec681f3Smrg return handle; 15387ec681f3Smrg} 15397ec681f3Smrg 15407ec681f3Smrgstatic void 15417ec681f3Smrgzink_delete_texture_handle(struct pipe_context *pctx, uint64_t handle) 15427ec681f3Smrg{ 15437ec681f3Smrg struct zink_context *ctx = zink_context(pctx); 15447ec681f3Smrg bool is_buffer = ZINK_BINDLESS_IS_BUFFER(handle); 15457ec681f3Smrg struct hash_entry *he = _mesa_hash_table_search(&ctx->di.bindless[is_buffer].tex_handles, (void*)(uintptr_t)handle); 15467ec681f3Smrg assert(he); 15477ec681f3Smrg struct zink_bindless_descriptor *bd = he->data; 15487ec681f3Smrg struct zink_descriptor_surface *ds = &bd->ds; 15497ec681f3Smrg _mesa_hash_table_remove(&ctx->di.bindless[is_buffer].tex_handles, he); 15507ec681f3Smrg uint32_t h = handle; 15517ec681f3Smrg util_dynarray_append(&ctx->batch.state->bindless_releases[0], uint32_t, h); 15527ec681f3Smrg 15537ec681f3Smrg struct zink_resource *res = zink_descriptor_surface_resource(ds); 15547ec681f3Smrg if (ds->is_buffer) { 15557ec681f3Smrg if (zink_resource_has_usage(res)) 15567ec681f3Smrg zink_batch_reference_bufferview(&ctx->batch, ds->bufferview); 15577ec681f3Smrg zink_buffer_view_reference(zink_screen(pctx->screen), &ds->bufferview, NULL); 15587ec681f3Smrg } else { 15597ec681f3Smrg if (zink_resource_has_usage(res)) 15607ec681f3Smrg zink_batch_reference_surface(&ctx->batch, ds->surface); 15617ec681f3Smrg zink_surface_reference(zink_screen(pctx->screen), &ds->surface, NULL); 15627ec681f3Smrg pctx->delete_sampler_state(pctx, bd->sampler); 15637ec681f3Smrg } 15647ec681f3Smrg free(ds); 15657ec681f3Smrg} 15667ec681f3Smrg 15677ec681f3Smrgstatic 
void 15687ec681f3Smrgrebind_bindless_bufferview(struct zink_context *ctx, struct zink_resource *res, struct zink_descriptor_surface *ds) 15697ec681f3Smrg{ 15707ec681f3Smrg /* if this resource has been rebound while it wasn't set here, 15717ec681f3Smrg * its backing resource will have changed and thus we need to update 15727ec681f3Smrg * the bufferview 15737ec681f3Smrg */ 15747ec681f3Smrg VkBufferViewCreateInfo bvci = ds->bufferview->bvci; 15757ec681f3Smrg bvci.buffer = res->obj->buffer; 15767ec681f3Smrg struct zink_buffer_view *buffer_view = get_buffer_view(ctx, res, &bvci); 15777ec681f3Smrg assert(buffer_view != ds->bufferview); 15787ec681f3Smrg if (zink_resource_has_usage(res)) 15797ec681f3Smrg zink_batch_reference_bufferview(&ctx->batch, ds->bufferview); 15807ec681f3Smrg zink_buffer_view_reference(zink_screen(ctx->base.screen), &ds->bufferview, NULL); 15817ec681f3Smrg ds->bufferview = buffer_view; 15827ec681f3Smrg} 15837ec681f3Smrg 15847ec681f3Smrgstatic void 15857ec681f3Smrgzero_bindless_descriptor(struct zink_context *ctx, uint32_t handle, bool is_buffer, bool is_image) 15867ec681f3Smrg{ 15877ec681f3Smrg if (likely(zink_screen(ctx->base.screen)->info.rb2_feats.nullDescriptor)) { 15887ec681f3Smrg if (is_buffer) { 15897ec681f3Smrg VkBufferView *bv = &ctx->di.bindless[is_image].buffer_infos[handle]; 15907ec681f3Smrg *bv = VK_NULL_HANDLE; 15917ec681f3Smrg } else { 15927ec681f3Smrg VkDescriptorImageInfo *ii = &ctx->di.bindless[is_image].img_infos[handle]; 15937ec681f3Smrg memset(ii, 0, sizeof(*ii)); 15947ec681f3Smrg } 15957ec681f3Smrg } else { 15967ec681f3Smrg if (is_buffer) { 15977ec681f3Smrg VkBufferView *bv = &ctx->di.bindless[is_image].buffer_infos[handle]; 15987ec681f3Smrg struct zink_buffer_view *null_bufferview = ctx->dummy_bufferview; 15997ec681f3Smrg *bv = null_bufferview->buffer_view; 16007ec681f3Smrg } else { 16017ec681f3Smrg struct zink_surface *null_surface = zink_csurface(ctx->dummy_surface[is_image]); 16027ec681f3Smrg VkDescriptorImageInfo *ii = 
&ctx->di.bindless[is_image].img_infos[handle]; 16037ec681f3Smrg ii->sampler = VK_NULL_HANDLE; 16047ec681f3Smrg ii->imageView = null_surface->image_view; 16057ec681f3Smrg ii->imageLayout = VK_IMAGE_LAYOUT_GENERAL; 16067ec681f3Smrg } 16077ec681f3Smrg } 16087ec681f3Smrg} 16097ec681f3Smrg 16107ec681f3Smrgstatic void 16117ec681f3Smrgzink_make_texture_handle_resident(struct pipe_context *pctx, uint64_t handle, bool resident) 16127ec681f3Smrg{ 16137ec681f3Smrg struct zink_context *ctx = zink_context(pctx); 16147ec681f3Smrg bool is_buffer = ZINK_BINDLESS_IS_BUFFER(handle); 16157ec681f3Smrg struct hash_entry *he = _mesa_hash_table_search(&ctx->di.bindless[is_buffer].tex_handles, (void*)(uintptr_t)handle); 16167ec681f3Smrg assert(he); 16177ec681f3Smrg struct zink_bindless_descriptor *bd = he->data; 16187ec681f3Smrg struct zink_descriptor_surface *ds = &bd->ds; 16197ec681f3Smrg struct zink_resource *res = zink_descriptor_surface_resource(ds); 16207ec681f3Smrg if (is_buffer) 16217ec681f3Smrg handle -= ZINK_MAX_BINDLESS_HANDLES; 16227ec681f3Smrg if (resident) { 16237ec681f3Smrg update_res_bind_count(ctx, res, false, false); 16247ec681f3Smrg update_res_bind_count(ctx, res, true, false); 16257ec681f3Smrg res->bindless[0]++; 16267ec681f3Smrg if (is_buffer) { 16277ec681f3Smrg if (ds->bufferview->bvci.buffer != res->obj->buffer) 16287ec681f3Smrg rebind_bindless_bufferview(ctx, res, ds); 16297ec681f3Smrg VkBufferView *bv = &ctx->di.bindless[0].buffer_infos[handle]; 16307ec681f3Smrg *bv = ds->bufferview->buffer_view; 16317ec681f3Smrg zink_resource_buffer_barrier(ctx, res, VK_ACCESS_SHADER_READ_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT); 16327ec681f3Smrg } else { 16337ec681f3Smrg VkDescriptorImageInfo *ii = &ctx->di.bindless[0].img_infos[handle]; 16347ec681f3Smrg ii->sampler = bd->sampler->sampler; 16357ec681f3Smrg ii->imageView = ds->surface->image_view; 16367ec681f3Smrg ii->imageLayout = zink_descriptor_util_image_layout_eval(res, false); 
16377ec681f3Smrg flush_pending_clears(ctx, res); 16387ec681f3Smrg check_for_layout_update(ctx, res, false); 16397ec681f3Smrg check_for_layout_update(ctx, res, true); 16407ec681f3Smrg } 16417ec681f3Smrg zink_batch_resource_usage_set(&ctx->batch, res, false); 16427ec681f3Smrg util_dynarray_append(&ctx->di.bindless[0].resident, struct zink_bindless_descriptor *, bd); 16437ec681f3Smrg uint32_t h = is_buffer ? handle + ZINK_MAX_BINDLESS_HANDLES : handle; 16447ec681f3Smrg util_dynarray_append(&ctx->di.bindless[0].updates, uint32_t, h); 16457ec681f3Smrg } else { 16467ec681f3Smrg zero_bindless_descriptor(ctx, handle, is_buffer, false); 16477ec681f3Smrg util_dynarray_delete_unordered(&ctx->di.bindless[0].resident, struct zink_bindless_descriptor *, bd); 16487ec681f3Smrg update_res_bind_count(ctx, res, false, true); 16497ec681f3Smrg update_res_bind_count(ctx, res, true, true); 16507ec681f3Smrg res->bindless[0]--; 16517ec681f3Smrg for (unsigned i = 0; i < 2; i++) { 16527ec681f3Smrg if (!res->image_bind_count[i]) 16537ec681f3Smrg check_for_layout_update(ctx, res, i); 16547ec681f3Smrg } 16557ec681f3Smrg } 16567ec681f3Smrg ctx->di.bindless_dirty[0] = true; 16577ec681f3Smrg} 16587ec681f3Smrg 16597ec681f3Smrgstatic uint64_t 16607ec681f3Smrgzink_create_image_handle(struct pipe_context *pctx, const struct pipe_image_view *view) 16617ec681f3Smrg{ 16627ec681f3Smrg struct zink_context *ctx = zink_context(pctx); 16637ec681f3Smrg struct zink_resource *res = zink_resource(view->resource); 16647ec681f3Smrg struct zink_bindless_descriptor *bd; 16657ec681f3Smrg if (!zink_resource_object_init_storage(ctx, res)) { 16667ec681f3Smrg debug_printf("couldn't create storage image!"); 16677ec681f3Smrg return 0; 16687ec681f3Smrg } 16697ec681f3Smrg bd = malloc(sizeof(struct zink_bindless_descriptor)); 16707ec681f3Smrg if (!bd) 16717ec681f3Smrg return 0; 16727ec681f3Smrg bd->sampler = NULL; 16737ec681f3Smrg 16747ec681f3Smrg bd->ds.is_buffer = res->base.b.target == PIPE_BUFFER; 16757ec681f3Smrg if 
(res->base.b.target == PIPE_BUFFER) 16767ec681f3Smrg bd->ds.bufferview = create_image_bufferview(ctx, view); 16777ec681f3Smrg else 16787ec681f3Smrg bd->ds.surface = create_image_surface(ctx, view, false); 16797ec681f3Smrg uint64_t handle = util_idalloc_alloc(&ctx->di.bindless[bd->ds.is_buffer].img_slots); 16807ec681f3Smrg if (bd->ds.is_buffer) 16817ec681f3Smrg handle += ZINK_MAX_BINDLESS_HANDLES; 16827ec681f3Smrg bd->handle = handle; 16837ec681f3Smrg _mesa_hash_table_insert(&ctx->di.bindless[bd->ds.is_buffer].img_handles, (void*)(uintptr_t)handle, bd); 16847ec681f3Smrg return handle; 16857ec681f3Smrg} 16867ec681f3Smrg 16877ec681f3Smrgstatic void 16887ec681f3Smrgzink_delete_image_handle(struct pipe_context *pctx, uint64_t handle) 16897ec681f3Smrg{ 16907ec681f3Smrg struct zink_context *ctx = zink_context(pctx); 16917ec681f3Smrg bool is_buffer = ZINK_BINDLESS_IS_BUFFER(handle); 16927ec681f3Smrg struct hash_entry *he = _mesa_hash_table_search(&ctx->di.bindless[is_buffer].img_handles, (void*)(uintptr_t)handle); 16937ec681f3Smrg assert(he); 16947ec681f3Smrg struct zink_descriptor_surface *ds = he->data; 16957ec681f3Smrg _mesa_hash_table_remove(&ctx->di.bindless[is_buffer].img_handles, he); 16967ec681f3Smrg uint32_t h = handle; 16977ec681f3Smrg util_dynarray_append(&ctx->batch.state->bindless_releases[1], uint32_t, h); 16987ec681f3Smrg 16997ec681f3Smrg struct zink_resource *res = zink_descriptor_surface_resource(ds); 17007ec681f3Smrg if (ds->is_buffer) { 17017ec681f3Smrg if (zink_resource_has_usage(res)) 17027ec681f3Smrg zink_batch_reference_bufferview(&ctx->batch, ds->bufferview); 17037ec681f3Smrg zink_buffer_view_reference(zink_screen(pctx->screen), &ds->bufferview, NULL); 17047ec681f3Smrg } else { 17057ec681f3Smrg if (zink_resource_has_usage(res)) 17067ec681f3Smrg zink_batch_reference_surface(&ctx->batch, ds->surface); 17077ec681f3Smrg zink_surface_reference(zink_screen(pctx->screen), &ds->surface, NULL); 17087ec681f3Smrg } 17097ec681f3Smrg free(ds); 17107ec681f3Smrg} 
17117ec681f3Smrg 17127ec681f3Smrgstatic void 17137ec681f3Smrgzink_make_image_handle_resident(struct pipe_context *pctx, uint64_t handle, unsigned paccess, bool resident) 17147ec681f3Smrg{ 17157ec681f3Smrg struct zink_context *ctx = zink_context(pctx); 17167ec681f3Smrg bool is_buffer = ZINK_BINDLESS_IS_BUFFER(handle); 17177ec681f3Smrg struct hash_entry *he = _mesa_hash_table_search(&ctx->di.bindless[is_buffer].img_handles, (void*)(uintptr_t)handle); 17187ec681f3Smrg assert(he); 17197ec681f3Smrg struct zink_bindless_descriptor *bd = he->data; 17207ec681f3Smrg struct zink_descriptor_surface *ds = &bd->ds; 17217ec681f3Smrg bd->access = paccess; 17227ec681f3Smrg struct zink_resource *res = zink_descriptor_surface_resource(ds); 17237ec681f3Smrg VkAccessFlags access = 0; 17247ec681f3Smrg if (paccess & PIPE_IMAGE_ACCESS_WRITE) { 17257ec681f3Smrg if (resident) { 17267ec681f3Smrg res->write_bind_count[0]++; 17277ec681f3Smrg res->write_bind_count[1]++; 17287ec681f3Smrg } else { 17297ec681f3Smrg res->write_bind_count[0]--; 17307ec681f3Smrg res->write_bind_count[1]--; 17317ec681f3Smrg } 17327ec681f3Smrg access |= VK_ACCESS_SHADER_WRITE_BIT; 17337ec681f3Smrg } 17347ec681f3Smrg if (paccess & PIPE_IMAGE_ACCESS_READ) { 17357ec681f3Smrg access |= VK_ACCESS_SHADER_READ_BIT; 17367ec681f3Smrg } 17377ec681f3Smrg if (is_buffer) 17387ec681f3Smrg handle -= ZINK_MAX_BINDLESS_HANDLES; 17397ec681f3Smrg if (resident) { 17407ec681f3Smrg update_res_bind_count(ctx, res, false, false); 17417ec681f3Smrg update_res_bind_count(ctx, res, true, false); 17427ec681f3Smrg res->image_bind_count[0]++; 17437ec681f3Smrg res->image_bind_count[1]++; 17447ec681f3Smrg res->bindless[1]++; 17457ec681f3Smrg if (is_buffer) { 17467ec681f3Smrg if (ds->bufferview->bvci.buffer != res->obj->buffer) 17477ec681f3Smrg rebind_bindless_bufferview(ctx, res, ds); 17487ec681f3Smrg VkBufferView *bv = &ctx->di.bindless[1].buffer_infos[handle]; 17497ec681f3Smrg *bv = ds->bufferview->buffer_view; 17507ec681f3Smrg 
zink_resource_buffer_barrier(ctx, res, access, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT); 17517ec681f3Smrg } else { 17527ec681f3Smrg VkDescriptorImageInfo *ii = &ctx->di.bindless[1].img_infos[handle]; 17537ec681f3Smrg ii->sampler = VK_NULL_HANDLE; 17547ec681f3Smrg ii->imageView = ds->surface->image_view; 17557ec681f3Smrg ii->imageLayout = VK_IMAGE_LAYOUT_GENERAL; 17567ec681f3Smrg finalize_image_bind(ctx, res, false); 17577ec681f3Smrg finalize_image_bind(ctx, res, true); 17587ec681f3Smrg } 17597ec681f3Smrg zink_batch_resource_usage_set(&ctx->batch, res, zink_resource_access_is_write(access)); 17607ec681f3Smrg util_dynarray_append(&ctx->di.bindless[1].resident, struct zink_bindless_descriptor *, bd); 17617ec681f3Smrg uint32_t h = is_buffer ? handle + ZINK_MAX_BINDLESS_HANDLES : handle; 17627ec681f3Smrg util_dynarray_append(&ctx->di.bindless[1].updates, uint32_t, h); 17637ec681f3Smrg } else { 17647ec681f3Smrg zero_bindless_descriptor(ctx, handle, is_buffer, true); 17657ec681f3Smrg util_dynarray_delete_unordered(&ctx->di.bindless[1].resident, struct zink_bindless_descriptor *, bd); 17667ec681f3Smrg unbind_shader_image_counts(ctx, res, false, false); 17677ec681f3Smrg unbind_shader_image_counts(ctx, res, true, false); 17687ec681f3Smrg res->bindless[1]--; 17697ec681f3Smrg for (unsigned i = 0; i < 2; i++) { 17707ec681f3Smrg if (!res->image_bind_count[i]) 17717ec681f3Smrg check_for_layout_update(ctx, res, i); 17727ec681f3Smrg } 17737ec681f3Smrg } 17747ec681f3Smrg ctx->di.bindless_dirty[1] = true; 17757ec681f3Smrg} 17767ec681f3Smrg 17777ec681f3Smrgstatic void 17787ec681f3Smrgzink_set_stencil_ref(struct pipe_context *pctx, 17797ec681f3Smrg const struct pipe_stencil_ref ref) 17807ec681f3Smrg{ 17817ec681f3Smrg struct zink_context *ctx = zink_context(pctx); 17827ec681f3Smrg ctx->stencil_ref = ref; 17837ec681f3Smrg ctx->stencil_ref_changed = true; 17847ec681f3Smrg} 17857ec681f3Smrg 17867ec681f3Smrgstatic void 
17877ec681f3Smrgzink_set_clip_state(struct pipe_context *pctx, 17887ec681f3Smrg const struct pipe_clip_state *pcs) 17897ec681f3Smrg{ 17907ec681f3Smrg} 17917ec681f3Smrg 17927ec681f3Smrgstatic void 17937ec681f3Smrgzink_set_tess_state(struct pipe_context *pctx, 17947ec681f3Smrg const float default_outer_level[4], 17957ec681f3Smrg const float default_inner_level[2]) 17967ec681f3Smrg{ 17977ec681f3Smrg struct zink_context *ctx = zink_context(pctx); 17987ec681f3Smrg memcpy(&ctx->default_inner_level, default_inner_level, sizeof(ctx->default_inner_level)); 17997ec681f3Smrg memcpy(&ctx->default_outer_level, default_outer_level, sizeof(ctx->default_outer_level)); 18007ec681f3Smrg} 18017ec681f3Smrg 18027ec681f3Smrgstatic void 18037ec681f3Smrgzink_set_patch_vertices(struct pipe_context *pctx, uint8_t patch_vertices) 18047ec681f3Smrg{ 18057ec681f3Smrg struct zink_context *ctx = zink_context(pctx); 18067ec681f3Smrg ctx->gfx_pipeline_state.patch_vertices = patch_vertices; 18077ec681f3Smrg} 18087ec681f3Smrg 18097ec681f3Smrgvoid 18107ec681f3Smrgzink_update_fbfetch(struct zink_context *ctx) 18117ec681f3Smrg{ 18127ec681f3Smrg const bool had_fbfetch = ctx->di.fbfetch.imageLayout == VK_IMAGE_LAYOUT_GENERAL; 18137ec681f3Smrg if (!ctx->gfx_stages[PIPE_SHADER_FRAGMENT] || 18147ec681f3Smrg !ctx->gfx_stages[PIPE_SHADER_FRAGMENT]->nir->info.fs.uses_fbfetch_output) { 18157ec681f3Smrg if (!had_fbfetch) 18167ec681f3Smrg return; 18177ec681f3Smrg ctx->rp_changed = true; 18187ec681f3Smrg zink_batch_no_rp(ctx); 18197ec681f3Smrg ctx->di.fbfetch.imageLayout = VK_IMAGE_LAYOUT_UNDEFINED; 18207ec681f3Smrg ctx->di.fbfetch.imageView = zink_screen(ctx->base.screen)->info.rb2_feats.nullDescriptor ? 
18217ec681f3Smrg VK_NULL_HANDLE : 18227ec681f3Smrg zink_csurface(ctx->dummy_surface[0])->image_view; 18237ec681f3Smrg zink_screen(ctx->base.screen)->context_invalidate_descriptor_state(ctx, PIPE_SHADER_FRAGMENT, ZINK_DESCRIPTOR_TYPE_UBO, 0, 1); 18247ec681f3Smrg return; 18257ec681f3Smrg } 18267ec681f3Smrg 18277ec681f3Smrg bool changed = !had_fbfetch; 18287ec681f3Smrg if (ctx->fb_state.cbufs[0]) { 18297ec681f3Smrg VkImageView fbfetch = zink_csurface(ctx->fb_state.cbufs[0])->image_view; 18307ec681f3Smrg changed |= fbfetch != ctx->di.fbfetch.imageView; 18317ec681f3Smrg ctx->di.fbfetch.imageView = zink_csurface(ctx->fb_state.cbufs[0])->image_view; 18327ec681f3Smrg } 18337ec681f3Smrg ctx->di.fbfetch.imageLayout = VK_IMAGE_LAYOUT_GENERAL; 18347ec681f3Smrg if (changed) { 18357ec681f3Smrg zink_screen(ctx->base.screen)->context_invalidate_descriptor_state(ctx, PIPE_SHADER_FRAGMENT, ZINK_DESCRIPTOR_TYPE_UBO, 0, 1); 18367ec681f3Smrg ctx->rp_changed = true; 18377ec681f3Smrg zink_batch_no_rp(ctx); 18387ec681f3Smrg } 18397ec681f3Smrg} 18407ec681f3Smrg 18417ec681f3Smrgstatic size_t 18427ec681f3Smrgrp_state_size(const struct zink_render_pass_pipeline_state *pstate) 18437ec681f3Smrg{ 18447ec681f3Smrg return offsetof(struct zink_render_pass_pipeline_state, attachments) + 18457ec681f3Smrg sizeof(pstate->attachments[0]) * pstate->num_attachments; 18467ec681f3Smrg} 18477ec681f3Smrg 18487ec681f3Smrgstatic uint32_t 18497ec681f3Smrghash_rp_state(const void *key) 18507ec681f3Smrg{ 18517ec681f3Smrg const struct zink_render_pass_pipeline_state *s = key; 18527ec681f3Smrg return _mesa_hash_data(key, rp_state_size(s)); 18537ec681f3Smrg} 18547ec681f3Smrg 18557ec681f3Smrgstatic bool 18567ec681f3Smrgequals_rp_state(const void *a, const void *b) 18577ec681f3Smrg{ 18587ec681f3Smrg return !memcmp(a, b, rp_state_size(a)); 18597ec681f3Smrg} 18607ec681f3Smrg 18617ec681f3Smrgstatic uint32_t 18627ec681f3Smrghash_render_pass_state(const void *key) 18637ec681f3Smrg{ 18647ec681f3Smrg struct 
/* Hash a zink_render_pass_state key: the fixed header plus only the used
 * entries of the variable-length 'rts' array are hashed.
 */
static uint32_t
hash_render_pass_state(const void *key)
{
   struct zink_render_pass_state* s = (struct zink_render_pass_state*)key;
   return _mesa_hash_data(key, offsetof(struct zink_render_pass_state, rts) + sizeof(s->rts[0]) * s->num_rts);
}

/* Equality callback for the render-pass cache; num_rts is compared first so
 * the memcmp length (derived from s_a) is valid for both keys.
 */
static bool
equals_render_pass_state(const void *a, const void *b)
{
   const struct zink_render_pass_state *s_a = a, *s_b = b;
   if (s_a->num_rts != s_b->num_rts)
      return false;
   return memcmp(a, b, offsetof(struct zink_render_pass_state, rts) + sizeof(s_a->rts[0]) * s_a->num_rts) == 0;
}

/* Build a zink_render_pass_state describing the current framebuffer
 * (formats, sample counts, fbfetch usage, resolves, and which attachments
 * are cleared by the render pass itself) and return a matching render pass
 * from ctx->render_pass_cache, creating and caching one on miss.
 *
 * Also deduplicates the associated pipeline state in
 * ctx->render_pass_state_cache and stores its id in rp->pipeline_state.
 * Returns NULL only if the cache insert fails.
 */
static struct zink_render_pass *
get_render_pass(struct zink_context *ctx)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   const struct pipe_framebuffer_state *fb = &ctx->fb_state;
   struct zink_render_pass_state state = {0};
   uint32_t clears = 0;
   state.swapchain_init = ctx->new_swapchain;
   state.samples = fb->samples > 0;

   /* mark color attachments read back via framebuffer fetch */
   u_foreach_bit(i, ctx->fbfetch_outputs)
      state.rts[i].fbfetch = true;

   for (int i = 0; i < fb->nr_cbufs; i++) {
      struct pipe_surface *surf = fb->cbufs[i];
      if (surf) {
         /* a transient surface is the multisampled scratch attachment that
          * gets resolved into 'surf' at the end of the pass
          */
         struct zink_surface *transient = zink_transient_surface(surf);
         state.rts[i].format = zink_get_format(screen, surf->format);
         state.rts[i].samples = MAX3(transient ? transient->base.nr_samples : 0, surf->texture->nr_samples, 1);
         /* a clear can be folded into the render pass load op only if the
          * first pending clear doesn't require an explicit clear call
          */
         state.rts[i].clear_color = zink_fb_clear_enabled(ctx, i) && !zink_fb_clear_first_needs_explicit(&ctx->fb_clears[i]);
         clears |= !!state.rts[i].clear_color ? PIPE_CLEAR_COLOR0 << i : 0;
         state.rts[i].swapchain = surf->texture->bind & PIPE_BIND_SCANOUT;
         if (transient) {
            state.num_cresolves++;
            state.rts[i].resolve = true;
            /* a non-cleared transient needs its contents expanded from the
             * resolved texture before rendering (see zink_begin_render_pass)
             */
            if (!state.rts[i].clear_color)
               state.msaa_expand_mask |= BITFIELD_BIT(i);
         }
      } else {
         /* placeholder for an unbound color attachment slot */
         state.rts[i].format = VK_FORMAT_R8_UINT;
         state.rts[i].samples = fb->samples;
      }
      state.num_rts++;
   }
   state.num_cbufs = fb->nr_cbufs;
   assert(!state.num_cresolves || state.num_cbufs == state.num_cresolves);

   if (fb->zsbuf) {
      struct zink_resource *zsbuf = zink_resource(fb->zsbuf->texture);
      /* depth/stencil clears live in the slot past the color buffers */
      struct zink_framebuffer_clear *fb_clear = &ctx->fb_clears[PIPE_MAX_COLOR_BUFS];
      struct zink_surface *transient = zink_transient_surface(fb->zsbuf);
      state.rts[fb->nr_cbufs].format = zsbuf->format;
      state.rts[fb->nr_cbufs].samples = MAX3(transient ? transient->base.nr_samples : 0, fb->zsbuf->texture->nr_samples, 1);
      if (transient) {
         state.num_zsresolves = 1;
         state.rts[fb->nr_cbufs].resolve = true;
      }
      /* depth and stencil clears are tracked separately since the render
       * pass may fold in one but not the other
       */
      state.rts[fb->nr_cbufs].clear_color = zink_fb_clear_enabled(ctx, PIPE_MAX_COLOR_BUFS) &&
                                            !zink_fb_clear_first_needs_explicit(fb_clear) &&
                                            (zink_fb_clear_element(fb_clear, 0)->zs.bits & PIPE_CLEAR_DEPTH);
      state.rts[fb->nr_cbufs].clear_stencil = zink_fb_clear_enabled(ctx, PIPE_MAX_COLOR_BUFS) &&
                                              !zink_fb_clear_first_needs_explicit(fb_clear) &&
                                              (zink_fb_clear_element(fb_clear, 0)->zs.bits & PIPE_CLEAR_STENCIL);
      if (state.rts[fb->nr_cbufs].clear_color)
         clears |= PIPE_CLEAR_DEPTH;
      if (state.rts[fb->nr_cbufs].clear_stencil)
         clears |= PIPE_CLEAR_STENCIL;
      /* zs needs a writable layout if the dsa state writes depth or the
       * fragment shader writes depth/stencil outputs, or if it gets
       * resolved/cleared by the pass
       */
      const uint64_t outputs_written = ctx->gfx_stages[PIPE_SHADER_FRAGMENT] ?
                                       ctx->gfx_stages[PIPE_SHADER_FRAGMENT]->nir->info.outputs_written : 0;
      bool needs_write = (ctx->dsa_state && ctx->dsa_state->hw_state.depth_write) ||
                         outputs_written & (BITFIELD64_BIT(FRAG_RESULT_DEPTH) | BITFIELD64_BIT(FRAG_RESULT_STENCIL));
      state.rts[fb->nr_cbufs].needs_write = needs_write || state.num_zsresolves || state.rts[fb->nr_cbufs].clear_color || state.rts[fb->nr_cbufs].clear_stencil;
      state.num_rts++;
   }
   state.have_zsbuf = fb->zsbuf != NULL;
   /* the computed clear set must agree with the context's tracking */
   assert(clears == ctx->rp_clears_enabled);
   state.clears = clears;
   uint32_t hash = hash_render_pass_state(&state);
   struct hash_entry *entry = _mesa_hash_table_search_pre_hashed(ctx->render_pass_cache, hash,
                                                                 &state);
   struct zink_render_pass *rp;
   if (entry) {
      rp = entry->data;
      assert(rp->state.clears == clears);
   } else {
      struct zink_render_pass_pipeline_state pstate;
      pstate.samples = state.samples;
      rp = zink_create_render_pass(screen, &state, &pstate);
      /* rp->state (not the stack copy) is the stored key so it stays alive
       * with the render pass itself
       */
      if (!_mesa_hash_table_insert_pre_hashed(ctx->render_pass_cache, hash, &rp->state, rp))
         return NULL;
      /* deduplicate the pipeline-relevant subset of the state; on first
       * sight, allocate a persistent copy owned by the context and assign
       * it the next sequential id
       */
      bool found = false;
      struct set_entry *entry = _mesa_set_search_or_add(&ctx->render_pass_state_cache, &pstate, &found);
      struct zink_render_pass_pipeline_state *ppstate;
      if (!found) {
         entry->key = ralloc(ctx, struct zink_render_pass_pipeline_state);
         ppstate = (void*)entry->key;
         memcpy(ppstate, &pstate, rp_state_size(&pstate));
         ppstate->id = ctx->render_pass_state_cache.entries;
      }
      ppstate = (void*)entry->key;
      rp->pipeline_state = ppstate->id;
   }
   return rp;
}

/* Hash a zink_framebuffer_state key (imageless framebuffers): fixed header
 * plus only the used entries of the 'infos' array.
 */
static uint32_t
hash_framebuffer_imageless(const void *key)
{
   struct zink_framebuffer_state* s = (struct zink_framebuffer_state*)key;
   return _mesa_hash_data(key, offsetof(struct zink_framebuffer_state, infos) + sizeof(s->infos[0]) * s->num_attachments);
}

/* Equality callback for the imageless framebuffer cache.
 * NOTE(review): unlike equals_render_pass_state, num_attachments is not
 * compared before the memcmp — presumably callers guarantee equal-sized
 * keys; verify against the cache's usage.
 */
static bool
equals_framebuffer_imageless(const void *a, const void *b)
{
   struct zink_framebuffer_state *s = (struct zink_framebuffer_state*)a;
   return memcmp(a, b, offsetof(struct zink_framebuffer_state, infos) + sizeof(s->infos[0]) * s->num_attachments) == 0;
}
/* Resolve the render pass and framebuffer for the current fb state.
 * If programmable sample locations are enabled and changed, convert the
 * packed per-sample bytes in ctx->sample_locations into Vulkan float
 * coordinates first: the low nibble is the x offset in 1/16ths and the
 * high nibble is the y offset, flipped vertically via (16 - y).
 */
static void
setup_framebuffer(struct zink_context *ctx)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   struct zink_render_pass *rp = ctx->gfx_pipeline_state.render_pass;

   if (ctx->gfx_pipeline_state.sample_locations_enabled && ctx->sample_locations_changed) {
      unsigned samples = ctx->gfx_pipeline_state.rast_samples + 1;
      unsigned idx = util_logbase2_ceil(MAX2(samples, 1));
      VkExtent2D grid_size = screen->maxSampleLocationGridSize[idx];

      for (unsigned pixel = 0; pixel < grid_size.width * grid_size.height; pixel++) {
         for (unsigned sample = 0; sample < samples; sample++) {
            unsigned pixel_x = pixel % grid_size.width;
            unsigned pixel_y = pixel / grid_size.width;
            unsigned wi = pixel * samples + sample;
            unsigned ri = (pixel_y * grid_size.width + pixel_x % grid_size.width);
            ri = ri * samples + sample;
            ctx->vk_sample_locations[wi].x = (ctx->sample_locations[ri] & 0xf) / 16.0f;
            ctx->vk_sample_locations[wi].y = (16 - (ctx->sample_locations[ri] >> 4)) / 16.0f;
         }
      }
   }

   /* the pass must be re-resolved if the set of load-op clears changed */
   if (rp)
      ctx->rp_changed |= ctx->rp_clears_enabled != rp->state.clears;
   if (ctx->rp_changed)
      rp = get_render_pass(ctx);

   ctx->fb_changed |= rp != ctx->gfx_pipeline_state.render_pass;
   if (rp->pipeline_state != ctx->gfx_pipeline_state.rp_state) {
      ctx->gfx_pipeline_state.rp_state = rp->pipeline_state;
      ctx->gfx_pipeline_state.dirty = true;
   }

   ctx->rp_changed = false;

   if (!ctx->fb_changed)
      return;

   ctx->init_framebuffer(screen, ctx->framebuffer, rp);
   ctx->fb_changed = false;
   ctx->gfx_pipeline_state.render_pass = rp;
}

/* Prepare one framebuffer attachment for use in the pass: record batch
 * usage for the surface and its resource, transition the image to the
 * layout the render pass expects for attachment slot 'i', and return the
 * image view.  A NULL surf yields the dummy surface view sized for the
 * current fb sample count.
 */
static VkImageView
prep_fb_attachment(struct zink_context *ctx, struct zink_surface *surf, unsigned i)
{
   if (!surf)
      return zink_csurface(ctx->dummy_surface[util_logbase2_ceil(ctx->fb_state.samples)])->image_view;

   zink_batch_resource_usage_set(&ctx->batch, zink_resource(surf->base.texture), true);
   zink_batch_usage_set(&surf->batch_uses, ctx->batch.state);

   struct zink_resource *res = zink_resource(surf->base.texture);
   VkAccessFlags access;
   VkPipelineStageFlags pipeline;
   /* the render pass dictates layout/access/stage for this slot */
   VkImageLayout layout = zink_render_pass_attachment_get_barrier_info(ctx->gfx_pipeline_state.render_pass,
                                                                       i, &pipeline, &access);
   zink_resource_image_barrier(ctx, res, layout, access, pipeline);
   return surf->image_view;
}

/* Fill 'att' with image views for every attachment of the current fb.
 * Layout: [0..nr_cbufs) color (the transient MSAA surface when present),
 * then zsbuf, then the resolve targets starting at cresolve_offset
 * (color resolves first, zs resolve last).
 */
static void
prep_fb_attachments(struct zink_context *ctx, VkImageView *att)
{
   const unsigned cresolve_offset = ctx->fb_state.nr_cbufs + !!ctx->fb_state.zsbuf;
   unsigned num_resolves = 0;
   for (int i = 0; i < ctx->fb_state.nr_cbufs; i++) {
      struct zink_surface *surf = zink_csurface(ctx->fb_state.cbufs[i]);
      struct zink_surface *transient = zink_transient_surface(ctx->fb_state.cbufs[i]);
      if (transient) {
         /* transient is rendered to; the real surface is the resolve dst */
         att[i] = prep_fb_attachment(ctx, transient, i);
         att[i + cresolve_offset] = prep_fb_attachment(ctx, surf, i);
         num_resolves++;
      } else {
         att[i] = prep_fb_attachment(ctx, surf, i);
      }
   }
   if (ctx->fb_state.zsbuf) {
      struct zink_surface *surf = zink_csurface(ctx->fb_state.zsbuf);
      struct zink_surface *transient = zink_transient_surface(ctx->fb_state.zsbuf);
      if (transient) {
         att[ctx->fb_state.nr_cbufs] = prep_fb_attachment(ctx, transient, ctx->fb_state.nr_cbufs);
         /* zs resolve slot follows all color resolves */
         att[cresolve_offset + num_resolves] = prep_fb_attachment(ctx, surf, ctx->fb_state.nr_cbufs);
      } else {
         att[ctx->fb_state.nr_cbufs] = prep_fb_attachment(ctx, surf, ctx->fb_state.nr_cbufs);
      }
   }
}
/* Acquire the framebuffer matching the current fb state and release the
 * previously bound one.  'old_w'/'old_h' are the previous dimensions so a
 * size change can flag the scissor as dirty.
 */
static void
update_framebuffer_state(struct zink_context *ctx, int old_w, int old_h)
{
   if (ctx->fb_state.width != old_w || ctx->fb_state.height != old_h)
      ctx->scissor_changed = true;
   /* get_framebuffer adds a ref if the fb is reused or created;
    * always do get_framebuffer first to avoid deleting the same fb
    * we're about to use
    */
   struct zink_framebuffer *fb = ctx->get_framebuffer(ctx);
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   if (ctx->framebuffer && !screen->info.have_KHR_imageless_framebuffer) {
      simple_mtx_lock(&screen->framebuffer_mtx);
      struct hash_entry *he = _mesa_hash_table_search(&screen->framebuffer_cache, &ctx->framebuffer->state);
      if (ctx->framebuffer && !ctx->framebuffer->state.num_attachments) {
         /* if this has no attachments then its lifetime has ended */
         _mesa_hash_table_remove(&screen->framebuffer_cache, he);
         he = NULL;
         /* ensure an unflushed fb doesn't get destroyed by deferring it */
         util_dynarray_append(&ctx->batch.state->dead_framebuffers, struct zink_framebuffer*, ctx->framebuffer);
         ctx->framebuffer = NULL;
      }
      /* a framebuffer loses 1 ref every time we unset it;
       * we do NOT add refs here, as the ref has already been added in
       * get_framebuffer()
       */
      if (zink_framebuffer_reference(screen, &ctx->framebuffer, NULL) && he)
         _mesa_hash_table_remove(&screen->framebuffer_cache, he);
      simple_mtx_unlock(&screen->framebuffer_mtx);
   }
   ctx->fb_changed |= ctx->framebuffer != fb;
   ctx->framebuffer = fb;
}

/* Record vkCmdBeginRenderPass on the current batch.  Clears that the
 * render pass can perform via its load ops are packed into the
 * VkClearValue array; clears that must be done with explicit draw-time
 * clear calls are accumulated into the returned PIPE_CLEAR_* bitmask.
 */
static unsigned
begin_render_pass(struct zink_context *ctx)
{
   struct zink_batch *batch = &ctx->batch;
   struct pipe_framebuffer_state *fb_state = &ctx->fb_state;

   VkRenderPassBeginInfo rpbi = {0};
   rpbi.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
   rpbi.renderPass = ctx->gfx_pipeline_state.render_pass->render_pass;
   rpbi.renderArea.offset.x = 0;
   rpbi.renderArea.offset.y = 0;
   rpbi.renderArea.extent.width = fb_state->width;
   rpbi.renderArea.extent.height = fb_state->height;

   VkClearValue clears[PIPE_MAX_COLOR_BUFS + 1] = {0};
   unsigned clear_buffers = 0;
   /* clear_validate tracks which clears were packed as load ops so the
    * result can be asserted against the render pass state below
    */
   uint32_t clear_validate = 0;
   for (int i = 0; i < fb_state->nr_cbufs; i++) {
      /* these are no-ops */
      if (!fb_state->cbufs[i] || !zink_fb_clear_enabled(ctx, i))
         continue;
      /* these need actual clear calls inside the rp */
      struct zink_framebuffer_clear_data *clear = zink_fb_clear_element(&ctx->fb_clears[i], 0);
      if (zink_fb_clear_needs_explicit(&ctx->fb_clears[i])) {
         clear_buffers |= (PIPE_CLEAR_COLOR0 << i);
         if (zink_fb_clear_count(&ctx->fb_clears[i]) < 2 ||
             zink_fb_clear_element_needs_explicit(clear))
            continue;
      }
      /* we now know there's one clear that can be done here */
      zink_fb_clear_util_unpack_clear_color(clear, fb_state->cbufs[i]->format, (void*)&clears[i].color);
      rpbi.clearValueCount = i + 1;
      clear_validate |= PIPE_CLEAR_COLOR0 << i;
      assert(ctx->framebuffer->rp->state.clears);
   }
   if (fb_state->zsbuf && zink_fb_clear_enabled(ctx, PIPE_MAX_COLOR_BUFS)) {
      struct zink_framebuffer_clear *fb_clear = &ctx->fb_clears[PIPE_MAX_COLOR_BUFS];
      struct zink_framebuffer_clear_data *clear = zink_fb_clear_element(fb_clear, 0);
      if (!zink_fb_clear_element_needs_explicit(clear)) {
         /* zs clear value lives one past the color clear values */
         clears[fb_state->nr_cbufs].depthStencil.depth = clear->zs.depth;
         clears[fb_state->nr_cbufs].depthStencil.stencil = clear->zs.stencil;
         rpbi.clearValueCount = fb_state->nr_cbufs + 1;
         clear_validate |= clear->zs.bits;
         assert(ctx->framebuffer->rp->state.clears);
      }
      if (zink_fb_clear_needs_explicit(fb_clear)) {
         /* gather remaining explicit zs clears until both depth and
          * stencil bits are set or the list is exhausted
          */
         for (int j = !zink_fb_clear_element_needs_explicit(clear);
              (clear_buffers & PIPE_CLEAR_DEPTHSTENCIL) != PIPE_CLEAR_DEPTHSTENCIL && j < zink_fb_clear_count(fb_clear);
              j++)
            clear_buffers |= zink_fb_clear_element(fb_clear, j)->zs.bits;
      }
   }
   assert(clear_validate == ctx->framebuffer->rp->state.clears);
   rpbi.pClearValues = &clears[0];
   rpbi.framebuffer = ctx->framebuffer->fb;

   assert(ctx->gfx_pipeline_state.render_pass && ctx->framebuffer);

   /* imageless framebuffers supply the actual views at begin time */
   VkRenderPassAttachmentBeginInfo infos;
   VkImageView att[2 * (PIPE_MAX_COLOR_BUFS + 1)];
   infos.sType = VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO;
   infos.pNext = NULL;
   infos.attachmentCount = ctx->framebuffer->state.num_attachments;
   infos.pAttachments = att;
   prep_fb_attachments(ctx, att);
   if (zink_screen(ctx->base.screen)->info.have_KHR_imageless_framebuffer) {
#ifndef NDEBUG
      /* every attachment's usage must match what the fb was created with */
      const unsigned cresolve_offset = ctx->fb_state.nr_cbufs + !!ctx->fb_state.zsbuf;
      for (int i = 0; i < ctx->fb_state.nr_cbufs; i++) {
         if (ctx->fb_state.cbufs[i]) {
            struct zink_surface *surf = zink_csurface(ctx->fb_state.cbufs[i]);
            struct zink_surface *transient = zink_transient_surface(ctx->fb_state.cbufs[i]);
            if (transient) {
               assert(zink_resource(transient->base.texture)->obj->vkusage == ctx->framebuffer->state.infos[i].usage);
               assert(zink_resource(surf->base.texture)->obj->vkusage == ctx->framebuffer->state.infos[cresolve_offset].usage);
            } else {
               assert(zink_resource(surf->base.texture)->obj->vkusage == ctx->framebuffer->state.infos[i].usage);
            }
         }
      }
      if (ctx->fb_state.zsbuf) {
         struct zink_surface *surf = zink_csurface(ctx->fb_state.zsbuf);
         struct zink_surface *transient = zink_transient_surface(ctx->fb_state.zsbuf);
         if (transient) {
            assert(zink_resource(transient->base.texture)->obj->vkusage == ctx->framebuffer->state.infos[ctx->fb_state.nr_cbufs].usage);
            assert(zink_resource(surf->base.texture)->obj->vkusage == ctx->framebuffer->state.infos[cresolve_offset].usage);
         } else {
            assert(zink_resource(surf->base.texture)->obj->vkusage == ctx->framebuffer->state.infos[ctx->fb_state.nr_cbufs].usage);
         }
      }
#endif
      rpbi.pNext = &infos;
   }

   VKCTX(CmdBeginRenderPass)(batch->state->cmdbuf, &rpbi, VK_SUBPASS_CONTENTS_INLINE);
   batch->in_rp = true;
   ctx->new_swapchain = false;
   return clear_buffers;
}
/* Fill a VkSampleLocationsInfoEXT from the context's converted sample
 * locations.  sampleLocationsPerPixel is rounded up to the next power of
 * two of the current rasterization sample count.
 */
void
zink_init_vk_sample_locations(struct zink_context *ctx, VkSampleLocationsInfoEXT *loc)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   unsigned idx = util_logbase2_ceil(MAX2(ctx->gfx_pipeline_state.rast_samples + 1, 1));
   loc->sType = VK_STRUCTURE_TYPE_SAMPLE_LOCATIONS_INFO_EXT;
   loc->pNext = NULL;
   loc->sampleLocationsPerPixel = 1 << idx;
   loc->sampleLocationsCount = ctx->gfx_pipeline_state.rast_samples + 1;
   loc->sampleLocationGridSize = screen->maxSampleLocationGridSize[idx];
   loc->pSampleLocations = ctx->vk_sample_locations;
}

/* pipe_context::evaluate_depth_buffer hook: flag the bound zsbuf for a
 * depth/stencil evaluate with the current sample locations and end the
 * render pass so the evaluation can happen.
 */
static void
zink_evaluate_depth_buffer(struct pipe_context *pctx)
{
   struct zink_context *ctx = zink_context(pctx);

   if (!ctx->fb_state.zsbuf)
      return;

   struct zink_resource *res = zink_resource(ctx->fb_state.zsbuf->texture);
   res->obj->needs_zs_evaluate = true;
   zink_init_vk_sample_locations(ctx, &res->obj->zs_evaluate);
   zink_batch_no_rp(ctx);
}

/* Set up the framebuffer/render pass, perform any required MSAA "expand"
 * blits (copying resolved single-sample contents back into transient
 * multisampled attachments), begin the pass, then start conditional
 * rendering and run the explicit clears begin_render_pass() reported.
 */
void
zink_begin_render_pass(struct zink_context *ctx)
{
   setup_framebuffer(ctx);
   /* TODO: need replicate EXT */
   if (ctx->framebuffer->rp->state.msaa_expand_mask) {
      /* the blits below clobber rp_state/render_pass; save and restore */
      uint32_t rp_state = ctx->gfx_pipeline_state.rp_state;
      struct zink_render_pass *rp = ctx->gfx_pipeline_state.render_pass;

      u_foreach_bit(i, ctx->framebuffer->rp->state.msaa_expand_mask) {
         struct zink_ctx_surface *csurf = (struct zink_ctx_surface*)ctx->fb_state.cbufs[i];
         /* transient already holds valid contents */
         if (csurf->transient_init)
            continue;
         struct pipe_surface *dst_view = (struct pipe_surface*)csurf->transient;
         assert(dst_view);
         struct pipe_sampler_view src_templ, *src_view;
         struct pipe_resource *src = ctx->fb_state.cbufs[i]->texture;
         struct pipe_box dstbox;

         u_box_3d(0, 0, 0, ctx->fb_state.width, ctx->fb_state.height,
                  1 + dst_view->u.tex.last_layer - dst_view->u.tex.first_layer, &dstbox);

         util_blitter_default_src_texture(ctx->blitter, &src_templ, src, ctx->fb_state.cbufs[i]->u.tex.level);
         src_view = ctx->base.create_sampler_view(&ctx->base, src, &src_templ);

         zink_blit_begin(ctx, ZINK_BLIT_SAVE_FB | ZINK_BLIT_SAVE_FS | ZINK_BLIT_SAVE_TEXTURES);
         util_blitter_blit_generic(ctx->blitter, dst_view, &dstbox,
                                   src_view, &dstbox, ctx->fb_state.width, ctx->fb_state.height,
                                   PIPE_MASK_RGBAZS, PIPE_TEX_FILTER_NEAREST, NULL,
                                   false, false);

         pipe_sampler_view_reference(&src_view, NULL);
         csurf->transient_init = true;
      }
      ctx->fb_changed = ctx->rp_changed = false;
      ctx->gfx_pipeline_state.rp_state = rp_state;
      ctx->gfx_pipeline_state.render_pass = rp;
   }
   assert(ctx->gfx_pipeline_state.render_pass);
   unsigned clear_buffers = begin_render_pass(ctx);

   if (ctx->render_condition.query)
      zink_start_conditional_render(ctx);
   zink_clear_framebuffer(ctx, clear_buffers);
}

/* End the current render pass if one is active: stop conditional
 * rendering, record vkCmdEndRenderPass, and mark every bound color
 * surface's transient as initialized (the pass's resolve wrote it back).
 */
void
zink_end_render_pass(struct zink_context *ctx)
{
   if (ctx->batch.in_rp) {
      if (ctx->render_condition.query)
         zink_stop_conditional_render(ctx);
      VKCTX(CmdEndRenderPass)(ctx->batch.state->cmdbuf);
      for (unsigned i = 0; i < ctx->fb_state.nr_cbufs; i++) {
         struct zink_ctx_surface *csurf = (struct zink_ctx_surface*)ctx->fb_state.cbufs[i];
         if (csurf)
            csurf->transient_init = true;
      }
   }
   ctx->batch.in_rp = false;
}
/* Wait for a batch state's threaded submit to finish; no-op when the
 * screen isn't running a flush thread.
 */
static void
sync_flush(struct zink_context *ctx, struct zink_batch_state *bs)
{
   if (zink_screen(ctx->base.screen)->threaded)
      util_queue_fence_wait(&bs->flush_completed);
}

/* Return the VkAccessFlags a descriptor binding implies: UBOs are uniform
 * reads, sampler views are shader reads, and SSBOs/images add shader
 * writes when the binding is writable.
 */
static inline VkAccessFlags
get_access_flags_for_binding(struct zink_context *ctx, enum zink_descriptor_type type, enum pipe_shader_type stage, unsigned idx)
{
   VkAccessFlags flags = 0;
   switch (type) {
   case ZINK_DESCRIPTOR_TYPE_UBO:
      return VK_ACCESS_UNIFORM_READ_BIT;
   case ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW:
      return VK_ACCESS_SHADER_READ_BIT;
   case ZINK_DESCRIPTOR_TYPE_SSBO: {
      flags = VK_ACCESS_SHADER_READ_BIT;
      if (ctx->writable_ssbos[stage] & (1 << idx))
         flags |= VK_ACCESS_SHADER_WRITE_BIT;
      return flags;
   }
   case ZINK_DESCRIPTOR_TYPE_IMAGE: {
      struct zink_image_view *image_view = &ctx->image_views[stage][idx];
      if (image_view->base.access & PIPE_IMAGE_ACCESS_READ)
         flags |= VK_ACCESS_SHADER_READ_BIT;
      if (image_view->base.access & PIPE_IMAGE_ACCESS_WRITE)
         flags |= VK_ACCESS_SHADER_WRITE_BIT;
      return flags;
   }
   default:
      break;
   }
   unreachable("ACK");
   return 0;
}

/* Record batch usage for every resource bound through a descriptor in the
 * given shader stage so the batch keeps them alive and tracks read/write
 * hazards.
 */
static void
update_resource_refs_for_stage(struct zink_context *ctx, enum pipe_shader_type stage)
{
   struct zink_batch *batch = &ctx->batch;
   unsigned max_slot[] = {
      [ZINK_DESCRIPTOR_TYPE_UBO] = ctx->di.num_ubos[stage],
      [ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW] = ctx->di.num_samplers[stage],
      [ZINK_DESCRIPTOR_TYPE_SSBO] = ctx->di.num_ssbos[stage],
      [ZINK_DESCRIPTOR_TYPE_IMAGE] = ctx->di.num_images[stage]
   };
   for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++) {
      for (unsigned j = 0; j < max_slot[i]; j++) {
         if (ctx->di.descriptor_res[i][stage][j]) {
            struct zink_resource *res = ctx->di.descriptor_res[i][stage][j];
            /* res is non-NULL here (checked just above); this re-check is
             * redundant but harmless
             */
            if (!res)
               continue;
            bool is_write = zink_resource_access_is_write(get_access_flags_for_binding(ctx, i, stage, j));
            zink_batch_resource_usage_set(batch, res, is_write);

            struct zink_sampler_view *sv = zink_sampler_view(ctx->sampler_views[stage][j]);
            struct zink_sampler_state *sampler_state = ctx->sampler_states[stage][j];
            struct zink_image_view *iv = &ctx->image_views[stage][j];
            /* NOTE(review): the '<=' comparisons below look off-by-one
             * relative to the '<' loop bound — confirm whether the counts
             * are inclusive here
             */
            if (sampler_state && i == ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW && j <= ctx->di.num_samplers[stage])
               zink_batch_usage_set(&sampler_state->batch_uses, ctx->batch.state);
            if (sv && i == ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW && j <= ctx->di.num_sampler_views[stage]) {
               /* buffer resources use buffer views; images use image views */
               if (res->obj->is_buffer)
                  zink_batch_usage_set(&sv->buffer_view->batch_uses, ctx->batch.state);
               else
                  zink_batch_usage_set(&sv->image_view->batch_uses, ctx->batch.state);
               zink_batch_reference_sampler_view(batch, sv);
            } else if (i == ZINK_DESCRIPTOR_TYPE_IMAGE && j <= ctx->di.num_images[stage]) {
               if (res->obj->is_buffer)
                  zink_batch_usage_set(&iv->buffer_view->batch_uses, ctx->batch.state);
               else
                  zink_batch_usage_set(&iv->surface->batch_uses, ctx->batch.state);
               zink_batch_reference_image_view(batch, iv);
            }
         }
      }
   }
}

/* Re-reference everything the current program(s) use on the new batch:
 * per-stage descriptor resources, vertex buffers (gfx only), the active
 * program, and any resident bindless descriptors when flagged dirty.
 */
void
zink_update_descriptor_refs(struct zink_context *ctx, bool compute)
{
   struct zink_batch *batch = &ctx->batch;
   if (compute) {
      update_resource_refs_for_stage(ctx, PIPE_SHADER_COMPUTE);
      if (ctx->curr_compute)
         zink_batch_reference_program(batch, &ctx->curr_compute->base);
   } else {
      for (unsigned i = 0; i < ZINK_SHADER_COUNT; i++)
         update_resource_refs_for_stage(ctx, i);
      unsigned vertex_buffers_enabled_mask = ctx->gfx_pipeline_state.vertex_buffers_enabled_mask;
      unsigned last_vbo = util_last_bit(vertex_buffers_enabled_mask);
      for (unsigned i = 0; i < last_vbo + 1; i++) {
         if (ctx->vertex_buffers[i].buffer.resource)
            zink_batch_resource_usage_set(batch, zink_resource(ctx->vertex_buffers[i].buffer.resource), false);
      }
      if (ctx->curr_program)
         zink_batch_reference_program(batch, &ctx->curr_program->base);
   }
   if (ctx->di.bindless_refs_dirty) {
      ctx->di.bindless_refs_dirty = false;
      for (unsigned i = 0; i < 2; i++) {
         util_dynarray_foreach(&ctx->di.bindless[i].resident, struct zink_bindless_descriptor*, bd) {
            struct zink_resource *res = zink_descriptor_surface_resource(&(*bd)->ds);
            zink_batch_resource_usage_set(&ctx->batch, res, (*bd)->access & PIPE_IMAGE_ACCESS_WRITE);
         }
      }
   }
}
/* Fully drain the GPU: wait for the last submit thread, wait on its fence,
 * then reset all batch states.
 */
static void
stall(struct zink_context *ctx)
{
   sync_flush(ctx, zink_batch_state(ctx->last_fence));
   zink_vkfence_wait(zink_screen(ctx->base.screen), ctx->last_fence, PIPE_TIMEOUT_INFINITE);
   zink_batch_reset_all(ctx);
}

/* End and submit the current batch, then start a fresh one (unless the
 * device was lost).  Pending clears are flushed by briefly starting a
 * render pass first.  'sync' waits for the submit thread before starting
 * the next batch.  Restarting a batch invalidates cached pipeline/draw
 * state, so the dirty flags and draw/grid dispatch selections are reset.
 */
static void
flush_batch(struct zink_context *ctx, bool sync)
{
   struct zink_batch *batch = &ctx->batch;
   if (ctx->clears_enabled)
      /* start rp to do all the clears */
      zink_begin_render_pass(ctx);
   zink_end_render_pass(ctx);
   zink_end_batch(ctx, batch);
   ctx->deferred_fence = NULL;

   if (sync)
      sync_flush(ctx, ctx->batch.state);

   if (ctx->batch.state->is_device_lost) {
      check_device_lost(ctx);
   } else {
      zink_start_batch(ctx, batch);
      if (zink_screen(ctx->base.screen)->info.have_EXT_transform_feedback && ctx->num_so_targets)
         ctx->dirty_so_targets = true;
      ctx->pipeline_changed[0] = ctx->pipeline_changed[1] = true;
      zink_select_draw_vbo(ctx);
      zink_select_launch_grid(ctx);

      /* an out-of-memory flag forces a full stall before continuing */
      if (ctx->oom_stall)
         stall(ctx);
      ctx->oom_flush = false;
      ctx->oom_stall = false;
      ctx->dd->bindless_bound = false;
      ctx->di.bindless_refs_dirty = true;
   }
}

/* Public synchronous flush: submit the current batch and wait for the
 * submit thread.
 */
void
zink_flush_queue(struct zink_context *ctx)
{
   flush_batch(ctx, true);
}

/* Rebind one fb surface if its resource matches 'match_res' or its backing
 * object changed underneath the surface.  Returns true if a rebind
 * happened.
 */
static bool
rebind_fb_surface(struct zink_context *ctx, struct pipe_surface **surf, struct zink_resource *match_res)
{
   if (!*surf)
      return false;
   struct zink_resource *surf_res = zink_resource((*surf)->texture);
   if ((match_res == surf_res) || surf_res->obj != zink_csurface(*surf)->obj)
      return zink_rebind_ctx_surface(ctx, surf);
   return false;
}

/* Run rebind_fb_surface over every bound cbuf and the zsbuf.  When called
 * from set_framebuffer_state, also flag a new swapchain if any cbuf is a
 * scanout surface.  Returns true if anything was rebound.
 */
static bool
rebind_fb_state(struct zink_context *ctx, struct zink_resource *match_res, bool from_set_fb)
{
   bool rebind = false;
   for (int i = 0; i < ctx->fb_state.nr_cbufs; i++) {
      rebind |= rebind_fb_surface(ctx, &ctx->fb_state.cbufs[i], match_res);
      if (from_set_fb && ctx->fb_state.cbufs[i] && ctx->fb_state.cbufs[i]->texture->bind & PIPE_BIND_SCANOUT)
         ctx->new_swapchain = true;
   }
   rebind |= rebind_fb_surface(ctx, &ctx->fb_state.zsbuf, match_res);
   return rebind;
}

/* Unbind one fb surface: when the binding actually changed, flush its
 * pending clears, keep batch references to the surface (and its transient)
 * alive, and mark the render pass dirty.  Always drops the resource's
 * fb_binds count and re-checks batch refs when it reaches zero.
 */
static void
unbind_fb_surface(struct zink_context *ctx, struct pipe_surface *surf, bool changed)
{
   if (!surf)
      return;
   struct zink_surface *transient = zink_transient_surface(surf);
   if (changed) {
      zink_fb_clears_apply(ctx, surf->texture);
      if (zink_batch_usage_exists(zink_csurface(surf)->batch_uses)) {
         zink_batch_reference_surface(&ctx->batch, zink_csurface(surf));
         if (transient)
            zink_batch_reference_surface(&ctx->batch, transient);
      }
      ctx->rp_changed = true;
   }
   struct zink_resource *res = zink_resource(surf->texture);
   res->fb_binds--;
   if (!res->fb_binds)
      check_resource_for_batch_ref(ctx, res);
}
zink_set_framebuffer_state(struct pipe_context *pctx,
                           const struct pipe_framebuffer_state *state)
{
   struct zink_context *ctx = zink_context(pctx);
   /* with any attachment present the sample count is derived from the
    * attachments below; only an attachment-less fb uses state->samples */
   unsigned samples = state->nr_cbufs || state->zsbuf ? 0 : state->samples;

   /* unbind all outgoing color attachments */
   for (int i = 0; i < ctx->fb_state.nr_cbufs; i++) {
      struct pipe_surface *surf = ctx->fb_state.cbufs[i];
      if (i < state->nr_cbufs)
         ctx->rp_changed |= !!zink_transient_surface(surf) != !!zink_transient_surface(state->cbufs[i]);
      unbind_fb_surface(ctx, surf, i >= state->nr_cbufs || surf != state->cbufs[i]);
   }
   if (ctx->fb_state.zsbuf) {
      struct pipe_surface *surf = ctx->fb_state.zsbuf;
      struct zink_resource *res = zink_resource(surf->texture);
      bool changed = surf != state->zsbuf;
      unbind_fb_surface(ctx, surf, changed);
      if (!changed)
         ctx->rp_changed |= !!zink_transient_surface(surf) != !!zink_transient_surface(state->zsbuf);
      if (changed && unlikely(res->obj->needs_zs_evaluate))
         /* have to flush zs eval while the sample location data still exists,
          * so just throw some random barrier */
         zink_resource_image_barrier(ctx, res, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                                     VK_ACCESS_SHADER_READ_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
   }
   /* renderpass changes if the number or types of attachments change */
   ctx->rp_changed |= ctx->fb_state.nr_cbufs != state->nr_cbufs;
   ctx->rp_changed |= !!ctx->fb_state.zsbuf != !!state->zsbuf;

   /* remember the previous size for update_framebuffer_state() below */
   unsigned w = ctx->fb_state.width;
   unsigned h = ctx->fb_state.height;

   util_copy_framebuffer_state(&ctx->fb_state, state);
   zink_update_fbfetch(ctx);
   /* recompute the mask of attachments whose format has no real alpha bit */
   unsigned prev_void_alpha_attachments = ctx->gfx_pipeline_state.void_alpha_attachments;
   ctx->gfx_pipeline_state.void_alpha_attachments = 0;
   for (int i = 0; i < ctx->fb_state.nr_cbufs; i++) {
      struct pipe_surface *surf = ctx->fb_state.cbufs[i];
      if (surf) {
         struct zink_surface *transient = zink_transient_surface(surf);
         if (!samples)
            samples = MAX3(transient ? transient->base.nr_samples : 1, surf->texture->nr_samples, 1);
         zink_resource(surf->texture)->fb_binds++;
         ctx->gfx_pipeline_state.void_alpha_attachments |= util_format_has_alpha1(surf->format) ? BITFIELD_BIT(i) : 0;
      }
   }
   if (ctx->gfx_pipeline_state.void_alpha_attachments != prev_void_alpha_attachments)
      ctx->gfx_pipeline_state.dirty = true;
   if (ctx->fb_state.zsbuf) {
      struct pipe_surface *surf = ctx->fb_state.zsbuf;
      struct zink_surface *transient = zink_transient_surface(surf);
      if (!samples)
         samples = MAX3(transient ? transient->base.nr_samples : 1, surf->texture->nr_samples, 1);
      zink_resource(surf->texture)->fb_binds++;
   }
   rebind_fb_state(ctx, NULL, true);
   ctx->fb_state.samples = MAX2(samples, 1);
   update_framebuffer_state(ctx, w, h);

   /* pipeline state stores sample count minus one */
   uint8_t rast_samples = ctx->fb_state.samples - 1;
   /* NOTE(review): the next two conditions are identical; presumably kept
    * separate in case zink_update_fs_key_samples() mutates state — verify */
   if (rast_samples != ctx->gfx_pipeline_state.rast_samples)
      zink_update_fs_key_samples(ctx);
   if (ctx->gfx_pipeline_state.rast_samples != rast_samples) {
      ctx->sample_locations_changed |= ctx->gfx_pipeline_state.sample_locations_enabled;
      ctx->gfx_pipeline_state.dirty = true;
   }
   ctx->gfx_pipeline_state.rast_samples = rast_samples;

   /* need to ensure we start a new rp on next draw */
   zink_batch_no_rp(ctx);
   /* this is an ideal time to oom flush since it won't split a renderpass */
   if (ctx->oom_flush)
      flush_batch(ctx, false);
}

/* pipe_context::set_blend_color: cache the blend constant for draw time. */
static void
zink_set_blend_color(struct pipe_context *pctx,
                     const struct pipe_blend_color *color)
{
   struct zink_context *ctx = zink_context(pctx);
   memcpy(ctx->blend_constants, color->color, sizeof(float) * 4);
}

/* pipe_context::set_sample_mask: store the mask in the pipeline state. */
static void
zink_set_sample_mask(struct pipe_context *pctx, unsigned sample_mask)
{
   struct zink_context *ctx = zink_context(pctx);
   ctx->gfx_pipeline_state.sample_mask = sample_mask;
   ctx->gfx_pipeline_state.dirty = true;
}

/* pipe_context::set_sample_locations: cache programmable sample positions. */
static void
zink_set_sample_locations(struct pipe_context *pctx, size_t size, const uint8_t
*locations)
{
   struct zink_context *ctx = zink_context(pctx);

   /* locations are only considered enabled when real data was provided */
   ctx->gfx_pipeline_state.sample_locations_enabled = size && locations;
   ctx->sample_locations_changed = ctx->gfx_pipeline_state.sample_locations_enabled;
   /* clamp to the storage available in the context */
   if (size > sizeof(ctx->sample_locations))
      size = sizeof(ctx->sample_locations);

   if (locations)
      memcpy(ctx->sample_locations, locations, size);
}

/* Source-side access mask implied by the layout an image was last used in,
 * for barriers where no access has been tracked yet.
 */
static VkAccessFlags
access_src_flags(VkImageLayout layout)
{
   switch (layout) {
   case VK_IMAGE_LAYOUT_UNDEFINED:
      return 0;

   case VK_IMAGE_LAYOUT_GENERAL:
      return VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT;

   case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
      return VK_ACCESS_COLOR_ATTACHMENT_READ_BIT;
   case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
      return VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;

   case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
   case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
      return VK_ACCESS_SHADER_READ_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
      return VK_ACCESS_TRANSFER_READ_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
      return VK_ACCESS_TRANSFER_WRITE_BIT;

   case VK_IMAGE_LAYOUT_PREINITIALIZED:
      return VK_ACCESS_HOST_WRITE_BIT;

   case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR:
      return 0;

   default:
      unreachable("unexpected layout");
26417ec681f3Smrg } 26427ec681f3Smrg} 26437ec681f3Smrg 26447ec681f3Smrgstatic VkAccessFlags 26457ec681f3Smrgaccess_dst_flags(VkImageLayout layout) 26467ec681f3Smrg{ 26477ec681f3Smrg switch (layout) { 26487ec681f3Smrg case VK_IMAGE_LAYOUT_UNDEFINED: 26497ec681f3Smrg return 0; 26507ec681f3Smrg 26517ec681f3Smrg case VK_IMAGE_LAYOUT_GENERAL: 26527ec681f3Smrg return VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT; 26537ec681f3Smrg 26547ec681f3Smrg case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: 26557ec681f3Smrg return VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; 26567ec681f3Smrg case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: 26577ec681f3Smrg return VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT; 26587ec681f3Smrg 26597ec681f3Smrg case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: 26607ec681f3Smrg return VK_ACCESS_SHADER_READ_BIT; 26617ec681f3Smrg 26627ec681f3Smrg case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: 26637ec681f3Smrg return VK_ACCESS_TRANSFER_READ_BIT; 26647ec681f3Smrg 26657ec681f3Smrg case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: 26667ec681f3Smrg return VK_ACCESS_SHADER_READ_BIT; 26677ec681f3Smrg 26687ec681f3Smrg case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: 26697ec681f3Smrg return VK_ACCESS_TRANSFER_WRITE_BIT; 26707ec681f3Smrg 26717ec681f3Smrg case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR: 26727ec681f3Smrg return 0; 26737ec681f3Smrg 26747ec681f3Smrg default: 26757ec681f3Smrg unreachable("unexpected layout"); 26767ec681f3Smrg } 26777ec681f3Smrg} 26787ec681f3Smrg 26797ec681f3Smrgstatic VkPipelineStageFlags 26807ec681f3Smrgpipeline_dst_stage(VkImageLayout layout) 26817ec681f3Smrg{ 26827ec681f3Smrg switch (layout) { 26837ec681f3Smrg case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: 26847ec681f3Smrg return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; 26857ec681f3Smrg case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: 26867ec681f3Smrg return VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT; 26877ec681f3Smrg 26887ec681f3Smrg case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: 
      return VK_PIPELINE_STAGE_TRANSFER_BIT;
   case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
      return VK_PIPELINE_STAGE_TRANSFER_BIT;

   case VK_IMAGE_LAYOUT_GENERAL:
      return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;

   case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
   case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
      return VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;

   default:
      return VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
   }
}

/* Union of all read-only VkAccessFlagBits values; used to classify an access
 * mask as read-only vs. writing.
 * NOTE(review): VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_NV appears in this
 * "read" set — presumably deliberate NV/KHR aliasing, verify against the
 * vulkan headers. */
#define ALL_READ_ACCESS_FLAGS \
   (VK_ACCESS_INDIRECT_COMMAND_READ_BIT | \
    VK_ACCESS_INDEX_READ_BIT | \
    VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | \
    VK_ACCESS_UNIFORM_READ_BIT | \
    VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | \
    VK_ACCESS_SHADER_READ_BIT | \
    VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | \
    VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | \
    VK_ACCESS_TRANSFER_READ_BIT |\
    VK_ACCESS_HOST_READ_BIT |\
    VK_ACCESS_MEMORY_READ_BIT |\
    VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT |\
    VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT |\
    VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT |\
    VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR |\
    VK_ACCESS_SHADING_RATE_IMAGE_READ_BIT_NV |\
    VK_ACCESS_FRAGMENT_DENSITY_MAP_READ_BIT_EXT |\
    VK_ACCESS_COMMAND_PREPROCESS_READ_BIT_NV |\
    VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_NV |\
    VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_NV)


/* An access mask counts as a "write" if it has any bit outside the read set. */
bool
zink_resource_access_is_write(VkAccessFlags
flags)
{
   return (flags & ALL_READ_ACCESS_FLAGS) != flags;
}

/* Whether transitioning 'res' to new_layout/flags/pipeline requires an image
 * barrier: any layout change, any stage or access not already covered by the
 * tracked state, or a write on either side (writes must always be ordered).
 */
bool
zink_resource_image_needs_barrier(struct zink_resource *res, VkImageLayout new_layout, VkAccessFlags flags, VkPipelineStageFlags pipeline)
{
   /* derive defaults from the target layout when the caller gives none */
   if (!pipeline)
      pipeline = pipeline_dst_stage(new_layout);
   if (!flags)
      flags = access_dst_flags(new_layout);
   return res->layout != new_layout || (res->obj->access_stage & pipeline) != pipeline ||
          (res->obj->access & flags) != flags ||
          zink_resource_access_is_write(res->obj->access) ||
          zink_resource_access_is_write(flags);
}

/* Fill out a whole-image memory barrier for the requested transition.
 * Returns true if the barrier actually needs to be recorded.
 */
bool
zink_resource_image_barrier_init(VkImageMemoryBarrier *imb, struct zink_resource *res, VkImageLayout new_layout, VkAccessFlags flags, VkPipelineStageFlags pipeline)
{
   if (!pipeline)
      pipeline = pipeline_dst_stage(new_layout);
   if (!flags)
      flags = access_dst_flags(new_layout);

   /* the barrier covers all mips and layers */
   VkImageSubresourceRange isr = {
      res->aspect,
      0, VK_REMAINING_MIP_LEVELS,
      0, VK_REMAINING_ARRAY_LAYERS
   };
   *imb = (VkImageMemoryBarrier){
      VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
      NULL,
      /* fall back to layout-derived source access when none was tracked */
      res->obj->access ?
 res->obj->access : access_src_flags(res->layout),
      flags,
      res->layout,
      new_layout,
      VK_QUEUE_FAMILY_IGNORED,
      VK_QUEUE_FAMILY_IGNORED,
      res->obj->image,
      isr
   };
   /* a pending zs-evaluate always forces the barrier to be emitted */
   return res->obj->needs_zs_evaluate || zink_resource_image_needs_barrier(res, new_layout, flags, pipeline);
}

/* True if 'pipeline' includes any graphics shader stage. */
static inline bool
is_shader_pipline_stage(VkPipelineStageFlags pipeline)
{
   return pipeline & (VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
                      VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
                      VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
                      VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
                      VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
}

/* After a buffer barrier, queue deferred barriers for bind points
 * (need_barriers[0] = gfx, [1] = compute) the just-used stages didn't cover.
 */
static void
resource_check_defer_buffer_barrier(struct zink_context *ctx, struct zink_resource *res, VkPipelineStageFlags pipeline)
{
   assert(res->obj->is_buffer);
   /* gfx binds other than stream-output remain */
   if (res->bind_count[0] - res->so_bind_count > 0) {
      if ((res->obj->is_buffer && res->vbo_bind_mask && !(pipeline & VK_PIPELINE_STAGE_VERTEX_INPUT_BIT)) ||
          ((!res->obj->is_buffer || util_bitcount(res->vbo_bind_mask) != res->bind_count[0]) && !is_shader_pipline_stage(pipeline)))
         /* gfx rebind */
         _mesa_set_add(ctx->need_barriers[0], res);
   }
   if (res->bind_count[1] && !(pipeline & VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT))
      /* compute rebind */
      _mesa_set_add(ctx->need_barriers[1], res);
}

/* Pick the command buffer a barrier is recorded into: barriers that must be
 * ordered against tracked prior access go on the main cmdbuf (ending any
 * renderpass); otherwise use the dedicated unordered barrier cmdbuf.
 */
static inline VkCommandBuffer
get_cmdbuf(struct zink_context *ctx, struct zink_resource *res)
{
   if
 ((res->obj->access && !res->obj->unordered_barrier) || !ctx->batch.in_rp) {
      zink_batch_no_rp(ctx);
      res->obj->unordered_barrier = false;
      return ctx->batch.state->cmdbuf;
   }
   res->obj->unordered_barrier = true;
   ctx->batch.state->has_barriers = true;
   return ctx->batch.state->barrier_cmdbuf;
}

/* After an image barrier, queue deferred layout changes for whichever of
 * gfx/compute still has this image bound and will need a different layout.
 */
static void
resource_check_defer_image_barrier(struct zink_context *ctx, struct zink_resource *res, VkImageLayout layout, VkPipelineStageFlags pipeline)
{
   assert(!res->obj->is_buffer);

   bool is_compute = pipeline == VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
   /* if this is a non-shader barrier and there are binds, always queue a shader barrier */
   bool is_shader = is_shader_pipline_stage(pipeline);
   if ((is_shader || !res->bind_count[is_compute]) &&
       /* if no layout change is needed between gfx and compute, do nothing */
       !res->bind_count[!is_compute] && (!is_compute || !res->fb_binds))
      return;

   if (res->bind_count[!is_compute] && is_shader) {
      /* if the layout is the same between gfx and compute, do nothing */
      if (layout == zink_descriptor_util_image_layout_eval(res, !is_compute))
         return;
   }
   /* queue a layout change if a layout change will be needed */
   if (res->bind_count[!is_compute])
      _mesa_set_add(ctx->need_barriers[!is_compute], res);
   /* also queue a layout change if this is a non-shader layout */
   if (res->bind_count[is_compute] && !is_shader)
      _mesa_set_add(ctx->need_barriers[is_compute], res);
}
/* Record an image layout/access barrier for 'res' and update its tracked
 * layout, access, and stage state.
 */
void
zink_resource_image_barrier(struct zink_context *ctx, struct zink_resource *res,
                            VkImageLayout new_layout, VkAccessFlags flags, VkPipelineStageFlags pipeline)
{
   VkImageMemoryBarrier imb;
   if (!pipeline)
      pipeline = pipeline_dst_stage(new_layout);

   if (!zink_resource_image_barrier_init(&imb, res, new_layout, flags, pipeline))
      return;
   /* only barrier if we're changing layout or doing something besides read -> read */
   VkCommandBuffer cmdbuf = get_cmdbuf(ctx, res);
   assert(new_layout);
   /* nothing tracked yet -> nothing to wait on from the source side */
   if (!res->obj->access_stage)
      imb.srcAccessMask = 0;
   if (res->obj->needs_zs_evaluate)
      imb.pNext = &res->obj->zs_evaluate;
   res->obj->needs_zs_evaluate = false;
   /* first use after dmabuf import: acquire ownership from the foreign queue */
   if (res->dmabuf_acquire) {
      imb.srcQueueFamilyIndex = VK_QUEUE_FAMILY_FOREIGN_EXT;
      imb.dstQueueFamilyIndex = zink_screen(ctx->base.screen)->gfx_queue;
      res->dmabuf_acquire = false;
   }
   VKCTX(CmdPipelineBarrier)(
      cmdbuf,
      res->obj->access_stage ?
 res->obj->access_stage : VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
      pipeline,
      0,
      0, NULL,
      0, NULL,
      1, &imb
   );

   resource_check_defer_image_barrier(ctx, res, new_layout, pipeline);

   /* unordered barriers accumulate access/stage; ordered barriers replace them */
   if (res->obj->unordered_barrier) {
      res->obj->access |= imb.dstAccessMask;
      res->obj->access_stage |= pipeline;
   } else {
      res->obj->access = imb.dstAccessMask;
      res->obj->access_stage = pipeline;
   }
   res->layout = new_layout;
}


/* Map a single VkShaderStageFlagBits to its pipeline stage bit. */
VkPipelineStageFlags
zink_pipeline_flags_from_stage(VkShaderStageFlagBits stage)
{
   switch (stage) {
   case VK_SHADER_STAGE_VERTEX_BIT:
      return VK_PIPELINE_STAGE_VERTEX_SHADER_BIT;
   case VK_SHADER_STAGE_FRAGMENT_BIT:
      return VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
   case VK_SHADER_STAGE_GEOMETRY_BIT:
      return VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT;
   case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
      return VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT;
   case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
      return VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT;
   case VK_SHADER_STAGE_COMPUTE_BIT:
      return VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
   default:
      unreachable("unknown shader stage bit");
   }
}

/* Derive pipeline stages from access flags when the caller supplies none:
 * shader-type access maps to every shader stage, anything else to transfer. */
ALWAYS_INLINE static VkPipelineStageFlags
pipeline_access_stage(VkAccessFlags flags)
{
   if (flags & (VK_ACCESS_UNIFORM_READ_BIT |
                VK_ACCESS_SHADER_READ_BIT |
                VK_ACCESS_SHADER_WRITE_BIT))
      return VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV |
             VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV |
             VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR |
             VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
             VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
             VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
             VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
             VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
             VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
   return VK_PIPELINE_STAGE_TRANSFER_BIT;
}

/* Buffer analogue of zink_resource_image_needs_barrier: a barrier is needed
 * for any write on either side, or for access/stages not already covered by
 * the tracked state.
 */
ALWAYS_INLINE static bool
zink_resource_buffer_needs_barrier(struct zink_resource *res, VkAccessFlags flags, VkPipelineStageFlags pipeline)
{
   /* nothing tracked yet: must emit a barrier to establish state */
   if (!res->obj->access || !res->obj->access_stage)
      return true;
   if (!pipeline)
      pipeline = pipeline_access_stage(flags);
   return zink_resource_access_is_write(res->obj->access) ||
          zink_resource_access_is_write(flags) ||
          ((res->obj->access_stage & pipeline) != pipeline && !(res->obj->access_stage & (pipeline - 1))) ||
          (res->obj->access & flags) != flags;
}

/* Record a global memory barrier covering a buffer use and update the
 * resource's tracked access/stage state.
 */
void
zink_resource_buffer_barrier(struct zink_context *ctx, struct zink_resource *res, VkAccessFlags flags, VkPipelineStageFlags pipeline)
{
   VkMemoryBarrier bmb;
   if (!pipeline)
      pipeline = pipeline_access_stage(flags);
   if (!zink_resource_buffer_needs_barrier(res, flags, pipeline))
      return;

   bmb.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER;
   bmb.pNext = NULL;
   bmb.srcAccessMask
 = res->obj->access;
   bmb.dstAccessMask = flags;
   /* no tracked stage means there is no prior access to wait on */
   if (!res->obj->access_stage)
      bmb.srcAccessMask = 0;
   VkCommandBuffer cmdbuf = get_cmdbuf(ctx, res);
   /* only barrier if we're changing layout or doing something besides read -> read */
   VKCTX(CmdPipelineBarrier)(
      cmdbuf,
      res->obj->access_stage ? res->obj->access_stage : pipeline_access_stage(res->obj->access),
      pipeline,
      0,
      1, &bmb,
      0, NULL,
      0, NULL
   );

   resource_check_defer_buffer_barrier(ctx, res, pipeline);

   /* unordered barriers accumulate access/stage; ordered barriers replace them */
   if (res->obj->unordered_barrier) {
      res->obj->access |= bmb.dstAccessMask;
      res->obj->access_stage |= pipeline;
   } else {
      res->obj->access = bmb.dstAccessMask;
      res->obj->access_stage = pipeline;
   }
}

/* Dispatch the needs-barrier check by resource type (buffer vs. image). */
bool
zink_resource_needs_barrier(struct zink_resource *res, VkImageLayout layout, VkAccessFlags flags, VkPipelineStageFlags pipeline)
{
   if (res->base.b.target == PIPE_BUFFER)
      return zink_resource_buffer_needs_barrier(res, flags, pipeline);
   return zink_resource_image_needs_barrier(res, layout, flags, pipeline);
}

/* Convert a gallium shader type to its Vulkan shader stage bit. */
VkShaderStageFlagBits
zink_shader_stage(enum pipe_shader_type type)
{
   VkShaderStageFlagBits stages[] = {
      [PIPE_SHADER_VERTEX] = VK_SHADER_STAGE_VERTEX_BIT,
      [PIPE_SHADER_FRAGMENT] = VK_SHADER_STAGE_FRAGMENT_BIT,
      [PIPE_SHADER_GEOMETRY] = VK_SHADER_STAGE_GEOMETRY_BIT,
      [PIPE_SHADER_TESS_CTRL] =
 VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
      [PIPE_SHADER_TESS_EVAL] = VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
      [PIPE_SHADER_COMPUTE] = VK_SHADER_STAGE_COMPUTE_BIT,
   };
   return stages[type];
}

/* pipe_context::flush implementation: submit pending work (or defer it),
 * optionally hand back a fence wrapper, and handle the first-frame wait
 * workaround.
 */
static void
zink_flush(struct pipe_context *pctx,
           struct pipe_fence_handle **pfence,
           unsigned flags)
{
   struct zink_context *ctx = zink_context(pctx);
   bool deferred = flags & PIPE_FLUSH_DEFERRED;
   bool deferred_fence = false;
   struct zink_batch *batch = &ctx->batch;
   struct zink_fence *fence = NULL;
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   unsigned submit_count = 0;

   /* triggering clears will force has_work */
   if (!deferred && ctx->clears_enabled)
      /* start rp to do all the clears */
      zink_begin_render_pass(ctx);

   if (!batch->has_work) {
      if (pfence) {
         /* reuse last fence */
         fence = ctx->last_fence;
      }
      if (!deferred) {
         struct zink_batch_state *last = zink_batch_state(ctx->last_fence);
         if (last) {
            sync_flush(ctx, last);
            if (last->is_device_lost)
               check_device_lost(ctx);
         }
      }
      tc_driver_internal_flush_notify(ctx->tc);
   } else {
      fence = &batch->state->fence;
      submit_count = batch->state->submit_count;
      /* a deferred flush without a fence-fd request just records the fence */
      if (deferred && !(flags & PIPE_FLUSH_FENCE_FD) && pfence)
         deferred_fence = true;
      else
         flush_batch(ctx, true);
   }

   if (pfence) {
      struct zink_tc_fence *mfence;

      if (flags & TC_FLUSH_ASYNC) {
         /* threaded-context pre-created the fence wrapper */
         mfence = zink_tc_fence(*pfence);
         assert(mfence);
      } else {
         mfence = zink_create_tc_fence();

         screen->base.fence_reference(&screen->base, pfence, NULL);
         *pfence = (struct pipe_fence_handle *)mfence;
      }

      mfence->fence = fence;
      if (fence)
         mfence->submit_count = submit_count;

      if (deferred_fence) {
         assert(fence);
         mfence->deferred_ctx = pctx;
         assert(!ctx->deferred_fence || ctx->deferred_fence == fence);
         ctx->deferred_fence = fence;
      }

      if (!fence || flags & TC_FLUSH_ASYNC) {
         if (!util_queue_fence_is_signalled(&mfence->ready))
            util_queue_fence_signal(&mfence->ready);
      }
   }
   if (fence) {
      if (!(flags & (PIPE_FLUSH_DEFERRED | PIPE_FLUSH_ASYNC)))
         sync_flush(ctx, zink_batch_state(fence));

      if (flags & PIPE_FLUSH_END_OF_FRAME && !(flags & TC_FLUSH_ASYNC) && !deferred) {
         /* if the first frame has not yet occurred, we need an explicit fence here
          * in some cases in order to correctly draw the first frame, though it's
          * unknown at this time why this is the case
          */
         if (!ctx->first_frame_done)
            zink_vkfence_wait(screen, fence, PIPE_TIMEOUT_INFINITE);
         ctx->first_frame_done = true;
      }
   }
}

/* Flush any pending work and block until the last submitted batch finishes. */
void
zink_fence_wait(struct
 pipe_context *pctx)
{
   struct zink_context *ctx = zink_context(pctx);

   if (ctx->batch.has_work)
      pctx->flush(pctx, NULL, PIPE_FLUSH_HINT_FINISH);
   if (ctx->last_fence)
      stall(ctx);
}

/* Block until the batch identified by batch_id completes on the GPU.
 * batch_id == 0 means "the current, not-yet-submitted batch".
 */
void
zink_wait_on_batch(struct zink_context *ctx, uint32_t batch_id)
{
   struct zink_batch_state *bs;
   if (!batch_id) {
      /* not submitted yet */
      flush_batch(ctx, true);
      bs = zink_batch_state(ctx->last_fence);
      assert(bs);
      batch_id = bs->fence.batch_id;
   }
   assert(batch_id);
   if (ctx->have_timelines) {
      /* timeline semaphores can wait on the id directly */
      if (!zink_screen_timeline_wait(zink_screen(ctx->base.screen), batch_id, UINT64_MAX))
         check_device_lost(ctx);
      return;
   }
   simple_mtx_lock(&ctx->batch_mtx);
   struct zink_fence *fence;

   assert(ctx->last_fence);
   if (batch_id == zink_batch_state(ctx->last_fence)->fence.batch_id)
      fence = ctx->last_fence;
   else {
      /* walk the submitted batch-state list looking for an exact id match */
      for (bs = ctx->batch_states; bs; bs = bs->next) {
         if (bs->fence.batch_id < batch_id)
            continue;
         if (!bs->fence.batch_id || bs->fence.batch_id > batch_id)
            break;
      }
      if (!bs || bs->fence.batch_id != batch_id) {
         simple_mtx_unlock(&ctx->batch_mtx);
         /* if we can't find it, it either must have finished already or is on a different context */
         if (!zink_screen_check_last_finished(zink_screen(ctx->base.screen), batch_id)) {
            /* if it hasn't finished, it's on another context, so force a
 flush so there's something to wait on */
            ctx->batch.has_work = true;
            zink_fence_wait(&ctx->base);
         }
         return;
      }
      fence = &bs->fence;
   }
   simple_mtx_unlock(&ctx->batch_mtx);
   assert(fence);
   sync_flush(ctx, zink_batch_state(fence));
   zink_vkfence_wait(zink_screen(ctx->base.screen), fence, PIPE_TIMEOUT_INFINITE);
}

/* Non-blocking check (fence wait with timeout 0) of whether batch_id has
 * completed.  'have_lock' indicates the caller already holds batch_mtx.
 */
bool
zink_check_batch_completion(struct zink_context *ctx, uint32_t batch_id, bool have_lock)
{
   assert(ctx->batch.state);
   if (!batch_id)
      /* not submitted yet */
      return false;

   if (zink_screen_check_last_finished(zink_screen(ctx->base.screen), batch_id))
      return true;

   if (ctx->have_timelines) {
      /* timeout 0 = poll */
      bool success = zink_screen_timeline_wait(zink_screen(ctx->base.screen), batch_id, 0);
      if (!success)
         check_device_lost(ctx);
      return success;
   }
   struct zink_fence *fence;

   if (!have_lock)
      simple_mtx_lock(&ctx->batch_mtx);

   if (ctx->last_fence && batch_id == zink_batch_state(ctx->last_fence)->fence.batch_id)
      fence = ctx->last_fence;
   else {
      /* walk the submitted batch-state list looking for an exact id match */
      struct zink_batch_state *bs;
      for (bs = ctx->batch_states; bs; bs = bs->next) {
         if (bs->fence.batch_id < batch_id)
            continue;
         if (!bs->fence.batch_id || bs->fence.batch_id > batch_id)
            break;
      }
      if (!bs || bs->fence.batch_id != batch_id) {
         if (!have_lock)
simple_mtx_unlock(&ctx->batch_mtx); 31777ec681f3Smrg /* return compare against last_finished, since this has info from all contexts */ 31787ec681f3Smrg return zink_screen_check_last_finished(zink_screen(ctx->base.screen), batch_id); 31797ec681f3Smrg } 31807ec681f3Smrg fence = &bs->fence; 31817ec681f3Smrg } 31827ec681f3Smrg if (!have_lock) 31837ec681f3Smrg simple_mtx_unlock(&ctx->batch_mtx); 31847ec681f3Smrg assert(fence); 31857ec681f3Smrg if (zink_screen(ctx->base.screen)->threaded && 31867ec681f3Smrg !util_queue_fence_is_signalled(&zink_batch_state(fence)->flush_completed)) 31877ec681f3Smrg return false; 31887ec681f3Smrg return zink_vkfence_wait(zink_screen(ctx->base.screen), fence, 0); 31897ec681f3Smrg} 31907ec681f3Smrg 31917ec681f3Smrgstatic void 31927ec681f3Smrgzink_texture_barrier(struct pipe_context *pctx, unsigned flags) 31937ec681f3Smrg{ 31947ec681f3Smrg struct zink_context *ctx = zink_context(pctx); 31957ec681f3Smrg if (!ctx->framebuffer || !ctx->framebuffer->state.num_attachments) 31967ec681f3Smrg return; 31977ec681f3Smrg 31987ec681f3Smrg zink_batch_no_rp(ctx); 31997ec681f3Smrg if (ctx->fb_state.zsbuf) { 32007ec681f3Smrg VkMemoryBarrier dmb; 32017ec681f3Smrg dmb.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER; 32027ec681f3Smrg dmb.pNext = NULL; 32037ec681f3Smrg dmb.srcAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT; 32047ec681f3Smrg dmb.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; 32057ec681f3Smrg VKCTX(CmdPipelineBarrier)( 32067ec681f3Smrg ctx->batch.state->cmdbuf, 32077ec681f3Smrg VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, 32087ec681f3Smrg VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 32097ec681f3Smrg 0, 32107ec681f3Smrg 1, &dmb, 32117ec681f3Smrg 0, NULL, 32127ec681f3Smrg 0, NULL 32137ec681f3Smrg ); 32147ec681f3Smrg } 32157ec681f3Smrg if (!ctx->fb_state.nr_cbufs) 32167ec681f3Smrg return; 32177ec681f3Smrg 32187ec681f3Smrg VkMemoryBarrier bmb; 32197ec681f3Smrg bmb.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER; 
32207ec681f3Smrg bmb.pNext = NULL; 32217ec681f3Smrg bmb.srcAccessMask = 0; 32227ec681f3Smrg bmb.dstAccessMask = 0; 32237ec681f3Smrg bmb.srcAccessMask |= VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; 32247ec681f3Smrg bmb.dstAccessMask |= VK_ACCESS_SHADER_READ_BIT; 32257ec681f3Smrg VKCTX(CmdPipelineBarrier)( 32267ec681f3Smrg ctx->batch.state->cmdbuf, 32277ec681f3Smrg VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 32287ec681f3Smrg VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 32297ec681f3Smrg 0, 32307ec681f3Smrg 1, &bmb, 32317ec681f3Smrg 0, NULL, 32327ec681f3Smrg 0, NULL 32337ec681f3Smrg ); 32347ec681f3Smrg} 32357ec681f3Smrg 32367ec681f3Smrgstatic inline void 32377ec681f3Smrgmem_barrier(struct zink_context *ctx, VkPipelineStageFlags src_stage, VkPipelineStageFlags dst_stage, VkAccessFlags src, VkAccessFlags dst) 32387ec681f3Smrg{ 32397ec681f3Smrg struct zink_batch *batch = &ctx->batch; 32407ec681f3Smrg VkMemoryBarrier mb; 32417ec681f3Smrg mb.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER; 32427ec681f3Smrg mb.pNext = NULL; 32437ec681f3Smrg mb.srcAccessMask = src; 32447ec681f3Smrg mb.dstAccessMask = dst; 32457ec681f3Smrg zink_end_render_pass(ctx); 32467ec681f3Smrg VKCTX(CmdPipelineBarrier)(batch->state->cmdbuf, src_stage, dst_stage, 0, 1, &mb, 0, NULL, 0, NULL); 32477ec681f3Smrg} 32487ec681f3Smrg 32497ec681f3Smrgvoid 32507ec681f3Smrgzink_flush_memory_barrier(struct zink_context *ctx, bool is_compute) 32517ec681f3Smrg{ 32527ec681f3Smrg const VkPipelineStageFlags gfx_flags = VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | 32537ec681f3Smrg VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | 32547ec681f3Smrg VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | 32557ec681f3Smrg VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT | 32567ec681f3Smrg VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT; 32577ec681f3Smrg const VkPipelineStageFlags cs_flags = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT; 32587ec681f3Smrg VkPipelineStageFlags src = ctx->batch.last_was_compute ? 
cs_flags : gfx_flags; 32597ec681f3Smrg VkPipelineStageFlags dst = is_compute ? cs_flags : gfx_flags; 32607ec681f3Smrg 32617ec681f3Smrg if (ctx->memory_barrier & (PIPE_BARRIER_TEXTURE | PIPE_BARRIER_SHADER_BUFFER | PIPE_BARRIER_IMAGE)) 32627ec681f3Smrg mem_barrier(ctx, src, dst, VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT); 32637ec681f3Smrg 32647ec681f3Smrg if (ctx->memory_barrier & PIPE_BARRIER_CONSTANT_BUFFER) 32657ec681f3Smrg mem_barrier(ctx, src, dst, 32667ec681f3Smrg VK_ACCESS_SHADER_WRITE_BIT, 32677ec681f3Smrg VK_ACCESS_UNIFORM_READ_BIT); 32687ec681f3Smrg 32697ec681f3Smrg if (!is_compute) { 32707ec681f3Smrg if (ctx->memory_barrier & PIPE_BARRIER_INDIRECT_BUFFER) 32717ec681f3Smrg mem_barrier(ctx, src, VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, 32727ec681f3Smrg VK_ACCESS_SHADER_WRITE_BIT, 32737ec681f3Smrg VK_ACCESS_INDIRECT_COMMAND_READ_BIT); 32747ec681f3Smrg if (ctx->memory_barrier & PIPE_BARRIER_VERTEX_BUFFER) 32757ec681f3Smrg mem_barrier(ctx, gfx_flags, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 32767ec681f3Smrg VK_ACCESS_SHADER_WRITE_BIT, 32777ec681f3Smrg VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT); 32787ec681f3Smrg 32797ec681f3Smrg if (ctx->memory_barrier & PIPE_BARRIER_INDEX_BUFFER) 32807ec681f3Smrg mem_barrier(ctx, gfx_flags, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 32817ec681f3Smrg VK_ACCESS_SHADER_WRITE_BIT, 32827ec681f3Smrg VK_ACCESS_INDEX_READ_BIT); 32837ec681f3Smrg if (ctx->memory_barrier & PIPE_BARRIER_FRAMEBUFFER) 32847ec681f3Smrg zink_texture_barrier(&ctx->base, 0); 32857ec681f3Smrg if (ctx->memory_barrier & PIPE_BARRIER_STREAMOUT_BUFFER) 32867ec681f3Smrg mem_barrier(ctx, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | 32877ec681f3Smrg VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | 32887ec681f3Smrg VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, 32897ec681f3Smrg VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT, 32907ec681f3Smrg VK_ACCESS_SHADER_READ_BIT, 32917ec681f3Smrg VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT | 32927ec681f3Smrg 
VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT); 32937ec681f3Smrg } 32947ec681f3Smrg ctx->memory_barrier = 0; 32957ec681f3Smrg} 32967ec681f3Smrg 32977ec681f3Smrgstatic void 32987ec681f3Smrgzink_memory_barrier(struct pipe_context *pctx, unsigned flags) 32997ec681f3Smrg{ 33007ec681f3Smrg struct zink_context *ctx = zink_context(pctx); 33017ec681f3Smrg 33027ec681f3Smrg flags &= ~PIPE_BARRIER_UPDATE; 33037ec681f3Smrg if (!flags) 33047ec681f3Smrg return; 33057ec681f3Smrg 33067ec681f3Smrg if (flags & PIPE_BARRIER_MAPPED_BUFFER) { 33077ec681f3Smrg /* TODO: this should flush all persistent buffers in use as I think */ 33087ec681f3Smrg flags &= ~PIPE_BARRIER_MAPPED_BUFFER; 33097ec681f3Smrg } 33107ec681f3Smrg ctx->memory_barrier = flags; 33117ec681f3Smrg} 33127ec681f3Smrg 33137ec681f3Smrgstatic void 33147ec681f3Smrgzink_flush_resource(struct pipe_context *pctx, 33157ec681f3Smrg struct pipe_resource *pres) 33167ec681f3Smrg{ 33177ec681f3Smrg struct zink_context *ctx = zink_context(pctx); 33187ec681f3Smrg /* TODO: this is not futureproof and should be updated once proper 33197ec681f3Smrg * WSI support is added 33207ec681f3Smrg */ 33217ec681f3Smrg if (pres->bind & (PIPE_BIND_SHARED | PIPE_BIND_SCANOUT)) 33227ec681f3Smrg pipe_resource_reference(&ctx->batch.state->flush_res, pres); 33237ec681f3Smrg} 33247ec681f3Smrg 33257ec681f3Smrgvoid 33267ec681f3Smrgzink_copy_buffer(struct zink_context *ctx, struct zink_resource *dst, struct zink_resource *src, 33277ec681f3Smrg unsigned dst_offset, unsigned src_offset, unsigned size) 33287ec681f3Smrg{ 33297ec681f3Smrg VkBufferCopy region; 33307ec681f3Smrg region.srcOffset = src_offset; 33317ec681f3Smrg region.dstOffset = dst_offset; 33327ec681f3Smrg region.size = size; 33337ec681f3Smrg 33347ec681f3Smrg struct zink_batch *batch = &ctx->batch; 33357ec681f3Smrg zink_batch_no_rp(ctx); 33367ec681f3Smrg zink_batch_reference_resource_rw(batch, src, false); 33377ec681f3Smrg zink_batch_reference_resource_rw(batch, dst, true); 33387ec681f3Smrg 
util_range_add(&dst->base.b, &dst->valid_buffer_range, dst_offset, dst_offset + size); 33397ec681f3Smrg zink_resource_buffer_barrier(ctx, src, VK_ACCESS_TRANSFER_READ_BIT, 0); 33407ec681f3Smrg zink_resource_buffer_barrier(ctx, dst, VK_ACCESS_TRANSFER_WRITE_BIT, 0); 33417ec681f3Smrg VKCTX(CmdCopyBuffer)(batch->state->cmdbuf, src->obj->buffer, dst->obj->buffer, 1, ®ion); 33427ec681f3Smrg} 33437ec681f3Smrg 33447ec681f3Smrgvoid 33457ec681f3Smrgzink_copy_image_buffer(struct zink_context *ctx, struct zink_resource *dst, struct zink_resource *src, 33467ec681f3Smrg unsigned dst_level, unsigned dstx, unsigned dsty, unsigned dstz, 33477ec681f3Smrg unsigned src_level, const struct pipe_box *src_box, enum pipe_map_flags map_flags) 33487ec681f3Smrg{ 33497ec681f3Smrg struct zink_resource *img = dst->base.b.target == PIPE_BUFFER ? src : dst; 33507ec681f3Smrg struct zink_resource *buf = dst->base.b.target == PIPE_BUFFER ? dst : src; 33517ec681f3Smrg struct zink_batch *batch = &ctx->batch; 33527ec681f3Smrg zink_batch_no_rp(ctx); 33537ec681f3Smrg 33547ec681f3Smrg bool buf2img = buf == src; 33557ec681f3Smrg 33567ec681f3Smrg if (buf2img) { 33577ec681f3Smrg zink_resource_image_barrier(ctx, img, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 0, 0); 33587ec681f3Smrg zink_resource_buffer_barrier(ctx, buf, VK_ACCESS_TRANSFER_READ_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT); 33597ec681f3Smrg } else { 33607ec681f3Smrg zink_resource_image_barrier(ctx, img, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, 0, 0); 33617ec681f3Smrg zink_resource_buffer_barrier(ctx, buf, VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT); 33627ec681f3Smrg util_range_add(&dst->base.b, &dst->valid_buffer_range, dstx, dstx + src_box->width); 33637ec681f3Smrg } 33647ec681f3Smrg 33657ec681f3Smrg VkBufferImageCopy region = {0}; 33667ec681f3Smrg region.bufferOffset = buf2img ? 
src_box->x : dstx; 33677ec681f3Smrg region.bufferRowLength = 0; 33687ec681f3Smrg region.bufferImageHeight = 0; 33697ec681f3Smrg region.imageSubresource.mipLevel = buf2img ? dst_level : src_level; 33707ec681f3Smrg switch (img->base.b.target) { 33717ec681f3Smrg case PIPE_TEXTURE_CUBE: 33727ec681f3Smrg case PIPE_TEXTURE_CUBE_ARRAY: 33737ec681f3Smrg case PIPE_TEXTURE_2D_ARRAY: 33747ec681f3Smrg case PIPE_TEXTURE_1D_ARRAY: 33757ec681f3Smrg /* these use layer */ 33767ec681f3Smrg region.imageSubresource.baseArrayLayer = buf2img ? dstz : src_box->z; 33777ec681f3Smrg region.imageSubresource.layerCount = src_box->depth; 33787ec681f3Smrg region.imageOffset.z = 0; 33797ec681f3Smrg region.imageExtent.depth = 1; 33807ec681f3Smrg break; 33817ec681f3Smrg case PIPE_TEXTURE_3D: 33827ec681f3Smrg /* this uses depth */ 33837ec681f3Smrg region.imageSubresource.baseArrayLayer = 0; 33847ec681f3Smrg region.imageSubresource.layerCount = 1; 33857ec681f3Smrg region.imageOffset.z = buf2img ? dstz : src_box->z; 33867ec681f3Smrg region.imageExtent.depth = src_box->depth; 33877ec681f3Smrg break; 33887ec681f3Smrg default: 33897ec681f3Smrg /* these must only copy one layer */ 33907ec681f3Smrg region.imageSubresource.baseArrayLayer = 0; 33917ec681f3Smrg region.imageSubresource.layerCount = 1; 33927ec681f3Smrg region.imageOffset.z = 0; 33937ec681f3Smrg region.imageExtent.depth = 1; 33947ec681f3Smrg } 33957ec681f3Smrg region.imageOffset.x = buf2img ? dstx : src_box->x; 33967ec681f3Smrg region.imageOffset.y = buf2img ? 
dsty : src_box->y; 33977ec681f3Smrg 33987ec681f3Smrg region.imageExtent.width = src_box->width; 33997ec681f3Smrg region.imageExtent.height = src_box->height; 34007ec681f3Smrg 34017ec681f3Smrg zink_batch_reference_resource_rw(batch, img, buf2img); 34027ec681f3Smrg zink_batch_reference_resource_rw(batch, buf, !buf2img); 34037ec681f3Smrg 34047ec681f3Smrg /* we're using u_transfer_helper_deinterleave, which means we'll be getting PIPE_MAP_* usage 34057ec681f3Smrg * to indicate whether to copy either the depth or stencil aspects 34067ec681f3Smrg */ 34077ec681f3Smrg unsigned aspects = 0; 34087ec681f3Smrg if (map_flags) { 34097ec681f3Smrg assert((map_flags & (PIPE_MAP_DEPTH_ONLY | PIPE_MAP_STENCIL_ONLY)) != 34107ec681f3Smrg (PIPE_MAP_DEPTH_ONLY | PIPE_MAP_STENCIL_ONLY)); 34117ec681f3Smrg if (map_flags & PIPE_MAP_DEPTH_ONLY) 34127ec681f3Smrg aspects = VK_IMAGE_ASPECT_DEPTH_BIT; 34137ec681f3Smrg else if (map_flags & PIPE_MAP_STENCIL_ONLY) 34147ec681f3Smrg aspects = VK_IMAGE_ASPECT_STENCIL_BIT; 34157ec681f3Smrg } 34167ec681f3Smrg if (!aspects) 34177ec681f3Smrg aspects = img->aspect; 34187ec681f3Smrg while (aspects) { 34197ec681f3Smrg int aspect = 1 << u_bit_scan(&aspects); 34207ec681f3Smrg region.imageSubresource.aspectMask = aspect; 34217ec681f3Smrg 34227ec681f3Smrg /* this may or may not work with multisampled depth/stencil buffers depending on the driver implementation: 34237ec681f3Smrg * 34247ec681f3Smrg * srcImage must have a sample count equal to VK_SAMPLE_COUNT_1_BIT 34257ec681f3Smrg * - vkCmdCopyImageToBuffer spec 34267ec681f3Smrg * 34277ec681f3Smrg * dstImage must have a sample count equal to VK_SAMPLE_COUNT_1_BIT 34287ec681f3Smrg * - vkCmdCopyBufferToImage spec 34297ec681f3Smrg */ 34307ec681f3Smrg if (buf2img) 34317ec681f3Smrg VKCTX(CmdCopyBufferToImage)(batch->state->cmdbuf, buf->obj->buffer, img->obj->image, img->layout, 1, ®ion); 34327ec681f3Smrg else 34337ec681f3Smrg VKCTX(CmdCopyImageToBuffer)(batch->state->cmdbuf, img->obj->image, img->layout, 
buf->obj->buffer, 1, ®ion); 34347ec681f3Smrg } 34357ec681f3Smrg} 34367ec681f3Smrg 34377ec681f3Smrgstatic void 34387ec681f3Smrgzink_resource_copy_region(struct pipe_context *pctx, 34397ec681f3Smrg struct pipe_resource *pdst, 34407ec681f3Smrg unsigned dst_level, unsigned dstx, unsigned dsty, unsigned dstz, 34417ec681f3Smrg struct pipe_resource *psrc, 34427ec681f3Smrg unsigned src_level, const struct pipe_box *src_box) 34437ec681f3Smrg{ 34447ec681f3Smrg struct zink_resource *dst = zink_resource(pdst); 34457ec681f3Smrg struct zink_resource *src = zink_resource(psrc); 34467ec681f3Smrg struct zink_context *ctx = zink_context(pctx); 34477ec681f3Smrg if (dst->base.b.target != PIPE_BUFFER && src->base.b.target != PIPE_BUFFER) { 34487ec681f3Smrg VkImageCopy region = {0}; 34497ec681f3Smrg if (util_format_get_num_planes(src->base.b.format) == 1 && 34507ec681f3Smrg util_format_get_num_planes(dst->base.b.format) == 1) { 34517ec681f3Smrg /* If neither the calling command’s srcImage nor the calling command’s dstImage 34527ec681f3Smrg * has a multi-planar image format then the aspectMask member of srcSubresource 34537ec681f3Smrg * and dstSubresource must match 34547ec681f3Smrg * 34557ec681f3Smrg * -VkImageCopy spec 34567ec681f3Smrg */ 34577ec681f3Smrg assert(src->aspect == dst->aspect); 34587ec681f3Smrg } else 34597ec681f3Smrg unreachable("planar formats not yet handled"); 34607ec681f3Smrg 34617ec681f3Smrg zink_fb_clears_apply_or_discard(ctx, pdst, (struct u_rect){dstx, dstx + src_box->width, dsty, dsty + src_box->height}, false); 34627ec681f3Smrg zink_fb_clears_apply_region(ctx, psrc, zink_rect_from_box(src_box)); 34637ec681f3Smrg 34647ec681f3Smrg region.srcSubresource.aspectMask = src->aspect; 34657ec681f3Smrg region.srcSubresource.mipLevel = src_level; 34667ec681f3Smrg switch (src->base.b.target) { 34677ec681f3Smrg case PIPE_TEXTURE_CUBE: 34687ec681f3Smrg case PIPE_TEXTURE_CUBE_ARRAY: 34697ec681f3Smrg case PIPE_TEXTURE_2D_ARRAY: 34707ec681f3Smrg case PIPE_TEXTURE_1D_ARRAY: 
34717ec681f3Smrg /* these use layer */ 34727ec681f3Smrg region.srcSubresource.baseArrayLayer = src_box->z; 34737ec681f3Smrg region.srcSubresource.layerCount = src_box->depth; 34747ec681f3Smrg region.srcOffset.z = 0; 34757ec681f3Smrg region.extent.depth = 1; 34767ec681f3Smrg break; 34777ec681f3Smrg case PIPE_TEXTURE_3D: 34787ec681f3Smrg /* this uses depth */ 34797ec681f3Smrg region.srcSubresource.baseArrayLayer = 0; 34807ec681f3Smrg region.srcSubresource.layerCount = 1; 34817ec681f3Smrg region.srcOffset.z = src_box->z; 34827ec681f3Smrg region.extent.depth = src_box->depth; 34837ec681f3Smrg break; 34847ec681f3Smrg default: 34857ec681f3Smrg /* these must only copy one layer */ 34867ec681f3Smrg region.srcSubresource.baseArrayLayer = 0; 34877ec681f3Smrg region.srcSubresource.layerCount = 1; 34887ec681f3Smrg region.srcOffset.z = 0; 34897ec681f3Smrg region.extent.depth = 1; 34907ec681f3Smrg } 34917ec681f3Smrg 34927ec681f3Smrg region.srcOffset.x = src_box->x; 34937ec681f3Smrg region.srcOffset.y = src_box->y; 34947ec681f3Smrg 34957ec681f3Smrg region.dstSubresource.aspectMask = dst->aspect; 34967ec681f3Smrg region.dstSubresource.mipLevel = dst_level; 34977ec681f3Smrg switch (dst->base.b.target) { 34987ec681f3Smrg case PIPE_TEXTURE_CUBE: 34997ec681f3Smrg case PIPE_TEXTURE_CUBE_ARRAY: 35007ec681f3Smrg case PIPE_TEXTURE_2D_ARRAY: 35017ec681f3Smrg case PIPE_TEXTURE_1D_ARRAY: 35027ec681f3Smrg /* these use layer */ 35037ec681f3Smrg region.dstSubresource.baseArrayLayer = dstz; 35047ec681f3Smrg region.dstSubresource.layerCount = src_box->depth; 35057ec681f3Smrg region.dstOffset.z = 0; 35067ec681f3Smrg break; 35077ec681f3Smrg case PIPE_TEXTURE_3D: 35087ec681f3Smrg /* this uses depth */ 35097ec681f3Smrg region.dstSubresource.baseArrayLayer = 0; 35107ec681f3Smrg region.dstSubresource.layerCount = 1; 35117ec681f3Smrg region.dstOffset.z = dstz; 35127ec681f3Smrg break; 35137ec681f3Smrg default: 35147ec681f3Smrg /* these must only copy one layer */ 35157ec681f3Smrg 
region.dstSubresource.baseArrayLayer = 0; 35167ec681f3Smrg region.dstSubresource.layerCount = 1; 35177ec681f3Smrg region.dstOffset.z = 0; 35187ec681f3Smrg } 35197ec681f3Smrg 35207ec681f3Smrg region.dstOffset.x = dstx; 35217ec681f3Smrg region.dstOffset.y = dsty; 35227ec681f3Smrg region.extent.width = src_box->width; 35237ec681f3Smrg region.extent.height = src_box->height; 35247ec681f3Smrg 35257ec681f3Smrg struct zink_batch *batch = &ctx->batch; 35267ec681f3Smrg zink_batch_no_rp(ctx); 35277ec681f3Smrg zink_batch_reference_resource_rw(batch, src, false); 35287ec681f3Smrg zink_batch_reference_resource_rw(batch, dst, true); 35297ec681f3Smrg 35307ec681f3Smrg zink_resource_setup_transfer_layouts(ctx, src, dst); 35317ec681f3Smrg VKCTX(CmdCopyImage)(batch->state->cmdbuf, src->obj->image, src->layout, 35327ec681f3Smrg dst->obj->image, dst->layout, 35337ec681f3Smrg 1, ®ion); 35347ec681f3Smrg } else if (dst->base.b.target == PIPE_BUFFER && 35357ec681f3Smrg src->base.b.target == PIPE_BUFFER) { 35367ec681f3Smrg zink_copy_buffer(ctx, dst, src, dstx, src_box->x, src_box->width); 35377ec681f3Smrg } else 35387ec681f3Smrg zink_copy_image_buffer(ctx, dst, src, dst_level, dstx, dsty, dstz, src_level, src_box, 0); 35397ec681f3Smrg} 35407ec681f3Smrg 35417ec681f3Smrgstatic struct pipe_stream_output_target * 35427ec681f3Smrgzink_create_stream_output_target(struct pipe_context *pctx, 35437ec681f3Smrg struct pipe_resource *pres, 35447ec681f3Smrg unsigned buffer_offset, 35457ec681f3Smrg unsigned buffer_size) 35467ec681f3Smrg{ 35477ec681f3Smrg struct zink_so_target *t; 35487ec681f3Smrg t = CALLOC_STRUCT(zink_so_target); 35497ec681f3Smrg if (!t) 35507ec681f3Smrg return NULL; 35517ec681f3Smrg 35527ec681f3Smrg /* using PIPE_BIND_CUSTOM here lets us create a custom pipe buffer resource, 35537ec681f3Smrg * which allows us to differentiate and use VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT 35547ec681f3Smrg * as we must for this case 35557ec681f3Smrg */ 35567ec681f3Smrg 
t->counter_buffer = pipe_buffer_create(pctx->screen, PIPE_BIND_STREAM_OUTPUT | PIPE_BIND_CUSTOM, PIPE_USAGE_DEFAULT, 4); 35577ec681f3Smrg if (!t->counter_buffer) { 35587ec681f3Smrg FREE(t); 35597ec681f3Smrg return NULL; 35607ec681f3Smrg } 35617ec681f3Smrg 35627ec681f3Smrg t->base.reference.count = 1; 35637ec681f3Smrg t->base.context = pctx; 35647ec681f3Smrg pipe_resource_reference(&t->base.buffer, pres); 35657ec681f3Smrg t->base.buffer_offset = buffer_offset; 35667ec681f3Smrg t->base.buffer_size = buffer_size; 35677ec681f3Smrg 35687ec681f3Smrg zink_resource(t->base.buffer)->so_valid = true; 35697ec681f3Smrg 35707ec681f3Smrg return &t->base; 35717ec681f3Smrg} 35727ec681f3Smrg 35737ec681f3Smrgstatic void 35747ec681f3Smrgzink_stream_output_target_destroy(struct pipe_context *pctx, 35757ec681f3Smrg struct pipe_stream_output_target *psot) 35767ec681f3Smrg{ 35777ec681f3Smrg struct zink_so_target *t = (struct zink_so_target *)psot; 35787ec681f3Smrg pipe_resource_reference(&t->counter_buffer, NULL); 35797ec681f3Smrg pipe_resource_reference(&t->base.buffer, NULL); 35807ec681f3Smrg FREE(t); 35817ec681f3Smrg} 35827ec681f3Smrg 35837ec681f3Smrgstatic void 35847ec681f3Smrgzink_set_stream_output_targets(struct pipe_context *pctx, 35857ec681f3Smrg unsigned num_targets, 35867ec681f3Smrg struct pipe_stream_output_target **targets, 35877ec681f3Smrg const unsigned *offsets) 35887ec681f3Smrg{ 35897ec681f3Smrg struct zink_context *ctx = zink_context(pctx); 35907ec681f3Smrg 35917ec681f3Smrg /* always set counter_buffer_valid=false on unbind: 35927ec681f3Smrg * - on resume (indicated by offset==-1), set counter_buffer_valid=true 35937ec681f3Smrg * - otherwise the counter buffer is invalidated 35947ec681f3Smrg */ 35957ec681f3Smrg 35967ec681f3Smrg if (num_targets == 0) { 35977ec681f3Smrg for (unsigned i = 0; i < ctx->num_so_targets; i++) { 35987ec681f3Smrg if (ctx->so_targets[i]) { 35997ec681f3Smrg struct zink_resource *so = zink_resource(ctx->so_targets[i]->buffer); 36007ec681f3Smrg if 
(so) { 36017ec681f3Smrg so->so_bind_count--; 36027ec681f3Smrg update_res_bind_count(ctx, so, false, true); 36037ec681f3Smrg } 36047ec681f3Smrg } 36057ec681f3Smrg pipe_so_target_reference(&ctx->so_targets[i], NULL); 36067ec681f3Smrg } 36077ec681f3Smrg ctx->num_so_targets = 0; 36087ec681f3Smrg } else { 36097ec681f3Smrg for (unsigned i = 0; i < num_targets; i++) { 36107ec681f3Smrg struct zink_so_target *t = zink_so_target(targets[i]); 36117ec681f3Smrg pipe_so_target_reference(&ctx->so_targets[i], targets[i]); 36127ec681f3Smrg if (!t) 36137ec681f3Smrg continue; 36147ec681f3Smrg struct zink_resource *res = zink_resource(t->counter_buffer); 36157ec681f3Smrg if (offsets[0] == (unsigned)-1) { 36167ec681f3Smrg ctx->xfb_barrier |= zink_resource_buffer_needs_barrier(res, 36177ec681f3Smrg VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT, 36187ec681f3Smrg VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT); 36197ec681f3Smrg } else { 36207ec681f3Smrg ctx->xfb_barrier |= zink_resource_buffer_needs_barrier(res, 36217ec681f3Smrg VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT, 36227ec681f3Smrg VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT); 36237ec681f3Smrg t->counter_buffer_valid = false; 36247ec681f3Smrg } 36257ec681f3Smrg struct zink_resource *so = zink_resource(ctx->so_targets[i]->buffer); 36267ec681f3Smrg if (so) { 36277ec681f3Smrg so->so_bind_count++; 36287ec681f3Smrg update_res_bind_count(ctx, so, false, false); 36297ec681f3Smrg } 36307ec681f3Smrg } 36317ec681f3Smrg for (unsigned i = num_targets; i < ctx->num_so_targets; i++) { 36327ec681f3Smrg if (ctx->so_targets[i]) { 36337ec681f3Smrg struct zink_resource *so = zink_resource(ctx->so_targets[i]->buffer); 36347ec681f3Smrg if (so) { 36357ec681f3Smrg so->so_bind_count--; 36367ec681f3Smrg update_res_bind_count(ctx, so, false, true); 36377ec681f3Smrg } 36387ec681f3Smrg } 36397ec681f3Smrg pipe_so_target_reference(&ctx->so_targets[i], NULL); 36407ec681f3Smrg } 36417ec681f3Smrg ctx->num_so_targets = num_targets; 36427ec681f3Smrg 
36437ec681f3Smrg /* TODO: possibly avoid rebinding on resume if resuming from same buffers? */ 36447ec681f3Smrg ctx->dirty_so_targets = true; 36457ec681f3Smrg } 36467ec681f3Smrg} 36477ec681f3Smrg 36487ec681f3Smrgvoid 36497ec681f3Smrgzink_rebind_framebuffer(struct zink_context *ctx, struct zink_resource *res) 36507ec681f3Smrg{ 36517ec681f3Smrg if (!ctx->framebuffer) 36527ec681f3Smrg return; 36537ec681f3Smrg bool did_rebind = false; 36547ec681f3Smrg if (res->aspect & VK_IMAGE_ASPECT_COLOR_BIT) { 36557ec681f3Smrg for (unsigned i = 0; i < ctx->fb_state.nr_cbufs; i++) { 36567ec681f3Smrg if (!ctx->fb_state.cbufs[i] || 36577ec681f3Smrg zink_resource(ctx->fb_state.cbufs[i]->texture) != res) 36587ec681f3Smrg continue; 36597ec681f3Smrg zink_rebind_ctx_surface(ctx, &ctx->fb_state.cbufs[i]); 36607ec681f3Smrg did_rebind = true; 36617ec681f3Smrg } 36627ec681f3Smrg } else { 36637ec681f3Smrg if (ctx->fb_state.zsbuf && zink_resource(ctx->fb_state.zsbuf->texture) != res) { 36647ec681f3Smrg zink_rebind_ctx_surface(ctx, &ctx->fb_state.zsbuf); 36657ec681f3Smrg did_rebind = true; 36667ec681f3Smrg } 36677ec681f3Smrg } 36687ec681f3Smrg 36697ec681f3Smrg did_rebind |= rebind_fb_state(ctx, res, false); 36707ec681f3Smrg 36717ec681f3Smrg if (!did_rebind) 36727ec681f3Smrg return; 36737ec681f3Smrg 36747ec681f3Smrg zink_batch_no_rp(ctx); 36757ec681f3Smrg if (zink_screen(ctx->base.screen)->info.have_KHR_imageless_framebuffer) { 36767ec681f3Smrg struct zink_framebuffer *fb = ctx->get_framebuffer(ctx); 36777ec681f3Smrg ctx->fb_changed |= ctx->framebuffer != fb; 36787ec681f3Smrg ctx->framebuffer = fb; 36797ec681f3Smrg } 36807ec681f3Smrg} 36817ec681f3Smrg 36827ec681f3SmrgALWAYS_INLINE static struct zink_resource * 36837ec681f3Smrgrebind_ubo(struct zink_context *ctx, enum pipe_shader_type shader, unsigned slot) 36847ec681f3Smrg{ 36857ec681f3Smrg struct zink_resource *res = update_descriptor_state_ubo(ctx, shader, slot, 36867ec681f3Smrg ctx->di.descriptor_res[ZINK_DESCRIPTOR_TYPE_UBO][shader][slot]); 
36877ec681f3Smrg zink_screen(ctx->base.screen)->context_invalidate_descriptor_state(ctx, shader, ZINK_DESCRIPTOR_TYPE_UBO, slot, 1); 36887ec681f3Smrg return res; 36897ec681f3Smrg} 36907ec681f3Smrg 36917ec681f3SmrgALWAYS_INLINE static struct zink_resource * 36927ec681f3Smrgrebind_ssbo(struct zink_context *ctx, enum pipe_shader_type shader, unsigned slot) 36937ec681f3Smrg{ 36947ec681f3Smrg const struct pipe_shader_buffer *ssbo = &ctx->ssbos[shader][slot]; 36957ec681f3Smrg struct zink_resource *res = zink_resource(ssbo->buffer); 36967ec681f3Smrg if (!res) 36977ec681f3Smrg return NULL; 36987ec681f3Smrg util_range_add(&res->base.b, &res->valid_buffer_range, ssbo->buffer_offset, 36997ec681f3Smrg ssbo->buffer_offset + ssbo->buffer_size); 37007ec681f3Smrg update_descriptor_state_ssbo(ctx, shader, slot, res); 37017ec681f3Smrg zink_screen(ctx->base.screen)->context_invalidate_descriptor_state(ctx, shader, ZINK_DESCRIPTOR_TYPE_SSBO, slot, 1); 37027ec681f3Smrg return res; 37037ec681f3Smrg} 37047ec681f3Smrg 37057ec681f3SmrgALWAYS_INLINE static struct zink_resource * 37067ec681f3Smrgrebind_tbo(struct zink_context *ctx, enum pipe_shader_type shader, unsigned slot) 37077ec681f3Smrg{ 37087ec681f3Smrg struct zink_sampler_view *sampler_view = zink_sampler_view(ctx->sampler_views[shader][slot]); 37097ec681f3Smrg if (!sampler_view || sampler_view->base.texture->target != PIPE_BUFFER) 37107ec681f3Smrg return NULL; 37117ec681f3Smrg struct zink_resource *res = zink_resource(sampler_view->base.texture); 37127ec681f3Smrg if (zink_batch_usage_exists(sampler_view->buffer_view->batch_uses)) 37137ec681f3Smrg zink_batch_reference_bufferview(&ctx->batch, sampler_view->buffer_view); 37147ec681f3Smrg VkBufferViewCreateInfo bvci = sampler_view->buffer_view->bvci; 37157ec681f3Smrg bvci.buffer = res->obj->buffer; 37167ec681f3Smrg zink_buffer_view_reference(zink_screen(ctx->base.screen), &sampler_view->buffer_view, NULL); 37177ec681f3Smrg sampler_view->buffer_view = get_buffer_view(ctx, res, &bvci); 
37187ec681f3Smrg update_descriptor_state_sampler(ctx, shader, slot, res); 37197ec681f3Smrg zink_screen(ctx->base.screen)->context_invalidate_descriptor_state(ctx, shader, ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW, slot, 1); 37207ec681f3Smrg return res; 37217ec681f3Smrg} 37227ec681f3Smrg 37237ec681f3SmrgALWAYS_INLINE static struct zink_resource * 37247ec681f3Smrgrebind_ibo(struct zink_context *ctx, enum pipe_shader_type shader, unsigned slot) 37257ec681f3Smrg{ 37267ec681f3Smrg struct zink_image_view *image_view = &ctx->image_views[shader][slot]; 37277ec681f3Smrg struct zink_resource *res = zink_resource(image_view->base.resource); 37287ec681f3Smrg if (!res || res->base.b.target != PIPE_BUFFER) 37297ec681f3Smrg return NULL; 37307ec681f3Smrg zink_descriptor_set_refs_clear(&image_view->buffer_view->desc_set_refs, image_view->buffer_view); 37317ec681f3Smrg if (zink_batch_usage_exists(image_view->buffer_view->batch_uses)) 37327ec681f3Smrg zink_batch_reference_bufferview(&ctx->batch, image_view->buffer_view); 37337ec681f3Smrg VkBufferViewCreateInfo bvci = image_view->buffer_view->bvci; 37347ec681f3Smrg bvci.buffer = res->obj->buffer; 37357ec681f3Smrg zink_buffer_view_reference(zink_screen(ctx->base.screen), &image_view->buffer_view, NULL); 37367ec681f3Smrg if (!zink_resource_object_init_storage(ctx, res)) { 37377ec681f3Smrg debug_printf("couldn't create storage image!"); 37387ec681f3Smrg return NULL; 37397ec681f3Smrg } 37407ec681f3Smrg image_view->buffer_view = get_buffer_view(ctx, res, &bvci); 37417ec681f3Smrg assert(image_view->buffer_view); 37427ec681f3Smrg util_range_add(&res->base.b, &res->valid_buffer_range, image_view->base.u.buf.offset, 37437ec681f3Smrg image_view->base.u.buf.offset + image_view->base.u.buf.size); 37447ec681f3Smrg update_descriptor_state_image(ctx, shader, slot, res); 37457ec681f3Smrg zink_screen(ctx->base.screen)->context_invalidate_descriptor_state(ctx, shader, ZINK_DESCRIPTOR_TYPE_IMAGE, slot, 1); 37467ec681f3Smrg return res; 37477ec681f3Smrg} 
37487ec681f3Smrg 37497ec681f3Smrgstatic unsigned 37507ec681f3Smrgrebind_buffer(struct zink_context *ctx, struct zink_resource *res, uint32_t rebind_mask, const unsigned expected_num_rebinds) 37517ec681f3Smrg{ 37527ec681f3Smrg unsigned num_rebinds = 0; 37537ec681f3Smrg bool has_write = false; 37547ec681f3Smrg 37557ec681f3Smrg if (!zink_resource_has_binds(res)) 37567ec681f3Smrg return 0; 37577ec681f3Smrg 37587ec681f3Smrg assert(!res->bindless[1]); //TODO 37597ec681f3Smrg if ((rebind_mask & BITFIELD_BIT(TC_BINDING_STREAMOUT_BUFFER)) || (!rebind_mask && res->so_bind_count && ctx->num_so_targets)) { 37607ec681f3Smrg for (unsigned i = 0; i < ctx->num_so_targets; i++) { 37617ec681f3Smrg if (ctx->so_targets[i]) { 37627ec681f3Smrg struct zink_resource *so = zink_resource(ctx->so_targets[i]->buffer); 37637ec681f3Smrg if (so && so == res) { 37647ec681f3Smrg ctx->dirty_so_targets = true; 37657ec681f3Smrg num_rebinds++; 37667ec681f3Smrg } 37677ec681f3Smrg } 37687ec681f3Smrg } 37697ec681f3Smrg rebind_mask &= ~BITFIELD_BIT(TC_BINDING_STREAMOUT_BUFFER); 37707ec681f3Smrg } 37717ec681f3Smrg if (num_rebinds && expected_num_rebinds >= num_rebinds && !rebind_mask) 37727ec681f3Smrg goto end; 37737ec681f3Smrg 37747ec681f3Smrg if ((rebind_mask & BITFIELD_BIT(TC_BINDING_VERTEX_BUFFER)) || (!rebind_mask && res->vbo_bind_mask)) { 37757ec681f3Smrg u_foreach_bit(slot, res->vbo_bind_mask) { 37767ec681f3Smrg if (ctx->vertex_buffers[slot].buffer.resource != &res->base.b) //wrong context 37777ec681f3Smrg goto end; 37787ec681f3Smrg num_rebinds++; 37797ec681f3Smrg } 37807ec681f3Smrg rebind_mask &= ~BITFIELD_BIT(TC_BINDING_VERTEX_BUFFER); 37817ec681f3Smrg ctx->vertex_buffers_dirty = true; 37827ec681f3Smrg } 37837ec681f3Smrg if (num_rebinds && expected_num_rebinds >= num_rebinds && !rebind_mask) 37847ec681f3Smrg goto end; 37857ec681f3Smrg 37867ec681f3Smrg const uint32_t ubo_mask = rebind_mask ? 
37877ec681f3Smrg rebind_mask & BITFIELD_RANGE(TC_BINDING_UBO_VS, PIPE_SHADER_TYPES) : 37887ec681f3Smrg ((res->ubo_bind_count[0] ? BITFIELD_RANGE(TC_BINDING_UBO_VS, (PIPE_SHADER_TYPES - 1)) : 0) | 37897ec681f3Smrg (res->ubo_bind_count[1] ? BITFIELD_BIT(TC_BINDING_UBO_CS) : 0)); 37907ec681f3Smrg u_foreach_bit(shader, ubo_mask >> TC_BINDING_UBO_VS) { 37917ec681f3Smrg u_foreach_bit(slot, res->ubo_bind_mask[shader]) { 37927ec681f3Smrg if (&res->base.b != ctx->ubos[shader][slot].buffer) //wrong context 37937ec681f3Smrg goto end; 37947ec681f3Smrg rebind_ubo(ctx, shader, slot); 37957ec681f3Smrg num_rebinds++; 37967ec681f3Smrg } 37977ec681f3Smrg } 37987ec681f3Smrg rebind_mask &= ~BITFIELD_RANGE(TC_BINDING_UBO_VS, PIPE_SHADER_TYPES); 37997ec681f3Smrg if (num_rebinds && expected_num_rebinds >= num_rebinds && !rebind_mask) 38007ec681f3Smrg goto end; 38017ec681f3Smrg 38027ec681f3Smrg const unsigned ssbo_mask = rebind_mask ? 38037ec681f3Smrg rebind_mask & BITFIELD_RANGE(TC_BINDING_SSBO_VS, PIPE_SHADER_TYPES) : 38047ec681f3Smrg BITFIELD_RANGE(TC_BINDING_SSBO_VS, PIPE_SHADER_TYPES); 38057ec681f3Smrg u_foreach_bit(shader, ssbo_mask >> TC_BINDING_SSBO_VS) { 38067ec681f3Smrg u_foreach_bit(slot, res->ssbo_bind_mask[shader]) { 38077ec681f3Smrg struct pipe_shader_buffer *ssbo = &ctx->ssbos[shader][slot]; 38087ec681f3Smrg if (&res->base.b != ssbo->buffer) //wrong context 38097ec681f3Smrg goto end; 38107ec681f3Smrg rebind_ssbo(ctx, shader, slot); 38117ec681f3Smrg has_write |= (ctx->writable_ssbos[shader] & BITFIELD64_BIT(slot)) != 0; 38127ec681f3Smrg num_rebinds++; 38137ec681f3Smrg } 38147ec681f3Smrg } 38157ec681f3Smrg rebind_mask &= ~BITFIELD_RANGE(TC_BINDING_SSBO_VS, PIPE_SHADER_TYPES); 38167ec681f3Smrg if (num_rebinds && expected_num_rebinds >= num_rebinds && !rebind_mask) 38177ec681f3Smrg goto end; 38187ec681f3Smrg const unsigned sampler_mask = rebind_mask ? 
38197ec681f3Smrg rebind_mask & BITFIELD_RANGE(TC_BINDING_SAMPLERVIEW_VS, PIPE_SHADER_TYPES) : 38207ec681f3Smrg BITFIELD_RANGE(TC_BINDING_SAMPLERVIEW_VS, PIPE_SHADER_TYPES); 38217ec681f3Smrg u_foreach_bit(shader, sampler_mask >> TC_BINDING_SAMPLERVIEW_VS) { 38227ec681f3Smrg u_foreach_bit(slot, res->sampler_binds[shader]) { 38237ec681f3Smrg struct zink_sampler_view *sampler_view = zink_sampler_view(ctx->sampler_views[shader][slot]); 38247ec681f3Smrg if (&res->base.b != sampler_view->base.texture) //wrong context 38257ec681f3Smrg goto end; 38267ec681f3Smrg rebind_tbo(ctx, shader, slot); 38277ec681f3Smrg num_rebinds++; 38287ec681f3Smrg } 38297ec681f3Smrg } 38307ec681f3Smrg rebind_mask &= ~BITFIELD_RANGE(TC_BINDING_SAMPLERVIEW_VS, PIPE_SHADER_TYPES); 38317ec681f3Smrg if (num_rebinds && expected_num_rebinds >= num_rebinds && !rebind_mask) 38327ec681f3Smrg goto end; 38337ec681f3Smrg 38347ec681f3Smrg const unsigned image_mask = rebind_mask ? 38357ec681f3Smrg rebind_mask & BITFIELD_RANGE(TC_BINDING_IMAGE_VS, PIPE_SHADER_TYPES) : 38367ec681f3Smrg BITFIELD_RANGE(TC_BINDING_IMAGE_VS, PIPE_SHADER_TYPES); 38377ec681f3Smrg unsigned num_image_rebinds_remaining = rebind_mask ? 
expected_num_rebinds - num_rebinds : res->image_bind_count[0] + res->image_bind_count[1]; 38387ec681f3Smrg u_foreach_bit(shader, image_mask >> TC_BINDING_IMAGE_VS) { 38397ec681f3Smrg for (unsigned slot = 0; num_image_rebinds_remaining && slot < ctx->di.num_images[shader]; slot++) { 38407ec681f3Smrg struct zink_resource *cres = ctx->di.descriptor_res[ZINK_DESCRIPTOR_TYPE_IMAGE][shader][slot]; 38417ec681f3Smrg if (res != cres) 38427ec681f3Smrg continue; 38437ec681f3Smrg 38447ec681f3Smrg rebind_ibo(ctx, shader, slot); 38457ec681f3Smrg const struct zink_image_view *image_view = &ctx->image_views[shader][slot]; 38467ec681f3Smrg has_write |= (image_view->base.access & PIPE_IMAGE_ACCESS_WRITE) != 0; 38477ec681f3Smrg num_image_rebinds_remaining--; 38487ec681f3Smrg num_rebinds++; 38497ec681f3Smrg } 38507ec681f3Smrg } 38517ec681f3Smrgend: 38527ec681f3Smrg zink_batch_resource_usage_set(&ctx->batch, res, has_write); 38537ec681f3Smrg return num_rebinds; 38547ec681f3Smrg} 38557ec681f3Smrg 38567ec681f3Smrgstatic bool 38577ec681f3Smrgzink_resource_commit(struct pipe_context *pctx, struct pipe_resource *pres, unsigned level, struct pipe_box *box, bool commit) 38587ec681f3Smrg{ 38597ec681f3Smrg struct zink_context *ctx = zink_context(pctx); 38607ec681f3Smrg struct zink_resource *res = zink_resource(pres); 38617ec681f3Smrg struct zink_screen *screen = zink_screen(pctx->screen); 38627ec681f3Smrg 38637ec681f3Smrg /* if any current usage exists, flush the queue */ 38647ec681f3Smrg if (zink_resource_has_unflushed_usage(res)) 38657ec681f3Smrg zink_flush_queue(ctx); 38667ec681f3Smrg 38677ec681f3Smrg bool ret = zink_bo_commit(screen, res, box->x, box->width, commit); 38687ec681f3Smrg if (!ret) 38697ec681f3Smrg check_device_lost(ctx); 38707ec681f3Smrg 38717ec681f3Smrg return ret; 38727ec681f3Smrg} 38737ec681f3Smrg 38747ec681f3Smrgstatic void 38757ec681f3Smrgrebind_image(struct zink_context *ctx, struct zink_resource *res) 38767ec681f3Smrg{ 38777ec681f3Smrg zink_rebind_framebuffer(ctx, res); 
38787ec681f3Smrg if (!zink_resource_has_binds(res)) 38797ec681f3Smrg return; 38807ec681f3Smrg for (unsigned i = 0; i < PIPE_SHADER_TYPES; i++) { 38817ec681f3Smrg if (res->sampler_binds[i]) { 38827ec681f3Smrg for (unsigned j = 0; j < ctx->di.num_sampler_views[i]; j++) { 38837ec681f3Smrg struct zink_sampler_view *sv = zink_sampler_view(ctx->sampler_views[i][j]); 38847ec681f3Smrg if (sv && sv->base.texture == &res->base.b) { 38857ec681f3Smrg struct pipe_surface *psurf = &sv->image_view->base; 38867ec681f3Smrg zink_rebind_surface(ctx, &psurf); 38877ec681f3Smrg sv->image_view = zink_surface(psurf); 38887ec681f3Smrg zink_screen(ctx->base.screen)->context_invalidate_descriptor_state(ctx, i, ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW, j, 1); 38897ec681f3Smrg update_descriptor_state_sampler(ctx, i, j, res); 38907ec681f3Smrg } 38917ec681f3Smrg } 38927ec681f3Smrg } 38937ec681f3Smrg if (!res->image_bind_count[i == PIPE_SHADER_COMPUTE]) 38947ec681f3Smrg continue; 38957ec681f3Smrg for (unsigned j = 0; j < ctx->di.num_images[i]; j++) { 38967ec681f3Smrg if (zink_resource(ctx->image_views[i][j].base.resource) == res) { 38977ec681f3Smrg zink_screen(ctx->base.screen)->context_invalidate_descriptor_state(ctx, i, ZINK_DESCRIPTOR_TYPE_IMAGE, j, 1); 38987ec681f3Smrg update_descriptor_state_sampler(ctx, i, j, res); 38997ec681f3Smrg _mesa_set_add(ctx->need_barriers[i == PIPE_SHADER_COMPUTE], res); 39007ec681f3Smrg } 39017ec681f3Smrg } 39027ec681f3Smrg } 39037ec681f3Smrg} 39047ec681f3Smrg 39057ec681f3Smrgbool 39067ec681f3Smrgzink_resource_rebind(struct zink_context *ctx, struct zink_resource *res) 39077ec681f3Smrg{ 39087ec681f3Smrg if (res->base.b.target == PIPE_BUFFER) { 39097ec681f3Smrg /* force counter buffer reset */ 39107ec681f3Smrg res->so_valid = false; 39117ec681f3Smrg return rebind_buffer(ctx, res, 0, 0) == res->bind_count[0] + res->bind_count[1]; 39127ec681f3Smrg } 39137ec681f3Smrg rebind_image(ctx, res); 39147ec681f3Smrg return false; 39157ec681f3Smrg} 39167ec681f3Smrg 
/* Unconditionally rebind every buffer binding in the context (UBOs, sampler
 * buffers, SSBOs, shader images) and mark vertex/streamout state dirty.
 * Used when all descriptor state must be considered invalid.
 */
void
zink_rebind_all_buffers(struct zink_context *ctx)
{
   struct zink_batch *batch = &ctx->batch;
   ctx->vertex_buffers_dirty = ctx->gfx_pipeline_state.vertex_buffers_enabled_mask > 0;
   ctx->dirty_so_targets = ctx->num_so_targets > 0;
   if (ctx->num_so_targets)
      zink_resource_buffer_barrier(ctx, zink_resource(ctx->dummy_xfb_buffer),
                                   VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT, VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT);
   for (unsigned shader = PIPE_SHADER_VERTEX; shader < PIPE_SHADER_TYPES; shader++) {
      for (unsigned slot = 0; slot < ctx->di.num_ubos[shader]; slot++) {
         struct zink_resource *res = rebind_ubo(ctx, shader, slot);
         if (res)
            zink_batch_resource_usage_set(batch, res, false);
      }
      for (unsigned slot = 0; slot < ctx->di.num_sampler_views[shader]; slot++) {
         struct zink_resource *res = rebind_tbo(ctx, shader, slot);
         if (res)
            zink_batch_resource_usage_set(batch, res, false);
      }
      for (unsigned slot = 0; slot < ctx->di.num_ssbos[shader]; slot++) {
         struct zink_resource *res = rebind_ssbo(ctx, shader, slot);
         if (res)
            /* writable SSBO slots count as write usage for batch tracking */
            zink_batch_resource_usage_set(batch, res, (ctx->writable_ssbos[shader] & BITFIELD64_BIT(slot)) != 0);
      }
      for (unsigned slot = 0; slot < ctx->di.num_images[shader]; slot++) {
         struct zink_resource *res = rebind_ibo(ctx, shader, slot);
         if (res)
            zink_batch_resource_usage_set(batch, res, (ctx->image_views[shader][slot].base.access & PIPE_IMAGE_ACCESS_WRITE) != 0);
      }
   }
}

/* threaded_context replace_buffer_storage hook: point dst's buffer object at
 * src's (invalidation/rename), then rebind the expected bindings.  If fewer
 * bindings than expected were rebound, bump the rebind counter so TC knows
 * the rename did not fully apply on this context.
 */
static void
zink_context_replace_buffer_storage(struct pipe_context *pctx, struct pipe_resource *dst,
                                    struct pipe_resource *src, unsigned num_rebinds,
                                    uint32_t rebind_mask, uint32_t delete_buffer_id)
{
   struct zink_resource *d = zink_resource(dst);
   struct zink_resource *s = zink_resource(src);
   struct zink_context *ctx = zink_context(pctx);
   struct zink_screen *screen = zink_screen(pctx->screen);

   assert(d->internal_format == s->internal_format);
   assert(d->obj);
   assert(s->obj);
   util_idalloc_mt_free(&screen->buffer_ids, delete_buffer_id);
   zink_descriptor_set_refs_clear(&d->obj->desc_set_refs, d->obj);
   /* add a ref just like check_resource_for_batch_ref() would've */
   if (zink_resource_has_binds(d) && zink_resource_has_usage(d))
      zink_batch_reference_resource(&ctx->batch, d);
   /* don't be too creative */
   zink_resource_object_reference(screen, &d->obj, s->obj);
   /* force counter buffer reset */
   d->so_valid = false;
   if (num_rebinds && rebind_buffer(ctx, d, rebind_mask, num_rebinds) < num_rebinds)
      ctx->buffer_rebind_counter = p_atomic_inc_return(&screen->buffer_rebind_counter);
}

/* threaded_context is_resource_busy hook: a read maps only conflicts with
 * pending writes; a write conflicts with any pending access.
 */
static bool
zink_context_is_resource_busy(struct pipe_screen *pscreen, struct pipe_resource *pres, unsigned usage)
{
   struct zink_screen *screen = zink_screen(pscreen);
   struct zink_resource *res = zink_resource(pres);
   uint32_t check_usage = 0;
   if (usage & PIPE_MAP_READ)
      check_usage |= ZINK_RESOURCE_ACCESS_WRITE;
   if (usage & PIPE_MAP_WRITE)
      check_usage |= ZINK_RESOURCE_ACCESS_RW;
   return !zink_resource_usage_check_completion(screen, res, check_usage);
}

/* pipe_context::emit_string_marker hook: insert a VK_EXT_debug_utils label
 * into the current command buffer.  The incoming string is not guaranteed to
 * be nul-terminated, so it is copied (stack buffer for short strings, heap
 * for long ones) before being handed to Vulkan.
 */
static void
zink_emit_string_marker(struct pipe_context *pctx,
                        const char *string, int len)
{
   struct zink_screen *screen = zink_screen(pctx->screen);
   struct zink_batch *batch = &zink_context(pctx)->batch;

   /* make sure string is nul-terminated */
   char buf[512], *temp = NULL;
   if (len < ARRAY_SIZE(buf)) {
      memcpy(buf, string, len);
      buf[len] = '\0';
      string = buf;
   } else
      string = temp = strndup(string, len);

   VkDebugUtilsLabelEXT label = {
      VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT, NULL,
      string,
      { 0 }
   };
   screen->vk.CmdInsertDebugUtilsLabelEXT(batch->state->cmdbuf, &label);
   /* free(NULL) is a no-op, so this is safe for the stack-buffer path */
   free(temp);
}

/* Create a zink pipe_context: wires up all gallium entrypoints, allocates
 * dummy resources/descriptors, starts the first batch, and (unless compute-only
 * or threading is not preferred) wraps the context in a threaded_context.
 * Returns NULL on failure; partial initialization is torn down via
 * zink_context_destroy().
 */
struct pipe_context *
zink_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
{
   struct zink_screen *screen = zink_screen(pscreen);
   struct zink_context *ctx = rzalloc(NULL, struct zink_context);
   if (!ctx)
      goto fail;
   /* NOTE(review): also assigned again just before zink_start_batch() below —
    * the duplicate is harmless but redundant */
   ctx->have_timelines = screen->info.have_KHR_timeline_semaphore;

   /* everything starts dirty so the first draw/dispatch emits full state */
   ctx->pipeline_changed[0] = ctx->pipeline_changed[1] = true;
   ctx->gfx_pipeline_state.dirty = true;
   ctx->compute_pipeline_state.dirty = true;
   ctx->fb_changed = ctx->rp_changed = true;
   ctx->gfx_pipeline_state.gfx_prim_mode = PIPE_PRIM_MAX;


   zink_init_draw_functions(ctx, screen);
   zink_init_grid_functions(ctx);

   ctx->base.screen = pscreen;
   ctx->base.priv = priv;

   /* imageless framebuffers avoid per-surface framebuffer objects when supported */
   if (screen->info.have_KHR_imageless_framebuffer) {
      ctx->get_framebuffer = zink_get_framebuffer_imageless;
      ctx->init_framebuffer = zink_init_framebuffer_imageless;
   } else {
      ctx->get_framebuffer = zink_get_framebuffer;
      ctx->init_framebuffer = zink_init_framebuffer;
   }

   ctx->base.destroy = zink_context_destroy;
   ctx->base.get_device_reset_status = zink_get_device_reset_status;
   ctx->base.set_device_reset_callback = zink_set_device_reset_callback;

   zink_context_state_init(&ctx->base);

   ctx->base.create_sampler_state = zink_create_sampler_state;
   ctx->base.bind_sampler_states = zink_bind_sampler_states;
   ctx->base.delete_sampler_state = zink_delete_sampler_state;

   ctx->base.create_sampler_view = zink_create_sampler_view;
   ctx->base.set_sampler_views = zink_set_sampler_views;
   ctx->base.sampler_view_destroy = zink_sampler_view_destroy;
   ctx->base.get_sample_position = zink_get_sample_position;
   ctx->base.set_sample_locations = zink_set_sample_locations;

   zink_program_init(ctx);

   ctx->base.set_polygon_stipple = zink_set_polygon_stipple;
   ctx->base.set_vertex_buffers = zink_set_vertex_buffers;
   ctx->base.set_viewport_states = zink_set_viewport_states;
   ctx->base.set_scissor_states = zink_set_scissor_states;
   ctx->base.set_inlinable_constants = zink_set_inlinable_constants;
   ctx->base.set_constant_buffer = zink_set_constant_buffer;
   ctx->base.set_shader_buffers = zink_set_shader_buffers;
   ctx->base.set_shader_images = zink_set_shader_images;
   ctx->base.set_framebuffer_state = zink_set_framebuffer_state;
   ctx->base.set_stencil_ref = zink_set_stencil_ref;
   ctx->base.set_clip_state = zink_set_clip_state;
   ctx->base.set_blend_color = zink_set_blend_color;
   ctx->base.set_tess_state = zink_set_tess_state;
   ctx->base.set_patch_vertices = zink_set_patch_vertices;

   ctx->base.set_sample_mask = zink_set_sample_mask;
   ctx->gfx_pipeline_state.sample_mask = UINT32_MAX;

   ctx->base.clear = zink_clear;
   ctx->base.clear_texture = zink_clear_texture;
   ctx->base.clear_buffer = zink_clear_buffer;
   ctx->base.clear_render_target = zink_clear_render_target;
   ctx->base.clear_depth_stencil = zink_clear_depth_stencil;

   ctx->base.fence_server_sync = zink_fence_server_sync;
   ctx->base.flush = zink_flush;
   ctx->base.memory_barrier = zink_memory_barrier;
   ctx->base.texture_barrier = zink_texture_barrier;
   ctx->base.evaluate_depth_buffer = zink_evaluate_depth_buffer;

   ctx->base.resource_commit = zink_resource_commit;
   ctx->base.resource_copy_region = zink_resource_copy_region;
   ctx->base.blit = zink_blit;
   ctx->base.create_stream_output_target = zink_create_stream_output_target;
   ctx->base.stream_output_target_destroy = zink_stream_output_target_destroy;

   ctx->base.set_stream_output_targets = zink_set_stream_output_targets;

   ctx->base.flush_resource = zink_flush_resource;

   ctx->base.emit_string_marker = zink_emit_string_marker;

   zink_context_surface_init(&ctx->base);
   zink_context_resource_init(&ctx->base);
   zink_context_query_init(&ctx->base);

   /* barrier sets indexed [compute][double-buffer]; need_barriers points at the active set */
   _mesa_set_init(&ctx->update_barriers[0][0], ctx, _mesa_hash_pointer, _mesa_key_pointer_equal);
   _mesa_set_init(&ctx->update_barriers[1][0], ctx, _mesa_hash_pointer, _mesa_key_pointer_equal);
   _mesa_set_init(&ctx->update_barriers[0][1], ctx, _mesa_hash_pointer, _mesa_key_pointer_equal);
   _mesa_set_init(&ctx->update_barriers[1][1], ctx, _mesa_hash_pointer, _mesa_key_pointer_equal);
   ctx->need_barriers[0] = &ctx->update_barriers[0][0];
   ctx->need_barriers[1] = &ctx->update_barriers[1][0];

   util_dynarray_init(&ctx->free_batch_states, ctx);

   ctx->gfx_pipeline_state.have_EXT_extended_dynamic_state = screen->info.have_EXT_extended_dynamic_state;
   ctx->gfx_pipeline_state.have_EXT_extended_dynamic_state2 = screen->info.have_EXT_extended_dynamic_state2;

   slab_create_child(&ctx->transfer_pool, &screen->transfer_pool);
   slab_create_child(&ctx->transfer_pool_unsync, &screen->transfer_pool);

   ctx->base.stream_uploader = u_upload_create_default(&ctx->base);
   ctx->base.const_uploader = u_upload_create_default(&ctx->base);
   for (int i = 0; i < ARRAY_SIZE(ctx->fb_clears); i++)
      util_dynarray_init(&ctx->fb_clears[i].clears, ctx);

   ctx->blitter = util_blitter_create(&ctx->base);
   if (!ctx->blitter)
      goto fail;


   ctx->gfx_pipeline_state.shader_keys.last_vertex.key.vs_base.last_vertex_stage = true;
   ctx->last_vertex_stage_dirty = true;
   ctx->gfx_pipeline_state.shader_keys.key[PIPE_SHADER_VERTEX].size = sizeof(struct zink_vs_key_base);
   ctx->gfx_pipeline_state.shader_keys.key[PIPE_SHADER_TESS_EVAL].size = sizeof(struct zink_vs_key_base);
   ctx->gfx_pipeline_state.shader_keys.key[PIPE_SHADER_GEOMETRY].size = sizeof(struct zink_vs_key_base);
   ctx->gfx_pipeline_state.shader_keys.key[PIPE_SHADER_FRAGMENT].size = sizeof(struct zink_fs_key);
   _mesa_hash_table_init(&ctx->compute_program_cache, ctx, _mesa_hash_pointer, _mesa_key_pointer_equal);
   _mesa_hash_table_init(&ctx->framebuffer_cache, ctx, hash_framebuffer_imageless, equals_framebuffer_imageless);
   _mesa_set_init(&ctx->render_pass_state_cache, ctx, hash_rp_state, equals_rp_state);
   ctx->render_pass_cache = _mesa_hash_table_create(NULL,
                                                    hash_render_pass_state,
                                                    equals_render_pass_state);
   if (!ctx->render_pass_cache)
      goto fail;

   /* dummy resources used to back slots that have nothing bound */
   const uint8_t data[] = {0};
   ctx->dummy_vertex_buffer = pipe_buffer_create(&screen->base,
      PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_SHADER_IMAGE, PIPE_USAGE_IMMUTABLE, sizeof(data));
   if (!ctx->dummy_vertex_buffer)
      goto fail;
   ctx->dummy_xfb_buffer = pipe_buffer_create(&screen->base,
      PIPE_BIND_STREAM_OUTPUT, PIPE_USAGE_DEFAULT, sizeof(data));
   if (!ctx->dummy_xfb_buffer)
      goto fail;
   /* one dummy surface per supported sample count */
   for (unsigned i = 0; i < ARRAY_SIZE(ctx->dummy_surface); i++) {
      if (!(screen->info.props.limits.framebufferDepthSampleCounts & BITFIELD_BIT(i)))
         continue;
      ctx->dummy_surface[i] = zink_surface_create_null(ctx, PIPE_TEXTURE_2D, 1024, 1024, BITFIELD_BIT(i));
      if (!ctx->dummy_surface[i])
         goto fail;
   }
   VkBufferViewCreateInfo bvci = create_bvci(ctx, zink_resource(ctx->dummy_vertex_buffer), PIPE_FORMAT_R8_UNORM, 0, sizeof(data));
   ctx->dummy_bufferview = get_buffer_view(ctx, zink_resource(ctx->dummy_vertex_buffer), &bvci);
   if (!ctx->dummy_bufferview)
      goto fail;

   if (!zink_descriptor_layouts_init(ctx))
      goto fail;

   /* if the preferred descriptor mode fails, fall back to the lazy mode and retry */
   if (!screen->descriptors_init(ctx)) {
      zink_screen_init_descriptor_funcs(screen, true);
      if (!screen->descriptors_init(ctx))
         goto fail;
   }

   ctx->base.create_texture_handle = zink_create_texture_handle;
   ctx->base.delete_texture_handle = zink_delete_texture_handle;
   ctx->base.make_texture_handle_resident = zink_make_texture_handle_resident;
   ctx->base.create_image_handle = zink_create_image_handle;
   ctx->base.delete_image_handle = zink_delete_image_handle;
   ctx->base.make_image_handle_resident = zink_make_image_handle_resident;
   for (unsigned i = 0; i < 2; i++) {
      _mesa_hash_table_init(&ctx->di.bindless[i].img_handles, ctx, _mesa_hash_pointer, _mesa_key_pointer_equal);
      _mesa_hash_table_init(&ctx->di.bindless[i].tex_handles, ctx, _mesa_hash_pointer, _mesa_key_pointer_equal);

      /* allocate 1024 slots and reserve slot 0 */
      util_idalloc_init(&ctx->di.bindless[i].tex_slots, ZINK_MAX_BINDLESS_HANDLES);
      util_idalloc_alloc(&ctx->di.bindless[i].tex_slots);
      util_idalloc_init(&ctx->di.bindless[i].img_slots, ZINK_MAX_BINDLESS_HANDLES);
      util_idalloc_alloc(&ctx->di.bindless[i].img_slots);
      /* NOTE(review): buffer_infos is sized with sizeof(VkImageView) — presumably
       * these entries hold VkBufferView handles of the same size; verify */
      ctx->di.bindless[i].buffer_infos = malloc(sizeof(VkImageView) * ZINK_MAX_BINDLESS_HANDLES);
      ctx->di.bindless[i].img_infos = malloc(sizeof(VkDescriptorImageInfo) * ZINK_MAX_BINDLESS_HANDLES);
      util_dynarray_init(&ctx->di.bindless[i].updates, NULL);
      util_dynarray_init(&ctx->di.bindless[i].resident, NULL);
   }

   ctx->have_timelines = screen->info.have_KHR_timeline_semaphore;
   simple_mtx_init(&ctx->batch_mtx, mtx_plain);
   zink_start_batch(ctx, &ctx->batch);
   if (!ctx->batch.state)
      goto fail;

   pipe_buffer_write(&ctx->base, ctx->dummy_vertex_buffer, 0, sizeof(data), data);
   pipe_buffer_write(&ctx->base, ctx->dummy_xfb_buffer, 0, sizeof(data), data);

   for (unsigned i = 0; i < PIPE_SHADER_TYPES; i++) {
      /* need to update these based on screen config for null descriptors */
      for (unsigned j = 0; j < 32; j++) {
         update_descriptor_state_ubo(ctx, i, j, NULL);
         update_descriptor_state_sampler(ctx, i, j, NULL);
         update_descriptor_state_ssbo(ctx, i, j, NULL);
         update_descriptor_state_image(ctx, i, j, NULL);
      }
   }
   if (!screen->info.rb2_feats.nullDescriptor)
      ctx->di.fbfetch.imageView = zink_csurface(ctx->dummy_surface[0])->image_view;
   p_atomic_inc(&screen->base.num_contexts);

   zink_select_draw_vbo(ctx);
   zink_select_launch_grid(ctx);

   if (!(flags & PIPE_CONTEXT_PREFER_THREADED) || flags & PIPE_CONTEXT_COMPUTE_ONLY) {
      return &ctx->base;
   }

   struct threaded_context *tc = (struct threaded_context*)threaded_context_create(&ctx->base, &screen->transfer_pool,
                                                     zink_context_replace_buffer_storage,
                                                     &(struct threaded_context_options){
                                                        .create_fence = zink_create_tc_fence_for_tc,
                                                        .is_resource_busy = zink_context_is_resource_busy,
                                                        .driver_calls_flush_notify = true,
                                                        .unsynchronized_get_device_reset_status = true,
                                                     },
                                                     &ctx->tc);

   /* threaded_context_create returns the original context unchanged on failure */
   if (tc && (struct zink_context*)tc != ctx) {
      threaded_context_init_bytes_mapped_limit(tc, 4);
      ctx->base.set_context_param = zink_set_context_param;
   }

   return (struct pipe_context*)tc;

fail:
   if (ctx)
      zink_context_destroy(&ctx->base);
   return NULL;
}