17ec681f3Smrg/*
27ec681f3Smrg * Copyright © 2019 Red Hat.
37ec681f3Smrg *
47ec681f3Smrg * Permission is hereby granted, free of charge, to any person obtaining a
57ec681f3Smrg * copy of this software and associated documentation files (the "Software"),
67ec681f3Smrg * to deal in the Software without restriction, including without limitation
77ec681f3Smrg * the rights to use, copy, modify, merge, publish, distribute, sublicense,
87ec681f3Smrg * and/or sell copies of the Software, and to permit persons to whom the
97ec681f3Smrg * Software is furnished to do so, subject to the following conditions:
107ec681f3Smrg *
117ec681f3Smrg * The above copyright notice and this permission notice (including the next
127ec681f3Smrg * paragraph) shall be included in all copies or substantial portions of the
137ec681f3Smrg * Software.
147ec681f3Smrg *
157ec681f3Smrg * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
167ec681f3Smrg * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
177ec681f3Smrg * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
187ec681f3Smrg * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
197ec681f3Smrg * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
207ec681f3Smrg * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
217ec681f3Smrg * IN THE SOFTWARE.
227ec681f3Smrg */
237ec681f3Smrg
247ec681f3Smrg#include "lvp_private.h"
257ec681f3Smrg#include "vk_util.h"
267ec681f3Smrg#include "glsl_types.h"
277ec681f3Smrg#include "spirv/nir_spirv.h"
287ec681f3Smrg#include "nir/nir_builder.h"
297ec681f3Smrg#include "lvp_lower_vulkan_resource.h"
307ec681f3Smrg#include "pipe/p_state.h"
317ec681f3Smrg#include "pipe/p_context.h"
327ec681f3Smrg#include "nir/nir_xfb_info.h"
337ec681f3Smrg
347ec681f3Smrg#define SPIR_V_MAGIC_NUMBER 0x07230203
357ec681f3Smrg
/*
 * Duplicate a caller-provided array of `count` elements of `type` into the
 * ralloc context named `mem_ctx`, which must be in scope at the expansion
 * site.  On allocation failure this returns VK_ERROR_OUT_OF_HOST_MEMORY from
 * the *enclosing* function, so it may only be used inside functions that
 * return VkResult.
 *
 * Fix: parenthesize the `count` argument so expressions like `a + b` expand
 * correctly in both the allocation and the memcpy size computation.
 */
#define LVP_PIPELINE_DUP(dst, src, type, count) do {             \
      type *temp = ralloc_array(mem_ctx, type, (count));         \
      if (!temp) return VK_ERROR_OUT_OF_HOST_MEMORY;             \
      memcpy(temp, (src), sizeof(type) * (count));               \
      dst = temp;                                                \
   } while(0)
427ec681f3Smrg
437ec681f3SmrgVKAPI_ATTR void VKAPI_CALL lvp_DestroyPipeline(
447ec681f3Smrg   VkDevice                                    _device,
457ec681f3Smrg   VkPipeline                                  _pipeline,
467ec681f3Smrg   const VkAllocationCallbacks*                pAllocator)
477ec681f3Smrg{
487ec681f3Smrg   LVP_FROM_HANDLE(lvp_device, device, _device);
497ec681f3Smrg   LVP_FROM_HANDLE(lvp_pipeline, pipeline, _pipeline);
507ec681f3Smrg
517ec681f3Smrg   if (!_pipeline)
527ec681f3Smrg      return;
537ec681f3Smrg
547ec681f3Smrg   if (pipeline->shader_cso[PIPE_SHADER_VERTEX])
557ec681f3Smrg      device->queue.ctx->delete_vs_state(device->queue.ctx, pipeline->shader_cso[PIPE_SHADER_VERTEX]);
567ec681f3Smrg   if (pipeline->shader_cso[PIPE_SHADER_FRAGMENT])
577ec681f3Smrg      device->queue.ctx->delete_fs_state(device->queue.ctx, pipeline->shader_cso[PIPE_SHADER_FRAGMENT]);
587ec681f3Smrg   if (pipeline->shader_cso[PIPE_SHADER_GEOMETRY])
597ec681f3Smrg      device->queue.ctx->delete_gs_state(device->queue.ctx, pipeline->shader_cso[PIPE_SHADER_GEOMETRY]);
607ec681f3Smrg   if (pipeline->shader_cso[PIPE_SHADER_TESS_CTRL])
617ec681f3Smrg      device->queue.ctx->delete_tcs_state(device->queue.ctx, pipeline->shader_cso[PIPE_SHADER_TESS_CTRL]);
627ec681f3Smrg   if (pipeline->shader_cso[PIPE_SHADER_TESS_EVAL])
637ec681f3Smrg      device->queue.ctx->delete_tes_state(device->queue.ctx, pipeline->shader_cso[PIPE_SHADER_TESS_EVAL]);
647ec681f3Smrg   if (pipeline->shader_cso[PIPE_SHADER_COMPUTE])
657ec681f3Smrg      device->queue.ctx->delete_compute_state(device->queue.ctx, pipeline->shader_cso[PIPE_SHADER_COMPUTE]);
667ec681f3Smrg
677ec681f3Smrg   ralloc_free(pipeline->mem_ctx);
687ec681f3Smrg   vk_object_base_finish(&pipeline->base);
697ec681f3Smrg   vk_free2(&device->vk.alloc, pAllocator, pipeline);
707ec681f3Smrg}
717ec681f3Smrg
727ec681f3Smrgstatic VkResult
737ec681f3Smrgdeep_copy_shader_stage(void *mem_ctx,
747ec681f3Smrg                       struct VkPipelineShaderStageCreateInfo *dst,
757ec681f3Smrg                       const struct VkPipelineShaderStageCreateInfo *src)
767ec681f3Smrg{
777ec681f3Smrg   dst->sType = src->sType;
787ec681f3Smrg   dst->pNext = NULL;
797ec681f3Smrg   dst->flags = src->flags;
807ec681f3Smrg   dst->stage = src->stage;
817ec681f3Smrg   dst->module = src->module;
827ec681f3Smrg   dst->pName = src->pName;
837ec681f3Smrg   dst->pSpecializationInfo = NULL;
847ec681f3Smrg   if (src->pSpecializationInfo) {
857ec681f3Smrg      const VkSpecializationInfo *src_spec = src->pSpecializationInfo;
867ec681f3Smrg      VkSpecializationInfo *dst_spec = ralloc_size(mem_ctx, sizeof(VkSpecializationInfo) +
877ec681f3Smrg                                                   src_spec->mapEntryCount * sizeof(VkSpecializationMapEntry) +
887ec681f3Smrg                                                   src_spec->dataSize);
897ec681f3Smrg      VkSpecializationMapEntry *maps = (VkSpecializationMapEntry *)(dst_spec + 1);
907ec681f3Smrg      dst_spec->pMapEntries = maps;
917ec681f3Smrg      void *pdata = (void *)(dst_spec->pMapEntries + src_spec->mapEntryCount);
927ec681f3Smrg      dst_spec->pData = pdata;
937ec681f3Smrg
947ec681f3Smrg
957ec681f3Smrg      dst_spec->mapEntryCount = src_spec->mapEntryCount;
967ec681f3Smrg      dst_spec->dataSize = src_spec->dataSize;
977ec681f3Smrg      memcpy(pdata, src_spec->pData, src->pSpecializationInfo->dataSize);
987ec681f3Smrg      memcpy(maps, src_spec->pMapEntries, src_spec->mapEntryCount * sizeof(VkSpecializationMapEntry));
997ec681f3Smrg      dst->pSpecializationInfo = dst_spec;
1007ec681f3Smrg   }
1017ec681f3Smrg   return VK_SUCCESS;
1027ec681f3Smrg}
1037ec681f3Smrg
1047ec681f3Smrgstatic VkResult
1057ec681f3Smrgdeep_copy_vertex_input_state(void *mem_ctx,
1067ec681f3Smrg                             struct VkPipelineVertexInputStateCreateInfo *dst,
1077ec681f3Smrg                             const struct VkPipelineVertexInputStateCreateInfo *src)
1087ec681f3Smrg{
1097ec681f3Smrg   dst->sType = src->sType;
1107ec681f3Smrg   dst->pNext = NULL;
1117ec681f3Smrg   dst->flags = src->flags;
1127ec681f3Smrg   dst->vertexBindingDescriptionCount = src->vertexBindingDescriptionCount;
1137ec681f3Smrg
1147ec681f3Smrg   LVP_PIPELINE_DUP(dst->pVertexBindingDescriptions,
1157ec681f3Smrg                    src->pVertexBindingDescriptions,
1167ec681f3Smrg                    VkVertexInputBindingDescription,
1177ec681f3Smrg                    src->vertexBindingDescriptionCount);
1187ec681f3Smrg
1197ec681f3Smrg   dst->vertexAttributeDescriptionCount = src->vertexAttributeDescriptionCount;
1207ec681f3Smrg
1217ec681f3Smrg   LVP_PIPELINE_DUP(dst->pVertexAttributeDescriptions,
1227ec681f3Smrg                    src->pVertexAttributeDescriptions,
1237ec681f3Smrg                    VkVertexInputAttributeDescription,
1247ec681f3Smrg                    src->vertexAttributeDescriptionCount);
1257ec681f3Smrg
1267ec681f3Smrg   if (src->pNext) {
1277ec681f3Smrg      vk_foreach_struct(ext, src->pNext) {
1287ec681f3Smrg         switch (ext->sType) {
1297ec681f3Smrg         case VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT: {
1307ec681f3Smrg            VkPipelineVertexInputDivisorStateCreateInfoEXT *ext_src = (VkPipelineVertexInputDivisorStateCreateInfoEXT *)ext;
1317ec681f3Smrg            VkPipelineVertexInputDivisorStateCreateInfoEXT *ext_dst = ralloc(mem_ctx, VkPipelineVertexInputDivisorStateCreateInfoEXT);
1327ec681f3Smrg
1337ec681f3Smrg            ext_dst->sType = ext_src->sType;
1347ec681f3Smrg            ext_dst->vertexBindingDivisorCount = ext_src->vertexBindingDivisorCount;
1357ec681f3Smrg
1367ec681f3Smrg            LVP_PIPELINE_DUP(ext_dst->pVertexBindingDivisors,
1377ec681f3Smrg                             ext_src->pVertexBindingDivisors,
1387ec681f3Smrg                             VkVertexInputBindingDivisorDescriptionEXT,
1397ec681f3Smrg                             ext_src->vertexBindingDivisorCount);
1407ec681f3Smrg
1417ec681f3Smrg            dst->pNext = ext_dst;
1427ec681f3Smrg            break;
1437ec681f3Smrg         }
1447ec681f3Smrg         default:
1457ec681f3Smrg            break;
1467ec681f3Smrg         }
1477ec681f3Smrg      }
1487ec681f3Smrg   }
1497ec681f3Smrg   return VK_SUCCESS;
1507ec681f3Smrg}
1517ec681f3Smrg
1527ec681f3Smrgstatic bool
1537ec681f3Smrgdynamic_state_contains(const VkPipelineDynamicStateCreateInfo *src, VkDynamicState state)
1547ec681f3Smrg{
1557ec681f3Smrg   if (!src)
1567ec681f3Smrg      return false;
1577ec681f3Smrg
1587ec681f3Smrg   for (unsigned i = 0; i < src->dynamicStateCount; i++)
1597ec681f3Smrg      if (src->pDynamicStates[i] == state)
1607ec681f3Smrg         return true;
1617ec681f3Smrg   return false;
1627ec681f3Smrg}
1637ec681f3Smrg
1647ec681f3Smrgstatic VkResult
1657ec681f3Smrgdeep_copy_viewport_state(void *mem_ctx,
1667ec681f3Smrg                         const VkPipelineDynamicStateCreateInfo *dyn_state,
1677ec681f3Smrg                         VkPipelineViewportStateCreateInfo *dst,
1687ec681f3Smrg                         const VkPipelineViewportStateCreateInfo *src)
1697ec681f3Smrg{
1707ec681f3Smrg   dst->sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
1717ec681f3Smrg   dst->pNext = NULL;
1727ec681f3Smrg   dst->pViewports = NULL;
1737ec681f3Smrg   dst->pScissors = NULL;
1747ec681f3Smrg
1757ec681f3Smrg   if (!dynamic_state_contains(dyn_state, VK_DYNAMIC_STATE_VIEWPORT) &&
1767ec681f3Smrg       !dynamic_state_contains(dyn_state, VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT)) {
1777ec681f3Smrg      LVP_PIPELINE_DUP(dst->pViewports,
1787ec681f3Smrg                       src->pViewports,
1797ec681f3Smrg                       VkViewport,
1807ec681f3Smrg                       src->viewportCount);
1817ec681f3Smrg   }
1827ec681f3Smrg   if (!dynamic_state_contains(dyn_state, VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT))
1837ec681f3Smrg      dst->viewportCount = src->viewportCount;
1847ec681f3Smrg   else
1857ec681f3Smrg      dst->viewportCount = 0;
1867ec681f3Smrg
1877ec681f3Smrg   if (!dynamic_state_contains(dyn_state, VK_DYNAMIC_STATE_SCISSOR) &&
1887ec681f3Smrg       !dynamic_state_contains(dyn_state, VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT)) {
1897ec681f3Smrg      if (src->pScissors)
1907ec681f3Smrg         LVP_PIPELINE_DUP(dst->pScissors,
1917ec681f3Smrg                          src->pScissors,
1927ec681f3Smrg                          VkRect2D,
1937ec681f3Smrg                          src->scissorCount);
1947ec681f3Smrg   }
1957ec681f3Smrg   if (!dynamic_state_contains(dyn_state, VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT))
1967ec681f3Smrg      dst->scissorCount = src->scissorCount;
1977ec681f3Smrg   else
1987ec681f3Smrg      dst->scissorCount = 0;
1997ec681f3Smrg
2007ec681f3Smrg   return VK_SUCCESS;
2017ec681f3Smrg}
2027ec681f3Smrg
2037ec681f3Smrgstatic VkResult
2047ec681f3Smrgdeep_copy_color_blend_state(void *mem_ctx,
2057ec681f3Smrg                            VkPipelineColorBlendStateCreateInfo *dst,
2067ec681f3Smrg                            const VkPipelineColorBlendStateCreateInfo *src)
2077ec681f3Smrg{
2087ec681f3Smrg   dst->sType = src->sType;
2097ec681f3Smrg   dst->pNext = NULL;
2107ec681f3Smrg   dst->flags = src->flags;
2117ec681f3Smrg   dst->logicOpEnable = src->logicOpEnable;
2127ec681f3Smrg   dst->logicOp = src->logicOp;
2137ec681f3Smrg
2147ec681f3Smrg   LVP_PIPELINE_DUP(dst->pAttachments,
2157ec681f3Smrg                    src->pAttachments,
2167ec681f3Smrg                    VkPipelineColorBlendAttachmentState,
2177ec681f3Smrg                    src->attachmentCount);
2187ec681f3Smrg   dst->attachmentCount = src->attachmentCount;
2197ec681f3Smrg
2207ec681f3Smrg   memcpy(&dst->blendConstants, &src->blendConstants, sizeof(float) * 4);
2217ec681f3Smrg
2227ec681f3Smrg   return VK_SUCCESS;
2237ec681f3Smrg}
2247ec681f3Smrg
2257ec681f3Smrgstatic VkResult
2267ec681f3Smrgdeep_copy_dynamic_state(void *mem_ctx,
2277ec681f3Smrg                        VkPipelineDynamicStateCreateInfo *dst,
2287ec681f3Smrg                        const VkPipelineDynamicStateCreateInfo *src)
2297ec681f3Smrg{
2307ec681f3Smrg   dst->sType = src->sType;
2317ec681f3Smrg   dst->pNext = NULL;
2327ec681f3Smrg   dst->flags = src->flags;
2337ec681f3Smrg
2347ec681f3Smrg   LVP_PIPELINE_DUP(dst->pDynamicStates,
2357ec681f3Smrg                    src->pDynamicStates,
2367ec681f3Smrg                    VkDynamicState,
2377ec681f3Smrg                    src->dynamicStateCount);
2387ec681f3Smrg   dst->dynamicStateCount = src->dynamicStateCount;
2397ec681f3Smrg   return VK_SUCCESS;
2407ec681f3Smrg}
2417ec681f3Smrg
2427ec681f3Smrg
2437ec681f3Smrgstatic VkResult
2447ec681f3Smrgdeep_copy_rasterization_state(void *mem_ctx,
2457ec681f3Smrg                              VkPipelineRasterizationStateCreateInfo *dst,
2467ec681f3Smrg                              const VkPipelineRasterizationStateCreateInfo *src)
2477ec681f3Smrg{
2487ec681f3Smrg   memcpy(dst, src, sizeof(VkPipelineRasterizationStateCreateInfo));
2497ec681f3Smrg   dst->pNext = NULL;
2507ec681f3Smrg
2517ec681f3Smrg   if (src->pNext) {
2527ec681f3Smrg      vk_foreach_struct(ext, src->pNext) {
2537ec681f3Smrg         switch (ext->sType) {
2547ec681f3Smrg         case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT: {
2557ec681f3Smrg            VkPipelineRasterizationDepthClipStateCreateInfoEXT *ext_src = (VkPipelineRasterizationDepthClipStateCreateInfoEXT *)ext;
2567ec681f3Smrg            VkPipelineRasterizationDepthClipStateCreateInfoEXT *ext_dst = ralloc(mem_ctx, VkPipelineRasterizationDepthClipStateCreateInfoEXT);
2577ec681f3Smrg            ext_dst->sType = ext_src->sType;
2587ec681f3Smrg            ext_dst->flags = ext_src->flags;
2597ec681f3Smrg            ext_dst->depthClipEnable = ext_src->depthClipEnable;
2607ec681f3Smrg            dst->pNext = ext_dst;
2617ec681f3Smrg            break;
2627ec681f3Smrg         }
2637ec681f3Smrg         default:
2647ec681f3Smrg            break;
2657ec681f3Smrg         }
2667ec681f3Smrg      }
2677ec681f3Smrg   }
2687ec681f3Smrg   return VK_SUCCESS;
2697ec681f3Smrg}
2707ec681f3Smrg
2717ec681f3Smrgstatic VkResult
2727ec681f3Smrgdeep_copy_graphics_create_info(void *mem_ctx,
2737ec681f3Smrg                               VkGraphicsPipelineCreateInfo *dst,
2747ec681f3Smrg                               const VkGraphicsPipelineCreateInfo *src)
2757ec681f3Smrg{
2767ec681f3Smrg   int i;
2777ec681f3Smrg   VkResult result;
2787ec681f3Smrg   VkPipelineShaderStageCreateInfo *stages;
2797ec681f3Smrg   VkPipelineVertexInputStateCreateInfo *vertex_input;
2807ec681f3Smrg   VkPipelineRasterizationStateCreateInfo *rasterization_state;
2817ec681f3Smrg   LVP_FROM_HANDLE(lvp_render_pass, pass, src->renderPass);
2827ec681f3Smrg
2837ec681f3Smrg   dst->sType = src->sType;
2847ec681f3Smrg   dst->pNext = NULL;
2857ec681f3Smrg   dst->flags = src->flags;
2867ec681f3Smrg   dst->layout = src->layout;
2877ec681f3Smrg   dst->renderPass = src->renderPass;
2887ec681f3Smrg   dst->subpass = src->subpass;
2897ec681f3Smrg   dst->basePipelineHandle = src->basePipelineHandle;
2907ec681f3Smrg   dst->basePipelineIndex = src->basePipelineIndex;
2917ec681f3Smrg
2927ec681f3Smrg   /* pStages */
2937ec681f3Smrg   VkShaderStageFlags stages_present = 0;
2947ec681f3Smrg   dst->stageCount = src->stageCount;
2957ec681f3Smrg   stages = ralloc_array(mem_ctx, VkPipelineShaderStageCreateInfo, dst->stageCount);
2967ec681f3Smrg   for (i = 0 ; i < dst->stageCount; i++) {
2977ec681f3Smrg      result = deep_copy_shader_stage(mem_ctx, &stages[i], &src->pStages[i]);
2987ec681f3Smrg      if (result != VK_SUCCESS)
2997ec681f3Smrg         return result;
3007ec681f3Smrg      stages_present |= src->pStages[i].stage;
3017ec681f3Smrg   }
3027ec681f3Smrg   dst->pStages = stages;
3037ec681f3Smrg
3047ec681f3Smrg   /* pVertexInputState */
3057ec681f3Smrg   if (!dynamic_state_contains(src->pDynamicState, VK_DYNAMIC_STATE_VERTEX_INPUT_EXT)) {
3067ec681f3Smrg      vertex_input = ralloc(mem_ctx, VkPipelineVertexInputStateCreateInfo);
3077ec681f3Smrg      result = deep_copy_vertex_input_state(mem_ctx, vertex_input,
3087ec681f3Smrg                                            src->pVertexInputState);
3097ec681f3Smrg      if (result != VK_SUCCESS)
3107ec681f3Smrg         return result;
3117ec681f3Smrg      dst->pVertexInputState = vertex_input;
3127ec681f3Smrg   } else
3137ec681f3Smrg      dst->pVertexInputState = NULL;
3147ec681f3Smrg
3157ec681f3Smrg   /* pInputAssemblyState */
3167ec681f3Smrg   LVP_PIPELINE_DUP(dst->pInputAssemblyState,
3177ec681f3Smrg                    src->pInputAssemblyState,
3187ec681f3Smrg                    VkPipelineInputAssemblyStateCreateInfo,
3197ec681f3Smrg                    1);
3207ec681f3Smrg
3217ec681f3Smrg   /* pTessellationState */
3227ec681f3Smrg   if (src->pTessellationState &&
3237ec681f3Smrg      (stages_present & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)) ==
3247ec681f3Smrg                        (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)) {
3257ec681f3Smrg      LVP_PIPELINE_DUP(dst->pTessellationState,
3267ec681f3Smrg                       src->pTessellationState,
3277ec681f3Smrg                       VkPipelineTessellationStateCreateInfo,
3287ec681f3Smrg                       1);
3297ec681f3Smrg   }
3307ec681f3Smrg
3317ec681f3Smrg   /* pViewportState */
3327ec681f3Smrg   bool rasterization_disabled = !dynamic_state_contains(src->pDynamicState, VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE_EXT) &&
3337ec681f3Smrg                                 src->pRasterizationState->rasterizerDiscardEnable;
3347ec681f3Smrg   if (src->pViewportState && !rasterization_disabled) {
3357ec681f3Smrg      VkPipelineViewportStateCreateInfo *viewport_state;
3367ec681f3Smrg      viewport_state = ralloc(mem_ctx, VkPipelineViewportStateCreateInfo);
3377ec681f3Smrg      if (!viewport_state)
3387ec681f3Smrg         return VK_ERROR_OUT_OF_HOST_MEMORY;
3397ec681f3Smrg      deep_copy_viewport_state(mem_ctx, src->pDynamicState,
3407ec681f3Smrg			       viewport_state, src->pViewportState);
3417ec681f3Smrg      dst->pViewportState = viewport_state;
3427ec681f3Smrg   } else
3437ec681f3Smrg      dst->pViewportState = NULL;
3447ec681f3Smrg
3457ec681f3Smrg   /* pRasterizationState */
3467ec681f3Smrg   rasterization_state = ralloc(mem_ctx, VkPipelineRasterizationStateCreateInfo);
3477ec681f3Smrg   if (!rasterization_state)
3487ec681f3Smrg      return VK_ERROR_OUT_OF_HOST_MEMORY;
3497ec681f3Smrg   deep_copy_rasterization_state(mem_ctx, rasterization_state, src->pRasterizationState);
3507ec681f3Smrg   dst->pRasterizationState = rasterization_state;
3517ec681f3Smrg
3527ec681f3Smrg   /* pMultisampleState */
3537ec681f3Smrg   if (src->pMultisampleState && !rasterization_disabled) {
3547ec681f3Smrg      VkPipelineMultisampleStateCreateInfo*   ms_state;
3557ec681f3Smrg      ms_state = ralloc_size(mem_ctx, sizeof(VkPipelineMultisampleStateCreateInfo) + sizeof(VkSampleMask));
3567ec681f3Smrg      if (!ms_state)
3577ec681f3Smrg         return VK_ERROR_OUT_OF_HOST_MEMORY;
3587ec681f3Smrg      /* does samplemask need deep copy? */
3597ec681f3Smrg      memcpy(ms_state, src->pMultisampleState, sizeof(VkPipelineMultisampleStateCreateInfo));
3607ec681f3Smrg      if (src->pMultisampleState->pSampleMask) {
3617ec681f3Smrg         VkSampleMask *sample_mask = (VkSampleMask *)(ms_state + 1);
3627ec681f3Smrg         sample_mask[0] = src->pMultisampleState->pSampleMask[0];
3637ec681f3Smrg         ms_state->pSampleMask = sample_mask;
3647ec681f3Smrg      }
3657ec681f3Smrg      dst->pMultisampleState = ms_state;
3667ec681f3Smrg   } else
3677ec681f3Smrg      dst->pMultisampleState = NULL;
3687ec681f3Smrg
3697ec681f3Smrg   /* pDepthStencilState */
3707ec681f3Smrg   if (src->pDepthStencilState && !rasterization_disabled && pass->has_zs_attachment) {
3717ec681f3Smrg      LVP_PIPELINE_DUP(dst->pDepthStencilState,
3727ec681f3Smrg                       src->pDepthStencilState,
3737ec681f3Smrg                       VkPipelineDepthStencilStateCreateInfo,
3747ec681f3Smrg                       1);
3757ec681f3Smrg   } else
3767ec681f3Smrg      dst->pDepthStencilState = NULL;
3777ec681f3Smrg
3787ec681f3Smrg   /* pColorBlendState */
3797ec681f3Smrg   if (src->pColorBlendState && !rasterization_disabled && pass->has_color_attachment) {
3807ec681f3Smrg      VkPipelineColorBlendStateCreateInfo*    cb_state;
3817ec681f3Smrg
3827ec681f3Smrg      cb_state = ralloc(mem_ctx, VkPipelineColorBlendStateCreateInfo);
3837ec681f3Smrg      if (!cb_state)
3847ec681f3Smrg         return VK_ERROR_OUT_OF_HOST_MEMORY;
3857ec681f3Smrg      deep_copy_color_blend_state(mem_ctx, cb_state, src->pColorBlendState);
3867ec681f3Smrg      dst->pColorBlendState = cb_state;
3877ec681f3Smrg   } else
3887ec681f3Smrg      dst->pColorBlendState = NULL;
3897ec681f3Smrg
3907ec681f3Smrg   if (src->pDynamicState) {
3917ec681f3Smrg      VkPipelineDynamicStateCreateInfo*       dyn_state;
3927ec681f3Smrg
3937ec681f3Smrg      /* pDynamicState */
3947ec681f3Smrg      dyn_state = ralloc(mem_ctx, VkPipelineDynamicStateCreateInfo);
3957ec681f3Smrg      if (!dyn_state)
3967ec681f3Smrg         return VK_ERROR_OUT_OF_HOST_MEMORY;
3977ec681f3Smrg      deep_copy_dynamic_state(mem_ctx, dyn_state, src->pDynamicState);
3987ec681f3Smrg      dst->pDynamicState = dyn_state;
3997ec681f3Smrg   } else
4007ec681f3Smrg      dst->pDynamicState = NULL;
4017ec681f3Smrg
4027ec681f3Smrg   return VK_SUCCESS;
4037ec681f3Smrg}
4047ec681f3Smrg
4057ec681f3Smrgstatic VkResult
4067ec681f3Smrgdeep_copy_compute_create_info(void *mem_ctx,
4077ec681f3Smrg                              VkComputePipelineCreateInfo *dst,
4087ec681f3Smrg                              const VkComputePipelineCreateInfo *src)
4097ec681f3Smrg{
4107ec681f3Smrg   VkResult result;
4117ec681f3Smrg   dst->sType = src->sType;
4127ec681f3Smrg   dst->pNext = NULL;
4137ec681f3Smrg   dst->flags = src->flags;
4147ec681f3Smrg   dst->layout = src->layout;
4157ec681f3Smrg   dst->basePipelineHandle = src->basePipelineHandle;
4167ec681f3Smrg   dst->basePipelineIndex = src->basePipelineIndex;
4177ec681f3Smrg
4187ec681f3Smrg   result = deep_copy_shader_stage(mem_ctx, &dst->stage, &src->stage);
4197ec681f3Smrg   if (result != VK_SUCCESS)
4207ec681f3Smrg      return result;
4217ec681f3Smrg   return VK_SUCCESS;
4227ec681f3Smrg}
4237ec681f3Smrg
4247ec681f3Smrgstatic inline unsigned
4257ec681f3Smrgst_shader_stage_to_ptarget(gl_shader_stage stage)
4267ec681f3Smrg{
4277ec681f3Smrg   switch (stage) {
4287ec681f3Smrg   case MESA_SHADER_VERTEX:
4297ec681f3Smrg      return PIPE_SHADER_VERTEX;
4307ec681f3Smrg   case MESA_SHADER_FRAGMENT:
4317ec681f3Smrg      return PIPE_SHADER_FRAGMENT;
4327ec681f3Smrg   case MESA_SHADER_GEOMETRY:
4337ec681f3Smrg      return PIPE_SHADER_GEOMETRY;
4347ec681f3Smrg   case MESA_SHADER_TESS_CTRL:
4357ec681f3Smrg      return PIPE_SHADER_TESS_CTRL;
4367ec681f3Smrg   case MESA_SHADER_TESS_EVAL:
4377ec681f3Smrg      return PIPE_SHADER_TESS_EVAL;
4387ec681f3Smrg   case MESA_SHADER_COMPUTE:
4397ec681f3Smrg      return PIPE_SHADER_COMPUTE;
4407ec681f3Smrg   default:
4417ec681f3Smrg      break;
4427ec681f3Smrg   }
4437ec681f3Smrg
4447ec681f3Smrg   assert(!"should not be reached");
4457ec681f3Smrg   return PIPE_SHADER_VERTEX;
4467ec681f3Smrg}
4477ec681f3Smrg
/* Size/alignment callback for lowering shared variables to explicit types:
 * booleans occupy 4 bytes, other scalars their bit size in bytes; a vector's
 * size is the component size times the element count, aligned to one
 * component. */
static void
shared_var_info(const struct glsl_type *type, unsigned *size, unsigned *align)
{
   assert(glsl_type_is_vector_or_scalar(type));

   unsigned comp_size;
   if (glsl_type_is_boolean(type))
      comp_size = 4;
   else
      comp_size = glsl_get_bit_size(type) / 8;

   *align = comp_size;
   *size = comp_size * glsl_get_vector_elements(type);
}
4597ec681f3Smrg
4607ec681f3Smrgstatic void
4617ec681f3Smrglvp_shader_compile_to_ir(struct lvp_pipeline *pipeline,
4627ec681f3Smrg                         struct vk_shader_module *module,
4637ec681f3Smrg                         const char *entrypoint_name,
4647ec681f3Smrg                         gl_shader_stage stage,
4657ec681f3Smrg                         const VkSpecializationInfo *spec_info)
4667ec681f3Smrg{
4677ec681f3Smrg   nir_shader *nir;
4687ec681f3Smrg   const nir_shader_compiler_options *drv_options = pipeline->device->pscreen->get_compiler_options(pipeline->device->pscreen, PIPE_SHADER_IR_NIR, st_shader_stage_to_ptarget(stage));
4697ec681f3Smrg   bool progress;
4707ec681f3Smrg   uint32_t *spirv = (uint32_t *) module->data;
4717ec681f3Smrg   assert(spirv[0] == SPIR_V_MAGIC_NUMBER);
4727ec681f3Smrg   assert(module->size % 4 == 0);
4737ec681f3Smrg
4747ec681f3Smrg   uint32_t num_spec_entries = 0;
4757ec681f3Smrg   struct nir_spirv_specialization *spec_entries =
4767ec681f3Smrg      vk_spec_info_to_nir_spirv(spec_info, &num_spec_entries);
4777ec681f3Smrg
4787ec681f3Smrg   struct lvp_device *pdevice = pipeline->device;
4797ec681f3Smrg   const struct spirv_to_nir_options spirv_options = {
4807ec681f3Smrg      .environment = NIR_SPIRV_VULKAN,
4817ec681f3Smrg      .caps = {
4827ec681f3Smrg         .float64 = (pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_DOUBLES) == 1),
4837ec681f3Smrg         .int16 = true,
4847ec681f3Smrg         .int64 = (pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_INT64) == 1),
4857ec681f3Smrg         .tessellation = true,
4867ec681f3Smrg         .float_controls = true,
4877ec681f3Smrg         .image_ms_array = true,
4887ec681f3Smrg         .image_read_without_format = true,
4897ec681f3Smrg         .image_write_without_format = true,
4907ec681f3Smrg         .storage_image_ms = true,
4917ec681f3Smrg         .geometry_streams = true,
4927ec681f3Smrg         .storage_8bit = true,
4937ec681f3Smrg         .storage_16bit = true,
4947ec681f3Smrg         .variable_pointers = true,
4957ec681f3Smrg         .stencil_export = true,
4967ec681f3Smrg         .post_depth_coverage = true,
4977ec681f3Smrg         .transform_feedback = true,
4987ec681f3Smrg         .device_group = true,
4997ec681f3Smrg         .draw_parameters = true,
5007ec681f3Smrg         .shader_viewport_index_layer = true,
5017ec681f3Smrg         .multiview = true,
5027ec681f3Smrg         .physical_storage_buffer_address = true,
5037ec681f3Smrg         .int64_atomics = true,
5047ec681f3Smrg         .subgroup_arithmetic = true,
5057ec681f3Smrg         .subgroup_basic = true,
5067ec681f3Smrg         .subgroup_ballot = true,
5077ec681f3Smrg         .subgroup_quad = true,
5087ec681f3Smrg         .subgroup_vote = true,
5097ec681f3Smrg         .int8 = true,
5107ec681f3Smrg         .float16 = true,
5117ec681f3Smrg      },
5127ec681f3Smrg      .ubo_addr_format = nir_address_format_32bit_index_offset,
5137ec681f3Smrg      .ssbo_addr_format = nir_address_format_32bit_index_offset,
5147ec681f3Smrg      .phys_ssbo_addr_format = nir_address_format_64bit_global,
5157ec681f3Smrg      .push_const_addr_format = nir_address_format_logical,
5167ec681f3Smrg      .shared_addr_format = nir_address_format_32bit_offset,
5177ec681f3Smrg   };
5187ec681f3Smrg
5197ec681f3Smrg   nir = spirv_to_nir(spirv, module->size / 4,
5207ec681f3Smrg                      spec_entries, num_spec_entries,
5217ec681f3Smrg                      stage, entrypoint_name, &spirv_options, drv_options);
5227ec681f3Smrg
5237ec681f3Smrg   if (!nir) {
5247ec681f3Smrg      free(spec_entries);
5257ec681f3Smrg      return;
5267ec681f3Smrg   }
5277ec681f3Smrg   nir_validate_shader(nir, NULL);
5287ec681f3Smrg
5297ec681f3Smrg   free(spec_entries);
5307ec681f3Smrg
5317ec681f3Smrg   const struct nir_lower_sysvals_to_varyings_options sysvals_to_varyings = {
5327ec681f3Smrg      .frag_coord = true,
5337ec681f3Smrg      .point_coord = true,
5347ec681f3Smrg   };
5357ec681f3Smrg   NIR_PASS_V(nir, nir_lower_sysvals_to_varyings, &sysvals_to_varyings);
5367ec681f3Smrg
5377ec681f3Smrg   NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
5387ec681f3Smrg   NIR_PASS_V(nir, nir_lower_returns);
5397ec681f3Smrg   NIR_PASS_V(nir, nir_inline_functions);
5407ec681f3Smrg   NIR_PASS_V(nir, nir_copy_prop);
5417ec681f3Smrg   NIR_PASS_V(nir, nir_opt_deref);
5427ec681f3Smrg
5437ec681f3Smrg   /* Pick off the single entrypoint that we want */
5447ec681f3Smrg   foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
5457ec681f3Smrg      if (!func->is_entrypoint)
5467ec681f3Smrg         exec_node_remove(&func->node);
5477ec681f3Smrg   }
5487ec681f3Smrg   assert(exec_list_length(&nir->functions) == 1);
5497ec681f3Smrg
5507ec681f3Smrg   NIR_PASS_V(nir, nir_lower_variable_initializers, ~0);
5517ec681f3Smrg   NIR_PASS_V(nir, nir_split_var_copies);
5527ec681f3Smrg   NIR_PASS_V(nir, nir_split_per_member_structs);
5537ec681f3Smrg
5547ec681f3Smrg   NIR_PASS_V(nir, nir_remove_dead_variables,
5557ec681f3Smrg              nir_var_shader_in | nir_var_shader_out | nir_var_system_value, NULL);
5567ec681f3Smrg
5577ec681f3Smrg   if (stage == MESA_SHADER_FRAGMENT)
5587ec681f3Smrg      lvp_lower_input_attachments(nir, false);
5597ec681f3Smrg   NIR_PASS_V(nir, nir_lower_system_values);
5607ec681f3Smrg   NIR_PASS_V(nir, nir_lower_compute_system_values, NULL);
5617ec681f3Smrg
5627ec681f3Smrg   NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);
5637ec681f3Smrg   NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_uniform, NULL);
5647ec681f3Smrg
5657ec681f3Smrg   lvp_lower_pipeline_layout(pipeline->device, pipeline->layout, nir);
5667ec681f3Smrg
5677ec681f3Smrg   NIR_PASS_V(nir, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(nir), true, true);
5687ec681f3Smrg   NIR_PASS_V(nir, nir_split_var_copies);
5697ec681f3Smrg   NIR_PASS_V(nir, nir_lower_global_vars_to_local);
5707ec681f3Smrg
5717ec681f3Smrg   NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_push_const,
5727ec681f3Smrg              nir_address_format_32bit_offset);
5737ec681f3Smrg
5747ec681f3Smrg   NIR_PASS_V(nir, nir_lower_explicit_io,
5757ec681f3Smrg              nir_var_mem_ubo | nir_var_mem_ssbo,
5767ec681f3Smrg              nir_address_format_32bit_index_offset);
5777ec681f3Smrg
5787ec681f3Smrg   NIR_PASS_V(nir, nir_lower_explicit_io,
5797ec681f3Smrg              nir_var_mem_global,
5807ec681f3Smrg              nir_address_format_64bit_global);
5817ec681f3Smrg
5827ec681f3Smrg   if (nir->info.stage == MESA_SHADER_COMPUTE) {
5837ec681f3Smrg      NIR_PASS_V(nir, nir_lower_vars_to_explicit_types, nir_var_mem_shared, shared_var_info);
5847ec681f3Smrg      NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_shared, nir_address_format_32bit_offset);
5857ec681f3Smrg   }
5867ec681f3Smrg
5877ec681f3Smrg   NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_shader_temp, NULL);
5887ec681f3Smrg
5897ec681f3Smrg   if (nir->info.stage == MESA_SHADER_VERTEX ||
5907ec681f3Smrg       nir->info.stage == MESA_SHADER_GEOMETRY) {
5917ec681f3Smrg      NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);
5927ec681f3Smrg   } else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
5937ec681f3Smrg      NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, true);
5947ec681f3Smrg   }
5957ec681f3Smrg
5967ec681f3Smrg   do {
5977ec681f3Smrg      progress = false;
5987ec681f3Smrg
5997ec681f3Smrg      NIR_PASS(progress, nir, nir_lower_flrp, 32|64, true);
6007ec681f3Smrg      NIR_PASS(progress, nir, nir_split_array_vars, nir_var_function_temp);
6017ec681f3Smrg      NIR_PASS(progress, nir, nir_shrink_vec_array_vars, nir_var_function_temp);
6027ec681f3Smrg      NIR_PASS(progress, nir, nir_opt_deref);
6037ec681f3Smrg      NIR_PASS(progress, nir, nir_lower_vars_to_ssa);
6047ec681f3Smrg
6057ec681f3Smrg      NIR_PASS(progress, nir, nir_copy_prop);
6067ec681f3Smrg      NIR_PASS(progress, nir, nir_opt_dce);
6077ec681f3Smrg      NIR_PASS(progress, nir, nir_opt_peephole_select, 8, true, true);
6087ec681f3Smrg
6097ec681f3Smrg      NIR_PASS(progress, nir, nir_opt_algebraic);
6107ec681f3Smrg      NIR_PASS(progress, nir, nir_opt_constant_folding);
6117ec681f3Smrg
6127ec681f3Smrg      NIR_PASS(progress, nir, nir_opt_remove_phis);
6137ec681f3Smrg      bool trivial_continues = false;
6147ec681f3Smrg      NIR_PASS(trivial_continues, nir, nir_opt_trivial_continues);
6157ec681f3Smrg      progress |= trivial_continues;
6167ec681f3Smrg      if (trivial_continues) {
6177ec681f3Smrg         /* If nir_opt_trivial_continues makes progress, then we need to clean
6187ec681f3Smrg          * things up if we want any hope of nir_opt_if or nir_opt_loop_unroll
6197ec681f3Smrg          * to make progress.
6207ec681f3Smrg          */
6217ec681f3Smrg         NIR_PASS(progress, nir, nir_copy_prop);
6227ec681f3Smrg         NIR_PASS(progress, nir, nir_opt_dce);
6237ec681f3Smrg         NIR_PASS(progress, nir, nir_opt_remove_phis);
6247ec681f3Smrg      }
6257ec681f3Smrg      NIR_PASS(progress, nir, nir_opt_if, true);
6267ec681f3Smrg      NIR_PASS(progress, nir, nir_opt_dead_cf);
6277ec681f3Smrg      NIR_PASS(progress, nir, nir_opt_conditional_discard);
6287ec681f3Smrg      NIR_PASS(progress, nir, nir_opt_remove_phis);
6297ec681f3Smrg      NIR_PASS(progress, nir, nir_opt_cse);
6307ec681f3Smrg      NIR_PASS(progress, nir, nir_opt_undef);
6317ec681f3Smrg
6327ec681f3Smrg      NIR_PASS(progress, nir, nir_opt_deref);
6337ec681f3Smrg      NIR_PASS(progress, nir, nir_lower_alu_to_scalar, NULL, NULL);
6347ec681f3Smrg   } while (progress);
6357ec681f3Smrg
6367ec681f3Smrg   NIR_PASS_V(nir, nir_lower_var_copies);
6377ec681f3Smrg   NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_function_temp, NULL);
6387ec681f3Smrg   NIR_PASS_V(nir, nir_opt_dce);
6397ec681f3Smrg   nir_sweep(nir);
6407ec681f3Smrg
6417ec681f3Smrg   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
6427ec681f3Smrg
6437ec681f3Smrg   if (nir->info.stage != MESA_SHADER_VERTEX)
6447ec681f3Smrg      nir_assign_io_var_locations(nir, nir_var_shader_in, &nir->num_inputs, nir->info.stage);
6457ec681f3Smrg   else {
6467ec681f3Smrg      nir->num_inputs = util_last_bit64(nir->info.inputs_read);
6477ec681f3Smrg      nir_foreach_shader_in_variable(var, nir) {
6487ec681f3Smrg         var->data.driver_location = var->data.location - VERT_ATTRIB_GENERIC0;
6497ec681f3Smrg      }
6507ec681f3Smrg   }
6517ec681f3Smrg   nir_assign_io_var_locations(nir, nir_var_shader_out, &nir->num_outputs,
6527ec681f3Smrg                               nir->info.stage);
6537ec681f3Smrg   pipeline->pipeline_nir[stage] = nir;
6547ec681f3Smrg}
6557ec681f3Smrg
6567ec681f3Smrgstatic void fill_shader_prog(struct pipe_shader_state *state, gl_shader_stage stage, struct lvp_pipeline *pipeline)
6577ec681f3Smrg{
6587ec681f3Smrg   state->type = PIPE_SHADER_IR_NIR;
6597ec681f3Smrg   state->ir.nir = pipeline->pipeline_nir[stage];
6607ec681f3Smrg}
6617ec681f3Smrg
6627ec681f3Smrgstatic void
6637ec681f3Smrgmerge_tess_info(struct shader_info *tes_info,
6647ec681f3Smrg                const struct shader_info *tcs_info)
6657ec681f3Smrg{
6667ec681f3Smrg   /* The Vulkan 1.0.38 spec, section 21.1 Tessellator says:
6677ec681f3Smrg    *
6687ec681f3Smrg    *    "PointMode. Controls generation of points rather than triangles
6697ec681f3Smrg    *     or lines. This functionality defaults to disabled, and is
6707ec681f3Smrg    *     enabled if either shader stage includes the execution mode.
6717ec681f3Smrg    *
6727ec681f3Smrg    * and about Triangles, Quads, IsoLines, VertexOrderCw, VertexOrderCcw,
6737ec681f3Smrg    * PointMode, SpacingEqual, SpacingFractionalEven, SpacingFractionalOdd,
6747ec681f3Smrg    * and OutputVertices, it says:
6757ec681f3Smrg    *
6767ec681f3Smrg    *    "One mode must be set in at least one of the tessellation
6777ec681f3Smrg    *     shader stages."
6787ec681f3Smrg    *
6797ec681f3Smrg    * So, the fields can be set in either the TCS or TES, but they must
6807ec681f3Smrg    * agree if set in both.  Our backend looks at TES, so bitwise-or in
6817ec681f3Smrg    * the values from the TCS.
6827ec681f3Smrg    */
6837ec681f3Smrg   assert(tcs_info->tess.tcs_vertices_out == 0 ||
6847ec681f3Smrg          tes_info->tess.tcs_vertices_out == 0 ||
6857ec681f3Smrg          tcs_info->tess.tcs_vertices_out == tes_info->tess.tcs_vertices_out);
6867ec681f3Smrg   tes_info->tess.tcs_vertices_out |= tcs_info->tess.tcs_vertices_out;
6877ec681f3Smrg
6887ec681f3Smrg   assert(tcs_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
6897ec681f3Smrg          tes_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
6907ec681f3Smrg          tcs_info->tess.spacing == tes_info->tess.spacing);
6917ec681f3Smrg   tes_info->tess.spacing |= tcs_info->tess.spacing;
6927ec681f3Smrg
6937ec681f3Smrg   assert(tcs_info->tess.primitive_mode == 0 ||
6947ec681f3Smrg          tes_info->tess.primitive_mode == 0 ||
6957ec681f3Smrg          tcs_info->tess.primitive_mode == tes_info->tess.primitive_mode);
6967ec681f3Smrg   tes_info->tess.primitive_mode |= tcs_info->tess.primitive_mode;
6977ec681f3Smrg   tes_info->tess.ccw |= tcs_info->tess.ccw;
6987ec681f3Smrg   tes_info->tess.point_mode |= tcs_info->tess.point_mode;
6997ec681f3Smrg}
7007ec681f3Smrg
7017ec681f3Smrgstatic gl_shader_stage
7027ec681f3Smrglvp_shader_stage(VkShaderStageFlagBits stage)
7037ec681f3Smrg{
7047ec681f3Smrg   switch (stage) {
7057ec681f3Smrg   case VK_SHADER_STAGE_VERTEX_BIT:
7067ec681f3Smrg      return MESA_SHADER_VERTEX;
7077ec681f3Smrg   case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
7087ec681f3Smrg      return MESA_SHADER_TESS_CTRL;
7097ec681f3Smrg   case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
7107ec681f3Smrg      return MESA_SHADER_TESS_EVAL;
7117ec681f3Smrg   case VK_SHADER_STAGE_GEOMETRY_BIT:
7127ec681f3Smrg      return MESA_SHADER_GEOMETRY;
7137ec681f3Smrg   case VK_SHADER_STAGE_FRAGMENT_BIT:
7147ec681f3Smrg      return MESA_SHADER_FRAGMENT;
7157ec681f3Smrg   case VK_SHADER_STAGE_COMPUTE_BIT:
7167ec681f3Smrg      return MESA_SHADER_COMPUTE;
7177ec681f3Smrg   default:
7187ec681f3Smrg      unreachable("invalid VkShaderStageFlagBits");
7197ec681f3Smrg      return MESA_SHADER_NONE;
7207ec681f3Smrg   }
7217ec681f3Smrg}
7227ec681f3Smrg
/* Finalize a stage's NIR and bake it into a gallium CSO on the device's
 * queue context.  Compute stages become a pipe_compute_state; every other
 * stage becomes a pipe_shader_state, with stream-output (transform
 * feedback) info attached for stages that can feed the rasterizer.
 * Always returns VK_SUCCESS; CSO creation failures are not detected here.
 */
static VkResult
lvp_pipeline_compile(struct lvp_pipeline *pipeline,
                     gl_shader_stage stage)
{
   struct lvp_device *device = pipeline->device;
   /* Give the gallium screen a chance to run its driver-specific NIR
    * lowering/optimization before CSO creation. */
   device->physical_device->pscreen->finalize_nir(device->physical_device->pscreen, pipeline->pipeline_nir[stage]);
   if (stage == MESA_SHADER_COMPUTE) {
      struct pipe_compute_state shstate = {0};
      shstate.prog = (void *)pipeline->pipeline_nir[MESA_SHADER_COMPUTE];
      shstate.ir_type = PIPE_SHADER_IR_NIR;
      /* Shared-memory requirement comes straight from the NIR info. */
      shstate.req_local_mem = pipeline->pipeline_nir[MESA_SHADER_COMPUTE]->info.shared_size;
      pipeline->shader_cso[PIPE_SHADER_COMPUTE] = device->queue.ctx->create_compute_state(device->queue.ctx, &shstate);
   } else {
      struct pipe_shader_state shstate = {0};
      fill_shader_prog(&shstate, stage, pipeline);

      /* Only VS/GS/TES can be the last geometry stage, so only they can
       * carry transform-feedback outputs.  nir_gather_xfb_info returns
       * NULL when the shader declares no XFB outputs. */
      if (stage == MESA_SHADER_VERTEX ||
          stage == MESA_SHADER_GEOMETRY ||
          stage == MESA_SHADER_TESS_EVAL) {
         nir_xfb_info *xfb_info = nir_gather_xfb_info(pipeline->pipeline_nir[stage], NULL);
         if (xfb_info) {
            uint8_t output_mapping[VARYING_SLOT_TESS_MAX];
            memset(output_mapping, 0, sizeof(output_mapping));

            /* Build a varying-slot -> driver-location table so XFB
             * outputs can be addressed by register index below.  Compact
             * arrays (e.g. clip/cull distances) cover one slot per vec4. */
            nir_foreach_shader_out_variable(var, pipeline->pipeline_nir[stage]) {
               unsigned slots = var->data.compact ? DIV_ROUND_UP(glsl_get_length(var->type), 4)
                                                  : glsl_count_attribute_slots(var->type, false);
               for (unsigned i = 0; i < slots; i++)
                  output_mapping[var->data.location + i] = var->data.driver_location + i;
            }

            shstate.stream_output.num_outputs = xfb_info->output_count;
            /* Gallium stream-output strides and offsets are in dwords;
             * nir_xfb_info stores bytes, hence the / 4 conversions. */
            for (unsigned i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
               if (xfb_info->buffers_written & (1 << i)) {
                  shstate.stream_output.stride[i] = xfb_info->buffers[i].stride / 4;
               }
            }
            for (unsigned i = 0; i < xfb_info->output_count; i++) {
               shstate.stream_output.output[i].output_buffer = xfb_info->outputs[i].buffer;
               shstate.stream_output.output[i].dst_offset = xfb_info->outputs[i].offset / 4;
               shstate.stream_output.output[i].register_index = output_mapping[xfb_info->outputs[i].location];
               shstate.stream_output.output[i].num_components = util_bitcount(xfb_info->outputs[i].component_mask);
               shstate.stream_output.output[i].start_component = ffs(xfb_info->outputs[i].component_mask) - 1;
               shstate.stream_output.output[i].stream = xfb_info->buffer_to_stream[xfb_info->outputs[i].buffer];
            }

            ralloc_free(xfb_info);
         }
      }

      /* Hand the finished state to the matching gallium create hook. */
      switch (stage) {
      case MESA_SHADER_FRAGMENT:
         pipeline->shader_cso[PIPE_SHADER_FRAGMENT] = device->queue.ctx->create_fs_state(device->queue.ctx, &shstate);
         break;
      case MESA_SHADER_VERTEX:
         pipeline->shader_cso[PIPE_SHADER_VERTEX] = device->queue.ctx->create_vs_state(device->queue.ctx, &shstate);
         break;
      case MESA_SHADER_GEOMETRY:
         pipeline->shader_cso[PIPE_SHADER_GEOMETRY] = device->queue.ctx->create_gs_state(device->queue.ctx, &shstate);
         break;
      case MESA_SHADER_TESS_CTRL:
         pipeline->shader_cso[PIPE_SHADER_TESS_CTRL] = device->queue.ctx->create_tcs_state(device->queue.ctx, &shstate);
         break;
      case MESA_SHADER_TESS_EVAL:
         pipeline->shader_cso[PIPE_SHADER_TESS_EVAL] = device->queue.ctx->create_tes_state(device->queue.ctx, &shstate);
         break;
      default:
         unreachable("illegal shader");
         break;
      }
   }
   return VK_SUCCESS;
}
7967ec681f3Smrg
/* Fill in a freshly zero-allocated lvp_pipeline from a graphics
 * VkGraphicsPipelineCreateInfo: deep-copy the create info, capture the
 * rasterization-extension state (provoking vertex, line rasterization,
 * color-write-enable), compile each stage's SPIR-V to NIR and then to a
 * gallium CSO, and synthesize a dummy fragment shader when none is
 * supplied.  `cache` is currently unused.
 */
static VkResult
lvp_graphics_pipeline_init(struct lvp_pipeline *pipeline,
                           struct lvp_device *device,
                           struct lvp_pipeline_cache *cache,
                           const VkGraphicsPipelineCreateInfo *pCreateInfo,
                           const VkAllocationCallbacks *alloc)
{
   if (alloc == NULL)
      alloc = &device->vk.alloc;
   pipeline->device = device;
   pipeline->layout = lvp_pipeline_layout_from_handle(pCreateInfo->layout);
   pipeline->force_min_sample = false;

   /* All deep-copied create-info allocations hang off this ralloc
    * context so they can be released in one go at pipeline destruction. */
   pipeline->mem_ctx = ralloc_context(NULL);
   /* recreate createinfo */
   deep_copy_graphics_create_info(pipeline->mem_ctx, &pipeline->graphics_create_info, pCreateInfo);
   pipeline->is_compute_pipeline = false;

   /* VK_EXT_provoking_vertex: default is first-vertex when the struct is
    * absent. */
   const VkPipelineRasterizationProvokingVertexStateCreateInfoEXT *pv_state =
      vk_find_struct_const(pCreateInfo->pRasterizationState,
                           PIPELINE_RASTERIZATION_PROVOKING_VERTEX_STATE_CREATE_INFO_EXT);
   pipeline->provoking_vertex_last = pv_state && pv_state->provokingVertexMode == VK_PROVOKING_VERTEX_MODE_LAST_VERTEX_EXT;

   /* VK_EXT_line_rasterization: stipple, smooth and bresenham modes. */
   const VkPipelineRasterizationLineStateCreateInfoEXT *line_state =
      vk_find_struct_const(pCreateInfo->pRasterizationState,
                           PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT);
   if (line_state) {
      /* always draw bresenham if !smooth */
      pipeline->line_stipple_enable = line_state->stippledLineEnable;
      pipeline->line_smooth = line_state->lineRasterizationMode == VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT;
      pipeline->disable_multisample = line_state->lineRasterizationMode == VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT ||
                                      line_state->lineRasterizationMode == VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT;
      pipeline->line_rectangular = line_state->lineRasterizationMode != VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT;
      if (pipeline->line_stipple_enable) {
         if (!dynamic_state_contains(pipeline->graphics_create_info.pDynamicState, VK_DYNAMIC_STATE_LINE_STIPPLE_EXT)) {
            /* Factor is stored 0-based here. */
            pipeline->line_stipple_factor = line_state->lineStippleFactor - 1;
            pipeline->line_stipple_pattern = line_state->lineStipplePattern;
         } else {
            /* Stipple is dynamic: start with a solid pattern until
             * vkCmdSetLineStippleEXT is recorded. */
            pipeline->line_stipple_factor = 0;
            pipeline->line_stipple_pattern = UINT16_MAX;
         }
      }
   } else
      pipeline->line_rectangular = true;

   /* VK_EXT_color_write_enable: statically-disabled attachments are
    * folded into the (deep-copied) blend state as a zero write mask. */
   bool rasterization_disabled = !dynamic_state_contains(pipeline->graphics_create_info.pDynamicState, VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE_EXT) &&
      pipeline->graphics_create_info.pRasterizationState->rasterizerDiscardEnable;
   LVP_FROM_HANDLE(lvp_render_pass, pass, pipeline->graphics_create_info.renderPass);
   if (!dynamic_state_contains(pipeline->graphics_create_info.pDynamicState, VK_DYNAMIC_STATE_COLOR_WRITE_ENABLE_EXT) &&
       !rasterization_disabled && pass->has_color_attachment) {
      const VkPipelineColorWriteCreateInfoEXT *cw_state =
         vk_find_struct_const(pCreateInfo->pColorBlendState, PIPELINE_COLOR_WRITE_CREATE_INFO_EXT);
      if (cw_state) {
         for (unsigned i = 0; i < cw_state->attachmentCount; i++)
            if (!cw_state->pColorWriteEnables[i]) {
               /* Cast away const: we own the deep copy in mem_ctx. */
               VkPipelineColorBlendAttachmentState *att = (void*)&pipeline->graphics_create_info.pColorBlendState->pAttachments[i];
               att->colorWriteMask = 0;
            }
      }
   }


   /* First pass: translate every stage's SPIR-V to NIR. */
   for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
      VK_FROM_HANDLE(vk_shader_module, module,
                      pCreateInfo->pStages[i].module);
      gl_shader_stage stage = lvp_shader_stage(pCreateInfo->pStages[i].stage);
      lvp_shader_compile_to_ir(pipeline, module,
                               pCreateInfo->pStages[i].pName,
                               stage,
                               pCreateInfo->pStages[i].pSpecializationInfo);
      /* NOTE(review): this early return leaks mem_ctx and any NIR already
       * created for earlier stages -- the caller only vk_free2()s the
       * pipeline struct.  Confirm and add cleanup on this error path. */
      if (!pipeline->pipeline_nir[stage])
         return VK_ERROR_FEATURE_NOT_PRESENT;
   }

   /* Per-sample shading is forced when the FS uses sample-rate inputs. */
   if (pipeline->pipeline_nir[MESA_SHADER_FRAGMENT]) {
      if (pipeline->pipeline_nir[MESA_SHADER_FRAGMENT]->info.fs.uses_sample_qualifier ||
          BITSET_TEST(pipeline->pipeline_nir[MESA_SHADER_FRAGMENT]->info.system_values_read, SYSTEM_VALUE_SAMPLE_ID) ||
          BITSET_TEST(pipeline->pipeline_nir[MESA_SHADER_FRAGMENT]->info.system_values_read, SYSTEM_VALUE_SAMPLE_POS))
         pipeline->force_min_sample = true;
   }
   /* Tessellation: propagate TCS vertex count and merge execution modes
    * into the TES, which is what the backend consumes. */
   if (pipeline->pipeline_nir[MESA_SHADER_TESS_CTRL]) {
      nir_lower_patch_vertices(pipeline->pipeline_nir[MESA_SHADER_TESS_EVAL], pipeline->pipeline_nir[MESA_SHADER_TESS_CTRL]->info.tess.tcs_vertices_out, NULL);
      merge_tess_info(&pipeline->pipeline_nir[MESA_SHADER_TESS_EVAL]->info, &pipeline->pipeline_nir[MESA_SHADER_TESS_CTRL]->info);
      const VkPipelineTessellationDomainOriginStateCreateInfo *domain_origin_state =
         vk_find_struct_const(pCreateInfo->pTessellationState,
                              PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO);
      /* Vulkan's default upper-left domain origin flips winding relative
       * to GL's lower-left convention, so invert ccw for it. */
      if (!domain_origin_state || domain_origin_state->domainOrigin == VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT)
         pipeline->pipeline_nir[MESA_SHADER_TESS_EVAL]->info.tess.ccw = !pipeline->pipeline_nir[MESA_SHADER_TESS_EVAL]->info.tess.ccw;
   }

   pipeline->gs_output_lines = pipeline->pipeline_nir[MESA_SHADER_GEOMETRY] &&
                               pipeline->pipeline_nir[MESA_SHADER_GEOMETRY]->info.gs.output_primitive == GL_LINES;


   /* Second pass: turn each stage's NIR into a gallium CSO. */
   bool has_fragment_shader = false;
   for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
      gl_shader_stage stage = lvp_shader_stage(pCreateInfo->pStages[i].stage);
      lvp_pipeline_compile(pipeline, stage);
      if (stage == MESA_SHADER_FRAGMENT)
         has_fragment_shader = true;
   }

   if (has_fragment_shader == false) {
      /* create a dummy fragment shader for this pipeline. */
      nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_FRAGMENT, NULL,
                                                     "dummy_frag");

      pipeline->pipeline_nir[MESA_SHADER_FRAGMENT] = b.shader;
      struct pipe_shader_state shstate = {0};
      shstate.type = PIPE_SHADER_IR_NIR;
      shstate.ir.nir = pipeline->pipeline_nir[MESA_SHADER_FRAGMENT];
      pipeline->shader_cso[PIPE_SHADER_FRAGMENT] = device->queue.ctx->create_fs_state(device->queue.ctx, &shstate);
   }
   return VK_SUCCESS;
}
9127ec681f3Smrg
9137ec681f3Smrgstatic VkResult
9147ec681f3Smrglvp_graphics_pipeline_create(
9157ec681f3Smrg   VkDevice _device,
9167ec681f3Smrg   VkPipelineCache _cache,
9177ec681f3Smrg   const VkGraphicsPipelineCreateInfo *pCreateInfo,
9187ec681f3Smrg   const VkAllocationCallbacks *pAllocator,
9197ec681f3Smrg   VkPipeline *pPipeline)
9207ec681f3Smrg{
9217ec681f3Smrg   LVP_FROM_HANDLE(lvp_device, device, _device);
9227ec681f3Smrg   LVP_FROM_HANDLE(lvp_pipeline_cache, cache, _cache);
9237ec681f3Smrg   struct lvp_pipeline *pipeline;
9247ec681f3Smrg   VkResult result;
9257ec681f3Smrg
9267ec681f3Smrg   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);
9277ec681f3Smrg
9287ec681f3Smrg   pipeline = vk_zalloc2(&device->vk.alloc, pAllocator, sizeof(*pipeline), 8,
9297ec681f3Smrg                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
9307ec681f3Smrg   if (pipeline == NULL)
9317ec681f3Smrg      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
9327ec681f3Smrg
9337ec681f3Smrg   vk_object_base_init(&device->vk, &pipeline->base,
9347ec681f3Smrg                       VK_OBJECT_TYPE_PIPELINE);
9357ec681f3Smrg   result = lvp_graphics_pipeline_init(pipeline, device, cache, pCreateInfo,
9367ec681f3Smrg                                       pAllocator);
9377ec681f3Smrg   if (result != VK_SUCCESS) {
9387ec681f3Smrg      vk_free2(&device->vk.alloc, pAllocator, pipeline);
9397ec681f3Smrg      return result;
9407ec681f3Smrg   }
9417ec681f3Smrg
9427ec681f3Smrg   *pPipeline = lvp_pipeline_to_handle(pipeline);
9437ec681f3Smrg
9447ec681f3Smrg   return VK_SUCCESS;
9457ec681f3Smrg}
9467ec681f3Smrg
9477ec681f3SmrgVKAPI_ATTR VkResult VKAPI_CALL lvp_CreateGraphicsPipelines(
9487ec681f3Smrg   VkDevice                                    _device,
9497ec681f3Smrg   VkPipelineCache                             pipelineCache,
9507ec681f3Smrg   uint32_t                                    count,
9517ec681f3Smrg   const VkGraphicsPipelineCreateInfo*         pCreateInfos,
9527ec681f3Smrg   const VkAllocationCallbacks*                pAllocator,
9537ec681f3Smrg   VkPipeline*                                 pPipelines)
9547ec681f3Smrg{
9557ec681f3Smrg   VkResult result = VK_SUCCESS;
9567ec681f3Smrg   unsigned i = 0;
9577ec681f3Smrg
9587ec681f3Smrg   for (; i < count; i++) {
9597ec681f3Smrg      VkResult r;
9607ec681f3Smrg      r = lvp_graphics_pipeline_create(_device,
9617ec681f3Smrg                                       pipelineCache,
9627ec681f3Smrg                                       &pCreateInfos[i],
9637ec681f3Smrg                                       pAllocator, &pPipelines[i]);
9647ec681f3Smrg      if (r != VK_SUCCESS) {
9657ec681f3Smrg         result = r;
9667ec681f3Smrg         pPipelines[i] = VK_NULL_HANDLE;
9677ec681f3Smrg      }
9687ec681f3Smrg   }
9697ec681f3Smrg
9707ec681f3Smrg   return result;
9717ec681f3Smrg}
9727ec681f3Smrg
9737ec681f3Smrgstatic VkResult
9747ec681f3Smrglvp_compute_pipeline_init(struct lvp_pipeline *pipeline,
9757ec681f3Smrg                          struct lvp_device *device,
9767ec681f3Smrg                          struct lvp_pipeline_cache *cache,
9777ec681f3Smrg                          const VkComputePipelineCreateInfo *pCreateInfo,
9787ec681f3Smrg                          const VkAllocationCallbacks *alloc)
9797ec681f3Smrg{
9807ec681f3Smrg   VK_FROM_HANDLE(vk_shader_module, module,
9817ec681f3Smrg                   pCreateInfo->stage.module);
9827ec681f3Smrg   if (alloc == NULL)
9837ec681f3Smrg      alloc = &device->vk.alloc;
9847ec681f3Smrg   pipeline->device = device;
9857ec681f3Smrg   pipeline->layout = lvp_pipeline_layout_from_handle(pCreateInfo->layout);
9867ec681f3Smrg   pipeline->force_min_sample = false;
9877ec681f3Smrg
9887ec681f3Smrg   pipeline->mem_ctx = ralloc_context(NULL);
9897ec681f3Smrg   deep_copy_compute_create_info(pipeline->mem_ctx,
9907ec681f3Smrg                                 &pipeline->compute_create_info, pCreateInfo);
9917ec681f3Smrg   pipeline->is_compute_pipeline = true;
9927ec681f3Smrg
9937ec681f3Smrg   lvp_shader_compile_to_ir(pipeline, module,
9947ec681f3Smrg                            pCreateInfo->stage.pName,
9957ec681f3Smrg                            MESA_SHADER_COMPUTE,
9967ec681f3Smrg                            pCreateInfo->stage.pSpecializationInfo);
9977ec681f3Smrg   if (!pipeline->pipeline_nir[MESA_SHADER_COMPUTE])
9987ec681f3Smrg      return VK_ERROR_FEATURE_NOT_PRESENT;
9997ec681f3Smrg   lvp_pipeline_compile(pipeline, MESA_SHADER_COMPUTE);
10007ec681f3Smrg   return VK_SUCCESS;
10017ec681f3Smrg}
10027ec681f3Smrg
10037ec681f3Smrgstatic VkResult
10047ec681f3Smrglvp_compute_pipeline_create(
10057ec681f3Smrg   VkDevice _device,
10067ec681f3Smrg   VkPipelineCache _cache,
10077ec681f3Smrg   const VkComputePipelineCreateInfo *pCreateInfo,
10087ec681f3Smrg   const VkAllocationCallbacks *pAllocator,
10097ec681f3Smrg   VkPipeline *pPipeline)
10107ec681f3Smrg{
10117ec681f3Smrg   LVP_FROM_HANDLE(lvp_device, device, _device);
10127ec681f3Smrg   LVP_FROM_HANDLE(lvp_pipeline_cache, cache, _cache);
10137ec681f3Smrg   struct lvp_pipeline *pipeline;
10147ec681f3Smrg   VkResult result;
10157ec681f3Smrg
10167ec681f3Smrg   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO);
10177ec681f3Smrg
10187ec681f3Smrg   pipeline = vk_zalloc2(&device->vk.alloc, pAllocator, sizeof(*pipeline), 8,
10197ec681f3Smrg                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
10207ec681f3Smrg   if (pipeline == NULL)
10217ec681f3Smrg      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
10227ec681f3Smrg
10237ec681f3Smrg   vk_object_base_init(&device->vk, &pipeline->base,
10247ec681f3Smrg                       VK_OBJECT_TYPE_PIPELINE);
10257ec681f3Smrg   result = lvp_compute_pipeline_init(pipeline, device, cache, pCreateInfo,
10267ec681f3Smrg                                      pAllocator);
10277ec681f3Smrg   if (result != VK_SUCCESS) {
10287ec681f3Smrg      vk_free2(&device->vk.alloc, pAllocator, pipeline);
10297ec681f3Smrg      return result;
10307ec681f3Smrg   }
10317ec681f3Smrg
10327ec681f3Smrg   *pPipeline = lvp_pipeline_to_handle(pipeline);
10337ec681f3Smrg
10347ec681f3Smrg   return VK_SUCCESS;
10357ec681f3Smrg}
10367ec681f3Smrg
10377ec681f3SmrgVKAPI_ATTR VkResult VKAPI_CALL lvp_CreateComputePipelines(
10387ec681f3Smrg   VkDevice                                    _device,
10397ec681f3Smrg   VkPipelineCache                             pipelineCache,
10407ec681f3Smrg   uint32_t                                    count,
10417ec681f3Smrg   const VkComputePipelineCreateInfo*          pCreateInfos,
10427ec681f3Smrg   const VkAllocationCallbacks*                pAllocator,
10437ec681f3Smrg   VkPipeline*                                 pPipelines)
10447ec681f3Smrg{
10457ec681f3Smrg   VkResult result = VK_SUCCESS;
10467ec681f3Smrg   unsigned i = 0;
10477ec681f3Smrg
10487ec681f3Smrg   for (; i < count; i++) {
10497ec681f3Smrg      VkResult r;
10507ec681f3Smrg      r = lvp_compute_pipeline_create(_device,
10517ec681f3Smrg                                      pipelineCache,
10527ec681f3Smrg                                      &pCreateInfos[i],
10537ec681f3Smrg                                      pAllocator, &pPipelines[i]);
10547ec681f3Smrg      if (r != VK_SUCCESS) {
10557ec681f3Smrg         result = r;
10567ec681f3Smrg         pPipelines[i] = VK_NULL_HANDLE;
10577ec681f3Smrg      }
10587ec681f3Smrg   }
10597ec681f3Smrg
10607ec681f3Smrg   return result;
10617ec681f3Smrg}
1062