genX_pipeline.c revision 01e04c3f
1/* 2 * Copyright © 2015 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 */ 23 24#include "anv_private.h" 25 26#include "genxml/gen_macros.h" 27#include "genxml/genX_pack.h" 28 29#include "common/gen_l3_config.h" 30#include "common/gen_sample_positions.h" 31#include "vk_util.h" 32#include "vk_format_info.h" 33 34static uint32_t 35vertex_element_comp_control(enum isl_format format, unsigned comp) 36{ 37 uint8_t bits; 38 switch (comp) { 39 case 0: bits = isl_format_layouts[format].channels.r.bits; break; 40 case 1: bits = isl_format_layouts[format].channels.g.bits; break; 41 case 2: bits = isl_format_layouts[format].channels.b.bits; break; 42 case 3: bits = isl_format_layouts[format].channels.a.bits; break; 43 default: unreachable("Invalid component"); 44 } 45 46 /* 47 * Take in account hardware restrictions when dealing with 64-bit floats. 48 * 49 * From Broadwell spec, command reference structures, page 586: 50 * "When SourceElementFormat is set to one of the *64*_PASSTHRU formats, 51 * 64-bit components are stored * in the URB without any conversion. In 52 * this case, vertex elements must be written as 128 or 256 bits, with 53 * VFCOMP_STORE_0 being used to pad the output as required. E.g., if 54 * R64_PASSTHRU is used to copy a 64-bit Red component into the URB, 55 * Component 1 must be specified as VFCOMP_STORE_0 (with Components 2,3 56 * set to VFCOMP_NOSTORE) in order to output a 128-bit vertex element, or 57 * Components 1-3 must be specified as VFCOMP_STORE_0 in order to output 58 * a 256-bit vertex element. Likewise, use of R64G64B64_PASSTHRU requires 59 * Component 3 to be specified as VFCOMP_STORE_0 in order to output a 60 * 256-bit vertex element." 61 */ 62 if (bits) { 63 return VFCOMP_STORE_SRC; 64 } else if (comp >= 2 && 65 !isl_format_layouts[format].channels.b.bits && 66 isl_format_layouts[format].channels.r.type == ISL_RAW) { 67 /* When emitting 64-bit attributes, we need to write either 128 or 256 68 * bit chunks, using VFCOMP_NOSTORE when not writing the chunk, and 69 * VFCOMP_STORE_0 to pad the written chunk */ 70 return VFCOMP_NOSTORE; 71 } else if (comp < 3 || 72 isl_format_layouts[format].channels.r.type == ISL_RAW) { 73 /* Note we need to pad with value 0, not 1, due hardware restrictions 74 * (see comment above) */ 75 return VFCOMP_STORE_0; 76 } else if (isl_format_layouts[format].channels.r.type == ISL_UINT || 77 isl_format_layouts[format].channels.r.type == ISL_SINT) { 78 assert(comp == 3); 79 return VFCOMP_STORE_1_INT; 80 } else { 81 assert(comp == 3); 82 return VFCOMP_STORE_1_FP; 83 } 84} 85 86static void 87emit_vertex_input(struct anv_pipeline *pipeline, 88 const VkPipelineVertexInputStateCreateInfo *info) 89{ 90 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline); 91 92 /* Pull inputs_read out of the VS prog data */ 93 const uint64_t inputs_read = vs_prog_data->inputs_read; 94 const uint64_t double_inputs_read = 95 vs_prog_data->double_inputs_read & inputs_read; 96 assert((inputs_read & ((1 << VERT_ATTRIB_GENERIC0) - 1)) == 0); 97 const uint32_t elements = inputs_read >> VERT_ATTRIB_GENERIC0; 98 const uint32_t elements_double = double_inputs_read >> VERT_ATTRIB_GENERIC0; 99 const bool needs_svgs_elem = vs_prog_data->uses_vertexid || 100 vs_prog_data->uses_instanceid || 101 vs_prog_data->uses_firstvertex || 102 vs_prog_data->uses_baseinstance; 103 104 uint32_t elem_count = __builtin_popcount(elements) - 105 __builtin_popcount(elements_double) / 2; 106 107 const uint32_t total_elems = 108 elem_count + needs_svgs_elem + vs_prog_data->uses_drawid; 109 if (total_elems == 0) 110 return; 111 112 uint32_t *p; 113 114 const uint32_t num_dwords = 1 + total_elems * 2; 115 p = anv_batch_emitn(&pipeline->batch, num_dwords, 116 GENX(3DSTATE_VERTEX_ELEMENTS)); 117 if (!p) 118 return; 119 120 for (uint32_t i = 0; i < total_elems; i++) { 121 /* The SKL docs for VERTEX_ELEMENT_STATE say: 122 * 123 * "All elements must be valid from Element[0] to the last valid 124 * element. (I.e. if Element[2] is valid then Element[1] and 125 * Element[0] must also be valid)." 126 * 127 * The SKL docs for 3D_Vertex_Component_Control say: 128 * 129 * "Don't store this component. (Not valid for Component 0, but can 130 * be used for Component 1-3)." 131 * 132 * So we can't just leave a vertex element blank and hope for the best. 133 * We have to tell the VF hardware to put something in it; so we just 134 * store a bunch of zero. 135 * 136 * TODO: Compact vertex elements so we never end up with holes. 137 */ 138 struct GENX(VERTEX_ELEMENT_STATE) element = { 139 .Valid = true, 140 .Component0Control = VFCOMP_STORE_0, 141 .Component1Control = VFCOMP_STORE_0, 142 .Component2Control = VFCOMP_STORE_0, 143 .Component3Control = VFCOMP_STORE_0, 144 }; 145 GENX(VERTEX_ELEMENT_STATE_pack)(NULL, &p[1 + i * 2], &element); 146 } 147 148 for (uint32_t i = 0; i < info->vertexAttributeDescriptionCount; i++) { 149 const VkVertexInputAttributeDescription *desc = 150 &info->pVertexAttributeDescriptions[i]; 151 enum isl_format format = anv_get_isl_format(&pipeline->device->info, 152 desc->format, 153 VK_IMAGE_ASPECT_COLOR_BIT, 154 VK_IMAGE_TILING_LINEAR); 155 156 assert(desc->binding < MAX_VBS); 157 158 if ((elements & (1 << desc->location)) == 0) 159 continue; /* Binding unused */ 160 161 uint32_t slot = 162 __builtin_popcount(elements & ((1 << desc->location) - 1)) - 163 DIV_ROUND_UP(__builtin_popcount(elements_double & 164 ((1 << desc->location) -1)), 2); 165 166 struct GENX(VERTEX_ELEMENT_STATE) element = { 167 .VertexBufferIndex = desc->binding, 168 .Valid = true, 169 .SourceElementFormat = format, 170 .EdgeFlagEnable = false, 171 .SourceElementOffset = desc->offset, 172 .Component0Control = vertex_element_comp_control(format, 0), 173 .Component1Control = vertex_element_comp_control(format, 1), 174 .Component2Control = vertex_element_comp_control(format, 2), 175 .Component3Control = vertex_element_comp_control(format, 3), 176 }; 177 GENX(VERTEX_ELEMENT_STATE_pack)(NULL, &p[1 + slot * 2], &element); 178 179#if GEN_GEN >= 8 180 /* On Broadwell and later, we have a separate VF_INSTANCING packet 181 * that controls instancing. On Haswell and prior, that's part of 182 * VERTEX_BUFFER_STATE which we emit later. 183 */ 184 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_INSTANCING), vfi) { 185 vfi.InstancingEnable = pipeline->vb[desc->binding].instanced; 186 vfi.VertexElementIndex = slot; 187 vfi.InstanceDataStepRate = 188 pipeline->vb[desc->binding].instance_divisor; 189 } 190#endif 191 } 192 193 const uint32_t id_slot = elem_count; 194 if (needs_svgs_elem) { 195 /* From the Broadwell PRM for the 3D_Vertex_Component_Control enum: 196 * "Within a VERTEX_ELEMENT_STATE structure, if a Component 197 * Control field is set to something other than VFCOMP_STORE_SRC, 198 * no higher-numbered Component Control fields may be set to 199 * VFCOMP_STORE_SRC" 200 * 201 * This means, that if we have BaseInstance, we need BaseVertex as 202 * well. Just do all or nothing. 203 */ 204 uint32_t base_ctrl = (vs_prog_data->uses_firstvertex || 205 vs_prog_data->uses_baseinstance) ? 206 VFCOMP_STORE_SRC : VFCOMP_STORE_0; 207 208 struct GENX(VERTEX_ELEMENT_STATE) element = { 209 .VertexBufferIndex = ANV_SVGS_VB_INDEX, 210 .Valid = true, 211 .SourceElementFormat = ISL_FORMAT_R32G32_UINT, 212 .Component0Control = base_ctrl, 213 .Component1Control = base_ctrl, 214#if GEN_GEN >= 8 215 .Component2Control = VFCOMP_STORE_0, 216 .Component3Control = VFCOMP_STORE_0, 217#else 218 .Component2Control = VFCOMP_STORE_VID, 219 .Component3Control = VFCOMP_STORE_IID, 220#endif 221 }; 222 GENX(VERTEX_ELEMENT_STATE_pack)(NULL, &p[1 + id_slot * 2], &element); 223 } 224 225#if GEN_GEN >= 8 226 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_SGVS), sgvs) { 227 sgvs.VertexIDEnable = vs_prog_data->uses_vertexid; 228 sgvs.VertexIDComponentNumber = 2; 229 sgvs.VertexIDElementOffset = id_slot; 230 sgvs.InstanceIDEnable = vs_prog_data->uses_instanceid; 231 sgvs.InstanceIDComponentNumber = 3; 232 sgvs.InstanceIDElementOffset = id_slot; 233 } 234#endif 235 236 const uint32_t drawid_slot = elem_count + needs_svgs_elem; 237 if (vs_prog_data->uses_drawid) { 238 struct GENX(VERTEX_ELEMENT_STATE) element = { 239 .VertexBufferIndex = ANV_DRAWID_VB_INDEX, 240 .Valid = true, 241 .SourceElementFormat = ISL_FORMAT_R32_UINT, 242 .Component0Control = VFCOMP_STORE_SRC, 243 .Component1Control = VFCOMP_STORE_0, 244 .Component2Control = VFCOMP_STORE_0, 245 .Component3Control = VFCOMP_STORE_0, 246 }; 247 GENX(VERTEX_ELEMENT_STATE_pack)(NULL, 248 &p[1 + drawid_slot * 2], 249 &element); 250 251#if GEN_GEN >= 8 252 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_INSTANCING), vfi) { 253 vfi.VertexElementIndex = drawid_slot; 254 } 255#endif 256 } 257} 258 259void 260genX(emit_urb_setup)(struct anv_device *device, struct anv_batch *batch, 261 const struct gen_l3_config *l3_config, 262 VkShaderStageFlags active_stages, 263 const unsigned entry_size[4]) 264{ 265 const struct gen_device_info *devinfo = &device->info; 266#if GEN_IS_HASWELL 267 const unsigned push_constant_kb = devinfo->gt == 3 ? 32 : 16; 268#else 269 const unsigned push_constant_kb = GEN_GEN >= 8 ? 32 : 16; 270#endif 271 272 const unsigned urb_size_kb = gen_get_l3_config_urb_size(devinfo, l3_config); 273 274 unsigned entries[4]; 275 unsigned start[4]; 276 gen_get_urb_config(devinfo, 277 1024 * push_constant_kb, 1024 * urb_size_kb, 278 active_stages & 279 VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT, 280 active_stages & VK_SHADER_STAGE_GEOMETRY_BIT, 281 entry_size, entries, start); 282 283#if GEN_GEN == 7 && !GEN_IS_HASWELL 284 /* From the IVB PRM Vol. 2, Part 1, Section 3.2.1: 285 * 286 * "A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth stall 287 * needs to be sent just prior to any 3DSTATE_VS, 3DSTATE_URB_VS, 288 * 3DSTATE_CONSTANT_VS, 3DSTATE_BINDING_TABLE_POINTER_VS, 289 * 3DSTATE_SAMPLER_STATE_POINTER_VS command. Only one PIPE_CONTROL 290 * needs to be sent before any combination of VS associated 3DSTATE." 291 */ 292 anv_batch_emit(batch, GEN7_PIPE_CONTROL, pc) { 293 pc.DepthStallEnable = true; 294 pc.PostSyncOperation = WriteImmediateData; 295 pc.Address = (struct anv_address) { &device->workaround_bo, 0 }; 296 } 297#endif 298 299 for (int i = 0; i <= MESA_SHADER_GEOMETRY; i++) { 300 anv_batch_emit(batch, GENX(3DSTATE_URB_VS), urb) { 301 urb._3DCommandSubOpcode += i; 302 urb.VSURBStartingAddress = start[i]; 303 urb.VSURBEntryAllocationSize = entry_size[i] - 1; 304 urb.VSNumberofURBEntries = entries[i]; 305 } 306 } 307} 308 309static void 310emit_urb_setup(struct anv_pipeline *pipeline) 311{ 312 unsigned entry_size[4]; 313 for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) { 314 const struct brw_vue_prog_data *prog_data = 315 !anv_pipeline_has_stage(pipeline, i) ? NULL : 316 (const struct brw_vue_prog_data *) pipeline->shaders[i]->prog_data; 317 318 entry_size[i] = prog_data ? prog_data->urb_entry_size : 1; 319 } 320 321 genX(emit_urb_setup)(pipeline->device, &pipeline->batch, 322 pipeline->urb.l3_config, 323 pipeline->active_stages, entry_size); 324} 325 326static void 327emit_3dstate_sbe(struct anv_pipeline *pipeline) 328{ 329 const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline); 330 331 if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) { 332 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_SBE), sbe); 333#if GEN_GEN >= 8 334 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_SBE_SWIZ), sbe); 335#endif 336 return; 337 } 338 339 const struct brw_vue_map *fs_input_map = 340 &anv_pipeline_get_last_vue_prog_data(pipeline)->vue_map; 341 342 struct GENX(3DSTATE_SBE) sbe = { 343 GENX(3DSTATE_SBE_header), 344 .AttributeSwizzleEnable = true, 345 .PointSpriteTextureCoordinateOrigin = UPPERLEFT, 346 .NumberofSFOutputAttributes = wm_prog_data->num_varying_inputs, 347 .ConstantInterpolationEnable = wm_prog_data->flat_inputs, 348 }; 349 350#if GEN_GEN >= 9 351 for (unsigned i = 0; i < 32; i++) 352 sbe.AttributeActiveComponentFormat[i] = ACF_XYZW; 353#endif 354 355#if GEN_GEN >= 8 356 /* On Broadwell, they broke 3DSTATE_SBE into two packets */ 357 struct GENX(3DSTATE_SBE_SWIZ) swiz = { 358 GENX(3DSTATE_SBE_SWIZ_header), 359 }; 360#else 361# define swiz sbe 362#endif 363 364 /* Skip the VUE header and position slots by default */ 365 unsigned urb_entry_read_offset = 1; 366 int max_source_attr = 0; 367 for (int attr = 0; attr < VARYING_SLOT_MAX; attr++) { 368 int input_index = wm_prog_data->urb_setup[attr]; 369 370 if (input_index < 0) 371 continue; 372 373 /* gl_Layer is stored in the VUE header */ 374 if (attr == VARYING_SLOT_LAYER) { 375 urb_entry_read_offset = 0; 376 continue; 377 } 378 379 if (attr == VARYING_SLOT_PNTC) { 380 sbe.PointSpriteTextureCoordinateEnable = 1 << input_index; 381 continue; 382 } 383 384 const int slot = fs_input_map->varying_to_slot[attr]; 385 386 if (input_index >= 16) 387 continue; 388 389 if (slot == -1) { 390 /* This attribute does not exist in the VUE--that means that the 391 * vertex shader did not write to it. It could be that it's a 392 * regular varying read by the fragment shader but not written by 393 * the vertex shader or it's gl_PrimitiveID. In the first case the 394 * value is undefined, in the second it needs to be 395 * gl_PrimitiveID. 396 */ 397 swiz.Attribute[input_index].ConstantSource = PRIM_ID; 398 swiz.Attribute[input_index].ComponentOverrideX = true; 399 swiz.Attribute[input_index].ComponentOverrideY = true; 400 swiz.Attribute[input_index].ComponentOverrideZ = true; 401 swiz.Attribute[input_index].ComponentOverrideW = true; 402 } else { 403 /* We have to subtract two slots to accout for the URB entry output 404 * read offset in the VS and GS stages. 405 */ 406 const int source_attr = slot - 2 * urb_entry_read_offset; 407 assert(source_attr >= 0 && source_attr < 32); 408 max_source_attr = MAX2(max_source_attr, source_attr); 409 swiz.Attribute[input_index].SourceAttribute = source_attr; 410 } 411 } 412 413 sbe.VertexURBEntryReadOffset = urb_entry_read_offset; 414 sbe.VertexURBEntryReadLength = DIV_ROUND_UP(max_source_attr + 1, 2); 415#if GEN_GEN >= 8 416 sbe.ForceVertexURBEntryReadOffset = true; 417 sbe.ForceVertexURBEntryReadLength = true; 418#endif 419 420 uint32_t *dw = anv_batch_emit_dwords(&pipeline->batch, 421 GENX(3DSTATE_SBE_length)); 422 if (!dw) 423 return; 424 GENX(3DSTATE_SBE_pack)(&pipeline->batch, dw, &sbe); 425 426#if GEN_GEN >= 8 427 dw = anv_batch_emit_dwords(&pipeline->batch, GENX(3DSTATE_SBE_SWIZ_length)); 428 if (!dw) 429 return; 430 GENX(3DSTATE_SBE_SWIZ_pack)(&pipeline->batch, dw, &swiz); 431#endif 432} 433 434static const uint32_t vk_to_gen_cullmode[] = { 435 [VK_CULL_MODE_NONE] = CULLMODE_NONE, 436 [VK_CULL_MODE_FRONT_BIT] = CULLMODE_FRONT, 437 [VK_CULL_MODE_BACK_BIT] = CULLMODE_BACK, 438 [VK_CULL_MODE_FRONT_AND_BACK] = CULLMODE_BOTH 439}; 440 441static const uint32_t vk_to_gen_fillmode[] = { 442 [VK_POLYGON_MODE_FILL] = FILL_MODE_SOLID, 443 [VK_POLYGON_MODE_LINE] = FILL_MODE_WIREFRAME, 444 [VK_POLYGON_MODE_POINT] = FILL_MODE_POINT, 445}; 446 447static const uint32_t vk_to_gen_front_face[] = { 448 [VK_FRONT_FACE_COUNTER_CLOCKWISE] = 1, 449 [VK_FRONT_FACE_CLOCKWISE] = 0 450}; 451 452static void 453emit_rs_state(struct anv_pipeline *pipeline, 454 const VkPipelineRasterizationStateCreateInfo *rs_info, 455 const VkPipelineMultisampleStateCreateInfo *ms_info, 456 const struct anv_render_pass *pass, 457 const struct anv_subpass *subpass) 458{ 459 struct GENX(3DSTATE_SF) sf = { 460 GENX(3DSTATE_SF_header), 461 }; 462 463 sf.ViewportTransformEnable = true; 464 sf.StatisticsEnable = true; 465 sf.TriangleStripListProvokingVertexSelect = 0; 466 sf.LineStripListProvokingVertexSelect = 0; 467 sf.TriangleFanProvokingVertexSelect = 1; 468 469 const struct brw_vue_prog_data *last_vue_prog_data = 470 anv_pipeline_get_last_vue_prog_data(pipeline); 471 472 if (last_vue_prog_data->vue_map.slots_valid & VARYING_BIT_PSIZ) { 473 sf.PointWidthSource = Vertex; 474 } else { 475 sf.PointWidthSource = State; 476 sf.PointWidth = 1.0; 477 } 478 479#if GEN_GEN >= 8 480 struct GENX(3DSTATE_RASTER) raster = { 481 GENX(3DSTATE_RASTER_header), 482 }; 483#else 484# define raster sf 485#endif 486 487 /* For details on 3DSTATE_RASTER multisample state, see the BSpec table 488 * "Multisample Modes State". 489 */ 490#if GEN_GEN >= 8 491 raster.DXMultisampleRasterizationEnable = true; 492 /* NOTE: 3DSTATE_RASTER::ForcedSampleCount affects the BDW and SKL PMA fix 493 * computations. If we ever set this bit to a different value, they will 494 * need to be updated accordingly. 495 */ 496 raster.ForcedSampleCount = FSC_NUMRASTSAMPLES_0; 497 raster.ForceMultisampling = false; 498#else 499 raster.MultisampleRasterizationMode = 500 (ms_info && ms_info->rasterizationSamples > 1) ? 501 MSRASTMODE_ON_PATTERN : MSRASTMODE_OFF_PIXEL; 502#endif 503 504 raster.FrontWinding = vk_to_gen_front_face[rs_info->frontFace]; 505 raster.CullMode = vk_to_gen_cullmode[rs_info->cullMode]; 506 raster.FrontFaceFillMode = vk_to_gen_fillmode[rs_info->polygonMode]; 507 raster.BackFaceFillMode = vk_to_gen_fillmode[rs_info->polygonMode]; 508 raster.ScissorRectangleEnable = true; 509 510#if GEN_GEN >= 9 511 /* GEN9+ splits ViewportZClipTestEnable into near and far enable bits */ 512 raster.ViewportZFarClipTestEnable = !pipeline->depth_clamp_enable; 513 raster.ViewportZNearClipTestEnable = !pipeline->depth_clamp_enable; 514#elif GEN_GEN >= 8 515 raster.ViewportZClipTestEnable = !pipeline->depth_clamp_enable; 516#endif 517 518 raster.GlobalDepthOffsetEnableSolid = rs_info->depthBiasEnable; 519 raster.GlobalDepthOffsetEnableWireframe = rs_info->depthBiasEnable; 520 raster.GlobalDepthOffsetEnablePoint = rs_info->depthBiasEnable; 521 522#if GEN_GEN == 7 523 /* Gen7 requires that we provide the depth format in 3DSTATE_SF so that it 524 * can get the depth offsets correct. 525 */ 526 if (subpass->depth_stencil_attachment) { 527 VkFormat vk_format = 528 pass->attachments[subpass->depth_stencil_attachment->attachment].format; 529 assert(vk_format_is_depth_or_stencil(vk_format)); 530 if (vk_format_aspects(vk_format) & VK_IMAGE_ASPECT_DEPTH_BIT) { 531 enum isl_format isl_format = 532 anv_get_isl_format(&pipeline->device->info, vk_format, 533 VK_IMAGE_ASPECT_DEPTH_BIT, 534 VK_IMAGE_TILING_OPTIMAL); 535 sf.DepthBufferSurfaceFormat = 536 isl_format_get_depth_format(isl_format, false); 537 } 538 } 539#endif 540 541#if GEN_GEN >= 8 542 GENX(3DSTATE_SF_pack)(NULL, pipeline->gen8.sf, &sf); 543 GENX(3DSTATE_RASTER_pack)(NULL, pipeline->gen8.raster, &raster); 544#else 545# undef raster 546 GENX(3DSTATE_SF_pack)(NULL, &pipeline->gen7.sf, &sf); 547#endif 548} 549 550static void 551emit_ms_state(struct anv_pipeline *pipeline, 552 const VkPipelineMultisampleStateCreateInfo *info) 553{ 554 uint32_t samples = 1; 555 uint32_t log2_samples = 0; 556 557 /* From the Vulkan 1.0 spec: 558 * If pSampleMask is NULL, it is treated as if the mask has all bits 559 * enabled, i.e. no coverage is removed from fragments. 560 * 561 * 3DSTATE_SAMPLE_MASK.SampleMask is 16 bits. 562 */ 563#if GEN_GEN >= 8 564 uint32_t sample_mask = 0xffff; 565#else 566 uint32_t sample_mask = 0xff; 567#endif 568 569 if (info) { 570 samples = info->rasterizationSamples; 571 log2_samples = __builtin_ffs(samples) - 1; 572 } 573 574 if (info && info->pSampleMask) 575 sample_mask &= info->pSampleMask[0]; 576 577 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_MULTISAMPLE), ms) { 578 ms.NumberofMultisamples = log2_samples; 579 580 ms.PixelLocation = CENTER; 581#if GEN_GEN >= 8 582 /* The PRM says that this bit is valid only for DX9: 583 * 584 * SW can choose to set this bit only for DX9 API. DX10/OGL API's 585 * should not have any effect by setting or not setting this bit. 586 */ 587 ms.PixelPositionOffsetEnable = false; 588#else 589 590 switch (samples) { 591 case 1: 592 GEN_SAMPLE_POS_1X(ms.Sample); 593 break; 594 case 2: 595 GEN_SAMPLE_POS_2X(ms.Sample); 596 break; 597 case 4: 598 GEN_SAMPLE_POS_4X(ms.Sample); 599 break; 600 case 8: 601 GEN_SAMPLE_POS_8X(ms.Sample); 602 break; 603 default: 604 break; 605 } 606#endif 607 } 608 609 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_SAMPLE_MASK), sm) { 610 sm.SampleMask = sample_mask; 611 } 612} 613 614static const uint32_t vk_to_gen_logic_op[] = { 615 [VK_LOGIC_OP_COPY] = LOGICOP_COPY, 616 [VK_LOGIC_OP_CLEAR] = LOGICOP_CLEAR, 617 [VK_LOGIC_OP_AND] = LOGICOP_AND, 618 [VK_LOGIC_OP_AND_REVERSE] = LOGICOP_AND_REVERSE, 619 [VK_LOGIC_OP_AND_INVERTED] = LOGICOP_AND_INVERTED, 620 [VK_LOGIC_OP_NO_OP] = LOGICOP_NOOP, 621 [VK_LOGIC_OP_XOR] = LOGICOP_XOR, 622 [VK_LOGIC_OP_OR] = LOGICOP_OR, 623 [VK_LOGIC_OP_NOR] = LOGICOP_NOR, 624 [VK_LOGIC_OP_EQUIVALENT] = LOGICOP_EQUIV, 625 [VK_LOGIC_OP_INVERT] = LOGICOP_INVERT, 626 [VK_LOGIC_OP_OR_REVERSE] = LOGICOP_OR_REVERSE, 627 [VK_LOGIC_OP_COPY_INVERTED] = LOGICOP_COPY_INVERTED, 628 [VK_LOGIC_OP_OR_INVERTED] = LOGICOP_OR_INVERTED, 629 [VK_LOGIC_OP_NAND] = LOGICOP_NAND, 630 [VK_LOGIC_OP_SET] = LOGICOP_SET, 631}; 632 633static const uint32_t vk_to_gen_blend[] = { 634 [VK_BLEND_FACTOR_ZERO] = BLENDFACTOR_ZERO, 635 [VK_BLEND_FACTOR_ONE] = BLENDFACTOR_ONE, 636 [VK_BLEND_FACTOR_SRC_COLOR] = BLENDFACTOR_SRC_COLOR, 637 [VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR] = BLENDFACTOR_INV_SRC_COLOR, 638 [VK_BLEND_FACTOR_DST_COLOR] = BLENDFACTOR_DST_COLOR, 639 [VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR] = BLENDFACTOR_INV_DST_COLOR, 640 [VK_BLEND_FACTOR_SRC_ALPHA] = BLENDFACTOR_SRC_ALPHA, 641 [VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA] = BLENDFACTOR_INV_SRC_ALPHA, 642 [VK_BLEND_FACTOR_DST_ALPHA] = BLENDFACTOR_DST_ALPHA, 643 [VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA] = BLENDFACTOR_INV_DST_ALPHA, 644 [VK_BLEND_FACTOR_CONSTANT_COLOR] = BLENDFACTOR_CONST_COLOR, 645 [VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR]= BLENDFACTOR_INV_CONST_COLOR, 646 [VK_BLEND_FACTOR_CONSTANT_ALPHA] = BLENDFACTOR_CONST_ALPHA, 647 [VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA]= BLENDFACTOR_INV_CONST_ALPHA, 648 [VK_BLEND_FACTOR_SRC_ALPHA_SATURATE] = BLENDFACTOR_SRC_ALPHA_SATURATE, 649 [VK_BLEND_FACTOR_SRC1_COLOR] = BLENDFACTOR_SRC1_COLOR, 650 [VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR] = BLENDFACTOR_INV_SRC1_COLOR, 651 [VK_BLEND_FACTOR_SRC1_ALPHA] = BLENDFACTOR_SRC1_ALPHA, 652 [VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA] = BLENDFACTOR_INV_SRC1_ALPHA, 653}; 654 655static const uint32_t vk_to_gen_blend_op[] = { 656 [VK_BLEND_OP_ADD] = BLENDFUNCTION_ADD, 657 [VK_BLEND_OP_SUBTRACT] = BLENDFUNCTION_SUBTRACT, 658 [VK_BLEND_OP_REVERSE_SUBTRACT] = BLENDFUNCTION_REVERSE_SUBTRACT, 659 [VK_BLEND_OP_MIN] = BLENDFUNCTION_MIN, 660 [VK_BLEND_OP_MAX] = BLENDFUNCTION_MAX, 661}; 662 663static const uint32_t vk_to_gen_compare_op[] = { 664 [VK_COMPARE_OP_NEVER] = PREFILTEROPNEVER, 665 [VK_COMPARE_OP_LESS] = PREFILTEROPLESS, 666 [VK_COMPARE_OP_EQUAL] = PREFILTEROPEQUAL, 667 [VK_COMPARE_OP_LESS_OR_EQUAL] = PREFILTEROPLEQUAL, 668 [VK_COMPARE_OP_GREATER] = PREFILTEROPGREATER, 669 [VK_COMPARE_OP_NOT_EQUAL] = PREFILTEROPNOTEQUAL, 670 [VK_COMPARE_OP_GREATER_OR_EQUAL] = PREFILTEROPGEQUAL, 671 [VK_COMPARE_OP_ALWAYS] = PREFILTEROPALWAYS, 672}; 673 674static const uint32_t vk_to_gen_stencil_op[] = { 675 [VK_STENCIL_OP_KEEP] = STENCILOP_KEEP, 676 [VK_STENCIL_OP_ZERO] = STENCILOP_ZERO, 677 [VK_STENCIL_OP_REPLACE] = STENCILOP_REPLACE, 678 [VK_STENCIL_OP_INCREMENT_AND_CLAMP] = STENCILOP_INCRSAT, 679 [VK_STENCIL_OP_DECREMENT_AND_CLAMP] = STENCILOP_DECRSAT, 680 [VK_STENCIL_OP_INVERT] = STENCILOP_INVERT, 681 [VK_STENCIL_OP_INCREMENT_AND_WRAP] = STENCILOP_INCR, 682 [VK_STENCIL_OP_DECREMENT_AND_WRAP] = STENCILOP_DECR, 683}; 684 685/* This function sanitizes the VkStencilOpState by looking at the compare ops 686 * and trying to determine whether or not a given stencil op can ever actually 687 * occur. Stencil ops which can never occur are set to VK_STENCIL_OP_KEEP. 688 * This function returns true if, after sanitation, any of the stencil ops are 689 * set to something other than VK_STENCIL_OP_KEEP. 690 */ 691static bool 692sanitize_stencil_face(VkStencilOpState *face, 693 VkCompareOp depthCompareOp) 694{ 695 /* If compareOp is ALWAYS then the stencil test will never fail and failOp 696 * will never happen. Set failOp to KEEP in this case. 697 */ 698 if (face->compareOp == VK_COMPARE_OP_ALWAYS) 699 face->failOp = VK_STENCIL_OP_KEEP; 700 701 /* If compareOp is NEVER or depthCompareOp is NEVER then one of the depth 702 * or stencil tests will fail and passOp will never happen. 703 */ 704 if (face->compareOp == VK_COMPARE_OP_NEVER || 705 depthCompareOp == VK_COMPARE_OP_NEVER) 706 face->passOp = VK_STENCIL_OP_KEEP; 707 708 /* If compareOp is NEVER or depthCompareOp is ALWAYS then either the 709 * stencil test will fail or the depth test will pass. In either case, 710 * depthFailOp will never happen. 711 */ 712 if (face->compareOp == VK_COMPARE_OP_NEVER || 713 depthCompareOp == VK_COMPARE_OP_ALWAYS) 714 face->depthFailOp = VK_STENCIL_OP_KEEP; 715 716 return face->failOp != VK_STENCIL_OP_KEEP || 717 face->depthFailOp != VK_STENCIL_OP_KEEP || 718 face->passOp != VK_STENCIL_OP_KEEP; 719} 720 721/* Intel hardware is fairly sensitive to whether or not depth/stencil writes 722 * are enabled. In the presence of discards, it's fairly easy to get into the 723 * non-promoted case which means a fairly big performance hit. From the Iron 724 * Lake PRM, Vol 2, pt. 1, section 8.4.3.2, "Early Depth Test Cases": 725 * 726 * "Non-promoted depth (N) is active whenever the depth test can be done 727 * early but it cannot determine whether or not to write source depth to 728 * the depth buffer, therefore the depth write must be performed post pixel 729 * shader. This includes cases where the pixel shader can kill pixels, 730 * including via sampler chroma key, as well as cases where the alpha test 731 * function is enabled, which kills pixels based on a programmable alpha 732 * test. In this case, even if the depth test fails, the pixel cannot be 733 * killed if a stencil write is indicated. Whether or not the stencil write 734 * happens depends on whether or not the pixel is killed later. In these 735 * cases if stencil test fails and stencil writes are off, the pixels can 736 * also be killed early. If stencil writes are enabled, the pixels must be 737 * treated as Computed depth (described above)." 738 * 739 * The same thing as mentioned in the stencil case can happen in the depth 740 * case as well if it thinks it writes depth but, thanks to the depth test 741 * being GL_EQUAL, the write doesn't actually matter. A little extra work 742 * up-front to try and disable depth and stencil writes can make a big 743 * difference. 744 * 745 * Unfortunately, the way depth and stencil testing is specified, there are 746 * many case where, regardless of depth/stencil writes being enabled, nothing 747 * actually gets written due to some other bit of state being set. This 748 * function attempts to "sanitize" the depth stencil state and disable writes 749 * and sometimes even testing whenever possible. 750 */ 751static void 752sanitize_ds_state(VkPipelineDepthStencilStateCreateInfo *state, 753 bool *stencilWriteEnable, 754 VkImageAspectFlags ds_aspects) 755{ 756 *stencilWriteEnable = state->stencilTestEnable; 757 758 /* If the depth test is disabled, we won't be writing anything. Make sure we 759 * treat the test as always passing later on as well. 760 * 761 * Also, the Vulkan spec requires that if either depth or stencil is not 762 * present, the pipeline is to act as if the test silently passes. In that 763 * case we won't write either. 764 */ 765 if (!state->depthTestEnable || !(ds_aspects & VK_IMAGE_ASPECT_DEPTH_BIT)) { 766 state->depthWriteEnable = false; 767 state->depthCompareOp = VK_COMPARE_OP_ALWAYS; 768 } 769 770 if (!(ds_aspects & VK_IMAGE_ASPECT_STENCIL_BIT)) { 771 *stencilWriteEnable = false; 772 state->front.compareOp = VK_COMPARE_OP_ALWAYS; 773 state->back.compareOp = VK_COMPARE_OP_ALWAYS; 774 } 775 776 /* If the stencil test is enabled and always fails, then we will never get 777 * to the depth test so we can just disable the depth test entirely. 778 */ 779 if (state->stencilTestEnable && 780 state->front.compareOp == VK_COMPARE_OP_NEVER && 781 state->back.compareOp == VK_COMPARE_OP_NEVER) { 782 state->depthTestEnable = false; 783 state->depthWriteEnable = false; 784 } 785 786 /* If depthCompareOp is EQUAL then the value we would be writing to the 787 * depth buffer is the same as the value that's already there so there's no 788 * point in writing it. 789 */ 790 if (state->depthCompareOp == VK_COMPARE_OP_EQUAL) 791 state->depthWriteEnable = false; 792 793 /* If the stencil ops are such that we don't actually ever modify the 794 * stencil buffer, we should disable writes. 795 */ 796 if (!sanitize_stencil_face(&state->front, state->depthCompareOp) && 797 !sanitize_stencil_face(&state->back, state->depthCompareOp)) 798 *stencilWriteEnable = false; 799 800 /* If the depth test always passes and we never write out depth, that's the 801 * same as if the depth test is disabled entirely. 802 */ 803 if (state->depthCompareOp == VK_COMPARE_OP_ALWAYS && 804 !state->depthWriteEnable) 805 state->depthTestEnable = false; 806 807 /* If the stencil test always passes and we never write out stencil, that's 808 * the same as if the stencil test is disabled entirely. 809 */ 810 if (state->front.compareOp == VK_COMPARE_OP_ALWAYS && 811 state->back.compareOp == VK_COMPARE_OP_ALWAYS && 812 !*stencilWriteEnable) 813 state->stencilTestEnable = false; 814} 815 816static void 817emit_ds_state(struct anv_pipeline *pipeline, 818 const VkPipelineDepthStencilStateCreateInfo *pCreateInfo, 819 const struct anv_render_pass *pass, 820 const struct anv_subpass *subpass) 821{ 822#if GEN_GEN == 7 823# define depth_stencil_dw pipeline->gen7.depth_stencil_state 824#elif GEN_GEN == 8 825# define depth_stencil_dw pipeline->gen8.wm_depth_stencil 826#else 827# define depth_stencil_dw pipeline->gen9.wm_depth_stencil 828#endif 829 830 if (pCreateInfo == NULL) { 831 /* We're going to OR this together with the dynamic state. We need 832 * to make sure it's initialized to something useful. 833 */ 834 pipeline->writes_stencil = false; 835 pipeline->stencil_test_enable = false; 836 pipeline->writes_depth = false; 837 pipeline->depth_test_enable = false; 838 memset(depth_stencil_dw, 0, sizeof(depth_stencil_dw)); 839 return; 840 } 841 842 VkImageAspectFlags ds_aspects = 0; 843 if (subpass->depth_stencil_attachment) { 844 VkFormat depth_stencil_format = 845 pass->attachments[subpass->depth_stencil_attachment->attachment].format; 846 ds_aspects = vk_format_aspects(depth_stencil_format); 847 } 848 849 VkPipelineDepthStencilStateCreateInfo info = *pCreateInfo; 850 sanitize_ds_state(&info, &pipeline->writes_stencil, ds_aspects); 851 pipeline->stencil_test_enable = info.stencilTestEnable; 852 pipeline->writes_depth = info.depthWriteEnable; 853 pipeline->depth_test_enable = info.depthTestEnable; 854 855 /* VkBool32 depthBoundsTestEnable; // optional (depth_bounds_test) */ 856 857#if GEN_GEN <= 7 858 struct GENX(DEPTH_STENCIL_STATE) depth_stencil = { 859#else 860 struct GENX(3DSTATE_WM_DEPTH_STENCIL) depth_stencil = { 861#endif 862 .DepthTestEnable = info.depthTestEnable, 863 .DepthBufferWriteEnable = info.depthWriteEnable, 864 .DepthTestFunction = vk_to_gen_compare_op[info.depthCompareOp], 865 .DoubleSidedStencilEnable = true, 866 867 .StencilTestEnable = info.stencilTestEnable, 868 .StencilFailOp = vk_to_gen_stencil_op[info.front.failOp], 869 .StencilPassDepthPassOp = vk_to_gen_stencil_op[info.front.passOp], 870 .StencilPassDepthFailOp = vk_to_gen_stencil_op[info.front.depthFailOp], 871 .StencilTestFunction = vk_to_gen_compare_op[info.front.compareOp], 872 .BackfaceStencilFailOp = vk_to_gen_stencil_op[info.back.failOp], 873 .BackfaceStencilPassDepthPassOp = vk_to_gen_stencil_op[info.back.passOp], 874 .BackfaceStencilPassDepthFailOp =vk_to_gen_stencil_op[info.back.depthFailOp], 875 .BackfaceStencilTestFunction = vk_to_gen_compare_op[info.back.compareOp], 876 }; 877 878#if GEN_GEN <= 7 879 GENX(DEPTH_STENCIL_STATE_pack)(NULL, depth_stencil_dw, &depth_stencil); 880#else 881 GENX(3DSTATE_WM_DEPTH_STENCIL_pack)(NULL, depth_stencil_dw, &depth_stencil); 882#endif 883} 884 885MAYBE_UNUSED static bool 886is_dual_src_blend_factor(VkBlendFactor factor) 887{ 888 return factor == VK_BLEND_FACTOR_SRC1_COLOR || 889 factor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR || 890 factor == VK_BLEND_FACTOR_SRC1_ALPHA || 891 factor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA; 892} 893 894static void 895emit_cb_state(struct anv_pipeline *pipeline, 896 const VkPipelineColorBlendStateCreateInfo *info, 897 const VkPipelineMultisampleStateCreateInfo *ms_info) 898{ 899 struct anv_device *device = pipeline->device; 900 const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline); 901 902 struct GENX(BLEND_STATE) blend_state = { 903#if GEN_GEN >= 8 904 .AlphaToCoverageEnable = ms_info && ms_info->alphaToCoverageEnable, 905 .AlphaToOneEnable = ms_info && ms_info->alphaToOneEnable, 906#endif 907 }; 908 909 uint32_t surface_count = 0; 910 struct anv_pipeline_bind_map *map; 911 if (anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) { 912 map = &pipeline->shaders[MESA_SHADER_FRAGMENT]->bind_map; 913 surface_count = map->surface_count; 914 } 915 916 const uint32_t num_dwords = GENX(BLEND_STATE_length) + 917 GENX(BLEND_STATE_ENTRY_length) * surface_count; 918 pipeline->blend_state = 919 anv_state_pool_alloc(&device->dynamic_state_pool, num_dwords * 4, 64); 920 921 bool has_writeable_rt = false; 922 uint32_t *state_pos = pipeline->blend_state.map; 923 state_pos += GENX(BLEND_STATE_length); 924#if GEN_GEN >= 8 925 struct GENX(BLEND_STATE_ENTRY) bs0 = { 0 }; 926#endif 927 for (unsigned i = 0; i < surface_count; i++) { 928 struct anv_pipeline_binding *binding = &map->surface_to_descriptor[i]; 929 930 /* All color attachments are at the beginning of the binding table */ 931 if (binding->set != ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS) 932 break; 933 934 /* We can have at most 8 attachments */ 935 assert(i < 8); 936 937 if (info == NULL || binding->index >= info->attachmentCount) { 938 /* Default everything to disabled */ 939 struct GENX(BLEND_STATE_ENTRY) entry = { 940 .WriteDisableAlpha = true, 941 .WriteDisableRed = true, 942 .WriteDisableGreen = true, 943 .WriteDisableBlue = true, 944 }; 945 GENX(BLEND_STATE_ENTRY_pack)(NULL, state_pos, &entry); 946 state_pos += GENX(BLEND_STATE_ENTRY_length); 947 continue; 948 } 949 950 assert(binding->binding == 0); 951 const VkPipelineColorBlendAttachmentState *a = 952 &info->pAttachments[binding->index]; 953 954 struct GENX(BLEND_STATE_ENTRY) entry = { 955#if GEN_GEN < 8 956 .AlphaToCoverageEnable = ms_info && ms_info->alphaToCoverageEnable, 957 .AlphaToOneEnable = ms_info && ms_info->alphaToOneEnable, 958#endif 959 .LogicOpEnable = info->logicOpEnable, 960 .LogicOpFunction = vk_to_gen_logic_op[info->logicOp], 961 .ColorBufferBlendEnable = a->blendEnable, 962 .ColorClampRange = COLORCLAMP_RTFORMAT, 963 .PreBlendColorClampEnable = true, 964 .PostBlendColorClampEnable = true, 965 .SourceBlendFactor = vk_to_gen_blend[a->srcColorBlendFactor], 966 .DestinationBlendFactor = vk_to_gen_blend[a->dstColorBlendFactor], 967 .ColorBlendFunction = vk_to_gen_blend_op[a->colorBlendOp], 968 .SourceAlphaBlendFactor = vk_to_gen_blend[a->srcAlphaBlendFactor], 969 .DestinationAlphaBlendFactor = vk_to_gen_blend[a->dstAlphaBlendFactor], 970 .AlphaBlendFunction = vk_to_gen_blend_op[a->alphaBlendOp], 971 .WriteDisableAlpha = !(a->colorWriteMask & VK_COLOR_COMPONENT_A_BIT), 972 .WriteDisableRed = !(a->colorWriteMask & VK_COLOR_COMPONENT_R_BIT), 973 .WriteDisableGreen = !(a->colorWriteMask & VK_COLOR_COMPONENT_G_BIT), 974 .WriteDisableBlue = !(a->colorWriteMask & VK_COLOR_COMPONENT_B_BIT), 975 }; 976 977 if (a->srcColorBlendFactor != a->srcAlphaBlendFactor || 978 a->dstColorBlendFactor != a->dstAlphaBlendFactor || 979 a->colorBlendOp != a->alphaBlendOp) { 980#if GEN_GEN >= 8 981 blend_state.IndependentAlphaBlendEnable = true; 982#else 983 entry.IndependentAlphaBlendEnable = true; 984#endif 985 } 986 987 /* The Dual Source Blending documentation says: 988 * 989 * "If SRC1 is included in a src/dst blend factor and 990 * a DualSource RT Write message is not used, results 991 * are UNDEFINED. (This reflects the same restriction in DX APIs, 992 * where undefined results are produced if “o1” is not written 993 * by a PS – there are no default values defined)." 994 * 995 * There is no way to gracefully fix this undefined situation 996 * so we just disable the blending to prevent possible issues. 997 */ 998 if (!wm_prog_data->dual_src_blend && 999 (is_dual_src_blend_factor(a->srcColorBlendFactor) || 1000 is_dual_src_blend_factor(a->dstColorBlendFactor) || 1001 is_dual_src_blend_factor(a->srcAlphaBlendFactor) || 1002 is_dual_src_blend_factor(a->dstAlphaBlendFactor))) { 1003 vk_debug_report(&device->instance->debug_report_callbacks, 1004 VK_DEBUG_REPORT_WARNING_BIT_EXT, 1005 VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 1006 (uint64_t)(uintptr_t)device, 1007 0, 0, "anv", 1008 "Enabled dual-src blend factors without writing both targets " 1009 "in the shader. Disabling blending to avoid GPU hangs."); 1010 entry.ColorBufferBlendEnable = false; 1011 } 1012 1013 if (a->colorWriteMask != 0) 1014 has_writeable_rt = true; 1015 1016 /* Our hardware applies the blend factor prior to the blend function 1017 * regardless of what function is used. Technically, this means the 1018 * hardware can do MORE than GL or Vulkan specify. However, it also 1019 * means that, for MIN and MAX, we have to stomp the blend factor to 1020 * ONE to make it a no-op. 1021 */ 1022 if (a->colorBlendOp == VK_BLEND_OP_MIN || 1023 a->colorBlendOp == VK_BLEND_OP_MAX) { 1024 entry.SourceBlendFactor = BLENDFACTOR_ONE; 1025 entry.DestinationBlendFactor = BLENDFACTOR_ONE; 1026 } 1027 if (a->alphaBlendOp == VK_BLEND_OP_MIN || 1028 a->alphaBlendOp == VK_BLEND_OP_MAX) { 1029 entry.SourceAlphaBlendFactor = BLENDFACTOR_ONE; 1030 entry.DestinationAlphaBlendFactor = BLENDFACTOR_ONE; 1031 } 1032 GENX(BLEND_STATE_ENTRY_pack)(NULL, state_pos, &entry); 1033 state_pos += GENX(BLEND_STATE_ENTRY_length); 1034#if GEN_GEN >= 8 1035 if (i == 0) 1036 bs0 = entry; 1037#endif 1038 } 1039 1040#if GEN_GEN >= 8 1041 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS_BLEND), blend) { 1042 blend.AlphaToCoverageEnable = blend_state.AlphaToCoverageEnable; 1043 blend.HasWriteableRT = has_writeable_rt; 1044 blend.ColorBufferBlendEnable = bs0.ColorBufferBlendEnable; 1045 blend.SourceAlphaBlendFactor = bs0.SourceAlphaBlendFactor; 1046 blend.DestinationAlphaBlendFactor = bs0.DestinationAlphaBlendFactor; 1047 blend.SourceBlendFactor = bs0.SourceBlendFactor; 1048 blend.DestinationBlendFactor = bs0.DestinationBlendFactor; 1049 blend.AlphaTestEnable = false; 1050 blend.IndependentAlphaBlendEnable = 1051 blend_state.IndependentAlphaBlendEnable; 1052 } 1053#else 1054 (void)has_writeable_rt; 1055#endif 1056 1057 GENX(BLEND_STATE_pack)(NULL, pipeline->blend_state.map, &blend_state); 1058 anv_state_flush(device, pipeline->blend_state); 1059 1060 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_BLEND_STATE_POINTERS), bsp) { 1061 bsp.BlendStatePointer = pipeline->blend_state.offset; 1062#if GEN_GEN >= 8 1063 bsp.BlendStatePointerValid = true; 1064#endif 1065 } 1066} 1067 1068static void 1069emit_3dstate_clip(struct anv_pipeline *pipeline, 1070 const VkPipelineViewportStateCreateInfo *vp_info, 1071 const VkPipelineRasterizationStateCreateInfo *rs_info) 1072{ 1073 const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline); 1074 (void) wm_prog_data; 1075 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_CLIP), clip) { 1076 clip.ClipEnable = true; 1077 clip.StatisticsEnable = true; 1078 clip.EarlyCullEnable = true; 1079 clip.APIMode = APIMODE_D3D, 1080 clip.ViewportXYClipTestEnable = true; 1081 1082 clip.ClipMode = CLIPMODE_NORMAL; 1083 1084 clip.TriangleStripListProvokingVertexSelect = 0; 1085 clip.LineStripListProvokingVertexSelect = 0; 1086 clip.TriangleFanProvokingVertexSelect = 1; 1087 1088 clip.MinimumPointWidth = 0.125; 1089 clip.MaximumPointWidth = 255.875; 1090 1091 const struct brw_vue_prog_data *last = 1092 anv_pipeline_get_last_vue_prog_data(pipeline); 1093 1094 /* From the Vulkan 1.0.45 spec: 1095 * 1096 * "If the last active vertex processing stage shader entry point's 1097 * interface does not include a variable decorated with 1098 * ViewportIndex, then the first viewport is used." 1099 */ 1100 if (vp_info && (last->vue_map.slots_valid & VARYING_BIT_VIEWPORT)) { 1101 clip.MaximumVPIndex = vp_info->viewportCount - 1; 1102 } else { 1103 clip.MaximumVPIndex = 0; 1104 } 1105 1106 /* From the Vulkan 1.0.45 spec: 1107 * 1108 * "If the last active vertex processing stage shader entry point's 1109 * interface does not include a variable decorated with Layer, then 1110 * the first layer is used." 1111 */ 1112 clip.ForceZeroRTAIndexEnable = 1113 !(last->vue_map.slots_valid & VARYING_BIT_LAYER); 1114 1115#if GEN_GEN == 7 1116 clip.FrontWinding = vk_to_gen_front_face[rs_info->frontFace]; 1117 clip.CullMode = vk_to_gen_cullmode[rs_info->cullMode]; 1118 clip.ViewportZClipTestEnable = !pipeline->depth_clamp_enable; 1119 if (last) { 1120 clip.UserClipDistanceClipTestEnableBitmask = last->clip_distance_mask; 1121 clip.UserClipDistanceCullTestEnableBitmask = last->cull_distance_mask; 1122 } 1123#else 1124 clip.NonPerspectiveBarycentricEnable = wm_prog_data ? 1125 (wm_prog_data->barycentric_interp_modes & 1126 BRW_BARYCENTRIC_NONPERSPECTIVE_BITS) != 0 : 0; 1127#endif 1128 } 1129} 1130 1131static void 1132emit_3dstate_streamout(struct anv_pipeline *pipeline, 1133 const VkPipelineRasterizationStateCreateInfo *rs_info) 1134{ 1135 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_STREAMOUT), so) { 1136 so.RenderingDisable = rs_info->rasterizerDiscardEnable; 1137 } 1138} 1139 1140static uint32_t 1141get_sampler_count(const struct anv_shader_bin *bin) 1142{ 1143 uint32_t count_by_4 = DIV_ROUND_UP(bin->bind_map.sampler_count, 4); 1144 1145 /* We can potentially have way more than 32 samplers and that's ok. 1146 * However, the 3DSTATE_XS packets only have 3 bits to specify how 1147 * many to pre-fetch and all values above 4 are marked reserved. 1148 */ 1149 return MIN2(count_by_4, 4); 1150} 1151 1152static uint32_t 1153get_binding_table_entry_count(const struct anv_shader_bin *bin) 1154{ 1155 return DIV_ROUND_UP(bin->bind_map.surface_count, 32); 1156} 1157 1158static struct anv_address 1159get_scratch_address(struct anv_pipeline *pipeline, 1160 gl_shader_stage stage, 1161 const struct anv_shader_bin *bin) 1162{ 1163 return (struct anv_address) { 1164 .bo = anv_scratch_pool_alloc(pipeline->device, 1165 &pipeline->device->scratch_pool, 1166 stage, bin->prog_data->total_scratch), 1167 .offset = 0, 1168 }; 1169} 1170 1171static uint32_t 1172get_scratch_space(const struct anv_shader_bin *bin) 1173{ 1174 return ffs(bin->prog_data->total_scratch / 2048); 1175} 1176 1177static void 1178emit_3dstate_vs(struct anv_pipeline *pipeline) 1179{ 1180 const struct gen_device_info *devinfo = &pipeline->device->info; 1181 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline); 1182 const struct anv_shader_bin *vs_bin = 1183 pipeline->shaders[MESA_SHADER_VERTEX]; 1184 1185 assert(anv_pipeline_has_stage(pipeline, MESA_SHADER_VERTEX)); 1186 1187 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VS), vs) { 1188 vs.Enable = true; 1189 vs.StatisticsEnable = true; 1190 vs.KernelStartPointer = vs_bin->kernel.offset; 1191#if GEN_GEN >= 8 1192 vs.SIMD8DispatchEnable = 1193 vs_prog_data->base.dispatch_mode == DISPATCH_MODE_SIMD8; 1194#endif 1195 1196 assert(!vs_prog_data->base.base.use_alt_mode); 1197#if GEN_GEN < 11 1198 vs.SingleVertexDispatch = false; 1199#endif 1200 vs.VectorMaskEnable = false; 1201 vs.SamplerCount = get_sampler_count(vs_bin); 1202 /* Gen 11 workarounds table #2056 WABTPPrefetchDisable suggests to 1203 * disable prefetching of binding tables on A0 and B0 steppings. 1204 * TODO: Revisit this WA on newer steppings. 1205 */ 1206 vs.BindingTableEntryCount = GEN_GEN == 11 ? 0 : get_binding_table_entry_count(vs_bin); 1207 vs.FloatingPointMode = IEEE754; 1208 vs.IllegalOpcodeExceptionEnable = false; 1209 vs.SoftwareExceptionEnable = false; 1210 vs.MaximumNumberofThreads = devinfo->max_vs_threads - 1; 1211 1212 if (GEN_GEN == 9 && devinfo->gt == 4 && 1213 anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL)) { 1214 /* On Sky Lake GT4, we have experienced some hangs related to the VS 1215 * cache and tessellation. It is unknown exactly what is happening 1216 * but the Haswell docs for the "VS Reference Count Full Force Miss 1217 * Enable" field of the "Thread Mode" register refer to a HSW bug in 1218 * which the VUE handle reference count would overflow resulting in 1219 * internal reference counting bugs. My (Jason's) best guess is that 1220 * this bug cropped back up on SKL GT4 when we suddenly had more 1221 * threads in play than any previous gen9 hardware. 1222 * 1223 * What we do know for sure is that setting this bit when 1224 * tessellation shaders are in use fixes a GPU hang in Batman: Arkham 1225 * City when playing with DXVK (https://bugs.freedesktop.org/107280). 1226 * Disabling the vertex cache with tessellation shaders should only 1227 * have a minor performance impact as the tessellation shaders are 1228 * likely generating and processing far more geometry than the vertex 1229 * stage. 1230 */ 1231 vs.VertexCacheDisable = true; 1232 } 1233 1234 vs.VertexURBEntryReadLength = vs_prog_data->base.urb_read_length; 1235 vs.VertexURBEntryReadOffset = 0; 1236 vs.DispatchGRFStartRegisterForURBData = 1237 vs_prog_data->base.base.dispatch_grf_start_reg; 1238 1239#if GEN_GEN >= 8 1240 vs.UserClipDistanceClipTestEnableBitmask = 1241 vs_prog_data->base.clip_distance_mask; 1242 vs.UserClipDistanceCullTestEnableBitmask = 1243 vs_prog_data->base.cull_distance_mask; 1244#endif 1245 1246 vs.PerThreadScratchSpace = get_scratch_space(vs_bin); 1247 vs.ScratchSpaceBasePointer = 1248 get_scratch_address(pipeline, MESA_SHADER_VERTEX, vs_bin); 1249 } 1250} 1251 1252static void 1253emit_3dstate_hs_te_ds(struct anv_pipeline *pipeline, 1254 const VkPipelineTessellationStateCreateInfo *tess_info) 1255{ 1256 if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL)) { 1257 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_HS), hs); 1258 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_TE), te); 1259 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_DS), ds); 1260 return; 1261 } 1262 1263 const struct gen_device_info *devinfo = &pipeline->device->info; 1264 const struct anv_shader_bin *tcs_bin = 1265 pipeline->shaders[MESA_SHADER_TESS_CTRL]; 1266 const struct anv_shader_bin *tes_bin = 1267 pipeline->shaders[MESA_SHADER_TESS_EVAL]; 1268 1269 const struct brw_tcs_prog_data *tcs_prog_data = get_tcs_prog_data(pipeline); 1270 const struct brw_tes_prog_data *tes_prog_data = get_tes_prog_data(pipeline); 1271 1272 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_HS), hs) { 1273 hs.Enable = true; 1274 hs.StatisticsEnable = true; 1275 hs.KernelStartPointer = tcs_bin->kernel.offset; 1276 1277 hs.SamplerCount = get_sampler_count(tcs_bin); 1278 /* Gen 11 workarounds table #2056 WABTPPrefetchDisable */ 1279 hs.BindingTableEntryCount = GEN_GEN == 11 ? 0 : get_binding_table_entry_count(tcs_bin); 1280 hs.MaximumNumberofThreads = devinfo->max_tcs_threads - 1; 1281 hs.IncludeVertexHandles = true; 1282 hs.InstanceCount = tcs_prog_data->instances - 1; 1283 1284 hs.VertexURBEntryReadLength = 0; 1285 hs.VertexURBEntryReadOffset = 0; 1286 hs.DispatchGRFStartRegisterForURBData = 1287 tcs_prog_data->base.base.dispatch_grf_start_reg; 1288 1289 hs.PerThreadScratchSpace = get_scratch_space(tcs_bin); 1290 hs.ScratchSpaceBasePointer = 1291 get_scratch_address(pipeline, MESA_SHADER_TESS_CTRL, tcs_bin); 1292 } 1293 1294 const VkPipelineTessellationDomainOriginStateCreateInfoKHR *domain_origin_state = 1295 tess_info ? vk_find_struct_const(tess_info, PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO_KHR) : NULL; 1296 1297 VkTessellationDomainOriginKHR uv_origin = 1298 domain_origin_state ? domain_origin_state->domainOrigin : 1299 VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT_KHR; 1300 1301 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_TE), te) { 1302 te.Partitioning = tes_prog_data->partitioning; 1303 1304 if (uv_origin == VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT_KHR) { 1305 te.OutputTopology = tes_prog_data->output_topology; 1306 } else { 1307 /* When the origin is upper-left, we have to flip the winding order */ 1308 if (tes_prog_data->output_topology == OUTPUT_TRI_CCW) { 1309 te.OutputTopology = OUTPUT_TRI_CW; 1310 } else if (tes_prog_data->output_topology == OUTPUT_TRI_CW) { 1311 te.OutputTopology = OUTPUT_TRI_CCW; 1312 } else { 1313 te.OutputTopology = tes_prog_data->output_topology; 1314 } 1315 } 1316 1317 te.TEDomain = tes_prog_data->domain; 1318 te.TEEnable = true; 1319 te.MaximumTessellationFactorOdd = 63.0; 1320 te.MaximumTessellationFactorNotOdd = 64.0; 1321 } 1322 1323 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_DS), ds) { 1324 ds.Enable = true; 1325 ds.StatisticsEnable = true; 1326 ds.KernelStartPointer = tes_bin->kernel.offset; 1327 1328 ds.SamplerCount = get_sampler_count(tes_bin); 1329 /* Gen 11 workarounds table #2056 WABTPPrefetchDisable */ 1330 ds.BindingTableEntryCount = GEN_GEN == 11 ? 0 : get_binding_table_entry_count(tes_bin); 1331 ds.MaximumNumberofThreads = devinfo->max_tes_threads - 1; 1332 1333 ds.ComputeWCoordinateEnable = 1334 tes_prog_data->domain == BRW_TESS_DOMAIN_TRI; 1335 1336 ds.PatchURBEntryReadLength = tes_prog_data->base.urb_read_length; 1337 ds.PatchURBEntryReadOffset = 0; 1338 ds.DispatchGRFStartRegisterForURBData = 1339 tes_prog_data->base.base.dispatch_grf_start_reg; 1340 1341#if GEN_GEN >= 8 1342#if GEN_GEN < 11 1343 ds.DispatchMode = 1344 tes_prog_data->base.dispatch_mode == DISPATCH_MODE_SIMD8 ? 1345 DISPATCH_MODE_SIMD8_SINGLE_PATCH : 1346 DISPATCH_MODE_SIMD4X2; 1347#else 1348 assert(tes_prog_data->base.dispatch_mode == DISPATCH_MODE_SIMD8); 1349 ds.DispatchMode = DISPATCH_MODE_SIMD8_SINGLE_PATCH; 1350#endif 1351 1352 ds.UserClipDistanceClipTestEnableBitmask = 1353 tes_prog_data->base.clip_distance_mask; 1354 ds.UserClipDistanceCullTestEnableBitmask = 1355 tes_prog_data->base.cull_distance_mask; 1356#endif 1357 1358 ds.PerThreadScratchSpace = get_scratch_space(tes_bin); 1359 ds.ScratchSpaceBasePointer = 1360 get_scratch_address(pipeline, MESA_SHADER_TESS_EVAL, tes_bin); 1361 } 1362} 1363 1364static void 1365emit_3dstate_gs(struct anv_pipeline *pipeline) 1366{ 1367 const struct gen_device_info *devinfo = &pipeline->device->info; 1368 const struct anv_shader_bin *gs_bin = 1369 pipeline->shaders[MESA_SHADER_GEOMETRY]; 1370 1371 if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_GEOMETRY)) { 1372 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_GS), gs); 1373 return; 1374 } 1375 1376 const struct brw_gs_prog_data *gs_prog_data = get_gs_prog_data(pipeline); 1377 1378 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_GS), gs) { 1379 gs.Enable = true; 1380 gs.StatisticsEnable = true; 1381 gs.KernelStartPointer = gs_bin->kernel.offset; 1382 gs.DispatchMode = gs_prog_data->base.dispatch_mode; 1383 1384 gs.SingleProgramFlow = false; 1385 gs.VectorMaskEnable = false; 1386 gs.SamplerCount = get_sampler_count(gs_bin); 1387 /* Gen 11 workarounds table #2056 WABTPPrefetchDisable */ 1388 gs.BindingTableEntryCount = GEN_GEN == 11 ? 0 : get_binding_table_entry_count(gs_bin); 1389 gs.IncludeVertexHandles = gs_prog_data->base.include_vue_handles; 1390 gs.IncludePrimitiveID = gs_prog_data->include_primitive_id; 1391 1392 if (GEN_GEN == 8) { 1393 /* Broadwell is weird. It needs us to divide by 2. */ 1394 gs.MaximumNumberofThreads = devinfo->max_gs_threads / 2 - 1; 1395 } else { 1396 gs.MaximumNumberofThreads = devinfo->max_gs_threads - 1; 1397 } 1398 1399 gs.OutputVertexSize = gs_prog_data->output_vertex_size_hwords * 2 - 1; 1400 gs.OutputTopology = gs_prog_data->output_topology; 1401 gs.VertexURBEntryReadLength = gs_prog_data->base.urb_read_length; 1402 gs.ControlDataFormat = gs_prog_data->control_data_format; 1403 gs.ControlDataHeaderSize = gs_prog_data->control_data_header_size_hwords; 1404 gs.InstanceControl = MAX2(gs_prog_data->invocations, 1) - 1; 1405 gs.ReorderMode = TRAILING; 1406 1407#if GEN_GEN >= 8 1408 gs.ExpectedVertexCount = gs_prog_data->vertices_in; 1409 gs.StaticOutput = gs_prog_data->static_vertex_count >= 0; 1410 gs.StaticOutputVertexCount = gs_prog_data->static_vertex_count >= 0 ? 1411 gs_prog_data->static_vertex_count : 0; 1412#endif 1413 1414 gs.VertexURBEntryReadOffset = 0; 1415 gs.VertexURBEntryReadLength = gs_prog_data->base.urb_read_length; 1416 gs.DispatchGRFStartRegisterForURBData = 1417 gs_prog_data->base.base.dispatch_grf_start_reg; 1418 1419#if GEN_GEN >= 8 1420 gs.UserClipDistanceClipTestEnableBitmask = 1421 gs_prog_data->base.clip_distance_mask; 1422 gs.UserClipDistanceCullTestEnableBitmask = 1423 gs_prog_data->base.cull_distance_mask; 1424#endif 1425 1426 gs.PerThreadScratchSpace = get_scratch_space(gs_bin); 1427 gs.ScratchSpaceBasePointer = 1428 get_scratch_address(pipeline, MESA_SHADER_GEOMETRY, gs_bin); 1429 } 1430} 1431 1432static bool 1433has_color_buffer_write_enabled(const struct anv_pipeline *pipeline, 1434 const VkPipelineColorBlendStateCreateInfo *blend) 1435{ 1436 const struct anv_shader_bin *shader_bin = 1437 pipeline->shaders[MESA_SHADER_FRAGMENT]; 1438 if (!shader_bin) 1439 return false; 1440 1441 const struct anv_pipeline_bind_map *bind_map = &shader_bin->bind_map; 1442 for (int i = 0; i < bind_map->surface_count; i++) { 1443 struct anv_pipeline_binding *binding = &bind_map->surface_to_descriptor[i]; 1444 1445 if (binding->set != ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS) 1446 continue; 1447 1448 if (binding->index == UINT32_MAX) 1449 continue; 1450 1451 if (blend && blend->pAttachments[binding->index].colorWriteMask != 0) 1452 return true; 1453 } 1454 1455 return false; 1456} 1457 1458static void 1459emit_3dstate_wm(struct anv_pipeline *pipeline, struct anv_subpass *subpass, 1460 const VkPipelineColorBlendStateCreateInfo *blend, 1461 const VkPipelineMultisampleStateCreateInfo *multisample) 1462{ 1463 const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline); 1464 1465 MAYBE_UNUSED uint32_t samples = 1466 multisample ? multisample->rasterizationSamples : 1; 1467 1468 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_WM), wm) { 1469 wm.StatisticsEnable = true; 1470 wm.LineEndCapAntialiasingRegionWidth = _05pixels; 1471 wm.LineAntialiasingRegionWidth = _10pixels; 1472 wm.PointRasterizationRule = RASTRULE_UPPER_RIGHT; 1473 1474 if (anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) { 1475 if (wm_prog_data->early_fragment_tests) { 1476 wm.EarlyDepthStencilControl = EDSC_PREPS; 1477 } else if (wm_prog_data->has_side_effects) { 1478 wm.EarlyDepthStencilControl = EDSC_PSEXEC; 1479 } else { 1480 wm.EarlyDepthStencilControl = EDSC_NORMAL; 1481 } 1482 1483#if GEN_GEN >= 8 1484 /* Gen8 hardware tries to compute ThreadDispatchEnable for us but 1485 * doesn't take into account KillPixels when no depth or stencil 1486 * writes are enabled. In order for occlusion queries to work 1487 * correctly with no attachments, we need to force-enable PS thread 1488 * dispatch. 1489 * 1490 * The BDW docs are pretty clear that that this bit isn't validated 1491 * and probably shouldn't be used in production: 1492 * 1493 * "This must always be set to Normal. This field should not be 1494 * tested for functional validation." 1495 * 1496 * Unfortunately, however, the other mechanism we have for doing this 1497 * is 3DSTATE_PS_EXTRA::PixelShaderHasUAV which causes hangs on BDW. 1498 * Given two bad options, we choose the one which works. 1499 */ 1500 if ((wm_prog_data->has_side_effects || wm_prog_data->uses_kill) && 1501 !has_color_buffer_write_enabled(pipeline, blend)) 1502 wm.ForceThreadDispatchEnable = ForceON; 1503#endif 1504 1505 wm.BarycentricInterpolationMode = 1506 wm_prog_data->barycentric_interp_modes; 1507 1508#if GEN_GEN < 8 1509 wm.PixelShaderComputedDepthMode = wm_prog_data->computed_depth_mode; 1510 wm.PixelShaderUsesSourceDepth = wm_prog_data->uses_src_depth; 1511 wm.PixelShaderUsesSourceW = wm_prog_data->uses_src_w; 1512 wm.PixelShaderUsesInputCoverageMask = wm_prog_data->uses_sample_mask; 1513 1514 /* If the subpass has a depth or stencil self-dependency, then we 1515 * need to force the hardware to do the depth/stencil write *after* 1516 * fragment shader execution. Otherwise, the writes may hit memory 1517 * before we get around to fetching from the input attachment and we 1518 * may get the depth or stencil value from the current draw rather 1519 * than the previous one. 1520 */ 1521 wm.PixelShaderKillsPixel = subpass->has_ds_self_dep || 1522 wm_prog_data->uses_kill; 1523 1524 if (wm.PixelShaderComputedDepthMode != PSCDEPTH_OFF || 1525 wm_prog_data->has_side_effects || 1526 wm.PixelShaderKillsPixel || 1527 has_color_buffer_write_enabled(pipeline, blend)) 1528 wm.ThreadDispatchEnable = true; 1529 1530 if (samples > 1) { 1531 wm.MultisampleRasterizationMode = MSRASTMODE_ON_PATTERN; 1532 if (wm_prog_data->persample_dispatch) { 1533 wm.MultisampleDispatchMode = MSDISPMODE_PERSAMPLE; 1534 } else { 1535 wm.MultisampleDispatchMode = MSDISPMODE_PERPIXEL; 1536 } 1537 } else { 1538 wm.MultisampleRasterizationMode = MSRASTMODE_OFF_PIXEL; 1539 wm.MultisampleDispatchMode = MSDISPMODE_PERSAMPLE; 1540 } 1541#endif 1542 } 1543 } 1544} 1545 1546static void 1547emit_3dstate_ps(struct anv_pipeline *pipeline, 1548 const VkPipelineColorBlendStateCreateInfo *blend, 1549 const VkPipelineMultisampleStateCreateInfo *multisample) 1550{ 1551 MAYBE_UNUSED const struct gen_device_info *devinfo = &pipeline->device->info; 1552 const struct anv_shader_bin *fs_bin = 1553 pipeline->shaders[MESA_SHADER_FRAGMENT]; 1554 1555 if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) { 1556 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS), ps) { 1557#if GEN_GEN == 7 1558 /* Even if no fragments are ever dispatched, gen7 hardware hangs if 1559 * we don't at least set the maximum number of threads. 1560 */ 1561 ps.MaximumNumberofThreads = devinfo->max_wm_threads - 1; 1562#endif 1563 } 1564 return; 1565 } 1566 1567 const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline); 1568 1569#if GEN_GEN < 8 1570 /* The hardware wedges if you have this bit set but don't turn on any dual 1571 * source blend factors. 1572 */ 1573 bool dual_src_blend = false; 1574 if (wm_prog_data->dual_src_blend && blend) { 1575 for (uint32_t i = 0; i < blend->attachmentCount; i++) { 1576 const VkPipelineColorBlendAttachmentState *bstate = 1577 &blend->pAttachments[i]; 1578 1579 if (bstate->blendEnable && 1580 (is_dual_src_blend_factor(bstate->srcColorBlendFactor) || 1581 is_dual_src_blend_factor(bstate->dstColorBlendFactor) || 1582 is_dual_src_blend_factor(bstate->srcAlphaBlendFactor) || 1583 is_dual_src_blend_factor(bstate->dstAlphaBlendFactor))) { 1584 dual_src_blend = true; 1585 break; 1586 } 1587 } 1588 } 1589#endif 1590 1591 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS), ps) { 1592 ps._8PixelDispatchEnable = wm_prog_data->dispatch_8; 1593 ps._16PixelDispatchEnable = wm_prog_data->dispatch_16; 1594 ps._32PixelDispatchEnable = wm_prog_data->dispatch_32; 1595 1596 /* From the Sky Lake PRM 3DSTATE_PS::32 Pixel Dispatch Enable: 1597 * 1598 * "When NUM_MULTISAMPLES = 16 or FORCE_SAMPLE_COUNT = 16, SIMD32 1599 * Dispatch must not be enabled for PER_PIXEL dispatch mode." 1600 * 1601 * Since 16x MSAA is first introduced on SKL, we don't need to apply 1602 * the workaround on any older hardware. 1603 */ 1604 if (GEN_GEN >= 9 && !wm_prog_data->persample_dispatch && 1605 multisample && multisample->rasterizationSamples == 16) { 1606 assert(ps._8PixelDispatchEnable || ps._16PixelDispatchEnable); 1607 ps._32PixelDispatchEnable = false; 1608 } 1609 1610 ps.KernelStartPointer0 = fs_bin->kernel.offset + 1611 brw_wm_prog_data_prog_offset(wm_prog_data, ps, 0); 1612 ps.KernelStartPointer1 = fs_bin->kernel.offset + 1613 brw_wm_prog_data_prog_offset(wm_prog_data, ps, 1); 1614 ps.KernelStartPointer2 = fs_bin->kernel.offset + 1615 brw_wm_prog_data_prog_offset(wm_prog_data, ps, 2); 1616 1617 ps.SingleProgramFlow = false; 1618 ps.VectorMaskEnable = true; 1619 ps.SamplerCount = get_sampler_count(fs_bin); 1620 /* Gen 11 workarounds table #2056 WABTPPrefetchDisable */ 1621 ps.BindingTableEntryCount = GEN_GEN == 11 ? 0 : get_binding_table_entry_count(fs_bin); 1622 ps.PushConstantEnable = wm_prog_data->base.nr_params > 0 || 1623 wm_prog_data->base.ubo_ranges[0].length; 1624 ps.PositionXYOffsetSelect = wm_prog_data->uses_pos_offset ? 1625 POSOFFSET_SAMPLE: POSOFFSET_NONE; 1626#if GEN_GEN < 8 1627 ps.AttributeEnable = wm_prog_data->num_varying_inputs > 0; 1628 ps.oMaskPresenttoRenderTarget = wm_prog_data->uses_omask; 1629 ps.DualSourceBlendEnable = dual_src_blend; 1630#endif 1631 1632#if GEN_IS_HASWELL 1633 /* Haswell requires the sample mask to be set in this packet as well 1634 * as in 3DSTATE_SAMPLE_MASK; the values should match. 1635 */ 1636 ps.SampleMask = 0xff; 1637#endif 1638 1639#if GEN_GEN >= 9 1640 ps.MaximumNumberofThreadsPerPSD = 64 - 1; 1641#elif GEN_GEN >= 8 1642 ps.MaximumNumberofThreadsPerPSD = 64 - 2; 1643#else 1644 ps.MaximumNumberofThreads = devinfo->max_wm_threads - 1; 1645#endif 1646 1647 ps.DispatchGRFStartRegisterForConstantSetupData0 = 1648 brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 0); 1649 ps.DispatchGRFStartRegisterForConstantSetupData1 = 1650 brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 1); 1651 ps.DispatchGRFStartRegisterForConstantSetupData2 = 1652 brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 2); 1653 1654 ps.PerThreadScratchSpace = get_scratch_space(fs_bin); 1655 ps.ScratchSpaceBasePointer = 1656 get_scratch_address(pipeline, MESA_SHADER_FRAGMENT, fs_bin); 1657 } 1658} 1659 1660#if GEN_GEN >= 8 1661static void 1662emit_3dstate_ps_extra(struct anv_pipeline *pipeline, 1663 struct anv_subpass *subpass, 1664 const VkPipelineColorBlendStateCreateInfo *blend) 1665{ 1666 const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline); 1667 1668 if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) { 1669 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS_EXTRA), ps); 1670 return; 1671 } 1672 1673 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS_EXTRA), ps) { 1674 ps.PixelShaderValid = true; 1675 ps.AttributeEnable = wm_prog_data->num_varying_inputs > 0; 1676 ps.oMaskPresenttoRenderTarget = wm_prog_data->uses_omask; 1677 ps.PixelShaderIsPerSample = wm_prog_data->persample_dispatch; 1678 ps.PixelShaderComputedDepthMode = wm_prog_data->computed_depth_mode; 1679 ps.PixelShaderUsesSourceDepth = wm_prog_data->uses_src_depth; 1680 ps.PixelShaderUsesSourceW = wm_prog_data->uses_src_w; 1681 1682 /* If the subpass has a depth or stencil self-dependency, then we need 1683 * to force the hardware to do the depth/stencil write *after* fragment 1684 * shader execution. Otherwise, the writes may hit memory before we get 1685 * around to fetching from the input attachment and we may get the depth 1686 * or stencil value from the current draw rather than the previous one. 1687 */ 1688 ps.PixelShaderKillsPixel = subpass->has_ds_self_dep || 1689 wm_prog_data->uses_kill; 1690 1691#if GEN_GEN >= 9 1692 ps.PixelShaderComputesStencil = wm_prog_data->computed_stencil; 1693 ps.PixelShaderPullsBary = wm_prog_data->pulls_bary; 1694 1695 ps.InputCoverageMaskState = ICMS_NONE; 1696 if (wm_prog_data->uses_sample_mask) { 1697 if (wm_prog_data->post_depth_coverage) 1698 ps.InputCoverageMaskState = ICMS_DEPTH_COVERAGE; 1699 else 1700 ps.InputCoverageMaskState = ICMS_INNER_CONSERVATIVE; 1701 } 1702#else 1703 ps.PixelShaderUsesInputCoverageMask = wm_prog_data->uses_sample_mask; 1704#endif 1705 } 1706} 1707 1708static void 1709emit_3dstate_vf_topology(struct anv_pipeline *pipeline) 1710{ 1711 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_TOPOLOGY), vft) { 1712 vft.PrimitiveTopologyType = pipeline->topology; 1713 } 1714} 1715#endif 1716 1717static void 1718emit_3dstate_vf_statistics(struct anv_pipeline *pipeline) 1719{ 1720 anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_STATISTICS), vfs) { 1721 vfs.StatisticsEnable = true; 1722 } 1723} 1724 1725static void 1726compute_kill_pixel(struct anv_pipeline *pipeline, 1727 const VkPipelineMultisampleStateCreateInfo *ms_info, 1728 const struct anv_subpass *subpass) 1729{ 1730 if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) { 1731 pipeline->kill_pixel = false; 1732 return; 1733 } 1734 1735 const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline); 1736 1737 /* This computes the KillPixel portion of the computation for whether or 1738 * not we want to enable the PMA fix on gen8 or gen9. It's given by this 1739 * chunk of the giant formula: 1740 * 1741 * (3DSTATE_PS_EXTRA::PixelShaderKillsPixels || 1742 * 3DSTATE_PS_EXTRA::oMask Present to RenderTarget || 1743 * 3DSTATE_PS_BLEND::AlphaToCoverageEnable || 1744 * 3DSTATE_PS_BLEND::AlphaTestEnable || 1745 * 3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable) 1746 * 1747 * 3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable is always false and so is 1748 * 3DSTATE_PS_BLEND::AlphaTestEnable since Vulkan doesn't have a concept 1749 * of an alpha test. 1750 */ 1751 pipeline->kill_pixel = 1752 subpass->has_ds_self_dep || wm_prog_data->uses_kill || 1753 wm_prog_data->uses_omask || 1754 (ms_info && ms_info->alphaToCoverageEnable); 1755} 1756 1757static VkResult 1758genX(graphics_pipeline_create)( 1759 VkDevice _device, 1760 struct anv_pipeline_cache * cache, 1761 const VkGraphicsPipelineCreateInfo* pCreateInfo, 1762 const VkAllocationCallbacks* pAllocator, 1763 VkPipeline* pPipeline) 1764{ 1765 ANV_FROM_HANDLE(anv_device, device, _device); 1766 ANV_FROM_HANDLE(anv_render_pass, pass, pCreateInfo->renderPass); 1767 struct anv_subpass *subpass = &pass->subpasses[pCreateInfo->subpass]; 1768 struct anv_pipeline *pipeline; 1769 VkResult result; 1770 1771 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO); 1772 1773 /* Use the default pipeline cache if none is specified */ 1774 if (cache == NULL && device->instance->pipeline_cache_enabled) 1775 cache = &device->default_pipeline_cache; 1776 1777 pipeline = vk_alloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8, 1778 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); 1779 if (pipeline == NULL) 1780 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY); 1781 1782 result = anv_pipeline_init(pipeline, device, cache, 1783 pCreateInfo, pAllocator); 1784 if (result != VK_SUCCESS) { 1785 vk_free2(&device->alloc, pAllocator, pipeline); 1786 return result; 1787 } 1788 1789 assert(pCreateInfo->pVertexInputState); 1790 emit_vertex_input(pipeline, pCreateInfo->pVertexInputState); 1791 assert(pCreateInfo->pRasterizationState); 1792 emit_rs_state(pipeline, pCreateInfo->pRasterizationState, 1793 pCreateInfo->pMultisampleState, pass, subpass); 1794 emit_ms_state(pipeline, pCreateInfo->pMultisampleState); 1795 emit_ds_state(pipeline, pCreateInfo->pDepthStencilState, pass, subpass); 1796 emit_cb_state(pipeline, pCreateInfo->pColorBlendState, 1797 pCreateInfo->pMultisampleState); 1798 compute_kill_pixel(pipeline, pCreateInfo->pMultisampleState, subpass); 1799 1800 emit_urb_setup(pipeline); 1801 1802 emit_3dstate_clip(pipeline, pCreateInfo->pViewportState, 1803 pCreateInfo->pRasterizationState); 1804 emit_3dstate_streamout(pipeline, pCreateInfo->pRasterizationState); 1805 1806#if 0 1807 /* From gen7_vs_state.c */ 1808 1809 /** 1810 * From Graphics BSpec: 3D-Media-GPGPU Engine > 3D Pipeline Stages > 1811 * Geometry > Geometry Shader > State: 1812 * 1813 * "Note: Because of corruption in IVB:GT2, software needs to flush the 1814 * whole fixed function pipeline when the GS enable changes value in 1815 * the 3DSTATE_GS." 1816 * 1817 * The hardware architects have clarified that in this context "flush the 1818 * whole fixed function pipeline" means to emit a PIPE_CONTROL with the "CS 1819 * Stall" bit set. 1820 */ 1821 if (!device->info.is_haswell && !device->info.is_baytrail) 1822 gen7_emit_vs_workaround_flush(brw); 1823#endif 1824 1825 emit_3dstate_vs(pipeline); 1826 emit_3dstate_hs_te_ds(pipeline, pCreateInfo->pTessellationState); 1827 emit_3dstate_gs(pipeline); 1828 emit_3dstate_sbe(pipeline); 1829 emit_3dstate_wm(pipeline, subpass, pCreateInfo->pColorBlendState, 1830 pCreateInfo->pMultisampleState); 1831 emit_3dstate_ps(pipeline, pCreateInfo->pColorBlendState, 1832 pCreateInfo->pMultisampleState); 1833#if GEN_GEN >= 8 1834 emit_3dstate_ps_extra(pipeline, subpass, pCreateInfo->pColorBlendState); 1835 emit_3dstate_vf_topology(pipeline); 1836#endif 1837 emit_3dstate_vf_statistics(pipeline); 1838 1839 *pPipeline = anv_pipeline_to_handle(pipeline); 1840 1841 return pipeline->batch.status; 1842} 1843 1844static VkResult 1845compute_pipeline_create( 1846 VkDevice _device, 1847 struct anv_pipeline_cache * cache, 1848 const VkComputePipelineCreateInfo* pCreateInfo, 1849 const VkAllocationCallbacks* pAllocator, 1850 VkPipeline* pPipeline) 1851{ 1852 ANV_FROM_HANDLE(anv_device, device, _device); 1853 const struct anv_physical_device *physical_device = 1854 &device->instance->physicalDevice; 1855 const struct gen_device_info *devinfo = &physical_device->info; 1856 struct anv_pipeline *pipeline; 1857 VkResult result; 1858 1859 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO); 1860 1861 /* Use the default pipeline cache if none is specified */ 1862 if (cache == NULL && device->instance->pipeline_cache_enabled) 1863 cache = &device->default_pipeline_cache; 1864 1865 pipeline = vk_alloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8, 1866 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); 1867 if (pipeline == NULL) 1868 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY); 1869 1870 pipeline->device = device; 1871 1872 pipeline->blend_state.map = NULL; 1873 1874 result = anv_reloc_list_init(&pipeline->batch_relocs, 1875 pAllocator ? pAllocator : &device->alloc); 1876 if (result != VK_SUCCESS) { 1877 vk_free2(&device->alloc, pAllocator, pipeline); 1878 return result; 1879 } 1880 pipeline->batch.next = pipeline->batch.start = pipeline->batch_data; 1881 pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data); 1882 pipeline->batch.relocs = &pipeline->batch_relocs; 1883 pipeline->batch.status = VK_SUCCESS; 1884 1885 /* When we free the pipeline, we detect stages based on the NULL status 1886 * of various prog_data pointers. Make them NULL by default. 1887 */ 1888 memset(pipeline->shaders, 0, sizeof(pipeline->shaders)); 1889 1890 pipeline->needs_data_cache = false; 1891 1892 assert(pCreateInfo->stage.stage == VK_SHADER_STAGE_COMPUTE_BIT); 1893 pipeline->active_stages |= VK_SHADER_STAGE_COMPUTE_BIT; 1894 ANV_FROM_HANDLE(anv_shader_module, module, pCreateInfo->stage.module); 1895 result = anv_pipeline_compile_cs(pipeline, cache, pCreateInfo, module, 1896 pCreateInfo->stage.pName, 1897 pCreateInfo->stage.pSpecializationInfo); 1898 if (result != VK_SUCCESS) { 1899 vk_free2(&device->alloc, pAllocator, pipeline); 1900 return result; 1901 } 1902 1903 const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline); 1904 1905 anv_pipeline_setup_l3_config(pipeline, cs_prog_data->base.total_shared > 0); 1906 1907 uint32_t group_size = cs_prog_data->local_size[0] * 1908 cs_prog_data->local_size[1] * cs_prog_data->local_size[2]; 1909 uint32_t remainder = group_size & (cs_prog_data->simd_size - 1); 1910 1911 if (remainder > 0) 1912 pipeline->cs_right_mask = ~0u >> (32 - remainder); 1913 else 1914 pipeline->cs_right_mask = ~0u >> (32 - cs_prog_data->simd_size); 1915 1916 const uint32_t vfe_curbe_allocation = 1917 ALIGN(cs_prog_data->push.per_thread.regs * cs_prog_data->threads + 1918 cs_prog_data->push.cross_thread.regs, 2); 1919 1920 const uint32_t subslices = MAX2(physical_device->subslice_total, 1); 1921 1922 const struct anv_shader_bin *cs_bin = 1923 pipeline->shaders[MESA_SHADER_COMPUTE]; 1924 1925 anv_batch_emit(&pipeline->batch, GENX(MEDIA_VFE_STATE), vfe) { 1926#if GEN_GEN > 7 1927 vfe.StackSize = 0; 1928#else 1929 vfe.GPGPUMode = true; 1930#endif 1931 vfe.MaximumNumberofThreads = 1932 devinfo->max_cs_threads * subslices - 1; 1933 vfe.NumberofURBEntries = GEN_GEN <= 7 ? 0 : 2; 1934#if GEN_GEN < 11 1935 vfe.ResetGatewayTimer = true; 1936#endif 1937#if GEN_GEN <= 8 1938 vfe.BypassGatewayControl = true; 1939#endif 1940 vfe.URBEntryAllocationSize = GEN_GEN <= 7 ? 0 : 2; 1941 vfe.CURBEAllocationSize = vfe_curbe_allocation; 1942 1943 vfe.PerThreadScratchSpace = get_scratch_space(cs_bin); 1944 vfe.ScratchSpaceBasePointer = 1945 get_scratch_address(pipeline, MESA_SHADER_COMPUTE, cs_bin); 1946 } 1947 1948 struct GENX(INTERFACE_DESCRIPTOR_DATA) desc = { 1949 .KernelStartPointer = cs_bin->kernel.offset, 1950 1951 .SamplerCount = get_sampler_count(cs_bin), 1952 /* Gen 11 workarounds table #2056 WABTPPrefetchDisable */ 1953 .BindingTableEntryCount = GEN_GEN == 11 ? 0 : get_binding_table_entry_count(cs_bin), 1954 .BarrierEnable = cs_prog_data->uses_barrier, 1955 .SharedLocalMemorySize = 1956 encode_slm_size(GEN_GEN, cs_prog_data->base.total_shared), 1957 1958#if !GEN_IS_HASWELL 1959 .ConstantURBEntryReadOffset = 0, 1960#endif 1961 .ConstantURBEntryReadLength = cs_prog_data->push.per_thread.regs, 1962#if GEN_GEN >= 8 || GEN_IS_HASWELL 1963 .CrossThreadConstantDataReadLength = 1964 cs_prog_data->push.cross_thread.regs, 1965#endif 1966 1967 .NumberofThreadsinGPGPUThreadGroup = cs_prog_data->threads, 1968 }; 1969 GENX(INTERFACE_DESCRIPTOR_DATA_pack)(NULL, 1970 pipeline->interface_descriptor_data, 1971 &desc); 1972 1973 *pPipeline = anv_pipeline_to_handle(pipeline); 1974 1975 return pipeline->batch.status; 1976} 1977 1978VkResult genX(CreateGraphicsPipelines)( 1979 VkDevice _device, 1980 VkPipelineCache pipelineCache, 1981 uint32_t count, 1982 const VkGraphicsPipelineCreateInfo* pCreateInfos, 1983 const VkAllocationCallbacks* pAllocator, 1984 VkPipeline* pPipelines) 1985{ 1986 ANV_FROM_HANDLE(anv_pipeline_cache, pipeline_cache, pipelineCache); 1987 1988 VkResult result = VK_SUCCESS; 1989 1990 unsigned i; 1991 for (i = 0; i < count; i++) { 1992 result = genX(graphics_pipeline_create)(_device, 1993 pipeline_cache, 1994 &pCreateInfos[i], 1995 pAllocator, &pPipelines[i]); 1996 1997 /* Bail out on the first error as it is not obvious what error should be 1998 * report upon 2 different failures. */ 1999 if (result != VK_SUCCESS) 2000 break; 2001 } 2002 2003 for (; i < count; i++) 2004 pPipelines[i] = VK_NULL_HANDLE; 2005 2006 return result; 2007} 2008 2009VkResult genX(CreateComputePipelines)( 2010 VkDevice _device, 2011 VkPipelineCache pipelineCache, 2012 uint32_t count, 2013 const VkComputePipelineCreateInfo* pCreateInfos, 2014 const VkAllocationCallbacks* pAllocator, 2015 VkPipeline* pPipelines) 2016{ 2017 ANV_FROM_HANDLE(anv_pipeline_cache, pipeline_cache, pipelineCache); 2018 2019 VkResult result = VK_SUCCESS; 2020 2021 unsigned i; 2022 for (i = 0; i < count; i++) { 2023 result = compute_pipeline_create(_device, pipeline_cache, 2024 &pCreateInfos[i], 2025 pAllocator, &pPipelines[i]); 2026 2027 /* Bail out on the first error as it is not obvious what error should be 2028 * report upon 2 different failures. */ 2029 if (result != VK_SUCCESS) 2030 break; 2031 } 2032 2033 for (; i < count; i++) 2034 pPipelines[i] = VK_NULL_HANDLE; 2035 2036 return result; 2037} 2038