anv_pipeline.c revision 9f464c52
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "util/mesa-sha1.h"
#include "util/os_time.h"
#include "common/gen_l3_config.h"
#include "anv_private.h"
#include "compiler/brw_nir.h"
#include "anv_nir.h"
#include "nir/nir_xfb_info.h"
#include "spirv/nir_spirv.h"
#include "vk_util.h"

/* Needed for SWIZZLE macros */
#include "program/prog_instruction.h"

// Shader functions

VkResult anv_CreateShaderModule(
    VkDevice                                    _device,
    const VkShaderModuleCreateInfo*             pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkShaderModule*                             pShaderModule)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_shader_module *module;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
   assert(pCreateInfo->flags == 0);

   module = vk_alloc2(&device->alloc, pAllocator,
                       sizeof(*module) + pCreateInfo->codeSize, 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (module == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   module->size = pCreateInfo->codeSize;
   memcpy(module->data, pCreateInfo->pCode, module->size);

   _mesa_sha1_compute(module->data, module->size, module->sha1);

   *pShaderModule = anv_shader_module_to_handle(module);

   return VK_SUCCESS;
}

void anv_DestroyShaderModule(
    VkDevice                                    _device,
    VkShaderModule                              _module,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_shader_module, module, _module);

   if (!module)
      return;

   vk_free2(&device->alloc, pAllocator, module);
}

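/* First word of every SPIR-V binary ("Magic Number" in the SPIR-V spec);
 * used below to sanity-check incoming shader modules.
 */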
#define SPIR_V_MAGIC_NUMBER 0x07230203

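/* Maps a shader stage to the INTEL_DEBUG flag that turns on shader dumps
 * for that stage (e.g. INTEL_DEBUG=vs,fs).
 */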
static const uint64_t stage_to_debug[] = {
   [MESA_SHADER_VERTEX] = DEBUG_VS,
   [MESA_SHADER_TESS_CTRL] = DEBUG_TCS,
   [MESA_SHADER_TESS_EVAL] = DEBUG_TES,
   [MESA_SHADER_GEOMETRY] = DEBUG_GS,
   [MESA_SHADER_FRAGMENT] = DEBUG_WM,
   [MESA_SHADER_COMPUTE] = DEBUG_CS,
};

/* Eventually, this will become part of anv_CreateShader.  Unfortunately,
 * we can't do that yet because we don't have the ability to copy nir.
 */
static nir_shader *
anv_shader_compile_to_nir(struct anv_device *device,
                          void *mem_ctx,
                          const struct anv_shader_module *module,
                          const char *entrypoint_name,
                          gl_shader_stage stage,
                          const VkSpecializationInfo *spec_info)
{
   const struct anv_physical_device *pdevice =
      &device->instance->physicalDevice;
   const struct brw_compiler *compiler = pdevice->compiler;
   const nir_shader_compiler_options *nir_options =
      compiler->glsl_compiler_options[stage].NirOptions;

   uint32_t *spirv = (uint32_t *) module->data;
   assert(spirv[0] == SPIR_V_MAGIC_NUMBER);
   assert(module->size % 4 == 0);

   uint32_t num_spec_entries = 0;
   struct nir_spirv_specialization *spec_entries = NULL;
   if (spec_info && spec_info->mapEntryCount > 0) {
      num_spec_entries = spec_info->mapEntryCount;
      spec_entries = malloc(num_spec_entries * sizeof(*spec_entries));
      for (uint32_t i = 0; i < num_spec_entries; i++) {
         VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
         const void *data = spec_info->pData + entry.offset;
         assert(data + entry.size <= spec_info->pData + spec_info->dataSize);

         spec_entries[i].id = spec_info->pMapEntries[i].constantID;
         /* The per-entry size, not the total dataSize, determines whether
          * this is a 64-bit constant.
          */
         if (entry.size == 8)
            spec_entries[i].data64 = *(const uint64_t *)data;
         else
            spec_entries[i].data32 = *(const uint32_t *)data;
      }
   }
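   /* For example, two 4-byte map entries at offsets 0 and 4 into an 8-byte
    * pData blob are each read as separate 32-bit constants (a hypothetical
    * layout, for illustration).
    */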

   nir_address_format ssbo_addr_format =
      anv_nir_ssbo_addr_format(pdevice, device->robust_buffer_access);
   struct spirv_to_nir_options spirv_options = {
      .lower_workgroup_access_to_offsets = true,
      .caps = {
         .derivative_group = true,
         .descriptor_array_dynamic_indexing = true,
         .descriptor_array_non_uniform_indexing = true,
         .descriptor_indexing = true,
         .device_group = true,
         .draw_parameters = true,
         .float16 = pdevice->info.gen >= 8,
         .float64 = pdevice->info.gen >= 8,
         .geometry_streams = true,
         .image_write_without_format = true,
         .int8 = pdevice->info.gen >= 8,
         .int16 = pdevice->info.gen >= 8,
         .int64 = pdevice->info.gen >= 8,
         .int64_atomics = pdevice->info.gen >= 9 && pdevice->use_softpin,
         .min_lod = true,
         .multiview = true,
         .physical_storage_buffer_address = pdevice->has_a64_buffer_access,
         .post_depth_coverage = pdevice->info.gen >= 9,
         .runtime_descriptor_array = true,
         .shader_viewport_index_layer = true,
         .stencil_export = pdevice->info.gen >= 9,
         .storage_8bit = pdevice->info.gen >= 8,
         .storage_16bit = pdevice->info.gen >= 8,
         .subgroup_arithmetic = true,
         .subgroup_basic = true,
         .subgroup_ballot = true,
         .subgroup_quad = true,
         .subgroup_shuffle = true,
         .subgroup_vote = true,
         .tessellation = true,
         .transform_feedback = pdevice->info.gen >= 8,
         .variable_pointers = true,
      },
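      /* UBO pointers are a uvec2 of (block index, byte offset); they get
       * lowered with nir_address_format_32bit_index_offset later, in
       * anv_pipeline_lower_nir().
       */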
      .ubo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT, 2),
      .ssbo_ptr_type = nir_address_format_to_glsl_type(ssbo_addr_format),
      .phys_ssbo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT64, 1),
      .push_const_ptr_type = glsl_uint_type(),
      .shared_ptr_type = glsl_uint_type(),
   };

   nir_function *entry_point =
      spirv_to_nir(spirv, module->size / 4,
                   spec_entries, num_spec_entries,
                   stage, entrypoint_name, &spirv_options, nir_options);
   nir_shader *nir = entry_point->shader;
   assert(nir->info.stage == stage);
   nir_validate_shader(nir, "after spirv_to_nir");
   ralloc_steal(mem_ctx, nir);

   free(spec_entries);

   if (unlikely(INTEL_DEBUG & stage_to_debug[stage])) {
      fprintf(stderr, "NIR (from SPIR-V) for %s shader:\n",
              gl_shader_stage_name(stage));
      nir_print_shader(nir, stderr);
   }

   /* We have to lower away local constant initializers right before we
    * inline functions.  That way they get properly initialized at the top
    * of the function and not at the top of its caller.
    */
   NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_function_temp);
   NIR_PASS_V(nir, nir_lower_returns);
   NIR_PASS_V(nir, nir_inline_functions);
   NIR_PASS_V(nir, nir_opt_deref);

   /* Pick off the single entrypoint that we want */
   foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
      if (func != entry_point)
         exec_node_remove(&func->node);
   }
   assert(exec_list_length(&nir->functions) == 1);

   /* Now that we've deleted all but the main function, we can go ahead and
    * lower the rest of the constant initializers.  We do this here so that
    * nir_remove_dead_variables and split_per_member_structs below see the
    * corresponding stores.
    */
   NIR_PASS_V(nir, nir_lower_constant_initializers, ~0);

   /* Split member structs.  We do this before lower_io_to_temporaries so that
    * it doesn't lower system values to temporaries by accident.
    */
   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_split_per_member_structs);

   NIR_PASS_V(nir, nir_remove_dead_variables,
              nir_var_shader_in | nir_var_shader_out | nir_var_system_value);

   NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_global,
              nir_address_format_64bit_global);

   NIR_PASS_V(nir, nir_propagate_invariant);
   NIR_PASS_V(nir, nir_lower_io_to_temporaries,
              entry_point->impl, true, false);

   NIR_PASS_V(nir, nir_lower_frexp);

   /* Vulkan uses the separate-shader linking model */
   nir->info.separate_shader = true;

   nir = brw_preprocess_nir(compiler, nir, NULL);

   return nir;
}

void anv_DestroyPipeline(
    VkDevice                                    _device,
    VkPipeline                                  _pipeline,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);

   if (!pipeline)
      return;

   anv_reloc_list_finish(&pipeline->batch_relocs,
                         pAllocator ? pAllocator : &device->alloc);
   if (pipeline->blend_state.map)
      anv_state_pool_free(&device->dynamic_state_pool, pipeline->blend_state);

   for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
      if (pipeline->shaders[s])
         anv_shader_bin_unref(device, pipeline->shaders[s]);
   }

   vk_free2(&device->alloc, pAllocator, pipeline);
}

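/* Vulkan primitive topology to gen 3DPRIMITIVE topology mapping.  Note that
 * VK_PRIMITIVE_TOPOLOGY_PATCH_LIST has no entry here; the patch topology is
 * derived from the tessellation state's patchControlPoints instead.
 */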
static const uint32_t vk_to_gen_primitive_type[] = {
   [VK_PRIMITIVE_TOPOLOGY_POINT_LIST]                    = _3DPRIM_POINTLIST,
   [VK_PRIMITIVE_TOPOLOGY_LINE_LIST]                     = _3DPRIM_LINELIST,
   [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP]                    = _3DPRIM_LINESTRIP,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST]                 = _3DPRIM_TRILIST,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP]                = _3DPRIM_TRISTRIP,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN]                  = _3DPRIM_TRIFAN,
   [VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY]      = _3DPRIM_LINELIST_ADJ,
   [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY]     = _3DPRIM_LINESTRIP_ADJ,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY]  = _3DPRIM_TRILIST_ADJ,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
};

static void
populate_sampler_prog_key(const struct gen_device_info *devinfo,
                          struct brw_sampler_prog_key_data *key)
{
   /* Almost all multisampled textures are compressed.  The only time when we
    * don't compress a multisampled texture is for 16x MSAA with a surface
    * width greater than 8k which is a bit of an edge case.  Since the sampler
    * just ignores the MCS parameter to ld2ms when MCS is disabled, it's safe
    * to tell the compiler to always assume compression.
    */
   key->compressed_multisample_layout_mask = ~0;

   /* SkyLake added support for 16x MSAA.  With this came a new message for
    * reading from a 16x MSAA surface with compression.  The new message was
    * needed because now the MCS data is 64 bits instead of 32 or lower as is
    * the case for 8x, 4x, and 2x.  The key->msaa_16 bit-field controls which
    * message we use.  Fortunately, the 16x message works for 8x, 4x, and 2x
    * so we can just use it unconditionally.  This may not be quite as
    * efficient but it saves us from recompiling.
    */
   if (devinfo->gen >= 9)
      key->msaa_16 = ~0;

   /* XXX: Handle texture swizzle on HSW- */
   for (int i = 0; i < MAX_SAMPLERS; i++) {
      /* Assume color sampler, no swizzling. (Works for BDW+) */
      key->swizzles[i] = SWIZZLE_XYZW;
   }
}

static void
populate_vs_prog_key(const struct gen_device_info *devinfo,
                     struct brw_vs_prog_key *key)
{
   memset(key, 0, sizeof(*key));

   populate_sampler_prog_key(devinfo, &key->tex);

   /* XXX: Handle vertex input work-arounds */

   /* XXX: Handle sampler_prog_key */
}

static void
populate_tcs_prog_key(const struct gen_device_info *devinfo,
                      unsigned input_vertices,
                      struct brw_tcs_prog_key *key)
{
   memset(key, 0, sizeof(*key));

   populate_sampler_prog_key(devinfo, &key->tex);

   key->input_vertices = input_vertices;
}

static void
populate_tes_prog_key(const struct gen_device_info *devinfo,
                      struct brw_tes_prog_key *key)
{
   memset(key, 0, sizeof(*key));

   populate_sampler_prog_key(devinfo, &key->tex);
}

static void
populate_gs_prog_key(const struct gen_device_info *devinfo,
                     struct brw_gs_prog_key *key)
{
   memset(key, 0, sizeof(*key));

   populate_sampler_prog_key(devinfo, &key->tex);
}

static void
populate_wm_prog_key(const struct gen_device_info *devinfo,
                     const struct anv_subpass *subpass,
                     const VkPipelineMultisampleStateCreateInfo *ms_info,
                     struct brw_wm_prog_key *key)
{
   memset(key, 0, sizeof(*key));

   populate_sampler_prog_key(devinfo, &key->tex);

   /* We set this to 0 here and set to the actual value before we call
    * brw_compile_fs.
    */
   key->input_slots_valid = 0;

   /* Vulkan doesn't specify a default */
   key->high_quality_derivatives = false;

   /* XXX Vulkan doesn't appear to specify */
   key->clamp_fragment_color = false;

   assert(subpass->color_count <= MAX_RTS);
   for (uint32_t i = 0; i < subpass->color_count; i++) {
      if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED)
         key->color_outputs_valid |= (1 << i);
   }

   key->nr_color_regions = util_bitcount(key->color_outputs_valid);

   /* Hardware disables alpha-to-coverage whenever the shader writes
    * SampleMask.  To reduce possible shader recompilations we would need to
    * know at this point whether a SampleMask output variable exists, so we
    * could decide whether to emit the workaround code for that case.
    */
   key->alpha_to_coverage = ms_info && ms_info->alphaToCoverageEnable;

   /* Vulkan doesn't support fixed-function alpha test */
   key->alpha_test_replicate_alpha = false;

   if (ms_info) {
      /* We should probably pull this out of the shader, but it's fairly
       * harmless to compute it and then let dead-code take care of it.
       */
      if (ms_info->rasterizationSamples > 1) {
         key->persample_interp = ms_info->sampleShadingEnable &&
            (ms_info->minSampleShading * ms_info->rasterizationSamples) > 1;
         key->multisample_fbo = true;
      }

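      /* With per-sample shading, gl_FragCoord.xy is evaluated at each
       * sample position rather than at the pixel center.
       */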
      key->frag_coord_adds_sample_pos = key->persample_interp;
   }
}

static void
populate_cs_prog_key(const struct gen_device_info *devinfo,
                     struct brw_cs_prog_key *key)
{
   memset(key, 0, sizeof(*key));

   populate_sampler_prog_key(devinfo, &key->tex);
}

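/* Transient per-stage compilation state; instances of this live on the
 * stack only for the duration of a single pipeline-create call.
 */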
struct anv_pipeline_stage {
   gl_shader_stage stage;

   const struct anv_shader_module *module;
   const char *entrypoint;
   const VkSpecializationInfo *spec_info;

   unsigned char shader_sha1[20];

   union brw_any_prog_key key;

   struct {
      gl_shader_stage stage;
      unsigned char sha1[20];
   } cache_key;

   nir_shader *nir;

   struct anv_pipeline_binding surface_to_descriptor[256];
   struct anv_pipeline_binding sampler_to_descriptor[256];
   struct anv_pipeline_bind_map bind_map;

   union brw_any_prog_data prog_data;

   VkPipelineCreationFeedbackEXT feedback;
};

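/* The NIR-level shader hash: everything that affects the SPIR-V to NIR
 * translation (module contents, entrypoint, stage, and raw specialization
 * data) goes in.
 */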
static void
anv_pipeline_hash_shader(const struct anv_shader_module *module,
                         const char *entrypoint,
                         gl_shader_stage stage,
                         const VkSpecializationInfo *spec_info,
                         unsigned char *sha1_out)
{
   struct mesa_sha1 ctx;
   _mesa_sha1_init(&ctx);

   _mesa_sha1_update(&ctx, module->sha1, sizeof(module->sha1));
   _mesa_sha1_update(&ctx, entrypoint, strlen(entrypoint));
   _mesa_sha1_update(&ctx, &stage, sizeof(stage));
   if (spec_info) {
      _mesa_sha1_update(&ctx, spec_info->pMapEntries,
                        spec_info->mapEntryCount *
                        sizeof(*spec_info->pMapEntries));
      _mesa_sha1_update(&ctx, spec_info->pData,
                        spec_info->dataSize);
   }

   _mesa_sha1_final(&ctx, sha1_out);
}

static void
anv_pipeline_hash_graphics(struct anv_pipeline *pipeline,
                           struct anv_pipeline_layout *layout,
                           struct anv_pipeline_stage *stages,
                           unsigned char *sha1_out)
{
   struct mesa_sha1 ctx;
   _mesa_sha1_init(&ctx);

   _mesa_sha1_update(&ctx, &pipeline->subpass->view_mask,
                     sizeof(pipeline->subpass->view_mask));

   if (layout)
      _mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));

   const bool rba = pipeline->device->robust_buffer_access;
   _mesa_sha1_update(&ctx, &rba, sizeof(rba));

   for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
      if (stages[s].entrypoint) {
         _mesa_sha1_update(&ctx, stages[s].shader_sha1,
                           sizeof(stages[s].shader_sha1));
         _mesa_sha1_update(&ctx, &stages[s].key, brw_prog_key_size(s));
      }
   }

   _mesa_sha1_final(&ctx, sha1_out);
}

static void
anv_pipeline_hash_compute(struct anv_pipeline *pipeline,
                          struct anv_pipeline_layout *layout,
                          struct anv_pipeline_stage *stage,
                          unsigned char *sha1_out)
{
   struct mesa_sha1 ctx;
   _mesa_sha1_init(&ctx);

   if (layout)
      _mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));

   const bool rba = pipeline->device->robust_buffer_access;
   _mesa_sha1_update(&ctx, &rba, sizeof(rba));

   _mesa_sha1_update(&ctx, stage->shader_sha1,
                     sizeof(stage->shader_sha1));
   _mesa_sha1_update(&ctx, &stage->key.cs, sizeof(stage->key.cs));

   _mesa_sha1_final(&ctx, sha1_out);
}

static nir_shader *
anv_pipeline_stage_get_nir(struct anv_pipeline *pipeline,
                           struct anv_pipeline_cache *cache,
                           void *mem_ctx,
                           struct anv_pipeline_stage *stage)
{
   const struct brw_compiler *compiler =
      pipeline->device->instance->physicalDevice.compiler;
   const nir_shader_compiler_options *nir_options =
      compiler->glsl_compiler_options[stage->stage].NirOptions;
   nir_shader *nir;

   nir = anv_device_search_for_nir(pipeline->device, cache,
                                   nir_options,
                                   stage->shader_sha1,
                                   mem_ctx);
   if (nir) {
      assert(nir->info.stage == stage->stage);
      return nir;
   }

   nir = anv_shader_compile_to_nir(pipeline->device,
                                   mem_ctx,
                                   stage->module,
                                   stage->entrypoint,
                                   stage->stage,
                                   stage->spec_info);
   if (nir) {
      anv_device_upload_nir(pipeline->device, cache, nir, stage->shader_sha1);
      return nir;
   }

   return NULL;
}

static void
anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
                       void *mem_ctx,
                       struct anv_pipeline_stage *stage,
                       struct anv_pipeline_layout *layout)
{
   const struct anv_physical_device *pdevice =
      &pipeline->device->instance->physicalDevice;
   const struct brw_compiler *compiler = pdevice->compiler;

   struct brw_stage_prog_data *prog_data = &stage->prog_data.base;
   nir_shader *nir = stage->nir;

   if (nir->info.stage == MESA_SHADER_FRAGMENT) {
      NIR_PASS_V(nir, nir_lower_wpos_center, pipeline->sample_shading_enable);
      NIR_PASS_V(nir, anv_nir_lower_input_attachments);
   }

   NIR_PASS_V(nir, anv_nir_lower_ycbcr_textures, layout);

   NIR_PASS_V(nir, anv_nir_lower_push_constants);

   if (nir->info.stage != MESA_SHADER_COMPUTE)
      NIR_PASS_V(nir, anv_nir_lower_multiview, pipeline->subpass->view_mask);

   if (nir->info.stage == MESA_SHADER_COMPUTE)
      prog_data->total_shared = nir->num_shared;

   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));

   if (nir->num_uniforms > 0) {
      assert(prog_data->nr_params == 0);

      /* If the shader uses any push constants at all, we'll just give
       * them the maximum possible number
       */
      assert(nir->num_uniforms <= MAX_PUSH_CONSTANTS_SIZE);
      nir->num_uniforms = MAX_PUSH_CONSTANTS_SIZE;
      prog_data->nr_params += MAX_PUSH_CONSTANTS_SIZE / sizeof(float);
      prog_data->param = ralloc_array(mem_ctx, uint32_t, prog_data->nr_params);

      /* We now set the param values to be offsets into an
       * anv_push_constants structure.  Since the compiler doesn't
       * actually dereference any of the gl_constant_value pointers in the
       * params array, it doesn't really matter what we put here.
       */
      struct anv_push_constants *null_data = NULL;
      /* Fill out the push constants section of the param array */
      for (unsigned i = 0; i < MAX_PUSH_CONSTANTS_SIZE / sizeof(float); i++) {
         prog_data->param[i] = ANV_PARAM_PUSH(
            (uintptr_t)&null_data->client_data[i * sizeof(float)]);
      }
   }

   if (nir->info.num_ssbos > 0 || nir->info.num_images > 0)
      pipeline->needs_data_cache = true;

   NIR_PASS_V(nir, brw_nir_lower_image_load_store, compiler->devinfo);

   /* Apply the actual pipeline layout to UBOs, SSBOs, and textures */
   if (layout) {
      anv_nir_apply_pipeline_layout(pdevice,
                                    pipeline->device->robust_buffer_access,
                                    layout, nir, prog_data,
                                    &stage->bind_map);

      NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_ubo,
                 nir_address_format_32bit_index_offset);
      NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_ssbo,
                 anv_nir_ssbo_addr_format(pdevice,
                    pipeline->device->robust_buffer_access));

      NIR_PASS_V(nir, nir_opt_constant_folding);

      /* We don't support non-uniform UBOs and non-uniform SSBO access is
       * handled naturally by falling back to A64 messages.
       */
      NIR_PASS_V(nir, nir_lower_non_uniform_access,
                 nir_lower_non_uniform_texture_access |
                 nir_lower_non_uniform_image_access);
   }

   if (nir->info.stage != MESA_SHADER_COMPUTE)
      brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);

   assert(nir->num_uniforms == prog_data->nr_params * 4);

   stage->nir = nir;
}

static void
anv_pipeline_link_vs(const struct brw_compiler *compiler,
                     struct anv_pipeline_stage *vs_stage,
                     struct anv_pipeline_stage *next_stage)
{
   if (next_stage)
      brw_nir_link_shaders(compiler, &vs_stage->nir, &next_stage->nir);
}

static const unsigned *
anv_pipeline_compile_vs(const struct brw_compiler *compiler,
                        void *mem_ctx,
                        struct anv_device *device,
                        struct anv_pipeline_stage *vs_stage)
{
   brw_compute_vue_map(compiler->devinfo,
                       &vs_stage->prog_data.vs.base.vue_map,
                       vs_stage->nir->info.outputs_written,
                       vs_stage->nir->info.separate_shader);

   return brw_compile_vs(compiler, device, mem_ctx, &vs_stage->key.vs,
                         &vs_stage->prog_data.vs, vs_stage->nir, -1, NULL);
}

static void
merge_tess_info(struct shader_info *tes_info,
                const struct shader_info *tcs_info)
{
   /* The Vulkan 1.0.38 spec, section 21.1 Tessellator says:
    *
    *    "PointMode. Controls generation of points rather than triangles
    *     or lines. This functionality defaults to disabled, and is
    *     enabled if either shader stage includes the execution mode."
    *
    * and about Triangles, Quads, IsoLines, VertexOrderCw, VertexOrderCcw,
    * PointMode, SpacingEqual, SpacingFractionalEven, SpacingFractionalOdd,
    * and OutputVertices, it says:
    *
    *    "One mode must be set in at least one of the tessellation
    *     shader stages."
    *
    * So, the fields can be set in either the TCS or TES, but they must
    * agree if set in both.  Our backend looks at TES, so bitwise-or in
    * the values from the TCS.
    */
   assert(tcs_info->tess.tcs_vertices_out == 0 ||
          tes_info->tess.tcs_vertices_out == 0 ||
          tcs_info->tess.tcs_vertices_out == tes_info->tess.tcs_vertices_out);
   tes_info->tess.tcs_vertices_out |= tcs_info->tess.tcs_vertices_out;

   assert(tcs_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
          tes_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
          tcs_info->tess.spacing == tes_info->tess.spacing);
   tes_info->tess.spacing |= tcs_info->tess.spacing;

   assert(tcs_info->tess.primitive_mode == 0 ||
          tes_info->tess.primitive_mode == 0 ||
          tcs_info->tess.primitive_mode == tes_info->tess.primitive_mode);
   tes_info->tess.primitive_mode |= tcs_info->tess.primitive_mode;
   tes_info->tess.ccw |= tcs_info->tess.ccw;
   tes_info->tess.point_mode |= tcs_info->tess.point_mode;
}

static void
anv_pipeline_link_tcs(const struct brw_compiler *compiler,
                      struct anv_pipeline_stage *tcs_stage,
                      struct anv_pipeline_stage *tes_stage)
{
   assert(tes_stage && tes_stage->stage == MESA_SHADER_TESS_EVAL);

   brw_nir_link_shaders(compiler, &tcs_stage->nir, &tes_stage->nir);

   nir_lower_patch_vertices(tes_stage->nir,
                            tcs_stage->nir->info.tess.tcs_vertices_out,
                            NULL);

   /* Copy TCS info into the TES info */
   merge_tess_info(&tes_stage->nir->info, &tcs_stage->nir->info);

   /* Whacking the key after cache lookup is a bit sketchy, but all of
    * this comes from the SPIR-V, which is part of the hash used for the
    * pipeline cache.  So it should be safe.
    */
   tcs_stage->key.tcs.tes_primitive_mode =
      tes_stage->nir->info.tess.primitive_mode;
   tcs_stage->key.tcs.quads_workaround =
      compiler->devinfo->gen < 9 &&
      tes_stage->nir->info.tess.primitive_mode == 7 /* GL_QUADS */ &&
      tes_stage->nir->info.tess.spacing == TESS_SPACING_EQUAL;
}

static const unsigned *
anv_pipeline_compile_tcs(const struct brw_compiler *compiler,
                         void *mem_ctx,
                         struct anv_device *device,
                         struct anv_pipeline_stage *tcs_stage,
                         struct anv_pipeline_stage *prev_stage)
{
   tcs_stage->key.tcs.outputs_written =
      tcs_stage->nir->info.outputs_written;
   tcs_stage->key.tcs.patch_outputs_written =
      tcs_stage->nir->info.patch_outputs_written;

   return brw_compile_tcs(compiler, device, mem_ctx, &tcs_stage->key.tcs,
                          &tcs_stage->prog_data.tcs, tcs_stage->nir,
                          -1, NULL);
}

static void
anv_pipeline_link_tes(const struct brw_compiler *compiler,
                      struct anv_pipeline_stage *tes_stage,
                      struct anv_pipeline_stage *next_stage)
{
   if (next_stage)
      brw_nir_link_shaders(compiler, &tes_stage->nir, &next_stage->nir);
}

static const unsigned *
anv_pipeline_compile_tes(const struct brw_compiler *compiler,
                         void *mem_ctx,
                         struct anv_device *device,
                         struct anv_pipeline_stage *tes_stage,
                         struct anv_pipeline_stage *tcs_stage)
{
   tes_stage->key.tes.inputs_read =
      tcs_stage->nir->info.outputs_written;
   tes_stage->key.tes.patch_inputs_read =
      tcs_stage->nir->info.patch_outputs_written;

   return brw_compile_tes(compiler, device, mem_ctx, &tes_stage->key.tes,
                          &tcs_stage->prog_data.tcs.base.vue_map,
                          &tes_stage->prog_data.tes, tes_stage->nir,
                          NULL, -1, NULL);
}

static void
anv_pipeline_link_gs(const struct brw_compiler *compiler,
                     struct anv_pipeline_stage *gs_stage,
                     struct anv_pipeline_stage *next_stage)
{
   if (next_stage)
      brw_nir_link_shaders(compiler, &gs_stage->nir, &next_stage->nir);
}

static const unsigned *
anv_pipeline_compile_gs(const struct brw_compiler *compiler,
                        void *mem_ctx,
                        struct anv_device *device,
                        struct anv_pipeline_stage *gs_stage,
                        struct anv_pipeline_stage *prev_stage)
{
   brw_compute_vue_map(compiler->devinfo,
                       &gs_stage->prog_data.gs.base.vue_map,
                       gs_stage->nir->info.outputs_written,
                       gs_stage->nir->info.separate_shader);

   return brw_compile_gs(compiler, device, mem_ctx, &gs_stage->key.gs,
                         &gs_stage->prog_data.gs, gs_stage->nir,
                         NULL, -1, NULL);
}

static void
anv_pipeline_link_fs(const struct brw_compiler *compiler,
                     struct anv_pipeline_stage *stage)
{
   unsigned num_rts = 0;
   const int max_rt = FRAG_RESULT_DATA7 - FRAG_RESULT_DATA0 + 1;
   struct anv_pipeline_binding rt_bindings[max_rt];
   nir_function_impl *impl = nir_shader_get_entrypoint(stage->nir);
   int rt_to_bindings[max_rt];
   memset(rt_to_bindings, -1, sizeof(rt_to_bindings));
   bool rt_used[max_rt];
   memset(rt_used, 0, sizeof(rt_used));

   /* Flag used render targets */
   nir_foreach_variable_safe(var, &stage->nir->outputs) {
      if (var->data.location < FRAG_RESULT_DATA0)
         continue;

      const unsigned rt = var->data.location - FRAG_RESULT_DATA0;
      /* Out-of-bounds */
      if (rt >= MAX_RTS)
         continue;

      const unsigned array_len =
         glsl_type_is_array(var->type) ? glsl_get_length(var->type) : 1;
      assert(rt + array_len <= max_rt);

      /* Unused */
      if (!(stage->key.wm.color_outputs_valid & BITFIELD_RANGE(rt, array_len))) {
         /* If this is the RT at location 0 and we have alpha to coverage
          * enabled we will have to create a null RT for it, so mark it as
          * used.
          */
         if (rt > 0 || !stage->key.wm.alpha_to_coverage)
            continue;
      }

      for (unsigned i = 0; i < array_len; i++)
         rt_used[rt + i] = true;
   }

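   /* Example: if only RT0 and RT2 are used, the loop below assigns
    * rt_to_bindings[0] = 0 and rt_to_bindings[2] = 1, packing the live
    * outputs into consecutive binding table entries.
    */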
   /* Set new, compacted, location */
   for (unsigned i = 0; i < max_rt; i++) {
      if (!rt_used[i])
         continue;

      rt_to_bindings[i] = num_rts;

      if (stage->key.wm.color_outputs_valid & (1 << i)) {
         rt_bindings[rt_to_bindings[i]] = (struct anv_pipeline_binding) {
            .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
            .binding = 0,
            .index = i,
         };
      } else {
         /* Setup a null render target */
         rt_bindings[rt_to_bindings[i]] = (struct anv_pipeline_binding) {
            .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
            .binding = 0,
            .index = UINT32_MAX,
         };
      }

      num_rts++;
   }

   bool deleted_output = false;
   nir_foreach_variable_safe(var, &stage->nir->outputs) {
      if (var->data.location < FRAG_RESULT_DATA0)
         continue;

      const unsigned rt = var->data.location - FRAG_RESULT_DATA0;

      if (rt >= MAX_RTS || !rt_used[rt]) {
         /* Unused or out-of-bounds; throw it away, unless it is the first
          * RT and we have alpha to coverage enabled.
          */
         deleted_output = true;
         var->data.mode = nir_var_function_temp;
         exec_node_remove(&var->node);
         exec_list_push_tail(&impl->locals, &var->node);
         continue;
      }

      /* Give it the new location */
      assert(rt_to_bindings[rt] != -1);
      var->data.location = rt_to_bindings[rt] + FRAG_RESULT_DATA0;
   }

   if (deleted_output)
      nir_fixup_deref_modes(stage->nir);

   if (num_rts == 0) {
      /* If we have no render targets, we need a null render target */
      rt_bindings[0] = (struct anv_pipeline_binding) {
         .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
         .binding = 0,
         .index = UINT32_MAX,
      };
      num_rts = 1;
   }

   /* Now that we've determined the actual number of render targets, adjust
    * the key accordingly.
    */
   stage->key.wm.nr_color_regions = num_rts;
   stage->key.wm.color_outputs_valid = (1 << num_rts) - 1;

   assert(num_rts <= max_rt);
   assert(stage->bind_map.surface_count == 0);
   typed_memcpy(stage->bind_map.surface_to_descriptor,
                rt_bindings, num_rts);
   stage->bind_map.surface_count += num_rts;
}

static const unsigned *
anv_pipeline_compile_fs(const struct brw_compiler *compiler,
                        void *mem_ctx,
                        struct anv_device *device,
                        struct anv_pipeline_stage *fs_stage,
                        struct anv_pipeline_stage *prev_stage)
{
   /* TODO: we could set this to 0 based on the information in nir_shader, but
    * we need this before we call spirv_to_nir.
    */
   assert(prev_stage);
   fs_stage->key.wm.input_slots_valid =
      prev_stage->prog_data.vue.vue_map.slots_valid;

   const unsigned *code =
      brw_compile_fs(compiler, device, mem_ctx, &fs_stage->key.wm,
                     &fs_stage->prog_data.wm, fs_stage->nir,
                     NULL, -1, -1, -1, true, false, NULL, NULL);

   if (fs_stage->key.wm.nr_color_regions == 0 &&
       !fs_stage->prog_data.wm.has_side_effects &&
       !fs_stage->prog_data.wm.uses_kill &&
       fs_stage->prog_data.wm.computed_depth_mode == BRW_PSCDEPTH_OFF &&
       !fs_stage->prog_data.wm.computed_stencil) {
      /* This fragment shader has no outputs and no side effects.  Go ahead
       * and return the code pointer so we don't accidentally think the
       * compile failed, but zero out prog_data, which sets program_size to
       * zero and disables the stage.
       */
      memset(&fs_stage->prog_data, 0, sizeof(fs_stage->prog_data));
   }

   return code;
}

static VkResult
anv_pipeline_compile_graphics(struct anv_pipeline *pipeline,
                              struct anv_pipeline_cache *cache,
                              const VkGraphicsPipelineCreateInfo *info)
{
   VkPipelineCreationFeedbackEXT pipeline_feedback = {
      .flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT,
   };
   int64_t pipeline_start = os_time_get_nano();

   const struct brw_compiler *compiler =
      pipeline->device->instance->physicalDevice.compiler;
   struct anv_pipeline_stage stages[MESA_SHADER_STAGES] = {};

   pipeline->active_stages = 0;

   VkResult result;
   for (uint32_t i = 0; i < info->stageCount; i++) {
      const VkPipelineShaderStageCreateInfo *sinfo = &info->pStages[i];
      gl_shader_stage stage = vk_to_mesa_shader_stage(sinfo->stage);

      pipeline->active_stages |= sinfo->stage;

      int64_t stage_start = os_time_get_nano();

      stages[stage].stage = stage;
      stages[stage].module = anv_shader_module_from_handle(sinfo->module);
      stages[stage].entrypoint = sinfo->pName;
      stages[stage].spec_info = sinfo->pSpecializationInfo;
      anv_pipeline_hash_shader(stages[stage].module,
                               stages[stage].entrypoint,
                               stage,
                               stages[stage].spec_info,
                               stages[stage].shader_sha1);

      const struct gen_device_info *devinfo = &pipeline->device->info;
      switch (stage) {
      case MESA_SHADER_VERTEX:
         populate_vs_prog_key(devinfo, &stages[stage].key.vs);
         break;
      case MESA_SHADER_TESS_CTRL:
         populate_tcs_prog_key(devinfo,
                               info->pTessellationState->patchControlPoints,
                               &stages[stage].key.tcs);
         break;
      case MESA_SHADER_TESS_EVAL:
         populate_tes_prog_key(devinfo, &stages[stage].key.tes);
         break;
      case MESA_SHADER_GEOMETRY:
         populate_gs_prog_key(devinfo, &stages[stage].key.gs);
         break;
      case MESA_SHADER_FRAGMENT:
         populate_wm_prog_key(devinfo, pipeline->subpass,
                              info->pMultisampleState,
                              &stages[stage].key.wm);
         break;
      default:
         unreachable("Invalid graphics shader stage");
      }

      stages[stage].feedback.duration += os_time_get_nano() - stage_start;
      stages[stage].feedback.flags |= VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT;
   }

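   /* Vulkan requires tessellation control and evaluation shaders to be used
    * together, so make the active-stage mask reflect that.
    */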
   if (pipeline->active_stages & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)
      pipeline->active_stages |= VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;

   assert(pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT);

   ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);

   unsigned char sha1[20];
   anv_pipeline_hash_graphics(pipeline, layout, stages, sha1);

   unsigned found = 0;
   unsigned cache_hits = 0;
   for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
      if (!stages[s].entrypoint)
         continue;

      int64_t stage_start = os_time_get_nano();

      stages[s].cache_key.stage = s;
      memcpy(stages[s].cache_key.sha1, sha1, sizeof(sha1));

      bool cache_hit;
      struct anv_shader_bin *bin =
         anv_device_search_for_kernel(pipeline->device, cache,
                                      &stages[s].cache_key,
                                      sizeof(stages[s].cache_key), &cache_hit);
      if (bin) {
         found++;
         pipeline->shaders[s] = bin;
      }

      if (cache_hit) {
         cache_hits++;
         stages[s].feedback.flags |=
            VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
      }
      stages[s].feedback.duration += os_time_get_nano() - stage_start;
   }

   if (found == __builtin_popcount(pipeline->active_stages)) {
      if (cache_hits == found) {
         pipeline_feedback.flags |=
            VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
      }
      /* We found all our shaders in the cache.  We're done. */
      goto done;
   } else if (found > 0) {
      /* We found some but not all of our shaders.  This shouldn't happen
       * most of the time but it can if we have a partially populated
       * pipeline cache.
       */
      assert(found < __builtin_popcount(pipeline->active_stages));

      vk_debug_report(&pipeline->device->instance->debug_report_callbacks,
                      VK_DEBUG_REPORT_WARNING_BIT_EXT |
                      VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                      VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT,
                      (uint64_t)(uintptr_t)cache,
                      0, 0, "anv",
                      "Found a partial pipeline in the cache.  This is "
                      "most likely caused by an incomplete pipeline cache "
                      "import or export");

      /* We're going to have to recompile anyway, so just throw away our
       * references to the shaders in the cache.  We'll get them out of the
       * cache again as part of the compilation process.
       */
      for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
         stages[s].feedback.flags = 0;
         if (pipeline->shaders[s]) {
            anv_shader_bin_unref(pipeline->device, pipeline->shaders[s]);
            pipeline->shaders[s] = NULL;
         }
      }
   }

   void *pipeline_ctx = ralloc_context(NULL);

   for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
      if (!stages[s].entrypoint)
         continue;

      int64_t stage_start = os_time_get_nano();

      assert(stages[s].stage == s);
      assert(pipeline->shaders[s] == NULL);

      stages[s].bind_map = (struct anv_pipeline_bind_map) {
         .surface_to_descriptor = stages[s].surface_to_descriptor,
         .sampler_to_descriptor = stages[s].sampler_to_descriptor
      };

      stages[s].nir = anv_pipeline_stage_get_nir(pipeline, cache,
                                                 pipeline_ctx,
                                                 &stages[s]);
      if (stages[s].nir == NULL) {
         result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
         goto fail;
      }

      stages[s].feedback.duration += os_time_get_nano() - stage_start;
   }

   /* Walk backwards (fragment to vertex) to link: each stage is linked
    * against its consumer.
    */
   struct anv_pipeline_stage *next_stage = NULL;
   for (int s = MESA_SHADER_STAGES - 1; s >= 0; s--) {
      if (!stages[s].entrypoint)
         continue;

      switch (s) {
      case MESA_SHADER_VERTEX:
         anv_pipeline_link_vs(compiler, &stages[s], next_stage);
         break;
      case MESA_SHADER_TESS_CTRL:
         anv_pipeline_link_tcs(compiler, &stages[s], next_stage);
         break;
      case MESA_SHADER_TESS_EVAL:
         anv_pipeline_link_tes(compiler, &stages[s], next_stage);
         break;
      case MESA_SHADER_GEOMETRY:
         anv_pipeline_link_gs(compiler, &stages[s], next_stage);
         break;
      case MESA_SHADER_FRAGMENT:
         anv_pipeline_link_fs(compiler, &stages[s]);
         break;
      default:
         unreachable("Invalid graphics shader stage");
      }

      next_stage = &stages[s];
   }

   struct anv_pipeline_stage *prev_stage = NULL;
   for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
      if (!stages[s].entrypoint)
         continue;

      int64_t stage_start = os_time_get_nano();

      void *stage_ctx = ralloc_context(NULL);

      nir_xfb_info *xfb_info = NULL;
      if (s == MESA_SHADER_VERTEX ||
          s == MESA_SHADER_TESS_EVAL ||
          s == MESA_SHADER_GEOMETRY)
         xfb_info = nir_gather_xfb_info(stages[s].nir, stage_ctx);

      anv_pipeline_lower_nir(pipeline, stage_ctx, &stages[s], layout);

      const unsigned *code;
      switch (s) {
      case MESA_SHADER_VERTEX:
         code = anv_pipeline_compile_vs(compiler, stage_ctx, pipeline->device,
                                        &stages[s]);
         break;
      case MESA_SHADER_TESS_CTRL:
         code = anv_pipeline_compile_tcs(compiler, stage_ctx, pipeline->device,
                                         &stages[s], prev_stage);
         break;
      case MESA_SHADER_TESS_EVAL:
         code = anv_pipeline_compile_tes(compiler, stage_ctx, pipeline->device,
                                         &stages[s], prev_stage);
         break;
      case MESA_SHADER_GEOMETRY:
         code = anv_pipeline_compile_gs(compiler, stage_ctx, pipeline->device,
                                        &stages[s], prev_stage);
         break;
      case MESA_SHADER_FRAGMENT:
         code = anv_pipeline_compile_fs(compiler, stage_ctx, pipeline->device,
                                        &stages[s], prev_stage);
         break;
      default:
         unreachable("Invalid graphics shader stage");
      }
      if (code == NULL) {
         ralloc_free(stage_ctx);
         result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
         goto fail;
      }

      struct anv_shader_bin *bin =
         anv_device_upload_kernel(pipeline->device, cache,
                                  &stages[s].cache_key,
                                  sizeof(stages[s].cache_key),
                                  code, stages[s].prog_data.base.program_size,
                                  stages[s].nir->constant_data,
                                  stages[s].nir->constant_data_size,
                                  &stages[s].prog_data.base,
                                  brw_prog_data_size(s),
                                  xfb_info, &stages[s].bind_map);
      if (!bin) {
         ralloc_free(stage_ctx);
         result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
         goto fail;
      }

      pipeline->shaders[s] = bin;
      ralloc_free(stage_ctx);

      stages[s].feedback.duration += os_time_get_nano() - stage_start;

      prev_stage = &stages[s];
   }

   ralloc_free(pipeline_ctx);

done:

   if (pipeline->shaders[MESA_SHADER_FRAGMENT] &&
       pipeline->shaders[MESA_SHADER_FRAGMENT]->prog_data->program_size == 0) {
      /* This can happen if we decided to implicitly disable the fragment
       * shader.  See anv_pipeline_compile_fs().
       */
      anv_shader_bin_unref(pipeline->device,
                           pipeline->shaders[MESA_SHADER_FRAGMENT]);
      pipeline->shaders[MESA_SHADER_FRAGMENT] = NULL;
      pipeline->active_stages &= ~VK_SHADER_STAGE_FRAGMENT_BIT;
   }

   pipeline_feedback.duration = os_time_get_nano() - pipeline_start;

   const VkPipelineCreationFeedbackCreateInfoEXT *create_feedback =
      vk_find_struct_const(info->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT);
   if (create_feedback) {
      *create_feedback->pPipelineCreationFeedback = pipeline_feedback;

      assert(info->stageCount == create_feedback->pipelineStageCreationFeedbackCount);
      for (uint32_t i = 0; i < info->stageCount; i++) {
         gl_shader_stage s = vk_to_mesa_shader_stage(info->pStages[i].stage);
         create_feedback->pPipelineStageCreationFeedbacks[i] = stages[s].feedback;
      }
   }

   return VK_SUCCESS;

fail:
   ralloc_free(pipeline_ctx);

   for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
      if (pipeline->shaders[s])
         anv_shader_bin_unref(pipeline->device, pipeline->shaders[s]);
   }

   return result;
}

VkResult
anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkComputePipelineCreateInfo *info,
                        const struct anv_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info)
{
   VkPipelineCreationFeedbackEXT pipeline_feedback = {
      .flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT,
   };
   int64_t pipeline_start = os_time_get_nano();

   const struct brw_compiler *compiler =
      pipeline->device->instance->physicalDevice.compiler;

   struct anv_pipeline_stage stage = {
      .stage = MESA_SHADER_COMPUTE,
      .module = module,
      .entrypoint = entrypoint,
      .spec_info = spec_info,
      .cache_key = {
         .stage = MESA_SHADER_COMPUTE,
      },
      .feedback = {
         .flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT,
      },
   };
   anv_pipeline_hash_shader(stage.module,
                            stage.entrypoint,
                            MESA_SHADER_COMPUTE,
                            stage.spec_info,
                            stage.shader_sha1);

   struct anv_shader_bin *bin = NULL;

   populate_cs_prog_key(&pipeline->device->info, &stage.key.cs);

   ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);

   anv_pipeline_hash_compute(pipeline, layout, &stage, stage.cache_key.sha1);
   bool cache_hit;
   bin = anv_device_search_for_kernel(pipeline->device, cache, &stage.cache_key,
                                      sizeof(stage.cache_key), &cache_hit);

   if (bin == NULL) {
      int64_t stage_start = os_time_get_nano();

      stage.bind_map = (struct anv_pipeline_bind_map) {
         .surface_to_descriptor = stage.surface_to_descriptor,
         .sampler_to_descriptor = stage.sampler_to_descriptor
      };

      /* Set up a binding for the gl_NumWorkGroups */
      stage.bind_map.surface_count = 1;
      stage.bind_map.surface_to_descriptor[0] = (struct anv_pipeline_binding) {
         .set = ANV_DESCRIPTOR_SET_NUM_WORK_GROUPS,
      };
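      /* The compute shader reads gl_NumWorkGroups through this surface; for
       * vkCmdDispatchIndirect the values can be fetched straight from the
       * indirect buffer.
       */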

      void *mem_ctx = ralloc_context(NULL);

      stage.nir = anv_pipeline_stage_get_nir(pipeline, cache, mem_ctx, &stage);
      if (stage.nir == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      anv_pipeline_lower_nir(pipeline, mem_ctx, &stage, layout);

      NIR_PASS_V(stage.nir, anv_nir_add_base_work_group_id,
                 &stage.prog_data.cs);

      const unsigned *shader_code =
         brw_compile_cs(compiler, pipeline->device, mem_ctx, &stage.key.cs,
                        &stage.prog_data.cs, stage.nir, -1, NULL);
      if (shader_code == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      const unsigned code_size = stage.prog_data.base.program_size;
      bin = anv_device_upload_kernel(pipeline->device, cache,
                                     &stage.cache_key, sizeof(stage.cache_key),
                                     shader_code, code_size,
                                     stage.nir->constant_data,
                                     stage.nir->constant_data_size,
                                     &stage.prog_data.base,
                                     sizeof(stage.prog_data.cs),
                                     NULL, &stage.bind_map);
      if (!bin) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      ralloc_free(mem_ctx);

      stage.feedback.duration = os_time_get_nano() - stage_start;
   }

   if (cache_hit) {
      stage.feedback.flags |=
         VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
      pipeline_feedback.flags |=
         VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
   }
   pipeline_feedback.duration = os_time_get_nano() - pipeline_start;

   const VkPipelineCreationFeedbackCreateInfoEXT *create_feedback =
      vk_find_struct_const(info->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT);
   if (create_feedback) {
      *create_feedback->pPipelineCreationFeedback = pipeline_feedback;

      assert(create_feedback->pipelineStageCreationFeedbackCount == 1);
      create_feedback->pPipelineStageCreationFeedbacks[0] = stage.feedback;
   }

   pipeline->active_stages = VK_SHADER_STAGE_COMPUTE_BIT;
   pipeline->shaders[MESA_SHADER_COMPUTE] = bin;

   return VK_SUCCESS;
}
1390
1391/**
1392 * Copy pipeline state not marked as dynamic.
1393 * Dynamic state is pipeline state which hasn't been provided at pipeline
1394 * creation time, but is dynamically provided afterwards using various
1395 * vkCmdSet* functions.
1396 *
1397 * The set of state considered "non_dynamic" is determined by the pieces of
1398 * state that have their corresponding VkDynamicState enums omitted from
1399 * VkPipelineDynamicStateCreateInfo::pDynamicStates.
1400 *
1401 * @param[out] pipeline    Destination non_dynamic state.
1402 * @param[in]  pCreateInfo Source of non_dynamic state to be copied.
1403 */
1404static void
1405copy_non_dynamic_state(struct anv_pipeline *pipeline,
1406                       const VkGraphicsPipelineCreateInfo *pCreateInfo)
1407{
1408   anv_cmd_dirty_mask_t states = ANV_CMD_DIRTY_DYNAMIC_ALL;
1409   struct anv_subpass *subpass = pipeline->subpass;
1410
1411   pipeline->dynamic_state = default_dynamic_state;
1412
1413   if (pCreateInfo->pDynamicState) {
1414      /* Remove all of the states that are marked as dynamic */
1415      uint32_t count = pCreateInfo->pDynamicState->dynamicStateCount;
1416      for (uint32_t s = 0; s < count; s++)
1417         states &= ~(1 << pCreateInfo->pDynamicState->pDynamicStates[s]);
1418   }
1419
1420   struct anv_dynamic_state *dynamic = &pipeline->dynamic_state;
1421
1422   /* Section 9.2 of the Vulkan 1.0.15 spec says:
1423    *
1424    *    pViewportState is [...] NULL if the pipeline
1425    *    has rasterization disabled.
1426    */
1427   if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
1428      assert(pCreateInfo->pViewportState);
1429
1430      dynamic->viewport.count = pCreateInfo->pViewportState->viewportCount;
1431      if (states & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
1432         typed_memcpy(dynamic->viewport.viewports,
1433                     pCreateInfo->pViewportState->pViewports,
1434                     pCreateInfo->pViewportState->viewportCount);
1435      }
1436
1437      dynamic->scissor.count = pCreateInfo->pViewportState->scissorCount;
1438      if (states & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
1439         typed_memcpy(dynamic->scissor.scissors,
1440                      pCreateInfo->pViewportState->pScissors,
1441                      pCreateInfo->pViewportState->scissorCount);
1442      }
1443   }
1444
1445   if (states & (1 << VK_DYNAMIC_STATE_LINE_WIDTH)) {
1446      assert(pCreateInfo->pRasterizationState);
1447      dynamic->line_width = pCreateInfo->pRasterizationState->lineWidth;
1448   }
1449
1450   if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS)) {
1451      assert(pCreateInfo->pRasterizationState);
1452      dynamic->depth_bias.bias =
1453         pCreateInfo->pRasterizationState->depthBiasConstantFactor;
1454      dynamic->depth_bias.clamp =
1455         pCreateInfo->pRasterizationState->depthBiasClamp;
1456      dynamic->depth_bias.slope =
1457         pCreateInfo->pRasterizationState->depthBiasSlopeFactor;
1458   }
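
   /* For reference (an informal summary of the spec's formula, not a
    * contract): the bias added to a fragment's depth is roughly
    *
    *    o = m * depthBiasSlopeFactor + r * depthBiasConstantFactor
    *
    * where m is the polygon's maximum depth slope and r is the minimum
    * resolvable difference of the depth attachment format, with o limited
    * by depthBiasClamp when that value is non-zero.
    */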
1459
1460   /* Section 9.2 of the Vulkan 1.0.15 spec says:
1461    *
1462    *    pColorBlendState is [...] NULL if the pipeline has rasterization
1463    *    disabled or if the subpass of the render pass the pipeline is
1464    *    created against does not use any color attachments.
1465    */
1466   bool uses_color_att = false;
1467   for (unsigned i = 0; i < subpass->color_count; ++i) {
1468      if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED) {
1469         uses_color_att = true;
1470         break;
1471      }
1472   }
1473
1474   if (uses_color_att &&
1475       !pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
1476      assert(pCreateInfo->pColorBlendState);
1477
1478      if (states & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
1479         typed_memcpy(dynamic->blend_constants,
1480                      pCreateInfo->pColorBlendState->blendConstants, 4);
1481   }
1482
1483   /* If there is no depth/stencil attachment, then don't read
1484    * pDepthStencilState. The Vulkan spec states that pDepthStencilState may
1485    * be NULL in this case. Even if pDepthStencilState is non-NULL, there is
1486    * no need to override the depth/stencil defaults in
1487    * anv_pipeline::dynamic_state when there is no depth/stencil attachment.
1488    *
1489    * Section 9.2 of the Vulkan 1.0.15 spec says:
1490    *
1491    *    pDepthStencilState is [...] NULL if the pipeline has rasterization
1492    *    disabled or if the subpass of the render pass the pipeline is created
1493    *    against does not use a depth/stencil attachment.
1494    */
1495   if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable &&
1496       subpass->depth_stencil_attachment) {
1497      assert(pCreateInfo->pDepthStencilState);
1498
1499      if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS)) {
1500         dynamic->depth_bounds.min =
1501            pCreateInfo->pDepthStencilState->minDepthBounds;
1502         dynamic->depth_bounds.max =
1503            pCreateInfo->pDepthStencilState->maxDepthBounds;
1504      }
1505
1506      if (states & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK)) {
1507         dynamic->stencil_compare_mask.front =
1508            pCreateInfo->pDepthStencilState->front.compareMask;
1509         dynamic->stencil_compare_mask.back =
1510            pCreateInfo->pDepthStencilState->back.compareMask;
1511      }
1512
1513      if (states & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK)) {
1514         dynamic->stencil_write_mask.front =
1515            pCreateInfo->pDepthStencilState->front.writeMask;
1516         dynamic->stencil_write_mask.back =
1517            pCreateInfo->pDepthStencilState->back.writeMask;
1518      }
1519
1520      if (states & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE)) {
1521         dynamic->stencil_reference.front =
1522            pCreateInfo->pDepthStencilState->front.reference;
1523         dynamic->stencil_reference.back =
1524            pCreateInfo->pDepthStencilState->back.reference;
1525      }
1526   }
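
   /* When any of the stencil values above are left dynamic, the client
    * provides them per face at record time instead, e.g. (hypothetical
    * usage):
    *
    *    vkCmdSetStencilReference(cmd, VK_STENCIL_FACE_FRONT_BIT, ref);
    *
    * which feeds the equivalent fields of the command buffer's dynamic
    * state rather than the pipeline defaults copied here.
    */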
1527
1528   pipeline->dynamic_state_mask = states;
1529}
1530
1531static void
1532anv_pipeline_validate_create_info(const VkGraphicsPipelineCreateInfo *info)
1533{
1534#ifdef DEBUG
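   /* These checks only exist in debug builds; in release builds this
    * function compiles down to an empty body and we rely on the spec's
    * valid usage rules instead.
    */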
1535   struct anv_render_pass *renderpass = NULL;
1536   struct anv_subpass *subpass = NULL;
1537
1538   /* Assert that all required members of VkGraphicsPipelineCreateInfo are
1539    * present.  See the Vulkan 1.0.28 spec, Section 9.2 Graphics Pipelines.
1540    */
1541   assert(info->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);
1542
1543   renderpass = anv_render_pass_from_handle(info->renderPass);
1544   assert(renderpass);
1545
1546   assert(info->subpass < renderpass->subpass_count);
1547   subpass = &renderpass->subpasses[info->subpass];
1548
1549   assert(info->stageCount >= 1);
1550   assert(info->pVertexInputState);
1551   assert(info->pInputAssemblyState);
1552   assert(info->pRasterizationState);
1553   if (!info->pRasterizationState->rasterizerDiscardEnable) {
1554      assert(info->pViewportState);
1555      assert(info->pMultisampleState);
1556
1557      if (subpass && subpass->depth_stencil_attachment)
1558         assert(info->pDepthStencilState);
1559
1560      if (subpass && subpass->color_count > 0) {
1561         bool all_color_unused = true;
1562         for (uint32_t i = 0; i < subpass->color_count; i++) {
1563            if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED)
1564               all_color_unused = false;
1565         }
1566         /* pColorBlendState is ignored if the pipeline has rasterization
1567          * disabled or if the subpass of the render pass the pipeline is
1568          * created against does not use any color attachments.
1569          */
1570         assert(info->pColorBlendState || all_color_unused);
1571      }
1572   }
1573
1574   for (uint32_t i = 0; i < info->stageCount; ++i) {
1575      switch (info->pStages[i].stage) {
1576      case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
1577      case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
1578         assert(info->pTessellationState);
1579         break;
1580      default:
1581         break;
1582      }
1583   }
1584#endif
1585}
1586
1587/**
1588 * Calculate the desired L3 partitioning based on the current state of the
1589 * pipeline.  For now this simply returns the conservative defaults calculated
1590 * by get_default_l3_weights(), but we could probably do better by gathering
1591 * more statistics from the pipeline state (e.g. guess of expected URB usage
1592 * and bound surfaces), or by using feedback from performance counters.
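 *
 * Informally (an assumption about what gen_l3_config.c does with the
 * weights rather than a contract), the weights describe how to split L3
 * space between SLM, the URB, the data cache, and the read-only clients.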
1593 */
1594void
1595anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm)
1596{
1597   const struct gen_device_info *devinfo = &pipeline->device->info;
1598
1599   const struct gen_l3_weights w =
1600      gen_get_default_l3_weights(devinfo, pipeline->needs_data_cache, needs_slm);
1601
1602   pipeline->urb.l3_config = gen_get_l3_config(devinfo, w);
1603   pipeline->urb.total_size =
1604      gen_get_l3_config_urb_size(devinfo, pipeline->urb.l3_config);
1605}
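
/* For reference: the graphics path in anv_pipeline_init() below passes
 * needs_slm = false; the compute path is expected to pass true whenever the
 * kernel uses shared local memory, since SLM is carved out of L3.
 */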
1606
1607VkResult
1608anv_pipeline_init(struct anv_pipeline *pipeline,
1609                  struct anv_device *device,
1610                  struct anv_pipeline_cache *cache,
1611                  const VkGraphicsPipelineCreateInfo *pCreateInfo,
1612                  const VkAllocationCallbacks *alloc)
1613{
1614   VkResult result;
1615
1616   anv_pipeline_validate_create_info(pCreateInfo);
1617
1618   if (alloc == NULL)
1619      alloc = &device->alloc;
1620
1621   pipeline->device = device;
1622
1623   ANV_FROM_HANDLE(anv_render_pass, render_pass, pCreateInfo->renderPass);
1624   assert(pCreateInfo->subpass < render_pass->subpass_count);
1625   pipeline->subpass = &render_pass->subpasses[pCreateInfo->subpass];
1626
1627   result = anv_reloc_list_init(&pipeline->batch_relocs, alloc);
1628   if (result != VK_SUCCESS)
1629      return result;
1630
1631   pipeline->batch.alloc = alloc;
1632   pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
1633   pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);
1634   pipeline->batch.relocs = &pipeline->batch_relocs;
1635   pipeline->batch.status = VK_SUCCESS;
1636
1637   copy_non_dynamic_state(pipeline, pCreateInfo);
1638   pipeline->depth_clamp_enable = pCreateInfo->pRasterizationState &&
1639                                  pCreateInfo->pRasterizationState->depthClampEnable;
1640
1641   /* Previously we enabled depth clipping when !depthClampEnable.
1642    * VkPipelineRasterizationDepthClipStateCreateInfoEXT makes it explicit,
1643    * so if the clip info is present, use its enable value; otherwise fall
1644    * back to the previous !depthClampEnable logic.
1645    */
1646   const VkPipelineRasterizationDepthClipStateCreateInfoEXT *clip_info =
1647      vk_find_struct_const(pCreateInfo->pRasterizationState->pNext,
1648                           PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT);
1649   pipeline->depth_clip_enable = clip_info ?
      clip_info->depthClipEnable : !pipeline->depth_clamp_enable;
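
   /* Spelled out (assuming no other extension alters this):
    *
    *    clip_info present:  depth_clip_enable = clip_info->depthClipEnable
    *    clip_info absent:   depth_clip_enable = !depthClampEnable
    *
    * so clients can now enable depth clamping and clipping simultaneously,
    * which the old inference could not express.
    */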
1650
1651   pipeline->sample_shading_enable = pCreateInfo->pMultisampleState &&
1652                                     pCreateInfo->pMultisampleState->sampleShadingEnable;
1653
1654   pipeline->needs_data_cache = false;
1655
1656   /* When we free the pipeline, we detect stages based on the NULL status
1657    * of various prog_data pointers.  Make them NULL by default.
1658    */
1659   memset(pipeline->shaders, 0, sizeof(pipeline->shaders));
1660
1661   result = anv_pipeline_compile_graphics(pipeline, cache, pCreateInfo);
1662   if (result != VK_SUCCESS) {
1663      anv_reloc_list_finish(&pipeline->batch_relocs, alloc);
1664      return result;
1665   }
1666
1667   assert(pipeline->shaders[MESA_SHADER_VERTEX]);
1668
1669   anv_pipeline_setup_l3_config(pipeline, false);
1670
1671   const VkPipelineVertexInputStateCreateInfo *vi_info =
1672      pCreateInfo->pVertexInputState;
1673
1674   const uint64_t inputs_read = get_vs_prog_data(pipeline)->inputs_read;
1675
1676   pipeline->vb_used = 0;
1677   for (uint32_t i = 0; i < vi_info->vertexAttributeDescriptionCount; i++) {
1678      const VkVertexInputAttributeDescription *desc =
1679         &vi_info->pVertexAttributeDescriptions[i];
1680
1681      if (inputs_read & (1ull << (VERT_ATTRIB_GENERIC0 + desc->location)))
1682         pipeline->vb_used |= 1 << desc->binding;
1683   }
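
   /* Example (hypothetical numbers): an attribute at location 2 reading
    * from binding 0 only marks binding 0 in vb_used when the VS actually
    * consumes VERT_ATTRIB_GENERIC2, i.e. when bit (VERT_ATTRIB_GENERIC0 + 2)
    * of inputs_read is set; bindings feeding only unused attributes are
    * never fetched.
    */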
1684
1685   for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
1686      const VkVertexInputBindingDescription *desc =
1687         &vi_info->pVertexBindingDescriptions[i];
1688
1689      pipeline->vb[desc->binding].stride = desc->stride;
1690
1691      /* Step rate is programmed per vertex element (attribute), not
1692       * binding. Set up a map of which bindings step per instance, for
1693       * reference by vertex element setup.
       */
1694      switch (desc->inputRate) {
1695      default:
1696      case VK_VERTEX_INPUT_RATE_VERTEX:
1697         pipeline->vb[desc->binding].instanced = false;
1698         break;
1699      case VK_VERTEX_INPUT_RATE_INSTANCE:
1700         pipeline->vb[desc->binding].instanced = true;
1701         break;
1702      }
1703
1704      pipeline->vb[desc->binding].instance_divisor = 1;
1705   }
1706
1707   const VkPipelineVertexInputDivisorStateCreateInfoEXT *vi_div_state =
1708      vk_find_struct_const(vi_info->pNext,
1709                           PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT);
1710   if (vi_div_state) {
1711      for (uint32_t i = 0; i < vi_div_state->vertexBindingDivisorCount; i++) {
1712         const VkVertexInputBindingDivisorDescriptionEXT *desc =
1713            &vi_div_state->pVertexBindingDivisors[i];
1714
1715         pipeline->vb[desc->binding].instance_divisor = desc->divisor;
1716      }
1717   }
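
   /* Per VK_EXT_vertex_attribute_divisor, a divisor of N makes the binding
    * advance once every N instances; a divisor of 0 (when the corresponding
    * feature is enabled) makes every instance read the first element.
    */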
1718
1719   /* Our implementation of VK_KHR_multiview uses instancing to draw the
1720    * different views.  If the client asks for instancing, we need to multiply
1721    * the instance divisor by the number of views to ensure that we repeat the
1722    * client's per-instance data once for each view.
1723    */
1724   if (pipeline->subpass->view_mask) {
1725      const uint32_t view_count = anv_subpass_view_count(pipeline->subpass);
1726      for (uint32_t vb = 0; vb < MAX_VBS; vb++) {
1727         if (pipeline->vb[vb].instanced)
1728            pipeline->vb[vb].instance_divisor *= view_count;
1729      }
1730   }
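
   /* Worked example (hypothetical numbers): with view_mask 0x3 (two views)
    * and a client divisor of 3, the effective divisor becomes 6: each
    * per-instance value is held for 3 client instances times 2 views.
    */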
1731
1732   const VkPipelineInputAssemblyStateCreateInfo *ia_info =
1733      pCreateInfo->pInputAssemblyState;
1734   const VkPipelineTessellationStateCreateInfo *tess_info =
1735      pCreateInfo->pTessellationState;
1736   pipeline->primitive_restart = ia_info->primitiveRestartEnable;
1737
1738   if (anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL))
1739      pipeline->topology = _3DPRIM_PATCHLIST(tess_info->patchControlPoints);
1740   else
1741      pipeline->topology = vk_to_gen_primitive_type[ia_info->topology];
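
   /* E.g. a pipeline with a tessellation stage and patchControlPoints == 4
    * selects the 4-control-point patch-list topology; otherwise the Vulkan
    * topology is translated directly to its gen equivalent.
    */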
1742
1743   return VK_SUCCESS;
1744}
1745