/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_nir.h"
#include "brw_shader.h"
#include "dev/intel_debug.h"
#include "compiler/glsl_types.h"
#include "compiler/nir/nir_builder.h"
#include "util/u_math.h"

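/**
 * Remap tessellation level stores/loads to the Intel patch URB header.
 *
 * gl_TessLevelInner/Outer occupy the first two vec4 slots of the patch
 * header, with a layout that depends on the tessellation domain (DWord
 * numbering within the 8-DWord header):
 *
 *    Quads:     Inner[1..0] in DWords 2-3, Outer[3..0] in DWords 4-7
 *    Triangles: Inner[0] in DWord 4, Outer[2..0] in DWords 5-7
 *    Isolines:  Outer[0..1] in DWords 6-7
 *
 * Accesses to components that don't exist in the current domain are
 * removed entirely; loads of such components are replaced with undefs.
 * Returns true if the intrinsic was a tessellation level access at all,
 * so the caller knows to skip the normal VUE remapping for it.
 */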
static bool
remap_tess_levels(nir_builder *b, nir_intrinsic_instr *intr,
                  GLenum primitive_mode)
{
   const int location = nir_intrinsic_base(intr);
   const unsigned component = nir_intrinsic_component(intr);
   bool out_of_bounds;

   if (location == VARYING_SLOT_TESS_LEVEL_INNER) {
      switch (primitive_mode) {
      case GL_QUADS:
         /* gl_TessLevelInner[0..1] lives at DWords 3-2 (reversed). */
         nir_intrinsic_set_base(intr, 0);
         nir_intrinsic_set_component(intr, 3 - component);
         out_of_bounds = false;
         break;
      case GL_TRIANGLES:
         /* gl_TessLevelInner[0] lives at DWord 4. */
         nir_intrinsic_set_base(intr, 1);
         out_of_bounds = component > 0;
         break;
      case GL_ISOLINES:
         out_of_bounds = true;
         break;
      default:
         unreachable("Bogus tessellation domain");
      }
   } else if (location == VARYING_SLOT_TESS_LEVEL_OUTER) {
      if (primitive_mode == GL_ISOLINES) {
         /* gl_TessLevelOuter[0..1] lives at DWords 6-7 (in order). */
         nir_intrinsic_set_base(intr, 1);
         nir_intrinsic_set_component(intr, 2 + nir_intrinsic_component(intr));
         out_of_bounds = component > 1;
      } else {
         /* Triangles use DWords 7-5 (reversed); Quads use 7-4 (reversed) */
         nir_intrinsic_set_base(intr, 1);
         nir_intrinsic_set_component(intr, 3 - nir_intrinsic_component(intr));
         out_of_bounds = component == 3 && primitive_mode == GL_TRIANGLES;
      }
   } else {
      return false;
   }

   if (out_of_bounds) {
      if (nir_intrinsic_infos[intr->intrinsic].has_dest) {
         b->cursor = nir_before_instr(&intr->instr);
         nir_ssa_def *undef = nir_ssa_undef(b, 1, 32);
         nir_ssa_def_rewrite_uses(&intr->dest.ssa, undef);
      }
      nir_instr_remove(&intr->instr);
   }

   return true;
}

static bool
is_input(nir_intrinsic_instr *intrin)
{
   return intrin->intrinsic == nir_intrinsic_load_input ||
          intrin->intrinsic == nir_intrinsic_load_per_vertex_input ||
          intrin->intrinsic == nir_intrinsic_load_interpolated_input;
}

static bool
is_output(nir_intrinsic_instr *intrin)
{
   return intrin->intrinsic == nir_intrinsic_load_output ||
          intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
          intrin->intrinsic == nir_intrinsic_store_output ||
          intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
}

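/**
 * Remap TCS output and TES input intrinsics from gl_varying_slot
 * locations to patch URB slots using the VUE map.  For per-vertex
 * accesses, the vertex index is folded into the constant slot when it is
 * known, or multiplied into the indirect offset source otherwise.
 * Tessellation levels are handled separately by remap_tess_levels();
 * the driver-generated passthrough TCS skips that remapping.
 */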
static bool
remap_patch_urb_offsets(nir_block *block, nir_builder *b,
                        const struct brw_vue_map *vue_map,
                        GLenum tes_primitive_mode)
{
   const bool is_passthrough_tcs = b->shader->info.name &&
      strcmp(b->shader->info.name, "passthrough TCS") == 0;

   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      gl_shader_stage stage = b->shader->info.stage;

      if ((stage == MESA_SHADER_TESS_CTRL && is_output(intrin)) ||
          (stage == MESA_SHADER_TESS_EVAL && is_input(intrin))) {

         if (!is_passthrough_tcs &&
             remap_tess_levels(b, intrin, tes_primitive_mode))
            continue;

         int vue_slot = vue_map->varying_to_slot[intrin->const_index[0]];
         assert(vue_slot != -1);
         intrin->const_index[0] = vue_slot;

         nir_src *vertex = nir_get_io_vertex_index_src(intrin);
         if (vertex) {
            if (nir_src_is_const(*vertex)) {
               intrin->const_index[0] += nir_src_as_uint(*vertex) *
                                         vue_map->num_per_vertex_slots;
            } else {
               b->cursor = nir_before_instr(&intrin->instr);

               /* Multiply by the number of per-vertex slots. */
               nir_ssa_def *vertex_offset =
                  nir_imul(b,
                           nir_ssa_for_src(b, *vertex, 1),
                           nir_imm_int(b,
                                       vue_map->num_per_vertex_slots));

               /* Add it to the existing offset */
               nir_src *offset = nir_get_io_offset_src(intrin);
               nir_ssa_def *total_offset =
                  nir_iadd(b, vertex_offset,
                           nir_ssa_for_src(b, *offset, 1));

               nir_instr_rewrite_src(&intrin->instr, offset,
                                     nir_src_for_ssa(total_offset));
            }
         }
      }
   }
   return true;
}

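/**
 * Lower and remap vertex shader inputs for the backend.
 *
 * After nir_lower_io, attribute locations (VERT_ATTRIB_*) are remapped to
 * a contiguous range of vec4 slots.  System-generated values delivered by
 * the vertex fetcher (gl_VertexID and friends) become load_input
 * intrinsics reading the extra vec4 the VF stores after the last user
 * attribute; gl_DrawID and the is-indexed-draw flag live in a second
 * extra vec4 after that one.
 */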
void
brw_nir_lower_vs_inputs(nir_shader *nir,
                        bool edgeflag_is_last,
                        const uint8_t *vs_attrib_wa_flags)
{
   /* Start with the location of the variable's base. */
   nir_foreach_shader_in_variable(var, nir)
      var->data.driver_location = var->data.location;

   /* Now use nir_lower_io to walk dereference chains.  Attribute arrays are
    * loaded as one vec4 or dvec4 per element (or matrix column), depending on
    * whether it is a double-precision type or not.
    */
   nir_lower_io(nir, nir_var_shader_in, type_size_vec4,
                nir_lower_io_lower_64bit_to_32);

   /* This pass needs actual constants */
   nir_opt_constant_folding(nir);

   nir_io_add_const_offset_to_base(nir, nir_var_shader_in);

   brw_nir_apply_attribute_workarounds(nir, vs_attrib_wa_flags);

   /* The last step is to remap VERT_ATTRIB_* to actual registers */

   /* Whether or not we have any system generated values.  gl_DrawID is not
    * included here as it lives in its own vec4.
    */
   const bool has_sgvs =
      BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_FIRST_VERTEX) ||
      BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_BASE_INSTANCE) ||
      BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) ||
      BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_INSTANCE_ID);

   const unsigned num_inputs = util_bitcount64(nir->info.inputs_read);

   nir_foreach_function(function, nir) {
      if (!function->impl)
         continue;

      nir_builder b;
      nir_builder_init(&b, function->impl);

      nir_foreach_block(block, function->impl) {
         nir_foreach_instr_safe(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

            switch (intrin->intrinsic) {
            case nir_intrinsic_load_first_vertex:
            case nir_intrinsic_load_base_instance:
            case nir_intrinsic_load_vertex_id_zero_base:
            case nir_intrinsic_load_instance_id:
            case nir_intrinsic_load_is_indexed_draw:
            case nir_intrinsic_load_draw_id: {
               b.cursor = nir_after_instr(&intrin->instr);

               /* gl_VertexID and friends are stored by the VF as the last
                * vertex element.  We convert them to load_input intrinsics at
                * the right location.
                */
               nir_intrinsic_instr *load =
                  nir_intrinsic_instr_create(nir, nir_intrinsic_load_input);
               load->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));

               nir_intrinsic_set_base(load, num_inputs);
               switch (intrin->intrinsic) {
               case nir_intrinsic_load_first_vertex:
                  nir_intrinsic_set_component(load, 0);
                  break;
               case nir_intrinsic_load_base_instance:
                  nir_intrinsic_set_component(load, 1);
                  break;
               case nir_intrinsic_load_vertex_id_zero_base:
                  nir_intrinsic_set_component(load, 2);
                  break;
               case nir_intrinsic_load_instance_id:
                  nir_intrinsic_set_component(load, 3);
                  break;
               case nir_intrinsic_load_draw_id:
               case nir_intrinsic_load_is_indexed_draw:
                  /* gl_DrawID and IsIndexedDraw are stored right after
                   * gl_VertexID and friends if any of them exist.
                   */
                  nir_intrinsic_set_base(load, num_inputs + has_sgvs);
                  if (intrin->intrinsic == nir_intrinsic_load_draw_id)
                     nir_intrinsic_set_component(load, 0);
                  else
                     nir_intrinsic_set_component(load, 1);
                  break;
               default:
                  unreachable("Invalid system value intrinsic");
               }

               load->num_components = 1;
               nir_ssa_dest_init(&load->instr, &load->dest, 1, 32, NULL);
               nir_builder_instr_insert(&b, &load->instr);

               nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                        &load->dest.ssa);
               nir_instr_remove(&intrin->instr);
               break;
            }

            case nir_intrinsic_load_input: {
               /* Attributes come in a contiguous block, ordered by their
                * gl_vert_attrib value.  That means we can compute the slot
                * number for an attribute by masking out the enabled attributes
                * before it and counting the bits.
                */
               int attr = nir_intrinsic_base(intrin);
               uint64_t inputs_read = nir->info.inputs_read;
               int slot = -1;
               if (edgeflag_is_last) {
                  inputs_read &= ~BITFIELD64_BIT(VERT_ATTRIB_EDGEFLAG);
                  if (attr == VERT_ATTRIB_EDGEFLAG)
                     slot = num_inputs - 1;
               }
               if (slot == -1)
                  slot = util_bitcount64(inputs_read &
                                         BITFIELD64_MASK(attr));
               nir_intrinsic_set_base(intrin, slot);
               break;
            }

            default:
               break; /* Nothing to do */
            }
         }
      }
   }
}

void
brw_nir_lower_vue_inputs(nir_shader *nir,
                         const struct brw_vue_map *vue_map)
{
   nir_foreach_shader_in_variable(var, nir)
      var->data.driver_location = var->data.location;

   /* Inputs are stored in vec4 slots, so use type_size_vec4(). */
   nir_lower_io(nir, nir_var_shader_in, type_size_vec4,
                nir_lower_io_lower_64bit_to_32);

   /* This pass needs actual constants */
   nir_opt_constant_folding(nir);

   nir_io_add_const_offset_to_base(nir, nir_var_shader_in);

   nir_foreach_function(function, nir) {
      if (!function->impl)
         continue;

      nir_foreach_block(block, function->impl) {
         nir_foreach_instr(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

            if (intrin->intrinsic == nir_intrinsic_load_input ||
                intrin->intrinsic == nir_intrinsic_load_per_vertex_input) {
               /* Offset 0 is the VUE header, which contains
                * VARYING_SLOT_LAYER [.y], VARYING_SLOT_VIEWPORT [.z], and
                * VARYING_SLOT_PSIZ [.w].
                */
               int varying = nir_intrinsic_base(intrin);
               int vue_slot;
               switch (varying) {
               case VARYING_SLOT_PSIZ:
                  nir_intrinsic_set_base(intrin, 0);
                  nir_intrinsic_set_component(intrin, 3);
                  break;

               default:
                  vue_slot = vue_map->varying_to_slot[varying];
                  assert(vue_slot != -1);
                  nir_intrinsic_set_base(intrin, vue_slot);
                  break;
               }
            }
         }
      }
   }
}

void
brw_nir_lower_tes_inputs(nir_shader *nir, const struct brw_vue_map *vue_map)
{
   nir_foreach_shader_in_variable(var, nir)
      var->data.driver_location = var->data.location;

   nir_lower_io(nir, nir_var_shader_in, type_size_vec4,
                nir_lower_io_lower_64bit_to_32);

   /* This pass needs actual constants */
   nir_opt_constant_folding(nir);

   nir_io_add_const_offset_to_base(nir, nir_var_shader_in);

   nir_foreach_function(function, nir) {
      if (function->impl) {
         nir_builder b;
         nir_builder_init(&b, function->impl);
         nir_foreach_block(block, function->impl) {
            remap_patch_urb_offsets(block, &b, vue_map,
                                    nir->info.tess.primitive_mode);
         }
      }
   }
}

/**
 * Convert interpolateAtOffset() offsets from [-0.5, +0.5] floating point
 * offsets to integer [-8, +7] offsets (in units of 1/16th of a pixel).
 *
 * We clamp to +7/16 on the upper end of the range, since +0.5 isn't
 * representable in a S0.4 value; a naive conversion would give us -8/16,
 * which is the opposite of what was intended.
 *
 * This is allowed by GL_ARB_gpu_shader5's quantization rules:
 *
 *    "Not all values of <offset> may be supported; x and y offsets may
 *     be rounded to fixed-point values with the number of fraction bits
 *     given by the implementation-dependent constant
 *     FRAGMENT_INTERPOLATION_OFFSET_BITS."
 */
static bool
lower_barycentric_at_offset(nir_builder *b, nir_instr *instr, void *data)
{
   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

   if (intrin->intrinsic != nir_intrinsic_load_barycentric_at_offset)
      return false;

   b->cursor = nir_before_instr(instr);

   assert(intrin->src[0].ssa);
   nir_ssa_def *offset =
      nir_imin(b, nir_imm_int(b, 7),
               nir_f2i32(b, nir_fmul(b, nir_imm_float(b, 16),
                                     intrin->src[0].ssa)));

   nir_instr_rewrite_src(instr, &intrin->src[0], nir_src_for_ssa(offset));

   return true;
}

void
brw_nir_lower_fs_inputs(nir_shader *nir,
                        const struct intel_device_info *devinfo,
                        const struct brw_wm_prog_key *key)
{
   nir_foreach_shader_in_variable(var, nir) {
      var->data.driver_location = var->data.location;

      /* Apply default interpolation mode.
       *
       * Everything defaults to smooth except for the legacy GL color
       * built-in variables, which might be flat depending on API state.
       */
      if (var->data.interpolation == INTERP_MODE_NONE) {
         const bool flat = key->flat_shade &&
            (var->data.location == VARYING_SLOT_COL0 ||
             var->data.location == VARYING_SLOT_COL1);

         var->data.interpolation = flat ? INTERP_MODE_FLAT
                                        : INTERP_MODE_SMOOTH;
      }

      /* On Ironlake and below, there is only one interpolation mode.
       * Centroid interpolation doesn't mean anything on this hardware --
       * there is no multisampling.
       */
      if (devinfo->ver < 6) {
         var->data.centroid = false;
         var->data.sample = false;
      }
   }

   nir_lower_io_options lower_io_options = nir_lower_io_lower_64bit_to_32;
   if (key->persample_interp)
      lower_io_options |= nir_lower_io_force_sample_interpolation;

   nir_lower_io(nir, nir_var_shader_in, type_size_vec4, lower_io_options);
   if (devinfo->ver >= 11)
      nir_lower_interpolation(nir, ~0);

   nir_shader_instructions_pass(nir, lower_barycentric_at_offset,
                                nir_metadata_block_index |
                                nir_metadata_dominance,
                                NULL);

   /* This pass needs actual constants */
   nir_opt_constant_folding(nir);

   nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
}

void
brw_nir_lower_vue_outputs(nir_shader *nir)
{
   nir_foreach_shader_out_variable(var, nir) {
      var->data.driver_location = var->data.location;
   }

   nir_lower_io(nir, nir_var_shader_out, type_size_vec4,
                nir_lower_io_lower_64bit_to_32);
}

void
brw_nir_lower_tcs_outputs(nir_shader *nir, const struct brw_vue_map *vue_map,
                          GLenum tes_primitive_mode)
{
   nir_foreach_shader_out_variable(var, nir) {
      var->data.driver_location = var->data.location;
   }

   nir_lower_io(nir, nir_var_shader_out, type_size_vec4,
                nir_lower_io_lower_64bit_to_32);

   /* This pass needs actual constants */
   nir_opt_constant_folding(nir);

   nir_io_add_const_offset_to_base(nir, nir_var_shader_out);

   nir_foreach_function(function, nir) {
      if (function->impl) {
         nir_builder b;
         nir_builder_init(&b, function->impl);
         nir_foreach_block(block, function->impl) {
            remap_patch_urb_offsets(block, &b, vue_map, tes_primitive_mode);
         }
      }
   }
}

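/**
 * Lower fragment shader outputs.  Both the output location and the
 * dual-source blend index are packed into driver_location via the
 * BRW_NIR_FRAG_OUTPUT_* fields so the backend can recover them from a
 * single base value.
 */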
void
brw_nir_lower_fs_outputs(nir_shader *nir)
{
   nir_foreach_shader_out_variable(var, nir) {
      var->data.driver_location =
         SET_FIELD(var->data.index, BRW_NIR_FRAG_OUTPUT_INDEX) |
         SET_FIELD(var->data.location, BRW_NIR_FRAG_OUTPUT_LOCATION);
   }

   nir_lower_io(nir, nir_var_shader_out, type_size_dvec4, 0);
}

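/* Run a NIR pass on `nir` via NIR_PASS, OR the result into the local
 * `progress` variable, and evaluate to whether this particular pass made
 * progress (so OPT() can also be used as an if-condition).
 */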
#define OPT(pass, ...) ({                                  \
   bool this_progress = false;                             \
   NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__);      \
   if (this_progress)                                      \
      progress = true;                                     \
   this_progress;                                          \
})

void
brw_nir_optimize(nir_shader *nir, const struct brw_compiler *compiler,
                 bool is_scalar, bool allow_copies)
{
   bool progress;
   unsigned lower_flrp =
      (nir->options->lower_flrp16 ? 16 : 0) |
      (nir->options->lower_flrp32 ? 32 : 0) |
      (nir->options->lower_flrp64 ? 64 : 0);

   do {
      progress = false;
      OPT(nir_split_array_vars, nir_var_function_temp);
      OPT(nir_shrink_vec_array_vars, nir_var_function_temp);
      OPT(nir_opt_deref);
      OPT(nir_lower_vars_to_ssa);
      if (allow_copies) {
         /* Only run this pass in the first call to brw_nir_optimize.  Later
          * calls assume that we've lowered away any copy_deref instructions
          * and we don't want to introduce any more.
          */
         OPT(nir_opt_find_array_copies);
      }
      OPT(nir_opt_copy_prop_vars);
      OPT(nir_opt_dead_write_vars);
      OPT(nir_opt_combine_stores, nir_var_all);

      if (is_scalar) {
         OPT(nir_lower_alu_to_scalar, NULL, NULL);
      } else {
         OPT(nir_opt_shrink_vectors, true);
      }

      OPT(nir_copy_prop);

      if (is_scalar) {
         OPT(nir_lower_phis_to_scalar, false);
      }

      OPT(nir_copy_prop);
      OPT(nir_opt_dce);
      OPT(nir_opt_cse);
      OPT(nir_opt_combine_stores, nir_var_all);

      /* Passing 0 to the peephole select pass causes it to convert
       * if-statements that contain only move instructions in the branches
       * regardless of the count.
       *
       * Passing 1 to the peephole select pass causes it to convert
       * if-statements that contain at most a single ALU instruction (total)
       * in both branches.  Before Gfx6, some math instructions were
       * prohibitively expensive and the results of compare operations needed
       * an extra resolve step.  For these reasons, this pass is more harmful
       * than good on those platforms.
       *
       * For indirect loads of uniforms (push constants), we assume that array
       * indices will nearly always be in bounds and the cost of the load is
       * low.  Therefore there shouldn't be a performance benefit to avoiding
       * it.  However, in vec4 tessellation shaders, these loads operate by
       * actually pulling from memory.
       */
      const bool is_vec4_tessellation = !is_scalar &&
         (nir->info.stage == MESA_SHADER_TESS_CTRL ||
          nir->info.stage == MESA_SHADER_TESS_EVAL);
      OPT(nir_opt_peephole_select, 0, !is_vec4_tessellation, false);
      OPT(nir_opt_peephole_select, 8, !is_vec4_tessellation,
          compiler->devinfo->ver >= 6);

      OPT(nir_opt_intrinsics);
      OPT(nir_opt_idiv_const, 32);
      OPT(nir_opt_algebraic);
      OPT(nir_opt_constant_folding);

      if (lower_flrp != 0) {
         if (OPT(nir_lower_flrp,
                 lower_flrp,
                 false /* always_precise */)) {
            OPT(nir_opt_constant_folding);
         }

         /* Nothing should rematerialize any flrps, so we only need to do this
          * lowering once.
          */
         lower_flrp = 0;
      }

      OPT(nir_opt_dead_cf);
      if (OPT(nir_opt_trivial_continues)) {
         /* If nir_opt_trivial_continues makes progress, then we need to clean
          * things up if we want any hope of nir_opt_if or nir_opt_loop_unroll
          * to make progress.
          */
         OPT(nir_copy_prop);
         OPT(nir_opt_dce);
      }
      OPT(nir_opt_if, false);
      OPT(nir_opt_conditional_discard);
      if (nir->options->max_unroll_iterations != 0) {
         OPT(nir_opt_loop_unroll);
      }
      OPT(nir_opt_remove_phis);
      OPT(nir_opt_gcm, false);
      OPT(nir_opt_undef);
      OPT(nir_lower_pack);
   } while (progress);

   /* Work around a Gfxbench unused local sampler variable which would
    * trigger an assert in the opt_large_constants pass.
    */
   OPT(nir_remove_dead_variables, nir_var_function_temp, NULL);
}

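/**
 * Callback for nir_lower_bit_size: returns the bit size the given
 * instruction should be executed at, or 0 to leave it alone.  We use it
 * to promote narrow (mostly 8-bit) ALU operations, subgroup intrinsics,
 * scans, and phis to a wider type that the hardware handles well;
 * nir_lower_bit_size inserts the conversions back to the original size.
 */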
static unsigned
lower_bit_size_callback(const nir_instr *instr, UNUSED void *data)
{
   const struct brw_compiler *compiler = (const struct brw_compiler *) data;
   const struct intel_device_info *devinfo = compiler->devinfo;

   switch (instr->type) {
   case nir_instr_type_alu: {
      nir_alu_instr *alu = nir_instr_as_alu(instr);
      assert(alu->dest.dest.is_ssa);
      if (alu->dest.dest.ssa.bit_size >= 32)
         return 0;

      /* Note: nir_op_iabs and nir_op_ineg are not lowered here because the
       * 8-bit ABS or NEG instruction should eventually get copy propagated
       * into the MOV that does the type conversion.  This results in far
       * fewer MOV instructions.
       */
      switch (alu->op) {
      case nir_op_idiv:
      case nir_op_imod:
      case nir_op_irem:
      case nir_op_udiv:
      case nir_op_umod:
      case nir_op_fceil:
      case nir_op_ffloor:
      case nir_op_ffract:
      case nir_op_fround_even:
      case nir_op_ftrunc:
         return 32;
      case nir_op_frcp:
      case nir_op_frsq:
      case nir_op_fsqrt:
      case nir_op_fpow:
      case nir_op_fexp2:
      case nir_op_flog2:
      case nir_op_fsin:
      case nir_op_fcos:
         return devinfo->ver < 9 ? 32 : 0;
      case nir_op_isign:
         assert(!"Should have been lowered by nir_opt_algebraic.");
         return 0;
      default:
         if (nir_op_infos[alu->op].num_inputs >= 2 &&
             alu->dest.dest.ssa.bit_size == 8)
            return 16;

         if (nir_alu_instr_is_comparison(alu) &&
             alu->src[0].src.ssa->bit_size == 8)
            return 16;

         return 0;
      }
      break;
   }

   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      switch (intrin->intrinsic) {
      case nir_intrinsic_read_invocation:
      case nir_intrinsic_read_first_invocation:
      case nir_intrinsic_vote_feq:
      case nir_intrinsic_vote_ieq:
      case nir_intrinsic_shuffle:
      case nir_intrinsic_shuffle_xor:
      case nir_intrinsic_shuffle_up:
      case nir_intrinsic_shuffle_down:
      case nir_intrinsic_quad_broadcast:
      case nir_intrinsic_quad_swap_horizontal:
      case nir_intrinsic_quad_swap_vertical:
      case nir_intrinsic_quad_swap_diagonal:
         if (intrin->src[0].ssa->bit_size == 8)
            return 16;
         return 0;

      case nir_intrinsic_reduce:
      case nir_intrinsic_inclusive_scan:
      case nir_intrinsic_exclusive_scan:
         /* There are a couple of register region issues that make things
          * complicated for 8-bit types:
          *
          *    1. Only raw moves are allowed to write to a packed 8-bit
          *       destination.
          *    2. If we use a strided destination, the efficient way to do
          *       scan operations ends up using strides that are too big to
          *       encode in an instruction.
          *
          * To get around these issues, we just do all 8-bit scan operations
          * in 16 bits.  It's actually fewer instructions than what we'd have
          * to do if we were trying to do it in native 8-bit types and the
          * results are the same once we truncate to 8 bits at the end.
          */
         if (intrin->dest.ssa.bit_size == 8)
            return 16;
         return 0;

      default:
         return 0;
      }
      break;
   }

   case nir_instr_type_phi: {
      nir_phi_instr *phi = nir_instr_as_phi(instr);
      if (phi->dest.ssa.bit_size == 8)
         return 16;
      return 0;
   }

   default:
      return 0;
   }
}

/* Does some simple lowering and runs the standard suite of optimizations
 *
 * This is intended to be called more-or-less directly after you get the
 * shader out of GLSL or some other source.  While it is geared towards i965,
 * it is not at all generator-specific except for the is_scalar flag.  Even
 * there, it is safe to call with is_scalar = false for a shader that is
 * intended for the FS backend as long as nir_optimize is called again with
 * is_scalar = true to scalarize everything prior to code gen.
 */
void
brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir,
                   const nir_shader *softfp64)
{
   const struct intel_device_info *devinfo = compiler->devinfo;
   UNUSED bool progress; /* Written by OPT */

   const bool is_scalar = compiler->scalar_stage[nir->info.stage];

   nir_validate_ssa_dominance(nir, "before brw_preprocess_nir");

   if (is_scalar) {
      OPT(nir_lower_alu_to_scalar, NULL, NULL);
   }

   if (nir->info.stage == MESA_SHADER_GEOMETRY)
      OPT(nir_lower_gs_intrinsics, 0);

   /* See also brw_nir_trig_workarounds.py */
   if (compiler->precise_trig &&
       !(devinfo->ver >= 10 || devinfo->is_kabylake))
      OPT(brw_nir_apply_trig_workarounds);

   if (devinfo->ver >= 12)
      OPT(brw_nir_clamp_image_1d_2d_array_sizes);

   const nir_lower_tex_options tex_options = {
      .lower_txp = ~0,
      .lower_txf_offset = true,
      .lower_rect_offset = true,
      .lower_txd_cube_map = true,
      .lower_txd_3d = devinfo->verx10 >= 125,
      .lower_txb_shadow_clamp = true,
      .lower_txd_shadow_clamp = true,
      .lower_txd_offset_clamp = true,
      .lower_tg4_offsets = true,
      .lower_txs_lod = true, /* Wa_14012320009 */
   };

   OPT(nir_lower_tex, &tex_options);
   OPT(nir_normalize_cubemap_coords);

   OPT(nir_lower_global_vars_to_local);

   OPT(nir_split_var_copies);
   OPT(nir_split_struct_vars, nir_var_function_temp);

   brw_nir_optimize(nir, compiler, is_scalar, true);

   OPT(nir_lower_doubles, softfp64, nir->options->lower_doubles_options);
   OPT(nir_lower_int64);

   OPT(nir_lower_bit_size, lower_bit_size_callback, (void *)compiler);

   if (is_scalar) {
      OPT(nir_lower_load_const_to_scalar);
   }

   /* Lower a bunch of stuff */
   OPT(nir_lower_var_copies);

   /* This needs to be run after the first optimization pass but before we
    * lower indirect derefs away
    */
   if (compiler->supports_shader_constants) {
      OPT(nir_opt_large_constants, NULL, 32);
   }

   OPT(nir_lower_system_values);
   OPT(nir_lower_compute_system_values, NULL);

   const nir_lower_subgroups_options subgroups_options = {
      .ballot_bit_size = 32,
      .ballot_components = 1,
      .lower_to_scalar = true,
      .lower_vote_trivial = !is_scalar,
      .lower_shuffle = true,
      .lower_quad_broadcast_dynamic = true,
      .lower_elect = true,
   };
   OPT(nir_lower_subgroups, &subgroups_options);

   OPT(nir_lower_clip_cull_distance_arrays);

   nir_variable_mode indirect_mask =
      brw_nir_no_indirect_mask(compiler, nir->info.stage);
   OPT(nir_lower_indirect_derefs, indirect_mask, UINT32_MAX);

   /* Even in cases where we can handle indirect temporaries via scratch,
    * it can still be expensive.  Lower indirects on small arrays to
    * conditional load/stores.
    *
    * The threshold of 16 was chosen semi-arbitrarily.  The idea is that an
    * indirect on an array of 16 elements is about 30 instructions, at which
    * point you may be better off doing a send.  With a SIMD8 program, 16
    * floats is 1/8 of the entire register file.  Any array larger than that
    * is likely to cause pressure issues.  Also, this value is sufficiently
    * high that the benchmarks known to suffer from large temporary array
    * issues are helped but nothing else in shader-db is hurt except for maybe
    * that one Kerbal Space Program shader.
    */
   if (is_scalar && !(indirect_mask & nir_var_function_temp))
      OPT(nir_lower_indirect_derefs, nir_var_function_temp, 16);

   /* Lower array derefs of vectors for SSBO and UBO loads.  For both UBOs and
    * SSBOs, our back-end is capable of loading an entire vec4 at a time and
    * we would like to take advantage of that whenever possible regardless of
    * whether or not the app gives us full loads.  This should allow the
    * optimizer to combine UBO and SSBO load operations and save us some send
    * messages.
    */
   OPT(nir_lower_array_deref_of_vec,
       nir_var_mem_ubo | nir_var_mem_ssbo,
       nir_lower_direct_array_deref_of_vec_load);

   /* Get rid of split copies */
   brw_nir_optimize(nir, compiler, is_scalar, false);
}

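/**
 * Cross-stage linking between the producer and consumer of an interface:
 * split I/O arrays into elements, drop varyings the consumer never reads,
 * and re-vectorize the I/O that remains.  Both shaders are re-optimized
 * whenever linking makes progress.
 */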
void
brw_nir_link_shaders(const struct brw_compiler *compiler,
                     nir_shader *producer, nir_shader *consumer)
{
   nir_lower_io_arrays_to_elements(producer, consumer);
   nir_validate_shader(producer, "after nir_lower_io_arrays_to_elements");
   nir_validate_shader(consumer, "after nir_lower_io_arrays_to_elements");

   const bool p_is_scalar = compiler->scalar_stage[producer->info.stage];
   const bool c_is_scalar = compiler->scalar_stage[consumer->info.stage];

   if (p_is_scalar && c_is_scalar) {
      NIR_PASS_V(producer, nir_lower_io_to_scalar_early, nir_var_shader_out);
      NIR_PASS_V(consumer, nir_lower_io_to_scalar_early, nir_var_shader_in);
      brw_nir_optimize(producer, compiler, p_is_scalar, false);
      brw_nir_optimize(consumer, compiler, c_is_scalar, false);
   }

   if (nir_link_opt_varyings(producer, consumer))
      brw_nir_optimize(consumer, compiler, c_is_scalar, false);

   NIR_PASS_V(producer, nir_remove_dead_variables, nir_var_shader_out, NULL);
   NIR_PASS_V(consumer, nir_remove_dead_variables, nir_var_shader_in, NULL);

   if (nir_remove_unused_varyings(producer, consumer)) {
      NIR_PASS_V(producer, nir_lower_global_vars_to_local);
      NIR_PASS_V(consumer, nir_lower_global_vars_to_local);

      /* The backend might not be able to handle indirects on
       * temporaries so we need to lower indirects on any of the
       * varyings we have demoted here.
       */
      NIR_PASS_V(producer, nir_lower_indirect_derefs,
                 brw_nir_no_indirect_mask(compiler, producer->info.stage),
                 UINT32_MAX);
      NIR_PASS_V(consumer, nir_lower_indirect_derefs,
                 brw_nir_no_indirect_mask(compiler, consumer->info.stage),
                 UINT32_MAX);

      brw_nir_optimize(producer, compiler, p_is_scalar, false);
      brw_nir_optimize(consumer, compiler, c_is_scalar, false);
   }

   NIR_PASS_V(producer, nir_lower_io_to_vector, nir_var_shader_out);
   NIR_PASS_V(producer, nir_opt_combine_stores, nir_var_shader_out);
   NIR_PASS_V(consumer, nir_lower_io_to_vector, nir_var_shader_in);

   if (producer->info.stage != MESA_SHADER_TESS_CTRL) {
      /* Calling lower_io_to_vector creates output variable writes with
       * write-masks.  On non-TCS outputs, the back-end can't handle it and we
       * need to call nir_lower_io_to_temporaries to get rid of them.  This,
       * in turn, creates temporary variables and extra copy_deref intrinsics
       * that we need to clean up.
       */
      NIR_PASS_V(producer, nir_lower_io_to_temporaries,
                 nir_shader_get_entrypoint(producer), true, false);
      NIR_PASS_V(producer, nir_lower_global_vars_to_local);
      NIR_PASS_V(producer, nir_split_var_copies);
      NIR_PASS_V(producer, nir_lower_var_copies);
   }
}

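/**
 * Callback for nir_opt_load_store_vectorize: decides whether two adjacent
 * memory accesses may be combined into one wider access, based on the
 * resulting bit size, component count, and alignment.
 */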
static bool
brw_nir_should_vectorize_mem(unsigned align_mul, unsigned align_offset,
                             unsigned bit_size,
                             unsigned num_components,
                             nir_intrinsic_instr *low,
                             nir_intrinsic_instr *high,
                             void *data)
{
   /* Don't combine things to generate 64-bit loads/stores.  We have to split
    * those back into 32-bit ones anyway and UBO loads aren't split in NIR so
    * we don't want to make a mess for the back-end.
    */
   if (bit_size > 32)
      return false;

   /* We can handle at most a vec4 right now.  Anything bigger would get
    * immediately split by brw_nir_lower_mem_access_bit_sizes anyway.
    */
   if (num_components > 4)
      return false;

   uint32_t align;
   if (align_offset)
      align = 1 << (ffs(align_offset) - 1);
   else
      align = align_mul;

   if (align < bit_size / 8)
      return false;

   return true;
}

static bool
combine_all_barriers(nir_intrinsic_instr *a,
                     nir_intrinsic_instr *b,
                     void *data)
{
   /* Translation to backend IR will get rid of modes we don't care about, so
    * no harm in always combining them.
    *
    * TODO: While HW has only ACQUIRE|RELEASE fences, we could improve the
    * scheduling so that it can take advantage of the different semantics.
    */
   nir_intrinsic_set_memory_modes(a, nir_intrinsic_memory_modes(a) |
                                     nir_intrinsic_memory_modes(b));
   nir_intrinsic_set_memory_semantics(a, nir_intrinsic_memory_semantics(a) |
                                         nir_intrinsic_memory_semantics(b));
   nir_intrinsic_set_memory_scope(a, MAX2(nir_intrinsic_memory_scope(a),
                                          nir_intrinsic_memory_scope(b)));
   return true;
}

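/**
 * Vectorize adjacent UBO/SSBO/shared/global accesses (scalar backend
 * only), then lower them to the access sizes the hardware supports and
 * clean up the leftovers.  When robust buffer access is enabled, the
 * affected modes are flagged so the vectorizer won't merge accesses in a
 * way that would change out-of-bounds behavior.
 */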
static void
brw_vectorize_lower_mem_access(nir_shader *nir,
                               const struct brw_compiler *compiler,
                               bool is_scalar,
                               bool robust_buffer_access)
{
   const struct intel_device_info *devinfo = compiler->devinfo;
   bool progress = false;

   if (is_scalar) {
      nir_load_store_vectorize_options options = {
         .modes = nir_var_mem_ubo | nir_var_mem_ssbo |
                  nir_var_mem_global | nir_var_mem_shared,
         .callback = brw_nir_should_vectorize_mem,
         .robust_modes = (nir_variable_mode)0,
      };

      if (robust_buffer_access) {
         options.robust_modes = nir_var_mem_ubo | nir_var_mem_ssbo |
                                nir_var_mem_global;
      }

      OPT(nir_opt_load_store_vectorize, &options);
   }

   OPT(brw_nir_lower_mem_access_bit_sizes, devinfo);

   while (progress) {
      progress = false;

      OPT(nir_lower_pack);
      OPT(nir_copy_prop);
      OPT(nir_opt_dce);
      OPT(nir_opt_cse);
      OPT(nir_opt_algebraic);
      OPT(nir_opt_constant_folding);
   }
}

static bool
nir_shader_has_local_variables(const nir_shader *nir)
{
   nir_foreach_function(func, nir) {
      if (func->impl && !exec_list_is_empty(&func->impl->locals))
         return true;
   }

   return false;
}

/* Prepare the given shader for codegen
 *
 * This function is intended to be called right before going into the actual
 * backend and is highly backend-specific.  Also, once this function has been
 * called on a shader, it will no longer be in SSA form so most optimizations
 * will not work.
 */
void
brw_postprocess_nir(nir_shader *nir, const struct brw_compiler *compiler,
                    bool is_scalar, bool debug_enabled,
                    bool robust_buffer_access)
{
   const struct intel_device_info *devinfo = compiler->devinfo;

   UNUSED bool progress; /* Written by OPT */

   OPT(nir_lower_bit_size, lower_bit_size_callback, (void *)compiler);

   OPT(brw_nir_lower_scoped_barriers);
   OPT(nir_opt_combine_memory_barriers, combine_all_barriers, NULL);

   do {
      progress = false;
      OPT(nir_opt_algebraic_before_ffma);
   } while (progress);

   if (devinfo->verx10 >= 125) {
      const nir_lower_idiv_options options = {
         .imprecise_32bit_lowering = false,
         .allow_fp16 = false
      };
      OPT(nir_lower_idiv, &options);
   }

   brw_nir_optimize(nir, compiler, is_scalar, false);

   if (is_scalar && nir_shader_has_local_variables(nir)) {
      OPT(nir_lower_vars_to_explicit_types, nir_var_function_temp,
          glsl_get_natural_size_align_bytes);
      OPT(nir_lower_explicit_io, nir_var_function_temp,
          nir_address_format_32bit_offset);
      brw_nir_optimize(nir, compiler, is_scalar, false);
   }

   brw_vectorize_lower_mem_access(nir, compiler, is_scalar,
                                  robust_buffer_access);

   if (OPT(nir_lower_int64))
      brw_nir_optimize(nir, compiler, is_scalar, false);

   if (devinfo->ver >= 6) {
      /* Try and fuse multiply-adds */
      OPT(brw_nir_opt_peephole_ffma);
   }

   if (OPT(nir_opt_comparison_pre)) {
      OPT(nir_copy_prop);
      OPT(nir_opt_dce);
      OPT(nir_opt_cse);

      /* Do the select peephole again.  nir_opt_comparison_pre (combined with
       * the other optimization passes) will have removed at least one
       * instruction from one of the branches of the if-statement, so now it
       * might be under the threshold of conversion to bcsel.
       *
       * See brw_nir_optimize for the explanation of is_vec4_tessellation.
       */
      const bool is_vec4_tessellation = !is_scalar &&
         (nir->info.stage == MESA_SHADER_TESS_CTRL ||
          nir->info.stage == MESA_SHADER_TESS_EVAL);
      OPT(nir_opt_peephole_select, 0, is_vec4_tessellation, false);
      OPT(nir_opt_peephole_select, 1, is_vec4_tessellation,
          compiler->devinfo->ver >= 6);
   }

   do {
      progress = false;
      if (OPT(nir_opt_algebraic_late)) {
         /* At this late stage, anything that makes more constants will wreak
          * havoc on the vec4 backend.  The handling of constants in the vec4
          * backend is not good.
          */
11257ec681f3Smrg         if (is_scalar)
11267ec681f3Smrg            OPT(nir_opt_constant_folding);
11277ec681f3Smrg
11287ec681f3Smrg         OPT(nir_copy_prop);
11297ec681f3Smrg         OPT(nir_opt_dce);
11307ec681f3Smrg         OPT(nir_opt_cse);
11317ec681f3Smrg      }
11327ec681f3Smrg   } while (progress);
11337ec681f3Smrg
   OPT(brw_nir_lower_conversions);

   if (is_scalar)
      OPT(nir_lower_alu_to_scalar, NULL, NULL);

   while (OPT(nir_opt_algebraic_distribute_src_mods)) {
      OPT(nir_copy_prop);
      OPT(nir_opt_dce);
      OPT(nir_opt_cse);
   }

   OPT(nir_copy_prop);
   OPT(nir_opt_dce);
   OPT(nir_opt_move, nir_move_comparisons);
   OPT(nir_opt_dead_cf);

   OPT(nir_lower_bool_to_int32);
   OPT(nir_copy_prop);
   OPT(nir_opt_dce);

   OPT(nir_lower_locals_to_regs);

   if (unlikely(debug_enabled)) {
      /* Re-index SSA defs so we print more sensible numbers. */
      nir_foreach_function(function, nir) {
         if (function->impl)
            nir_index_ssa_defs(function->impl);
      }

      fprintf(stderr, "NIR (SSA form) for %s shader:\n",
              _mesa_shader_stage_to_string(nir->info.stage));
      nir_print_shader(nir, stderr);
   }

   nir_validate_ssa_dominance(nir, "before nir_convert_from_ssa");

   OPT(nir_convert_from_ssa, true);

   if (!is_scalar) {
      OPT(nir_move_vec_src_uses_to_dest);
      OPT(nir_lower_vec_to_movs, NULL, NULL);
   }

   OPT(nir_opt_dce);

   if (OPT(nir_opt_rematerialize_compares))
      OPT(nir_opt_dce);

   /* This is the last pass we run before we start emitting stuff.  It
    * determines when we need to insert boolean resolves on Gen <= 5.  We
    * run it last because it stashes data in instr->pass_flags and we don't
    * want that to be squashed by other NIR passes.
    */
   if (devinfo->ver <= 5)
      brw_nir_analyze_boolean_resolves(nir);

   nir_sweep(nir);

   if (unlikely(debug_enabled)) {
      fprintf(stderr, "NIR (final form) for %s shader:\n",
              _mesa_shader_stage_to_string(nir->info.stage));
      nir_print_shader(nir, stderr);
   }
}

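/* Apply the texture-related bits of the program key by configuring
 * nir_lower_tex: rectangle-texture lowering, GL_CLAMP emulation, faked
 * swizzles, txd lowering, and YUV conversions, as dictated by hardware
 * generation and the state baked into the key.
 */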
static bool
brw_nir_apply_sampler_key(nir_shader *nir,
                          const struct brw_compiler *compiler,
                          const struct brw_sampler_prog_key_data *key_tex)
{
   const struct intel_device_info *devinfo = compiler->devinfo;
   nir_lower_tex_options tex_options = {
      .lower_txd_clamp_bindless_sampler = true,
      .lower_txd_clamp_if_sampler_index_not_lt_16 = true,
   };

   /* Iron Lake and prior require lowering of all rectangle textures */
   if (devinfo->ver < 6)
      tex_options.lower_rect = true;

   /* Prior to Broadwell, our hardware can't actually do GL_CLAMP */
   if (devinfo->ver < 8) {
      tex_options.saturate_s = key_tex->gl_clamp_mask[0];
      tex_options.saturate_t = key_tex->gl_clamp_mask[1];
      tex_options.saturate_r = key_tex->gl_clamp_mask[2];
   }

   /* Prior to Haswell, we have to fake texture swizzle */
   for (unsigned s = 0; s < MAX_SAMPLERS; s++) {
      if (key_tex->swizzles[s] == SWIZZLE_NOOP)
         continue;

      tex_options.swizzle_result |= BITFIELD_BIT(s);
      for (unsigned c = 0; c < 4; c++)
         tex_options.swizzles[s][c] = GET_SWZ(key_tex->swizzles[s], c);
   }

   /* Prior to Haswell, we have to lower gradients on shadow samplers */
   tex_options.lower_txd_shadow = devinfo->verx10 <= 70;

   tex_options.lower_y_uv_external = key_tex->y_uv_image_mask;
   tex_options.lower_y_u_v_external = key_tex->y_u_v_image_mask;
   tex_options.lower_yx_xuxv_external = key_tex->yx_xuxv_image_mask;
   tex_options.lower_xy_uxvx_external = key_tex->xy_uxvx_image_mask;
   tex_options.lower_ayuv_external = key_tex->ayuv_image_mask;
   tex_options.lower_xyuv_external = key_tex->xyuv_image_mask;
   tex_options.bt709_external = key_tex->bt709_mask;
   tex_options.bt2020_external = key_tex->bt2020_mask;

   /* Set up the array of scaling factors for each texture. */
   memcpy(&tex_options.scale_factors, &key_tex->scale_factors,
          sizeof(tex_options.scale_factors));

   return nir_lower_tex(nir, &tex_options);
}

static unsigned
get_subgroup_size(gl_shader_stage stage,
                  const struct brw_base_prog_key *key,
                  unsigned max_subgroup_size)
{
   switch (key->subgroup_size_type) {
   case BRW_SUBGROUP_SIZE_API_CONSTANT:
      /* We have to use the global constant size. */
      return BRW_SUBGROUP_SIZE;

   case BRW_SUBGROUP_SIZE_UNIFORM:
      /* It has to be uniform across all invocations but can vary per stage
       * if we want.  This gives us a bit more freedom.
       *
       * For compute, brw_nir_apply_key is called per-dispatch-width so this
       * is the actual subgroup size and not a maximum.  However, we only
       * invoke one size of any given compute shader so it's still guaranteed
       * to be uniform across invocations.
       */
      return max_subgroup_size;

   case BRW_SUBGROUP_SIZE_VARYING:
      /* The subgroup size is allowed to be fully varying.  For geometry
       * stages, we know it's always 8 which is max_subgroup_size so we can
       * return that.  For compute, brw_nir_apply_key is called once per
       * dispatch-width so max_subgroup_size is the real subgroup size.
       *
       * For fragment, we return 0 and let it fall through to the back-end
       * compiler.  This means we can't optimize based on subgroup size but
       * that's a risk the client took when it asked for a varying subgroup
       * size.
       */
      return stage == MESA_SHADER_FRAGMENT ? 0 : max_subgroup_size;

   case BRW_SUBGROUP_SIZE_REQUIRE_8:
   case BRW_SUBGROUP_SIZE_REQUIRE_16:
   case BRW_SUBGROUP_SIZE_REQUIRE_32:
      assert(stage == MESA_SHADER_COMPUTE);
      /* These enum values are expressly chosen to be equal to the subgroup
       * size that they require.
       */
      return key->subgroup_size_type;
   }

   unreachable("Invalid subgroup size type");
}

void
brw_nir_apply_key(nir_shader *nir,
                  const struct brw_compiler *compiler,
                  const struct brw_base_prog_key *key,
                  unsigned max_subgroup_size,
                  bool is_scalar)
{
   bool progress = false;

   OPT(brw_nir_apply_sampler_key, compiler, &key->tex);

   const nir_lower_subgroups_options subgroups_options = {
      .subgroup_size = get_subgroup_size(nir->info.stage, key,
                                         max_subgroup_size),
      .ballot_bit_size = 32,
      .ballot_components = 1,
      .lower_subgroup_masks = true,
   };
   OPT(nir_lower_subgroups, &subgroups_options);

   if (progress)
      brw_nir_optimize(nir, compiler, is_scalar, false);
}

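/* Map a NIR comparison opcode (including the 32-bit-bool and vector
 * all/any-equal variants) to the conditional modifier used on Intel EU
 * compare instructions.
 */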
enum brw_conditional_mod
brw_cmod_for_nir_comparison(nir_op op)
{
   switch (op) {
   case nir_op_flt:
   case nir_op_flt32:
   case nir_op_ilt:
   case nir_op_ilt32:
   case nir_op_ult:
   case nir_op_ult32:
      return BRW_CONDITIONAL_L;

   case nir_op_fge:
   case nir_op_fge32:
   case nir_op_ige:
   case nir_op_ige32:
   case nir_op_uge:
   case nir_op_uge32:
      return BRW_CONDITIONAL_GE;

   case nir_op_feq:
   case nir_op_feq32:
   case nir_op_ieq:
   case nir_op_ieq32:
   case nir_op_b32all_fequal2:
   case nir_op_b32all_iequal2:
   case nir_op_b32all_fequal3:
   case nir_op_b32all_iequal3:
   case nir_op_b32all_fequal4:
   case nir_op_b32all_iequal4:
      return BRW_CONDITIONAL_Z;

   case nir_op_fneu:
   case nir_op_fneu32:
   case nir_op_ine:
   case nir_op_ine32:
   case nir_op_b32any_fnequal2:
   case nir_op_b32any_inequal2:
   case nir_op_b32any_fnequal3:
   case nir_op_b32any_inequal3:
   case nir_op_b32any_fnequal4:
   case nir_op_b32any_inequal4:
      return BRW_CONDITIONAL_NZ;

   default:
      unreachable("Unsupported NIR comparison op");
   }
}

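/* Map a NIR atomic intrinsic to the corresponding Intel atomic operation.
 * One table entry covers the image, SSBO, shared, and global variants of
 * each operation.
 */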
uint32_t
brw_aop_for_nir_intrinsic(const nir_intrinsic_instr *atomic)
{
   switch (atomic->intrinsic) {
#define AOP_CASE(atom) \
   case nir_intrinsic_image_atomic_##atom:            \
   case nir_intrinsic_bindless_image_atomic_##atom:   \
   case nir_intrinsic_ssbo_atomic_##atom:             \
   case nir_intrinsic_shared_atomic_##atom:           \
   case nir_intrinsic_global_atomic_##atom

   AOP_CASE(add): {
      unsigned src_idx;
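      /* The position of the data operand varies by intrinsic: image atomics
       * carry image, coordinate, and sample sources first; SSBO atomics the
       * buffer index and offset; shared/global atomics just the offset or
       * address.
       */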
      switch (atomic->intrinsic) {
      case nir_intrinsic_image_atomic_add:
      case nir_intrinsic_bindless_image_atomic_add:
         src_idx = 3;
         break;
      case nir_intrinsic_ssbo_atomic_add:
         src_idx = 2;
         break;
      case nir_intrinsic_shared_atomic_add:
      case nir_intrinsic_global_atomic_add:
         src_idx = 1;
         break;
      default:
         unreachable("Invalid add atomic opcode");
      }

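      /* An atomic add of constant +1 or -1 can be strengthened to the
       * dedicated increment/decrement operations.
       */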
      if (nir_src_is_const(atomic->src[src_idx])) {
         int64_t add_val = nir_src_as_int(atomic->src[src_idx]);
         if (add_val == 1)
            return BRW_AOP_INC;
         else if (add_val == -1)
            return BRW_AOP_DEC;
      }
      return BRW_AOP_ADD;
   }

   AOP_CASE(imin):         return BRW_AOP_IMIN;
   AOP_CASE(umin):         return BRW_AOP_UMIN;
   AOP_CASE(imax):         return BRW_AOP_IMAX;
   AOP_CASE(umax):         return BRW_AOP_UMAX;
   AOP_CASE(and):          return BRW_AOP_AND;
   AOP_CASE(or):           return BRW_AOP_OR;
   AOP_CASE(xor):          return BRW_AOP_XOR;
   AOP_CASE(exchange):     return BRW_AOP_MOV;
   AOP_CASE(comp_swap):    return BRW_AOP_CMPWR;

#undef AOP_CASE
#define AOP_CASE(atom) \
   case nir_intrinsic_ssbo_atomic_##atom:          \
   case nir_intrinsic_shared_atomic_##atom:        \
   case nir_intrinsic_global_atomic_##atom

   AOP_CASE(fmin):         return BRW_AOP_FMIN;
   AOP_CASE(fmax):         return BRW_AOP_FMAX;
   AOP_CASE(fcomp_swap):   return BRW_AOP_FCMPWR;
   AOP_CASE(fadd):         return BRW_AOP_FADD;

#undef AOP_CASE

   default:
      unreachable("Unsupported NIR atomic intrinsic");
   }
}

enum brw_reg_type
brw_type_for_nir_type(const struct intel_device_info *devinfo,
                      nir_alu_type type)
{
   switch (type) {
   case nir_type_uint:
   case nir_type_uint32:
      return BRW_REGISTER_TYPE_UD;
   case nir_type_bool:
   case nir_type_int:
   case nir_type_bool32:
   case nir_type_int32:
      return BRW_REGISTER_TYPE_D;
   case nir_type_float:
   case nir_type_float32:
      return BRW_REGISTER_TYPE_F;
   case nir_type_float16:
      return BRW_REGISTER_TYPE_HF;
   case nir_type_float64:
      return BRW_REGISTER_TYPE_DF;
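   /* Pre-Gen8 hardware has no Q/UQ register types, so 64-bit integers land
    * in DF registers there, presumably serving as raw 64-bit containers for
    * the lowering passes to operate on.
    */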
   case nir_type_int64:
      return devinfo->ver < 8 ? BRW_REGISTER_TYPE_DF : BRW_REGISTER_TYPE_Q;
   case nir_type_uint64:
      return devinfo->ver < 8 ? BRW_REGISTER_TYPE_DF : BRW_REGISTER_TYPE_UQ;
   case nir_type_int16:
      return BRW_REGISTER_TYPE_W;
   case nir_type_uint16:
      return BRW_REGISTER_TYPE_UW;
   case nir_type_int8:
      return BRW_REGISTER_TYPE_B;
   case nir_type_uint8:
      return BRW_REGISTER_TYPE_UB;
   default:
      unreachable("unknown type");
   }

   return BRW_REGISTER_TYPE_F;
}

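/* Build a passthrough tessellation control shader: it writes the patch URB
 * header (the default tessellation levels, pulled from the first eight
 * uniform dwords) and copies every input slot named in the key straight
 * through to the matching output of the current invocation.
 */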
nir_shader *
brw_nir_create_passthrough_tcs(void *mem_ctx, const struct brw_compiler *compiler,
                               const nir_shader_compiler_options *options,
                               const struct brw_tcs_prog_key *key)
{
   nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_TESS_CTRL,
                                                  options, "passthrough TCS");
   ralloc_steal(mem_ctx, b.shader);
   nir_shader *nir = b.shader;
   nir_variable *var;
   nir_ssa_def *load;
   nir_ssa_def *zero = nir_imm_int(&b, 0);
   nir_ssa_def *invoc_id = nir_load_invocation_id(&b);

   nir->info.inputs_read = key->outputs_written &
      ~(VARYING_BIT_TESS_LEVEL_INNER | VARYING_BIT_TESS_LEVEL_OUTER);
   nir->info.outputs_written = key->outputs_written;
   nir->info.tess.tcs_vertices_out = key->input_vertices;
   nir->num_uniforms = 8 * sizeof(uint32_t);

   var = nir_variable_create(nir, nir_var_uniform, glsl_vec4_type(), "hdr_0");
   var->data.location = 0;
   var = nir_variable_create(nir, nir_var_uniform, glsl_vec4_type(), "hdr_1");
   var->data.location = 1;

   /* Write the patch URB header. */
   for (int i = 0; i <= 1; i++) {
      load = nir_load_uniform(&b, 4, 32, zero, .base = i * 4 * sizeof(uint32_t));

      nir_store_output(&b, load, zero,
                       .base = VARYING_SLOT_TESS_LEVEL_INNER - i,
                       .write_mask = WRITEMASK_XYZW);
   }

   /* Copy inputs to outputs. */
   uint64_t varyings = nir->info.inputs_read;

   while (varyings != 0) {
      const int varying = ffsll(varyings) - 1;

      load = nir_load_per_vertex_input(&b, 4, 32, invoc_id, zero, .base = varying);

      nir_store_per_vertex_output(&b, load, invoc_id, zero,
                                  .base = varying,
                                  .write_mask = WRITEMASK_XYZW);

      varyings &= ~BITFIELD64_BIT(varying);
   }

   nir_validate_shader(nir, "in brw_nir_create_passthrough_tcs");

   brw_preprocess_nir(compiler, nir, NULL);

   return nir;
}