/*
 * Copyright © 2015-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
23b8e80941Smrg
24b8e80941Smrg#include "brw_compiler.h"
25b8e80941Smrg#include "brw_shader.h"
26b8e80941Smrg#include "brw_eu.h"
27b8e80941Smrg#include "dev/gen_debug.h"
28b8e80941Smrg#include "compiler/nir/nir.h"
29b8e80941Smrg#include "main/errors.h"
30b8e80941Smrg#include "util/debug.h"
31b8e80941Smrg
/* NIR compiler options shared by both the scalar and vec4 back-ends.
 * (Fix: `.lower_fdiv = true` was listed twice; duplicate designated
 * initializers are redundant and warn under -Woverride-init.)
 */
#define COMMON_OPTIONS                                                        \
   .lower_sub = true,                                                         \
   .lower_fdiv = true,                                                        \
   .lower_scmp = true,                                                        \
   .lower_flrp16 = true,                                                      \
   .lower_fmod16 = true,                                                      \
   .lower_fmod32 = true,                                                      \
   .lower_fmod64 = false,                                                     \
   .lower_bitfield_extract = true,                                            \
   .lower_bitfield_insert = true,                                             \
   .lower_uadd_carry = true,                                                  \
   .lower_usub_borrow = true,                                                 \
   .lower_flrp64 = true,                                                      \
   .lower_isign = true,                                                       \
   .lower_ldexp = true,                                                       \
   .lower_device_index_to_zero = true,                                        \
   .native_integers = true,                                                   \
   .use_interpolated_input_intrinsics = true,                                 \
   .vertex_id_zero_based = true,                                              \
   .lower_base_vertex = true
53b8e80941Smrg
/* Extra NIR options applied only to stages compiled with the scalar
 * back-end: all pack/unpack conversions are lowered in NIR, and loops may
 * be unrolled up to 32 iterations.
 */
#define COMMON_SCALAR_OPTIONS                                                 \
   .lower_pack_half_2x16 = true,                                              \
   .lower_pack_snorm_2x16 = true,                                             \
   .lower_pack_snorm_4x8 = true,                                              \
   .lower_pack_unorm_2x16 = true,                                             \
   .lower_pack_unorm_4x8 = true,                                              \
   .lower_unpack_half_2x16 = true,                                            \
   .lower_unpack_snorm_2x16 = true,                                           \
   .lower_unpack_snorm_4x8 = true,                                            \
   .lower_unpack_unorm_2x16 = true,                                           \
   .lower_unpack_unorm_4x8 = true,                                            \
   .max_unroll_iterations = 32
66b8e80941Smrg
/* NIR options used for stages compiled with the scalar back-end. */
static const struct nir_shader_compiler_options scalar_nir_options = {
   COMMON_OPTIONS,
   COMMON_SCALAR_OPTIONS,
};
71b8e80941Smrg
/* NIR options used for stages compiled with the vec4 back-end.  Unlike the
 * scalar options, only the 2x16 snorm/unorm pack/unpack conversions are
 * lowered here, and byte/word extracts are lowered as well.
 */
static const struct nir_shader_compiler_options vector_nir_options = {
   COMMON_OPTIONS,

   /* In the vec4 backend, our dpN instruction replicates its result to all the
    * components of a vec4.  We would like NIR to give us replicated fdot
    * instructions because it can optimize better for us.
    */
   .fdot_replicates = true,

   .lower_pack_snorm_2x16 = true,
   .lower_pack_unorm_2x16 = true,
   .lower_unpack_snorm_2x16 = true,
   .lower_unpack_unorm_2x16 = true,
   .lower_extract_byte = true,
   .lower_extract_word = true,
   .max_unroll_iterations = 32,
};
89b8e80941Smrg
/* Create and initialize a brw_compiler for the given device.
 *
 * The compiler is allocated (zero-initialized) as a ralloc child of mem_ctx,
 * so freeing mem_ctx frees the compiler and the per-stage NIR option structs
 * allocated below.  Sets up register-allocation data, instruction compaction
 * tables, per-stage scalar-vs-vec4 back-end selection, and the GLSL/NIR
 * compiler options for every shader stage.
 */
struct brw_compiler *
brw_compiler_create(void *mem_ctx, const struct gen_device_info *devinfo)
{
   struct brw_compiler *compiler = rzalloc(mem_ctx, struct brw_compiler);

   compiler->devinfo = devinfo;

   /* Build the register sets used by the FS (scalar) and vec4 register
    * allocators, and the EU instruction compaction tables for this device.
    */
   brw_fs_alloc_reg_sets(compiler);
   brw_vec4_alloc_reg_set(compiler);
   brw_init_compaction_tables(devinfo);

   /* Opt-in via environment; defaults to false. */
   compiler->precise_trig = env_var_as_boolean("INTEL_PRECISE_TRIG", false);

   if (devinfo->gen >= 10) {
      /* We don't support vec4 mode on Cannonlake. */
      for (int i = MESA_SHADER_VERTEX; i < MESA_SHADER_STAGES; i++)
         compiler->scalar_stage[i] = true;
   } else {
      /* FS and CS are always scalar.  VS/TCS/TES/GS default to scalar on
       * Gen8+ but can each be reverted to vec4 via an environment variable.
       */
      compiler->scalar_stage[MESA_SHADER_VERTEX] =
         devinfo->gen >= 8 && env_var_as_boolean("INTEL_SCALAR_VS", true);
      compiler->scalar_stage[MESA_SHADER_TESS_CTRL] =
         devinfo->gen >= 8 && env_var_as_boolean("INTEL_SCALAR_TCS", true);
      compiler->scalar_stage[MESA_SHADER_TESS_EVAL] =
         devinfo->gen >= 8 && env_var_as_boolean("INTEL_SCALAR_TES", true);
      compiler->scalar_stage[MESA_SHADER_GEOMETRY] =
         devinfo->gen >= 8 && env_var_as_boolean("INTEL_SCALAR_GS", true);
      compiler->scalar_stage[MESA_SHADER_FRAGMENT] = true;
      compiler->scalar_stage[MESA_SHADER_COMPUTE] = true;
   }

   /* 64-bit operations lowered in NIR on every device. */
   nir_lower_int64_options int64_options =
      nir_lower_imul64 |
      nir_lower_isign64 |
      nir_lower_divmod64 |
      nir_lower_imul_high64;
   nir_lower_doubles_options fp64_options =
      nir_lower_drcp |
      nir_lower_dsqrt |
      nir_lower_drsq |
      nir_lower_dtrunc |
      nir_lower_dfloor |
      nir_lower_dceil |
      nir_lower_dfract |
      nir_lower_dround_even |
      nir_lower_dmod;

   /* Without hardware 64-bit types (or when forced via INTEL_DEBUG soft64),
    * lower the remaining 64-bit integer ops too and use full software fp64.
    */
   if (!devinfo->has_64bit_types || (INTEL_DEBUG & DEBUG_SOFT64)) {
      int64_options |= nir_lower_mov64 |
                       nir_lower_icmp64 |
                       nir_lower_iadd64 |
                       nir_lower_iabs64 |
                       nir_lower_ineg64 |
                       nir_lower_logic64 |
                       nir_lower_minmax64 |
                       nir_lower_shift64 |
                       nir_lower_extract64;
      fp64_options |= nir_lower_fp64_full_software;
   }

   /* The Bspec's section titled "Instruction_multiply[DevBDW+]" claims that
    * destination type can be Quadword and source type Doubleword for Gen8 and
    * Gen9. So, lower 64 bit multiply instruction on rest of the platforms.
    */
   if (devinfo->gen < 8 || devinfo->gen > 9)
      int64_options |= nir_lower_imul_2x32_64;

   /* We want the GLSL compiler to emit code that uses condition codes */
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      compiler->glsl_compiler_options[i].MaxUnrollIterations = 0;
      /* NOTE(review): presumably the Gen<6 limit of 16 reflects a hardware
       * control-flow nesting restriction — confirm against the back-end.
       */
      compiler->glsl_compiler_options[i].MaxIfDepth =
         devinfo->gen < 6 ? 16 : UINT_MAX;

      compiler->glsl_compiler_options[i].EmitNoIndirectInput = true;
      compiler->glsl_compiler_options[i].EmitNoIndirectUniform = false;

      bool is_scalar = compiler->scalar_stage[i];

      /* Only scalar stages forbid indirect output/temp addressing; the vec4
       * back-end is optimized for AOS (vec4) data instead.
       */
      compiler->glsl_compiler_options[i].EmitNoIndirectOutput = is_scalar;
      compiler->glsl_compiler_options[i].EmitNoIndirectTemp = is_scalar;
      compiler->glsl_compiler_options[i].OptimizeForAOS = !is_scalar;

      /* Per-stage copy of the shared option template, with device- and
       * backend-specific tweaks applied below.
       */
      struct nir_shader_compiler_options *nir_options =
         rzalloc(compiler, struct nir_shader_compiler_options);
      if (is_scalar) {
         *nir_options = scalar_nir_options;

         if (devinfo->gen >= 11) {
            nir_options->lower_flrp32 = true;
         }
      } else {
         *nir_options = vector_nir_options;

         if (devinfo->gen < 6) {
            /* Prior to Gen6, there are no three source operations. */
            nir_options->lower_flrp32 = true;
         }
      }

      /* Prior to Gen6, there are no three source operations. */
      nir_options->lower_ffma = devinfo->gen < 6;

      nir_options->lower_bitfield_reverse = devinfo->gen < 7;

      nir_options->lower_int64_options = int64_options;
      nir_options->lower_doubles_options = fp64_options;
      compiler->glsl_compiler_options[i].NirOptions = nir_options;

      compiler->glsl_compiler_options[i].ClampBlockIndicesToArrayBounds = true;
   }

   /* Overrides: tessellation stages keep indirect input addressing, the TCS
    * keeps indirect output addressing, and a scalar GS keeps indirect inputs.
    */
   compiler->glsl_compiler_options[MESA_SHADER_TESS_CTRL].EmitNoIndirectInput = false;
   compiler->glsl_compiler_options[MESA_SHADER_TESS_EVAL].EmitNoIndirectInput = false;
   compiler->glsl_compiler_options[MESA_SHADER_TESS_CTRL].EmitNoIndirectOutput = false;

   if (compiler->scalar_stage[MESA_SHADER_GEOMETRY])
      compiler->glsl_compiler_options[MESA_SHADER_GEOMETRY].EmitNoIndirectInput = false;

   return compiler;
}
209b8e80941Smrg
/* Shift a new low-order bit into *val: the existing contents move up one
 * position and the LSB becomes 1 when add is true, 0 otherwise.
 */
static void
insert_u64_bit(uint64_t *val, bool add)
{
   uint64_t shifted = *val << 1;
   if (add)
      shifted |= 1;
   *val = shifted;
}
215b8e80941Smrg
216b8e80941Smrguint64_t
217b8e80941Smrgbrw_get_compiler_config_value(const struct brw_compiler *compiler)
218b8e80941Smrg{
219b8e80941Smrg   uint64_t config = 0;
220b8e80941Smrg   insert_u64_bit(&config, compiler->precise_trig);
221b8e80941Smrg   if (compiler->devinfo->gen >= 8 && compiler->devinfo->gen < 10) {
222b8e80941Smrg      insert_u64_bit(&config, compiler->scalar_stage[MESA_SHADER_VERTEX]);
223b8e80941Smrg      insert_u64_bit(&config, compiler->scalar_stage[MESA_SHADER_TESS_CTRL]);
224b8e80941Smrg      insert_u64_bit(&config, compiler->scalar_stage[MESA_SHADER_TESS_EVAL]);
225b8e80941Smrg      insert_u64_bit(&config, compiler->scalar_stage[MESA_SHADER_GEOMETRY]);
226b8e80941Smrg   }
227b8e80941Smrg   uint64_t debug_bits = INTEL_DEBUG;
228b8e80941Smrg   uint64_t mask = DEBUG_DISK_CACHE_MASK;
229b8e80941Smrg   while (mask != 0) {
230b8e80941Smrg      const uint64_t bit = 1ULL << (ffsll(mask) - 1);
231b8e80941Smrg      insert_u64_bit(&config, (debug_bits & bit) != 0);
232b8e80941Smrg      mask &= ~bit;
233b8e80941Smrg   }
234b8e80941Smrg   return config;
235b8e80941Smrg}
236b8e80941Smrg
237b8e80941Smrgunsigned
238b8e80941Smrgbrw_prog_data_size(gl_shader_stage stage)
239b8e80941Smrg{
240b8e80941Smrg   STATIC_ASSERT(MESA_SHADER_VERTEX == 0);
241b8e80941Smrg   STATIC_ASSERT(MESA_SHADER_TESS_CTRL == 1);
242b8e80941Smrg   STATIC_ASSERT(MESA_SHADER_TESS_EVAL == 2);
243b8e80941Smrg   STATIC_ASSERT(MESA_SHADER_GEOMETRY == 3);
244b8e80941Smrg   STATIC_ASSERT(MESA_SHADER_FRAGMENT == 4);
245b8e80941Smrg   STATIC_ASSERT(MESA_SHADER_COMPUTE == 5);
246b8e80941Smrg   static const size_t stage_sizes[] = {
247b8e80941Smrg      sizeof(struct brw_vs_prog_data),
248b8e80941Smrg      sizeof(struct brw_tcs_prog_data),
249b8e80941Smrg      sizeof(struct brw_tes_prog_data),
250b8e80941Smrg      sizeof(struct brw_gs_prog_data),
251b8e80941Smrg      sizeof(struct brw_wm_prog_data),
252b8e80941Smrg      sizeof(struct brw_cs_prog_data),
253b8e80941Smrg   };
254b8e80941Smrg   assert((int)stage >= 0 && stage < ARRAY_SIZE(stage_sizes));
255b8e80941Smrg   return stage_sizes[stage];
256b8e80941Smrg}
257b8e80941Smrg
258b8e80941Smrgunsigned
259b8e80941Smrgbrw_prog_key_size(gl_shader_stage stage)
260b8e80941Smrg{
261b8e80941Smrg   static const size_t stage_sizes[] = {
262b8e80941Smrg      sizeof(struct brw_vs_prog_key),
263b8e80941Smrg      sizeof(struct brw_tcs_prog_key),
264b8e80941Smrg      sizeof(struct brw_tes_prog_key),
265b8e80941Smrg      sizeof(struct brw_gs_prog_key),
266b8e80941Smrg      sizeof(struct brw_wm_prog_key),
267b8e80941Smrg      sizeof(struct brw_cs_prog_key),
268b8e80941Smrg   };
269b8e80941Smrg   assert((int)stage >= 0 && stage < ARRAY_SIZE(stage_sizes));
270b8e80941Smrg   return stage_sizes[stage];
271b8e80941Smrg}
272