nir_loop_analyze.c revision b8e80941
/*
 * Copyright © 2015 Thomas Helland
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_constant_expressions.h"
#include "nir_loop_analyze.h"

typedef enum {
   undefined,
   invariant,
   not_invariant,
   basic_induction
} nir_loop_variable_type;

typedef struct nir_basic_induction_var {
   nir_alu_instr *alu;                      /* The def of the alu-operation */
   nir_ssa_def *def_outside_loop;           /* The phi-src outside the loop */
} nir_basic_induction_var;

typedef struct {
   /* A link for the work list */
   struct list_head process_link;

   bool in_loop;

   /* The ssa_def associated with this info */
   nir_ssa_def *def;

   /* The type of this ssa_def */
   nir_loop_variable_type type;

   /* If this is of type basic_induction */
   struct nir_basic_induction_var *ind;

   /* True if variable is in an if branch */
   bool in_if_branch;

   /* True if variable is in a nested loop */
   bool in_nested_loop;

} nir_loop_variable;

typedef struct {
   /* The loop we store information for */
   nir_loop *loop;

   /* Loop_variable for all ssa_defs in function */
   nir_loop_variable *loop_vars;

   /* A list of the loop_vars to analyze */
   struct list_head process_list;

   nir_variable_mode indirect_mask;

} loop_info_state;

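/* Return the loop_variable entry tracking the given SSA def. */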
static nir_loop_variable *
get_loop_var(nir_ssa_def *value, loop_info_state *state)
{
   return &(state->loop_vars[value->index]);
}

typedef struct {
   loop_info_state *state;
   bool in_if_branch;
   bool in_nested_loop;
} init_loop_state;

static bool
init_loop_def(nir_ssa_def *def, void *void_init_loop_state)
{
   init_loop_state *loop_init_state = void_init_loop_state;
   nir_loop_variable *var = get_loop_var(def, loop_init_state->state);

   if (loop_init_state->in_nested_loop) {
      var->in_nested_loop = true;
   } else if (loop_init_state->in_if_branch) {
      var->in_if_branch = true;
   } else {
      /* Add to the tail of the list. That way we start at the beginning of
       * the defs in the loop instead of the end when walking the list. This
       * means fewer recursive calls. Only add defs that are not in nested
       * loops or conditional blocks.
       */
      list_addtail(&var->process_link, &loop_init_state->state->process_list);
   }

   var->in_loop = true;

   return true;
}

/** Calculate an estimated cost in number of instructions
 *
 * We do this so that we don't unroll loops which will later get massively
 * inflated due to int64 or fp64 lowering.  The estimates provided here don't
 * have to be massively accurate; they just have to be good enough that loop
 * unrolling doesn't cause things to blow up too much.
 */
static unsigned
instr_cost(nir_instr *instr, const nir_shader_compiler_options *options)
{
   if (instr->type == nir_instr_type_intrinsic ||
       instr->type == nir_instr_type_tex)
      return 1;

   if (instr->type != nir_instr_type_alu)
      return 0;

   nir_alu_instr *alu = nir_instr_as_alu(instr);
   const nir_op_info *info = &nir_op_infos[alu->op];

   /* Assume everything 16 or 32-bit is cheap.
    *
    * There are no 64-bit ops that don't have a 64-bit thing as their
    * destination or first source.
    */
   if (nir_dest_bit_size(alu->dest.dest) < 64 &&
       nir_src_bit_size(alu->src[0].src) < 64)
      return 1;

   bool is_fp64 = nir_dest_bit_size(alu->dest.dest) == 64 &&
      nir_alu_type_get_base_type(info->output_type) == nir_type_float;
   for (unsigned i = 0; i < info->num_inputs; i++) {
      if (nir_src_bit_size(alu->src[i].src) == 64 &&
          nir_alu_type_get_base_type(info->input_types[i]) == nir_type_float)
         is_fp64 = true;
   }

   if (is_fp64) {
      /* If it's something lowered normally, it's expensive. */
      unsigned cost = 1;
      if (options->lower_doubles_options &
          nir_lower_doubles_op_to_options_mask(alu->op))
         cost *= 20;

      /* If it's full software, it's even more expensive */
      if (options->lower_doubles_options & nir_lower_fp64_full_software)
         cost *= 100;

      return cost;
   } else {
      if (options->lower_int64_options &
          nir_lower_int64_op_to_options_mask(alu->op)) {
         /* These require doing the division algorithm. */
         if (alu->op == nir_op_idiv || alu->op == nir_op_udiv ||
             alu->op == nir_op_imod || alu->op == nir_op_umod ||
             alu->op == nir_op_irem)
            return 100;

         /* Other int64 lowering isn't usually all that expensive */
         return 5;
      }

      return 1;
   }
}

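/* Accumulate the estimated cost of a block's instructions and register each
 * SSA def it contains with the analysis state.
 */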
static bool
init_loop_block(nir_block *block, loop_info_state *state,
                bool in_if_branch, bool in_nested_loop,
                const nir_shader_compiler_options *options)
{
   init_loop_state init_state = {.in_if_branch = in_if_branch,
                                 .in_nested_loop = in_nested_loop,
                                 .state = state };

   nir_foreach_instr(instr, block) {
      state->loop->info->instr_cost += instr_cost(instr, options);
      nir_foreach_ssa_def(instr, init_loop_def, &init_state);
   }

   return true;
}

static inline bool
is_var_alu(nir_loop_variable *var)
{
   return var->def->parent_instr->type == nir_instr_type_alu;
}

static inline bool
is_var_constant(nir_loop_variable *var)
{
   return var->def->parent_instr->type == nir_instr_type_load_const;
}

static inline bool
is_var_phi(nir_loop_variable *var)
{
   return var->def->parent_instr->type == nir_instr_type_phi;
}

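/* Recursively determine whether a def is loop-invariant, caching the result
 * in its loop_variable entry.  Returns true if the def is invariant.
 */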
static inline bool
mark_invariant(nir_ssa_def *def, loop_info_state *state)
{
   nir_loop_variable *var = get_loop_var(def, state);

   if (var->type == invariant)
      return true;

   if (!var->in_loop) {
      var->type = invariant;
      return true;
   }

   if (var->type == not_invariant)
      return false;

   if (is_var_alu(var)) {
      nir_alu_instr *alu = nir_instr_as_alu(def->parent_instr);

      for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
         if (!mark_invariant(alu->src[i].src.ssa, state)) {
            var->type = not_invariant;
            return false;
         }
      }
      var->type = invariant;
      return true;
   }

   /* Phis shouldn't be invariant except if one operand is invariant, and the
    * other is the phi itself. These should be removed by opt_remove_phis.
    * load_consts are already set to invariant and constant during init,
    * and so should return earlier. Remaining op_codes are set undefined.
    */
   var->type = not_invariant;
   return false;
}

static void
compute_invariance_information(loop_info_state *state)
{
   /* An expression is invariant in a loop L if:
    *  (base cases)
    *    – it’s a constant
    *    – it’s a variable use, all of whose single defs are outside of L
    *  (inductive cases)
    *    – it’s a pure computation all of whose args are loop invariant
    *    – it’s a variable use whose single reaching def has a
    *      loop-invariant rhs
    */
   list_for_each_entry_safe(nir_loop_variable, var, &state->process_list,
                            process_link) {
      assert(!var->in_if_branch && !var->in_nested_loop);

      if (mark_invariant(var->def, state))
         list_del(&var->process_link);
   }
}

/* If all of the instruction sources point to identical ALU instructions (as
 * per nir_instrs_equal), return one of the ALU instructions.  Otherwise,
 * return NULL.
 */
static nir_alu_instr *
phi_instr_as_alu(nir_phi_instr *phi)
{
   nir_alu_instr *first = NULL;
   nir_foreach_phi_src(src, phi) {
      assert(src->src.is_ssa);
      if (src->src.ssa->parent_instr->type != nir_instr_type_alu)
         return NULL;

      nir_alu_instr *alu = nir_instr_as_alu(src->src.ssa->parent_instr);
      if (first == NULL) {
         first = alu;
      } else {
         if (!nir_instrs_equal(&first->instr, &alu->instr))
            return NULL;
      }
   }

   return first;
}

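/* Check that the given vectorized ALU source uses the identity swizzle, i.e.
 * component i of the source feeds component i of the destination.
 */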
static bool
alu_src_has_identity_swizzle(nir_alu_instr *alu, unsigned src_idx)
{
   assert(nir_op_infos[alu->op].input_sizes[src_idx] == 0);
   assert(alu->dest.dest.is_ssa);
   for (unsigned i = 0; i < alu->dest.dest.ssa.num_components; i++) {
      if (alu->src[src_idx].swizzle[i] != i)
         return false;
   }

   return true;
}

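/* Detect basic induction variables: phis whose sources are a def from outside
 * the loop (the initial value) and a two-source ALU op combining the phi
 * itself with a constant step.  Returns true if any were found.
 */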
static bool
compute_induction_information(loop_info_state *state)
{
   bool found_induction_var = false;
   list_for_each_entry_safe(nir_loop_variable, var, &state->process_list,
                            process_link) {

      /* It can't be an induction variable if it is invariant. Invariants and
       * things in nested loops or conditionals should have been removed from
       * the list by compute_invariance_information().
       */
      assert(!var->in_if_branch && !var->in_nested_loop &&
             var->type != invariant);

      /* We are only interested in checking phis for the basic induction
       * variable case as it's simple to detect. All basic induction variables
       * have a phi node.
       */
      if (!is_var_phi(var))
         continue;

      nir_phi_instr *phi = nir_instr_as_phi(var->def->parent_instr);
      nir_basic_induction_var *biv = rzalloc(state, nir_basic_induction_var);

      nir_loop_variable *alu_src_var = NULL;
      nir_foreach_phi_src(src, phi) {
         nir_loop_variable *src_var = get_loop_var(src->src.ssa, state);

         /* If one of the sources is in an if branch or nested loop then don't
          * attempt to go any further.
          */
         if (src_var->in_if_branch || src_var->in_nested_loop)
            break;

         /* Detect induction variables that are incremented in both branches
          * of an unnested if rather than in a loop block.
          */
         if (is_var_phi(src_var)) {
            nir_phi_instr *src_phi =
               nir_instr_as_phi(src_var->def->parent_instr);
            nir_alu_instr *src_phi_alu = phi_instr_as_alu(src_phi);
            if (src_phi_alu) {
               src_var = get_loop_var(&src_phi_alu->dest.dest.ssa, state);
               if (!src_var->in_if_branch)
                  break;
            }
         }

         if (!src_var->in_loop && !biv->def_outside_loop) {
            biv->def_outside_loop = src_var->def;
         } else if (is_var_alu(src_var) && !biv->alu) {
            alu_src_var = src_var;
            nir_alu_instr *alu = nir_instr_as_alu(src_var->def->parent_instr);

            if (nir_op_infos[alu->op].num_inputs == 2) {
               for (unsigned i = 0; i < 2; i++) {
                  /* Is one of the operands const and the other the phi?  The
                   * phi source can't be swizzled in any way.
                   */
                  if (nir_src_is_const(alu->src[i].src) &&
                      alu->src[1-i].src.ssa == &phi->dest.ssa &&
                      alu_src_has_identity_swizzle(alu, 1 - i))
                     biv->alu = alu;
               }
            }

            if (!biv->alu)
               break;
         } else {
            biv->alu = NULL;
            break;
         }
      }

      if (biv->alu && biv->def_outside_loop &&
          biv->def_outside_loop->parent_instr->type == nir_instr_type_load_const) {
         alu_src_var->type = basic_induction;
         alu_src_var->ind = biv;
         var->type = basic_induction;
         var->ind = biv;

         found_induction_var = true;
      } else {
         ralloc_free(biv);
      }
   }
   return found_induction_var;
}

static bool
initialize_ssa_def(nir_ssa_def *def, void *void_state)
{
   loop_info_state *state = void_state;
   nir_loop_variable *var = get_loop_var(def, state);

   var->in_loop = false;
   var->def = def;

   if (def->parent_instr->type == nir_instr_type_load_const) {
      var->type = invariant;
   } else {
      var->type = undefined;
   }

   return true;
}

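/* Collect the loop terminators: top-level ifs in the loop body where one
 * branch ends in a break.  Returns false if no terminator is found or if the
 * loop is flagged as too complex to analyze.
 */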
static bool
find_loop_terminators(loop_info_state *state)
{
   bool success = false;
   foreach_list_typed_safe(nir_cf_node, node, node, &state->loop->body) {
      if (node->type == nir_cf_node_if) {
         nir_if *nif = nir_cf_node_as_if(node);

         nir_block *break_blk = NULL;
         nir_block *continue_from_blk = NULL;
         bool continue_from_then = true;

         nir_block *last_then = nir_if_last_then_block(nif);
         nir_block *last_else = nir_if_last_else_block(nif);
         if (nir_block_ends_in_break(last_then)) {
            break_blk = last_then;
            continue_from_blk = last_else;
            continue_from_then = false;
         } else if (nir_block_ends_in_break(last_else)) {
            break_blk = last_else;
            continue_from_blk = last_then;
         }

         /* If there is a break then we should find a terminator. If we
          * cannot find a loop terminator, but there is a break-statement,
          * then we should return false so that we do not try to find a
          * trip-count.
          */
         if (!nir_is_trivial_loop_if(nif, break_blk)) {
            state->loop->info->complex_loop = true;
            return false;
         }

         /* Continue if the if contained no jumps at all */
         if (!break_blk)
            continue;

         if (nif->condition.ssa->parent_instr->type == nir_instr_type_phi) {
            state->loop->info->complex_loop = true;
            return false;
         }

         nir_loop_terminator *terminator =
            rzalloc(state->loop->info, nir_loop_terminator);

         list_addtail(&terminator->loop_terminator_link,
                      &state->loop->info->loop_terminator_list);

         terminator->nif = nif;
         terminator->break_block = break_blk;
         terminator->continue_from_block = continue_from_blk;
         terminator->continue_from_then = continue_from_then;
         terminator->conditional_instr = nif->condition.ssa->parent_instr;

         success = true;
      }
   }

   return success;
}

/* This function looks for an array access within a loop that uses an
 * induction variable for the array index. If found it returns the size of the
 * array, otherwise 0 is returned. If we find an induction var we pass it back
 * to the caller via array_index_out.
 */
static unsigned
find_array_access_via_induction(loop_info_state *state,
                                nir_deref_instr *deref,
                                nir_loop_variable **array_index_out)
{
   for (nir_deref_instr *d = deref; d; d = nir_deref_instr_parent(d)) {
      if (d->deref_type != nir_deref_type_array)
         continue;

      assert(d->arr.index.is_ssa);
      nir_loop_variable *array_index = get_loop_var(d->arr.index.ssa, state);

      if (array_index->type != basic_induction)
         continue;

      if (array_index_out)
         *array_index_out = array_index;

      nir_deref_instr *parent = nir_deref_instr_parent(d);
      if (glsl_type_is_array_or_matrix(parent->type)) {
         return glsl_get_length(parent->type);
      } else {
         assert(glsl_type_is_vector(parent->type));
         return glsl_get_vector_elements(parent->type);
      }
   }

   return 0;
}

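/* Guess a limit for the induction variable from the size of the smallest
 * array it is used to index inside the loop.
 */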
static bool
guess_loop_limit(loop_info_state *state, nir_const_value *limit_val,
                 nir_ssa_scalar basic_ind)
{
   unsigned min_array_size = 0;

   nir_foreach_block_in_cf_node(block, &state->loop->cf_node) {
      nir_foreach_instr(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

         /* Check for arrays variably-indexed by a loop induction variable. */
         if (intrin->intrinsic == nir_intrinsic_load_deref ||
             intrin->intrinsic == nir_intrinsic_store_deref ||
             intrin->intrinsic == nir_intrinsic_copy_deref) {

            nir_loop_variable *array_idx = NULL;
            unsigned array_size =
               find_array_access_via_induction(state,
                                               nir_src_as_deref(intrin->src[0]),
                                               &array_idx);
            if (array_idx && basic_ind.def == array_idx->def &&
                (min_array_size == 0 || min_array_size > array_size)) {
               /* Array indices are scalars */
               assert(basic_ind.def->num_components == 1);
               min_array_size = array_size;
            }

            if (intrin->intrinsic != nir_intrinsic_copy_deref)
               continue;

            array_size =
               find_array_access_via_induction(state,
                                               nir_src_as_deref(intrin->src[1]),
                                               &array_idx);
            if (array_idx && basic_ind.def == array_idx->def &&
                (min_array_size == 0 || min_array_size > array_size)) {
               /* Array indices are scalars */
               assert(basic_ind.def->num_components == 1);
               min_array_size = array_size;
            }
         }
      }
   }

   if (min_array_size) {
      *limit_val = nir_const_value_for_uint(min_array_size,
                                            basic_ind.def->bit_size);
      return true;
   }

   return false;
}

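/* If the limit is computed as an imin/fmin with a constant source, use that
 * constant as an upper bound on the limit.  The exact trip count is then
 * unknown.
 */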
static bool
try_find_limit_of_alu(nir_ssa_scalar limit, nir_const_value *limit_val,
                      nir_loop_terminator *terminator, loop_info_state *state)
{
   if (!nir_ssa_scalar_is_alu(limit))
      return false;

   nir_op limit_op = nir_ssa_scalar_alu_op(limit);
   if (limit_op == nir_op_imin || limit_op == nir_op_fmin) {
      for (unsigned i = 0; i < 2; i++) {
         nir_ssa_scalar src = nir_ssa_scalar_chase_alu_src(limit, i);
         if (nir_ssa_scalar_is_const(src)) {
            *limit_val = nir_ssa_scalar_as_const_value(src);
            terminator->exact_trip_count_unknown = true;
            return true;
         }
      }
   }

   return false;
}

static nir_const_value
eval_const_unop(nir_op op, unsigned bit_size, nir_const_value src0)
{
   assert(nir_op_infos[op].num_inputs == 1);
   nir_const_value dest;
   nir_const_value *src[1] = { &src0 };
   nir_eval_const_opcode(op, &dest, 1, bit_size, src);
   return dest;
}

static nir_const_value
eval_const_binop(nir_op op, unsigned bit_size,
                 nir_const_value src0, nir_const_value src1)
{
   assert(nir_op_infos[op].num_inputs == 2);
   nir_const_value dest;
   nir_const_value *src[2] = { &src0, &src1 };
   nir_eval_const_opcode(op, &dest, 1, bit_size, src);
   return dest;
}

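/* Compute the raw iteration count as (limit - initial) / step, using signed,
 * unsigned or float arithmetic to match the comparison opcode.  Returns -1
 * for unhandled opcodes or counts that do not fit in an int.
 */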
static int32_t
get_iteration(nir_op cond_op, nir_const_value initial, nir_const_value step,
              nir_const_value limit, unsigned bit_size)
{
   nir_const_value span, iter;

   switch (cond_op) {
   case nir_op_ige:
   case nir_op_ilt:
   case nir_op_ieq:
   case nir_op_ine:
      span = eval_const_binop(nir_op_isub, bit_size, limit, initial);
      iter = eval_const_binop(nir_op_idiv, bit_size, span, step);
      break;

   case nir_op_uge:
   case nir_op_ult:
      span = eval_const_binop(nir_op_isub, bit_size, limit, initial);
      iter = eval_const_binop(nir_op_udiv, bit_size, span, step);
      break;

   case nir_op_fge:
   case nir_op_flt:
   case nir_op_feq:
   case nir_op_fne:
      span = eval_const_binop(nir_op_fsub, bit_size, limit, initial);
      iter = eval_const_binop(nir_op_fdiv, bit_size, span, step);
      iter = eval_const_unop(nir_op_f2i64, bit_size, iter);
      break;

   default:
      return -1;
   }

   uint64_t iter_u64 = nir_const_value_as_uint(iter, bit_size);
   return iter_u64 > INT_MAX ? -1 : (int)iter_u64;
}

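/* Check whether a candidate iteration count actually satisfies the loop exit
 * condition by evaluating initial + iter * step against the limit.
 */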
static bool
test_iterations(int32_t iter_int, nir_const_value *step,
                nir_const_value *limit, nir_op cond_op, unsigned bit_size,
                nir_alu_type induction_base_type,
                nir_const_value *initial, bool limit_rhs, bool invert_cond)
{
   assert(nir_op_infos[cond_op].num_inputs == 2);

   nir_const_value iter_src;
   nir_op mul_op;
   nir_op add_op;
   switch (induction_base_type) {
   case nir_type_float:
      iter_src = nir_const_value_for_float(iter_int, bit_size);
      mul_op = nir_op_fmul;
      add_op = nir_op_fadd;
      break;
   case nir_type_int:
   case nir_type_uint:
      iter_src = nir_const_value_for_int(iter_int, bit_size);
      mul_op = nir_op_imul;
      add_op = nir_op_iadd;
      break;
   default:
      unreachable("Unhandled induction variable base type!");
   }

   /* Multiply the iteration count we are testing by the number of times we
    * step the induction variable each iteration.
    */
   nir_const_value *mul_src[2] = { &iter_src, step };
   nir_const_value mul_result;
   nir_eval_const_opcode(mul_op, &mul_result, 1, bit_size, mul_src);

   /* Add the initial value to the accumulated induction variable total */
   nir_const_value *add_src[2] = { &mul_result, initial };
   nir_const_value add_result;
   nir_eval_const_opcode(add_op, &add_result, 1, bit_size, add_src);

   nir_const_value *src[2];
   src[limit_rhs ? 0 : 1] = &add_result;
   src[limit_rhs ? 1 : 0] = limit;

   /* Evaluate the loop exit condition */
   nir_const_value result;
   nir_eval_const_opcode(cond_op, &result, 1, bit_size, src);

   return invert_cond ? !result.b : result.b;
}

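/* Derive the trip count from the initial value, step and limit, verifying the
 * result with test_iterations() to catch off-by-one errors and ill-formed
 * loops.  Returns -1 on failure.
 */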
static int
calculate_iterations(nir_const_value *initial, nir_const_value *step,
                     nir_const_value *limit, nir_alu_instr *alu,
                     nir_ssa_scalar cond, nir_op alu_op, bool limit_rhs,
                     bool invert_cond)
{
   assert(initial != NULL && step != NULL && limit != NULL);

   /* nir_op_isub should have been lowered away by this point */
   assert(alu->op != nir_op_isub);

   /* Make sure the alu type for our induction variable is compatible with the
    * conditional alu's input type. If it's not, something has gone really
    * wrong.
    */
   nir_alu_type induction_base_type =
      nir_alu_type_get_base_type(nir_op_infos[alu->op].output_type);
   if (induction_base_type == nir_type_int || induction_base_type == nir_type_uint) {
      assert(nir_alu_type_get_base_type(nir_op_infos[alu_op].input_types[1]) == nir_type_int ||
             nir_alu_type_get_base_type(nir_op_infos[alu_op].input_types[1]) == nir_type_uint);
   } else {
      assert(nir_alu_type_get_base_type(nir_op_infos[alu_op].input_types[0]) ==
             induction_base_type);
   }

   /* Check for unsupported alu operations */
   if (alu->op != nir_op_iadd && alu->op != nir_op_fadd)
      return -1;

   /* do-while loops can increment the starting value before the condition is
    * checked. e.g.
    *
    *    do {
    *        ndx++;
    *     } while (ndx < 3);
    *
    * Here we check if the induction variable is used directly by the loop
    * condition and if so we assume we need to step the initial value.
    */
   unsigned trip_offset = 0;
   nir_alu_instr *cond_alu = nir_instr_as_alu(cond.def->parent_instr);
   if (cond_alu->src[0].src.ssa == &alu->dest.dest.ssa ||
       cond_alu->src[1].src.ssa == &alu->dest.dest.ssa) {
      trip_offset = 1;
   }

   assert(nir_src_bit_size(alu->src[0].src) ==
          nir_src_bit_size(alu->src[1].src));
   unsigned bit_size = nir_src_bit_size(alu->src[0].src);
   int iter_int = get_iteration(alu_op, *initial, *step, *limit, bit_size);

   /* If iter_int is negative the loop is ill-formed or the conditional is
    * unsigned with a huge iteration count, so don't bother going any further.
    */
   if (iter_int < 0)
      return -1;

   /* An explanation from the GLSL unrolling pass:
    *
    * Make sure that the calculated number of iterations satisfies the exit
    * condition.  This is needed to catch off-by-one errors and some types of
    * ill-formed loops.  For example, we need to detect that the following
    * loop does not have a maximum iteration count.
    *
    *    for (float x = 0.0; x != 0.9; x += 0.2);
    */
   for (int bias = -1; bias <= 1; bias++) {
      const int iter_bias = iter_int + bias;

      if (test_iterations(iter_bias, step, limit, alu_op, bit_size,
                          induction_base_type, initial,
                          limit_rhs, invert_cond)) {
         return iter_bias > 0 ? iter_bias - trip_offset : iter_bias;
      }
   }

   return -1;
}

static nir_op
inverse_comparison(nir_op alu_op)
{
   switch (alu_op) {
   case nir_op_fge:
      return nir_op_flt;
   case nir_op_ige:
      return nir_op_ilt;
   case nir_op_uge:
      return nir_op_ult;
   case nir_op_flt:
      return nir_op_fge;
   case nir_op_ilt:
      return nir_op_ige;
   case nir_op_ult:
      return nir_op_uge;
   case nir_op_feq:
      return nir_op_fne;
   case nir_op_ieq:
      return nir_op_ine;
   case nir_op_fne:
      return nir_op_feq;
   case nir_op_ine:
      return nir_op_ieq;
   default:
      unreachable("Unsupported comparison!");
   }
}

static bool
is_supported_terminator_condition(nir_ssa_scalar cond)
{
   if (!nir_ssa_scalar_is_alu(cond))
      return false;

   nir_alu_instr *alu = nir_instr_as_alu(cond.def->parent_instr);
   return nir_alu_instr_is_comparison(alu) &&
          nir_op_infos[alu->op].num_inputs == 2;
}

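/* For a two-source comparison, work out which source is the basic induction
 * variable and which is the limit.  Returns false if neither source is a
 * basic induction variable.
 */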
static bool
get_induction_and_limit_vars(nir_ssa_scalar cond,
                             nir_ssa_scalar *ind,
                             nir_ssa_scalar *limit,
                             bool *limit_rhs,
                             loop_info_state *state)
{
   nir_ssa_scalar rhs, lhs;
   lhs = nir_ssa_scalar_chase_alu_src(cond, 0);
   rhs = nir_ssa_scalar_chase_alu_src(cond, 1);

   if (get_loop_var(lhs.def, state)->type == basic_induction) {
      *ind = lhs;
      *limit = rhs;
      *limit_rhs = true;
      return true;
   } else if (get_loop_var(rhs.def, state)->type == basic_induction) {
      *ind = rhs;
      *limit = lhs;
      *limit_rhs = false;
      return true;
   } else {
      return false;
   }
}

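/* Handle conditions of the form !(x && y) or (x && y) == 0 by searching the
 * iand sources for a comparison that involves a basic induction variable.
 */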
static bool
try_find_trip_count_vars_in_iand(nir_ssa_scalar *cond,
                                 nir_ssa_scalar *ind,
                                 nir_ssa_scalar *limit,
                                 bool *limit_rhs,
                                 loop_info_state *state)
{
   const nir_op alu_op = nir_ssa_scalar_alu_op(*cond);
   assert(alu_op == nir_op_ieq || alu_op == nir_op_inot);

   nir_ssa_scalar iand = nir_ssa_scalar_chase_alu_src(*cond, 0);

   if (alu_op == nir_op_ieq) {
      nir_ssa_scalar zero = nir_ssa_scalar_chase_alu_src(*cond, 1);

      if (!nir_ssa_scalar_is_alu(iand) || !nir_ssa_scalar_is_const(zero)) {
         /* Maybe we had it the wrong way, flip things around */
         nir_ssa_scalar tmp = zero;
         zero = iand;
         iand = tmp;

         /* If we still didn't find what we need then return */
         if (!nir_ssa_scalar_is_const(zero))
            return false;
      }

      /* If the loop is not breaking on (x && y) == 0 then return */
      if (nir_ssa_scalar_as_uint(zero) != 0)
         return false;
   }

   if (!nir_ssa_scalar_is_alu(iand))
      return false;

   if (nir_ssa_scalar_alu_op(iand) != nir_op_iand)
      return false;

   /* Check if an iand src is a terminator condition and try to get the
    * induction var and trip limit var.
    */
   bool found_induction_var = false;
   for (unsigned i = 0; i < 2; i++) {
      nir_ssa_scalar src = nir_ssa_scalar_chase_alu_src(iand, i);
      if (is_supported_terminator_condition(src) &&
          get_induction_and_limit_vars(src, ind, limit, limit_rhs, state)) {
         *cond = src;
         found_induction_var = true;

         /* If we've found one with a constant limit, stop. */
         if (nir_ssa_scalar_is_const(*limit))
            return true;
      }
   }

   return found_induction_var;
}

/* Run through each of the terminators of the loop and try to infer a possible
 * trip-count. We need to check them all, and set the lowest trip-count as the
 * trip-count of our loop. If one of the terminators has an undecidable
 * trip-count we cannot safely assume anything about the duration of the
 * loop.
 */
static void
find_trip_count(loop_info_state *state)
{
   bool trip_count_known = true;
   bool guessed_trip_count = false;
   nir_loop_terminator *limiting_terminator = NULL;
   int max_trip_count = -1;

   list_for_each_entry(nir_loop_terminator, terminator,
                       &state->loop->info->loop_terminator_list,
                       loop_terminator_link) {
      assert(terminator->nif->condition.is_ssa);
      nir_ssa_scalar cond = { terminator->nif->condition.ssa, 0 };

      if (!nir_ssa_scalar_is_alu(cond)) {
         /* If we get here the loop is dead and will get cleaned up by the
          * nir_opt_dead_cf pass.
          */
         trip_count_known = false;
         continue;
      }

      nir_op alu_op = nir_ssa_scalar_alu_op(cond);

      bool limit_rhs;
      nir_ssa_scalar basic_ind = { NULL, 0 };
      nir_ssa_scalar limit;
      if ((alu_op == nir_op_inot || alu_op == nir_op_ieq) &&
          try_find_trip_count_vars_in_iand(&cond, &basic_ind, &limit,
                                           &limit_rhs, state)) {

         /* The loop is exiting on (x && y) == 0 so we need to get the
          * inverse of x or y (i.e. whichever contained the induction var) in
          * order to compute the trip count.
          */
         alu_op = inverse_comparison(nir_ssa_scalar_alu_op(cond));
         trip_count_known = false;
         terminator->exact_trip_count_unknown = true;
      }

      if (!basic_ind.def) {
         if (is_supported_terminator_condition(cond)) {
            get_induction_and_limit_vars(cond, &basic_ind,
                                         &limit, &limit_rhs, state);
         }
      }

      /* The comparison has to have a basic induction variable for us to be
       * able to find trip counts.
       */
      if (!basic_ind.def) {
         trip_count_known = false;
         continue;
      }

      terminator->induction_rhs = !limit_rhs;

      /* Attempt to find a constant limit for the loop */
      nir_const_value limit_val;
      if (nir_ssa_scalar_is_const(limit)) {
         limit_val = nir_ssa_scalar_as_const_value(limit);
      } else {
         trip_count_known = false;

         if (!try_find_limit_of_alu(limit, &limit_val, terminator, state)) {
            /* Guess loop limit based on array access */
            if (!guess_loop_limit(state, &limit_val, basic_ind)) {
               continue;
            }

            guessed_trip_count = true;
         }
      }

      /* We have determined that we have the following constants:
       * (With the typical int i = 0; i < x; i++; as an example)
       *    - Upper limit.
       *    - Starting value
       *    - Step / iteration size
       * That's all that's needed to calculate the trip-count
       */

      nir_basic_induction_var *ind_var =
         get_loop_var(basic_ind.def, state)->ind;

      /* The basic induction var might be a vector but, because we guarantee
       * earlier that the phi source has a scalar swizzle, we can take the
       * component from basic_ind.
       */
      nir_ssa_scalar initial_s = { ind_var->def_outside_loop, basic_ind.comp };
      nir_ssa_scalar alu_s = { &ind_var->alu->dest.dest.ssa, basic_ind.comp };

      nir_const_value initial_val = nir_ssa_scalar_as_const_value(initial_s);

      /* We are guaranteed by earlier code that at least one of these sources
       * is a constant but we don't know which.
       */
      nir_const_value step_val;
      memset(&step_val, 0, sizeof(step_val));
      UNUSED bool found_step_value = false;
      assert(nir_op_infos[ind_var->alu->op].num_inputs == 2);
      for (unsigned i = 0; i < 2; i++) {
         nir_ssa_scalar alu_src = nir_ssa_scalar_chase_alu_src(alu_s, i);
         if (nir_ssa_scalar_is_const(alu_src)) {
            found_step_value = true;
            step_val = nir_ssa_scalar_as_const_value(alu_src);
            break;
         }
      }
      assert(found_step_value);

      int iterations = calculate_iterations(&initial_val, &step_val,
                                            &limit_val,
                                            ind_var->alu, cond,
                                            alu_op, limit_rhs,
                                            terminator->continue_from_then);

      /* If we were not able to calculate the iteration count */
      if (iterations == -1) {
         trip_count_known = false;
         guessed_trip_count = false;
         continue;
      }

      if (guessed_trip_count) {
         guessed_trip_count = false;
         if (state->loop->info->guessed_trip_count == 0 ||
             state->loop->info->guessed_trip_count > iterations)
            state->loop->info->guessed_trip_count = iterations;

         continue;
      }

      /* If this is the first run or we have found a smaller amount of
       * iterations than previously (we have identified a more limiting
       * terminator) set the trip count and limiting terminator.
       */
      if (max_trip_count == -1 || iterations < max_trip_count) {
         max_trip_count = iterations;
         limiting_terminator = terminator;
      }
   }

   state->loop->info->exact_trip_count_known = trip_count_known;
   if (max_trip_count > -1)
      state->loop->info->max_trip_count = max_trip_count;
   state->loop->info->limiting_terminator = limiting_terminator;
}

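/* Report whether an array access indexed by an induction variable should
 * force unrolling: either the array size matches the trip count or the deref
 * mode is in the indirect mask.
 */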
static bool
force_unroll_array_access(loop_info_state *state, nir_deref_instr *deref)
{
   unsigned array_size = find_array_access_via_induction(state, deref, NULL);
   if (array_size) {
      if (array_size == state->loop->info->max_trip_count)
         return true;

      if (deref->mode & state->indirect_mask)
         return true;
   }

   return false;
}

static bool
force_unroll_heuristics(loop_info_state *state, nir_block *block)
{
   nir_foreach_instr(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      /* Check for arrays variably-indexed by a loop induction variable.
       * Unrolling the loop may convert that access into constant-indexing.
       */
      if (intrin->intrinsic == nir_intrinsic_load_deref ||
          intrin->intrinsic == nir_intrinsic_store_deref ||
          intrin->intrinsic == nir_intrinsic_copy_deref) {
         if (force_unroll_array_access(state,
                                       nir_src_as_deref(intrin->src[0])))
            return true;

         if (intrin->intrinsic == nir_intrinsic_copy_deref &&
             force_unroll_array_access(state,
                                       nir_src_as_deref(intrin->src[1])))
            return true;
      }
   }

   return false;
}

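/* Analyze a single loop: initialize per-def state, find the terminators,
 * compute invariance and induction information, estimate the trip count and
 * apply the force-unroll heuristics.
 */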
static void
get_loop_info(loop_info_state *state, nir_function_impl *impl)
{
   nir_shader *shader = impl->function->shader;
   const nir_shader_compiler_options *options = shader->options;

   /* Initialize all variables to "outside_loop". This also marks defs
    * invariant and constant if they are nir_instr_type_load_consts.
    */
   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block)
         nir_foreach_ssa_def(instr, initialize_ssa_def, state);
   }

   /* Add all entries in the outermost part of the loop to the processing
    * list. Mark the entries in conditionals or in nested loops accordingly.
    */
   foreach_list_typed_safe(nir_cf_node, node, node, &state->loop->body) {
      switch (node->type) {

      case nir_cf_node_block:
         init_loop_block(nir_cf_node_as_block(node), state,
                         false, false, options);
         break;

      case nir_cf_node_if:
         nir_foreach_block_in_cf_node(block, node)
            init_loop_block(block, state, true, false, options);
         break;

      case nir_cf_node_loop:
         nir_foreach_block_in_cf_node(block, node) {
            init_loop_block(block, state, false, true, options);
         }
         break;

      case nir_cf_node_function:
         break;
      }
   }

   /* Try to find all simple terminators of the loop. If we can't find any,
    * or we find possible terminators that have side effects then bail.
    */
   if (!find_loop_terminators(state)) {
      list_for_each_entry_safe(nir_loop_terminator, terminator,
                               &state->loop->info->loop_terminator_list,
                               loop_terminator_link) {
         list_del(&terminator->loop_terminator_link);
         ralloc_free(terminator);
      }
      return;
   }

   /* Induction analysis needs invariance information so get that first */
   compute_invariance_information(state);

   /* We have invariance information so try to find induction variables */
   if (!compute_induction_information(state))
      return;

   /* Run through each of the terminators and try to compute a trip-count */
   find_trip_count(state);

   nir_foreach_block_in_cf_node(block, &state->loop->cf_node) {
      if (force_unroll_heuristics(state, block)) {
         state->loop->info->force_unroll = true;
         break;
      }
   }
}

static loop_info_state *
initialize_loop_info_state(nir_loop *loop, void *mem_ctx,
                           nir_function_impl *impl)
{
   loop_info_state *state = rzalloc(mem_ctx, loop_info_state);
   state->loop_vars = rzalloc_array(mem_ctx, nir_loop_variable,
                                    impl->ssa_alloc);
   state->loop = loop;

   list_inithead(&state->process_list);

   if (loop->info)
      ralloc_free(loop->info);

   loop->info = rzalloc(loop, nir_loop_info);

   list_inithead(&loop->info->loop_terminator_list);

   return state;
}

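/* Walk the CF tree recursively so that nested loops are analyzed before the
 * loops containing them.
 */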
static void
process_loops(nir_cf_node *cf_node, nir_variable_mode indirect_mask)
{
   switch (cf_node->type) {
   case nir_cf_node_block:
      return;
   case nir_cf_node_if: {
      nir_if *if_stmt = nir_cf_node_as_if(cf_node);
      foreach_list_typed(nir_cf_node, nested_node, node, &if_stmt->then_list)
         process_loops(nested_node, indirect_mask);
      foreach_list_typed(nir_cf_node, nested_node, node, &if_stmt->else_list)
         process_loops(nested_node, indirect_mask);
      return;
   }
   case nir_cf_node_loop: {
      nir_loop *loop = nir_cf_node_as_loop(cf_node);
      foreach_list_typed(nir_cf_node, nested_node, node, &loop->body)
         process_loops(nested_node, indirect_mask);
      break;
   }
   default:
      unreachable("unknown cf node type");
   }

   nir_loop *loop = nir_cf_node_as_loop(cf_node);
   nir_function_impl *impl = nir_cf_node_get_function(cf_node);
   void *mem_ctx = ralloc_context(NULL);

   loop_info_state *state = initialize_loop_info_state(loop, mem_ctx, impl);
   state->indirect_mask = indirect_mask;

   get_loop_info(state, impl);

   ralloc_free(mem_ctx);
}

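/* Entry point: index the SSA defs and gather loop info for every loop in the
 * function implementation.
 */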
void
nir_loop_analyze_impl(nir_function_impl *impl,
                      nir_variable_mode indirect_mask)
{
   nir_index_ssa_defs(impl);
   foreach_list_typed(nir_cf_node, node, node, &impl->body)
      process_loops(node, indirect_mask);
}