/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#include "nir.h"
/*
 * Implements a pass that lowers vector phi nodes to scalar phi nodes when
 * we don't think it will hurt anything.
 */
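
/*
 * For example (an illustrative sketch in NIR-style pseudocode with
 * hypothetical SSA names, not exact pass output), a vector phi such as
 *
 *    vec2 ssa_3 = phi block_1: ssa_1, block_2: ssa_2
 *
 * is rewritten as one scalar phi per component, fed by movs inserted in
 * the predecessors, plus a vecN that recombines the results:
 *
 *    ssa_4 = phi block_1: mov(ssa_1.x), block_2: mov(ssa_2.x)
 *    ssa_5 = phi block_1: mov(ssa_1.y), block_2: mov(ssa_2.y)
 *    vec2 ssa_6 = vec2 ssa_4, ssa_5
 *
 * Copy propagation is expected to clean up the redundant movs and vecN
 * afterwards.
 */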

struct lower_phis_to_scalar_state {
   void *mem_ctx;
   void *dead_ctx;

   /* Hash table marking which phi nodes are scalarizable.  The key is a
    * pointer to the phi instruction; the entry's data is NULL if the phi
    * is not scalarizable and non-NULL if it is.
    */
   struct hash_table *phi_table;
};

static bool
should_lower_phi(nir_phi_instr *phi, struct lower_phis_to_scalar_state *state);

static bool
is_phi_src_scalarizable(nir_phi_src *src,
                        struct lower_phis_to_scalar_state *state)
{
   /* We don't know what to do with non-SSA sources */
   if (!src->src.is_ssa)
      return false;

   nir_instr *src_instr = src->src.ssa->parent_instr;
   switch (src_instr->type) {
   case nir_instr_type_alu: {
      nir_alu_instr *src_alu = nir_instr_as_alu(src_instr);

      /* ALU operations with output_size == 0 should be scalarized.  We
       * will also see a bunch of vecN operations from scalarizing ALU
       * operations and, since they can easily be copy-propagated, they
       * are ok too.
       */
      return nir_op_infos[src_alu->op].output_size == 0 ||
             src_alu->op == nir_op_vec2 ||
             src_alu->op == nir_op_vec3 ||
             src_alu->op == nir_op_vec4;
   }

   case nir_instr_type_phi:
      /* A phi is scalarizable if we're going to lower it */
      return should_lower_phi(nir_instr_as_phi(src_instr), state);

   case nir_instr_type_load_const:
      /* These are trivially scalarizable */
      return true;

   case nir_instr_type_ssa_undef:
      /* The caller of this function is going to OR the results and we don't
       * want undefs to count so we return false.
       */
      return false;

   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *src_intrin = nir_instr_as_intrinsic(src_instr);

      switch (src_intrin->intrinsic) {
      case nir_intrinsic_load_deref: {
         nir_deref_instr *deref = nir_src_as_deref(src_intrin->src[0]);
         return deref->mode == nir_var_shader_in ||
                deref->mode == nir_var_uniform ||
                deref->mode == nir_var_mem_ubo ||
                deref->mode == nir_var_mem_ssbo ||
                deref->mode == nir_var_mem_global;
      }

      case nir_intrinsic_interp_deref_at_centroid:
      case nir_intrinsic_interp_deref_at_sample:
      case nir_intrinsic_interp_deref_at_offset:
      case nir_intrinsic_load_uniform:
      case nir_intrinsic_load_ubo:
      case nir_intrinsic_load_ssbo:
      case nir_intrinsic_load_global:
      case nir_intrinsic_load_input:
         return true;
      default:
         break;
      }
   }
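   /* Deliberate fall-through: any intrinsic not handled above lands in the
    * outer default case below and is treated as non-scalarizable.
    */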

   default:
      /* We can't scalarize this type of instruction */
      return false;
   }
}

/**
 * Determines if the given phi node should be lowered.  The only phi nodes
 * we will scalarize at the moment are those where at least one of the
 * sources is scalarizable.
 *
 * The reason for this comes down to coalescing.  Since phi sources can't
 * swizzle, swizzles on phis have to be resolved by inserting a mov right
 * before the phi.  The choice then becomes between movs to pick off
 * components for a scalar phi or potentially movs to recombine components
 * for a vector phi.  The problem is that the movs generated to pick off
 * the components are almost uncoalescable.  We can't coalesce them in NIR
 * because we need them to pick off components and we can't coalesce them
 * in the backend because the source register is a vector and the
 * destination is a scalar that may be used at other places in the program.
 * On the other hand, if we have a bunch of scalars going into a vector
 * phi, the situation is much better.  In this case, if the SSA def is
 * generated in the predecessor block to the corresponding phi source, the
 * backend code will be an ALU op into a temporary and then a mov into the
 * given vector component; this mov can almost certainly be coalesced
 * away.
 */
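
/*
 * As an illustrative sketch (hypothetical SSA names): if ssa_0 below is a
 * genuine vector result, e.g. from a texture op, scalarizing
 *
 *    vec2 ssa_2 = phi block_0: ssa_0, ...
 *
 * forces pick-off movs in the predecessor such as
 *
 *    ssa_3 = mov ssa_0.x
 *    ssa_4 = mov ssa_0.y
 *
 * each reading one lane of a vector into a standalone scalar; these are
 * the movs that rarely coalesce.
 */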
static bool
should_lower_phi(nir_phi_instr *phi, struct lower_phis_to_scalar_state *state)
{
   /* Already scalar */
   if (phi->dest.ssa.num_components == 1)
      return false;

   struct hash_entry *entry = _mesa_hash_table_search(state->phi_table, phi);
   if (entry)
      return entry->data != NULL;

   /* Insert an entry and mark it as scalarizable for now. That way
    * we don't recurse forever and a cycle in the dependence graph
    * won't automatically make us fail to scalarize.
    */
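   /* For instance, a phi at the top of a loop may depend on its own value
    * through the back edge; the provisional entry makes that self-reference
    * read as scalarizable instead of recursing forever.
    */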
   entry = _mesa_hash_table_insert(state->phi_table, phi, (void *)(intptr_t)1);

   bool scalarizable = false;

   nir_foreach_phi_src(src, phi) {
      /* This loop ignores srcs that are not scalarizable because it's likely
       * still worth copying to temps if another phi source is scalarizable.
       * This reduces register spilling by a huge amount in the i965 driver for
       * Deus Ex: MD.
       */
      scalarizable = is_phi_src_scalarizable(src, state);
      if (scalarizable)
         break;
   }

   /* The hash table entry for 'phi' may have changed while recursing the
    * dependence graph, so we need to look it up again.
    */
   entry = _mesa_hash_table_search(state->phi_table, phi);
   assert(entry);

   entry->data = (void *)(intptr_t)scalarizable;

   return scalarizable;
}

static bool
lower_phis_to_scalar_block(nir_block *block,
                           struct lower_phis_to_scalar_state *state)
{
   bool progress = false;

   /* Find the last phi node in the block */
   nir_phi_instr *last_phi = NULL;
   nir_foreach_instr(instr, block) {
      if (instr->type != nir_instr_type_phi)
         break;

      last_phi = nir_instr_as_phi(instr);
   }

   /* We have to handle the phi nodes in their own pass due to the way
    * we're modifying the linked list of instructions.
    */
   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_phi)
         break;

      nir_phi_instr *phi = nir_instr_as_phi(instr);

      if (!should_lower_phi(phi, state))
         continue;

      unsigned bit_size = phi->dest.ssa.bit_size;

      /* Create a vecN operation to combine the results.  Most of these
       * will be redundant, but copy propagation should clean them up for
       * us.  No need to add the complexity here.
       */
      nir_op vec_op;
      switch (phi->dest.ssa.num_components) {
      case 2: vec_op = nir_op_vec2; break;
      case 3: vec_op = nir_op_vec3; break;
      case 4: vec_op = nir_op_vec4; break;
      default: unreachable("Invalid number of components");
      }

      nir_alu_instr *vec = nir_alu_instr_create(state->mem_ctx, vec_op);
      nir_ssa_dest_init(&vec->instr, &vec->dest.dest,
                        phi->dest.ssa.num_components,
                        bit_size, NULL);
      vec->dest.write_mask = (1 << phi->dest.ssa.num_components) - 1;

      for (unsigned i = 0; i < phi->dest.ssa.num_components; i++) {
         nir_phi_instr *new_phi = nir_phi_instr_create(state->mem_ctx);
         nir_ssa_dest_init(&new_phi->instr, &new_phi->dest, 1,
                           bit_size, NULL);

         vec->src[i].src = nir_src_for_ssa(&new_phi->dest.ssa);

         nir_foreach_phi_src(src, phi) {
            /* We need to insert a mov to grab the i'th component of src */
            nir_alu_instr *mov = nir_alu_instr_create(state->mem_ctx,
                                                      nir_op_imov);
            nir_ssa_dest_init(&mov->instr, &mov->dest.dest, 1, bit_size, NULL);
            mov->dest.write_mask = 1;
            nir_src_copy(&mov->src[0].src, &src->src, state->mem_ctx);
            mov->src[0].swizzle[0] = i;
            /* Insert at the end of the predecessor, but before any jump,
             * since a jump must be the last instruction in its block.
             */
            nir_instr *pred_last_instr = nir_block_last_instr(src->pred);
            if (pred_last_instr && pred_last_instr->type == nir_instr_type_jump)
               nir_instr_insert_before(pred_last_instr, &mov->instr);
            else
               nir_instr_insert_after_block(src->pred, &mov->instr);

            nir_phi_src *new_src = ralloc(new_phi, nir_phi_src);
            new_src->pred = src->pred;
            new_src->src = nir_src_for_ssa(&mov->dest.dest.ssa);

            exec_list_push_tail(&new_phi->srcs, &new_src->node);
         }

         nir_instr_insert_before(&phi->instr, &new_phi->instr);
      }

      nir_instr_insert_after(&last_phi->instr, &vec->instr);

      nir_ssa_def_rewrite_uses(&phi->dest.ssa,
                               nir_src_for_ssa(&vec->dest.dest.ssa));

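      /* Steal the old phi into the dead context rather than freeing it
       * immediately: the check at the bottom of this loop still compares
       * against its instruction pointer.  The memory is released when the
       * pass frees dead_ctx.
       */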
      ralloc_steal(state->dead_ctx, phi);
      nir_instr_remove(&phi->instr);

      progress = true;

      /* We're using the safe iterator and inserting all the newly
       * scalarized phi nodes before their non-scalarized version so that's
       * ok.  However, we are also inserting the vec operations after the
       * last phi node, so once we get here we can't trust even the safe
       * iterator to stop properly.  We have to break manually.
       */
      if (instr == &last_phi->instr)
         break;
   }

   return progress;
}

static bool
lower_phis_to_scalar_impl(nir_function_impl *impl)
{
   struct lower_phis_to_scalar_state state;
   bool progress = false;

   state.mem_ctx = ralloc_parent(impl);
   state.dead_ctx = ralloc_context(NULL);
   state.phi_table = _mesa_pointer_hash_table_create(state.dead_ctx);

   nir_foreach_block(block, impl) {
      progress = lower_phis_to_scalar_block(block, &state) || progress;
   }

   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);

   ralloc_free(state.dead_ctx);
   return progress;
}

/** A pass that lowers vector phi nodes to scalar
 *
 * This pass loops through the blocks and looks for vector phi nodes it
 * can lower to scalar phi nodes.  Not all phi nodes are lowered.  For
 * instance, if none of the sources is scalarizable, we don't bother
 * lowering because that would only generate hard-to-coalesce movs.
 */
bool
nir_lower_phis_to_scalar(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress = lower_phis_to_scalar_impl(function->impl) || progress;
   }

   return progress;
}