/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#include "nir.h"

/*
 * Implements a pass that lowers vector phi nodes to scalar phi nodes when
 * we don't think it will hurt anything.
 */
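
/* As an illustrative sketch (the SSA names and block labels below are
 * hypothetical), a two-component phi such as
 *
 *    vec2 32 ssa_4 = phi b1: ssa_2, b2: ssa_3
 *
 * is rewritten into one single-component mov per source component at the
 * end of each predecessor block, one scalar phi per component, and a vecN
 * to recombine the results:
 *
 *    vec1 32 ssa_5 = mov ssa_2.x         (end of block b1)
 *    vec1 32 ssa_6 = mov ssa_2.y         (end of block b1)
 *    vec1 32 ssa_7 = mov ssa_3.x         (end of block b2)
 *    vec1 32 ssa_8 = mov ssa_3.y         (end of block b2)
 *
 *    vec1 32 ssa_9  = phi b1: ssa_5, b2: ssa_7
 *    vec1 32 ssa_10 = phi b1: ssa_6, b2: ssa_8
 *    vec2 32 ssa_11 = vec2 ssa_9, ssa_10
 *
 * Copy propagation is then expected to clean up the redundant movs and
 * vecs.
 */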

struct lower_phis_to_scalar_state {
   nir_shader *shader;
   void *mem_ctx;
   struct exec_list dead_instrs;

   bool lower_all;

   /* Hash table marking which phi nodes are scalarizable.  The keys are
    * pointers to phi instructions and the value is NULL if the phi is not
    * scalarizable and non-NULL if it is.
    */
   struct hash_table *phi_table;
};

static bool
should_lower_phi(nir_phi_instr *phi, struct lower_phis_to_scalar_state *state);

static bool
is_phi_src_scalarizable(nir_phi_src *src,
                        struct lower_phis_to_scalar_state *state)
{
   /* Don't know what to do with non-ssa sources */
   if (!src->src.is_ssa)
      return false;

   nir_instr *src_instr = src->src.ssa->parent_instr;
   switch (src_instr->type) {
   case nir_instr_type_alu: {
      nir_alu_instr *src_alu = nir_instr_as_alu(src_instr);

      /* ALU operations with output_size == 0 should be scalarized.  We
       * will also see a bunch of vecN operations from scalarizing ALU
       * operations and, since they can easily be copy-propagated, they
       * are ok too.
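       *
       * (output_size == 0 means the opcode operates per component, e.g.
       * fadd or fmul, so any single component of the result can be
       * computed independently.)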
       */
      return nir_op_infos[src_alu->op].output_size == 0 ||
             nir_op_is_vec(src_alu->op);
   }

   case nir_instr_type_phi:
      /* A phi is scalarizable if we're going to lower it */
      return should_lower_phi(nir_instr_as_phi(src_instr), state);

   case nir_instr_type_load_const:
      /* These are trivially scalarizable */
      return true;

   case nir_instr_type_ssa_undef:
      /* The caller of this function is going to OR the results and we don't
       * want undefs to count so we return false.
       */
      return false;

   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *src_intrin = nir_instr_as_intrinsic(src_instr);

      switch (src_intrin->intrinsic) {
      case nir_intrinsic_load_deref: {
         /* Don't scalarize if we see a load of a local variable because it
          * might turn into one of the things we can't scalarize.
          */
         nir_deref_instr *deref = nir_src_as_deref(src_intrin->src[0]);
         return !nir_deref_mode_may_be(deref, nir_var_function_temp |
                                              nir_var_shader_temp);
      }

      case nir_intrinsic_interp_deref_at_centroid:
      case nir_intrinsic_interp_deref_at_sample:
      case nir_intrinsic_interp_deref_at_offset:
      case nir_intrinsic_interp_deref_at_vertex:
      case nir_intrinsic_load_uniform:
      case nir_intrinsic_load_ubo:
      case nir_intrinsic_load_ssbo:
      case nir_intrinsic_load_global:
      case nir_intrinsic_load_global_constant:
      case nir_intrinsic_load_input:
         return true;
      default:
         break;
      }
   }
   FALLTHROUGH;

   default:
      /* We can't scalarize this type of instruction */
      return false;
   }
}

/**
 * Determines if the given phi node should be lowered.  The only phi nodes
 * we will scalarize at the moment are those where at least one of the
 * sources is scalarizable, unless lower_all is set.
 *
 * The reason for this comes down to coalescing.  Since phi sources can't
 * swizzle, swizzles on phis have to be resolved by inserting a mov right
 * before the phi.  The choice then becomes between movs to pick off
 * components for a scalar phi or potentially movs to recombine components
 * for a vector phi.  The problem is that the movs generated to pick off
 * the components are almost uncoalescable.  We can't coalesce them in NIR
 * because we need them to pick off components and we can't coalesce them
 * in the backend because the source register is a vector and the
 * destination is a scalar that may be used at other places in the program.
 * On the other hand, if we have a bunch of scalars going into a vector
 * phi, the situation is much better.  In this case, if the SSA def is
 * generated in the predecessor block to the corresponding phi source, the
 * backend code will be an ALU op into a temporary and then a mov into the
 * given vector component;  this move can almost certainly be coalesced
 * away.
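 *
 * As a concrete (hypothetical) illustration: a pick-off mov such as
 * "mov ssa_2.y" feeding a scalar phi reads a single component of a vector
 * that stays live, so the backend can't assign the mov's scalar
 * destination and ssa_2 the same register.  A scalar computed in the
 * predecessor and fed into component i of a vector phi, by contrast, can
 * usually be allocated straight into that component's slot.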
 */
static bool
should_lower_phi(nir_phi_instr *phi, struct lower_phis_to_scalar_state *state)
{
   /* Already scalar */
   if (phi->dest.ssa.num_components == 1)
      return false;

   if (state->lower_all)
      return true;

   struct hash_entry *entry = _mesa_hash_table_search(state->phi_table, phi);
   if (entry)
      return entry->data != NULL;

   /* Insert an entry and mark it as scalarizable for now. That way
    * we don't recurse forever and a cycle in the dependence graph
    * won't automatically make us fail to scalarize.
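    *
    * (Such cycles are common: e.g. a loop-header phi whose back-edge
    * source is an ALU op computed from the phi itself.)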
    */
   entry = _mesa_hash_table_insert(state->phi_table, phi, (void *)(intptr_t)1);

   bool scalarizable = false;

   nir_foreach_phi_src(src, phi) {
      /* This loop ignores srcs that are not scalarizable because it's likely
       * still worth copying to temps if another phi source is scalarizable.
       * This reduces register spilling by a huge amount in the i965 driver for
       * Deus Ex: MD.
       */
      scalarizable = is_phi_src_scalarizable(src, state);
      if (scalarizable)
         break;
   }

   /* The hash table entry for 'phi' may have been invalidated by a rehash
    * while recursing the dependence graph, so we need to look it up again.
    */
   entry = _mesa_hash_table_search(state->phi_table, phi);
   assert(entry);

   entry->data = (void *)(intptr_t)scalarizable;

   return scalarizable;
}

static bool
lower_phis_to_scalar_block(nir_block *block,
                           struct lower_phis_to_scalar_state *state)
{
   bool progress = false;

   /* Find the last phi node in the block */
   nir_phi_instr *last_phi = NULL;
   nir_foreach_instr(instr, block) {
      if (instr->type != nir_instr_type_phi)
         break;

      last_phi = nir_instr_as_phi(instr);
   }

   /* We have to handle the phi nodes in their own pass due to the way
    * we're modifying the linked list of instructions.
    */
   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_phi)
         break;

      nir_phi_instr *phi = nir_instr_as_phi(instr);

      if (!should_lower_phi(phi, state))
         continue;

      unsigned bit_size = phi->dest.ssa.bit_size;

      /* Create a vecN operation to combine the results.  Most of these
       * will be redundant, but copy propagation should clean them up for
       * us.  No need to add the complexity here.
       */
      nir_op vec_op = nir_op_vec(phi->dest.ssa.num_components);

      nir_alu_instr *vec = nir_alu_instr_create(state->shader, vec_op);
      nir_ssa_dest_init(&vec->instr, &vec->dest.dest,
                        phi->dest.ssa.num_components,
                        bit_size, NULL);
      vec->dest.write_mask = (1 << phi->dest.ssa.num_components) - 1;

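      /* For each component i: build a single-component phi, feed it with a
       * one-component mov of each original source (inserted at the end of
       * the corresponding predecessor block), and wire the new phi into
       * lane i of the vec.
       */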
      for (unsigned i = 0; i < phi->dest.ssa.num_components; i++) {
         nir_phi_instr *new_phi = nir_phi_instr_create(state->shader);
         nir_ssa_dest_init(&new_phi->instr, &new_phi->dest, 1,
                           phi->dest.ssa.bit_size, NULL);

         vec->src[i].src = nir_src_for_ssa(&new_phi->dest.ssa);

         nir_foreach_phi_src(src, phi) {
            /* We need to insert a mov to grab the i'th component of src */
            nir_alu_instr *mov = nir_alu_instr_create(state->shader,
                                                      nir_op_mov);
            nir_ssa_dest_init(&mov->instr, &mov->dest.dest, 1, bit_size, NULL);
            mov->dest.write_mask = 1;
            nir_src_copy(&mov->src[0].src, &src->src);
            mov->src[0].swizzle[0] = i;

            /* Insert at the end of the predecessor but before the jump */
            nir_instr *pred_last_instr = nir_block_last_instr(src->pred);
            if (pred_last_instr && pred_last_instr->type == nir_instr_type_jump)
               nir_instr_insert_before(pred_last_instr, &mov->instr);
            else
               nir_instr_insert_after_block(src->pred, &mov->instr);

            nir_phi_instr_add_src(new_phi, src->pred, nir_src_for_ssa(&mov->dest.dest.ssa));
         }

         nir_instr_insert_before(&phi->instr, &new_phi->instr);
      }

      nir_instr_insert_after(&last_phi->instr, &vec->instr);

      nir_ssa_def_rewrite_uses(&phi->dest.ssa,
                               &vec->dest.dest.ssa);

      nir_instr_remove(&phi->instr);
      exec_list_push_tail(&state->dead_instrs, &phi->instr.node);

      progress = true;

      /* We're using the safe iterator and inserting all the newly
       * scalarized phi nodes before their non-scalarized version so that's
       * ok.  However, we are also inserting vec operations after the last
       * phi node so once we get here, we can't trust even the safe
       * iterator to stop properly.  We have to break manually.
       */
      if (instr == &last_phi->instr)
         break;
   }

   return progress;
}

static bool
lower_phis_to_scalar_impl(nir_function_impl *impl, bool lower_all)
{
   struct lower_phis_to_scalar_state state;
   bool progress = false;

   state.shader = impl->function->shader;
   state.mem_ctx = ralloc_parent(impl);
   exec_list_make_empty(&state.dead_instrs);
   state.phi_table = _mesa_pointer_hash_table_create(NULL);
   state.lower_all = lower_all;

   nir_foreach_block(block, impl) {
      progress = lower_phis_to_scalar_block(block, &state) || progress;
   }

   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);

   nir_instr_free_list(&state.dead_instrs);

   ralloc_free(state.phi_table);

   return progress;
}

/** A pass that lowers vector phi nodes to scalar
 *
 * This pass loops through the blocks and looks for vector phi nodes it can
 * lower to scalar phi nodes.  Not all phi nodes are lowered.  For
 * instance, if none of a phi's sources are scalarizable, then we don't
 * bother lowering it because that would generate hard-to-coalesce movs.
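 *
 * Typical caller usage (a sketch; NIR_PASS handles progress tracking and,
 * in debug builds, validation):
 *
 *    bool progress = false;
 *    NIR_PASS(progress, shader, nir_lower_phis_to_scalar, false);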
 */
bool
nir_lower_phis_to_scalar(nir_shader *shader, bool lower_all)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress = lower_phis_to_scalar_impl(function->impl, lower_all) || progress;
   }

   return progress;
}
