nir_lower_multiview.c revision 7ec681f3
/*
 * Copyright © 2016 Intel Corporation
 * Copyright © 2020 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir_control_flow.h"
#include "nir_builder.h"

/**
 * This file implements an optimization for multiview. Some GPUs have a
 * special mode which allows the vertex shader (or the last stage in the
 * geometry pipeline) to create multiple primitives in different layers of
 * the framebuffer at once by writing multiple copies of gl_Position. The
 * assumption is that in most uses of multiview, the only use of gl_ViewIndex
 * is to change the position to implement the parallax effect, and other
 * varyings will be the same between the different views. We put the body of
 * the original vertex shader in a loop, writing to a different copy of
 * gl_Position on each loop iteration, and then let other optimizations
 * clean up the mess.
 */
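
/* As a rough illustration (GLSL-style pseudocode, not actual NIR), a shader
 * body such as
 *
 *    gl_Position = mvp[gl_ViewIndex] * in_position;
 *    out_color = in_color;
 *
 * conceptually becomes
 *
 *    for (uint i = 0; i < view_count; i++) {
 *       gl_Position[i] = mvp[view_index[i]] * in_position;
 *       out_color = in_color;
 *    }
 *
 * after which the usual optimizations can hoist the view-independent work
 * out of the loop.
 */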

static bool
shader_writes_to_memory(nir_shader *shader)
{
   /* With multiview, we would need to ensure that memory writes happen
    * either once or once per view. Since the combination of multiview and
    * memory writes is not expected, we'll just skip this optimization in
    * this case.
    */

   nir_function_impl *entrypoint = nir_shader_get_entrypoint(shader);

   nir_foreach_block(block, entrypoint) {
      nir_foreach_instr(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;
         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

         switch (intrin->intrinsic) {
         case nir_intrinsic_deref_atomic_add:
         case nir_intrinsic_deref_atomic_imin:
         case nir_intrinsic_deref_atomic_umin:
         case nir_intrinsic_deref_atomic_imax:
         case nir_intrinsic_deref_atomic_umax:
         case nir_intrinsic_deref_atomic_and:
         case nir_intrinsic_deref_atomic_or:
         case nir_intrinsic_deref_atomic_xor:
         case nir_intrinsic_deref_atomic_exchange:
         case nir_intrinsic_deref_atomic_comp_swap:
         case nir_intrinsic_store_ssbo:
         case nir_intrinsic_ssbo_atomic_add:
         case nir_intrinsic_ssbo_atomic_imin:
         case nir_intrinsic_ssbo_atomic_umin:
         case nir_intrinsic_ssbo_atomic_imax:
         case nir_intrinsic_ssbo_atomic_umax:
         case nir_intrinsic_ssbo_atomic_and:
         case nir_intrinsic_ssbo_atomic_or:
         case nir_intrinsic_ssbo_atomic_xor:
         case nir_intrinsic_ssbo_atomic_exchange:
         case nir_intrinsic_ssbo_atomic_comp_swap:
         case nir_intrinsic_store_shared:
         case nir_intrinsic_shared_atomic_add:
         case nir_intrinsic_shared_atomic_imin:
         case nir_intrinsic_shared_atomic_umin:
         case nir_intrinsic_shared_atomic_imax:
         case nir_intrinsic_shared_atomic_umax:
         case nir_intrinsic_shared_atomic_and:
         case nir_intrinsic_shared_atomic_or:
         case nir_intrinsic_shared_atomic_xor:
         case nir_intrinsic_shared_atomic_exchange:
         case nir_intrinsic_shared_atomic_comp_swap:
         case nir_intrinsic_image_deref_store:
         case nir_intrinsic_image_deref_atomic_add:
         case nir_intrinsic_image_deref_atomic_fadd:
         case nir_intrinsic_image_deref_atomic_umin:
         case nir_intrinsic_image_deref_atomic_umax:
         case nir_intrinsic_image_deref_atomic_imin:
         case nir_intrinsic_image_deref_atomic_imax:
         case nir_intrinsic_image_deref_atomic_fmin:
         case nir_intrinsic_image_deref_atomic_fmax:
         case nir_intrinsic_image_deref_atomic_and:
         case nir_intrinsic_image_deref_atomic_or:
         case nir_intrinsic_image_deref_atomic_xor:
         case nir_intrinsic_image_deref_atomic_exchange:
         case nir_intrinsic_image_deref_atomic_comp_swap:
            return true;

         default:
            /* Keep walking. */
            break;
         }
      }
   }

   return false;
}

bool
nir_shader_uses_view_index(nir_shader *shader)
{
   nir_function_impl *entrypoint = nir_shader_get_entrypoint(shader);

   nir_foreach_block(block, entrypoint) {
      nir_foreach_instr(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         if (intrin->intrinsic == nir_intrinsic_load_view_index)
            return true;
      }
   }

   return false;
}

static bool
shader_only_position_uses_view_index(nir_shader *shader)
{
   nir_shader *shader_no_position = nir_shader_clone(NULL, shader);
   nir_function_impl *entrypoint = nir_shader_get_entrypoint(shader_no_position);

   /* Remove the position stores from the cloned shader. */
   nir_foreach_block(block, entrypoint) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *store = nir_instr_as_intrinsic(instr);
         if (store->intrinsic != nir_intrinsic_store_deref)
            continue;

         nir_variable *var = nir_intrinsic_get_var(store, 0);
         if (var->data.location != VARYING_SLOT_POS)
            continue;

         nir_instr_remove(&store->instr);
      }
   }

   /* Clean up the shader so unused load_view_index intrinsics are removed. */
   bool progress;
   do {
      progress = false;
      progress |= nir_opt_dead_cf(shader_no_position);

      /* Peephole select will remove if-blocks whose then and else branches
       * are both empty, which in turn removes the use of the SSA value in
       * the condition.
       */
      progress |= nir_opt_peephole_select(shader_no_position, 0, false, false);

      progress |= nir_opt_dce(shader_no_position);
   } while (progress);

   bool uses_view_index = nir_shader_uses_view_index(shader_no_position);

   ralloc_free(shader_no_position);
   return !uses_view_index;
}
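
/* For example (an illustration, not an exhaustive characterization): a
 * shader whose only view-dependent statement is
 *
 *    gl_Position = mvp[gl_ViewIndex] * in_position;
 *
 * passes this check, since deleting the position store leaves the
 * load_view_index dead. A shader that also does
 *
 *    out_color = colors[gl_ViewIndex];
 *
 * does not, because that use of the view index survives the cleanup above.
 */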

/* Return true if it's safe to call nir_lower_multiview() on this vertex
 * shader. Note that this only handles driver-agnostic checks, i.e. things
 * which would make nir_lower_multiview() incorrect. Any driver-specific
 * checks, e.g. for sufficient varying space or performance considerations,
 * should be handled in the driver.
 *
 * Note that we don't handle the more complex checks needed for lowering
 * pipelines with geometry or tessellation shaders.
 */

bool
nir_can_lower_multiview(nir_shader *shader)
{
   bool writes_position = false;
   nir_foreach_shader_out_variable(var, shader) {
      if (var->data.location == VARYING_SLOT_POS) {
         writes_position = true;
         break;
      }
   }

   /* Don't bother handling this edge case. */
   if (!writes_position)
      return false;

   return !shader_writes_to_memory(shader) &&
          shader_only_position_uses_view_index(shader);
}

/**
 * The lowering. Call with the last active geometry stage, i.e. the stage
 * that writes gl_Position. By this point I/O is expected to have been
 * lowered to temporaries (so there are no loads of the position output) and
 * copy_deref instructions to have been lowered away.
 */

bool
nir_lower_multiview(nir_shader *shader, uint32_t view_mask)
{
   assert(shader->info.stage != MESA_SHADER_FRAGMENT);
   int view_count = util_bitcount(view_mask);

   nir_function_impl *entrypoint = nir_shader_get_entrypoint(shader);

   /* Update position to refer to an array. */
   nir_variable *pos_var = NULL;
   nir_foreach_shader_out_variable(var, shader) {
      if (var->data.location == VARYING_SLOT_POS) {
         assert(var->type == glsl_vec4_type());
         var->type = glsl_array_type(glsl_vec4_type(), view_count, 0);
         var->data.per_view = true;
         pos_var = var;
         break;
      }
   }

   assert(pos_var);

   nir_cf_list body;
   nir_cf_list_extract(&body, &entrypoint->body);

   nir_builder b;
   nir_builder_init(&b, entrypoint);
   b.cursor = nir_after_cf_list(&entrypoint->body);

   /* The loop index will run from 0 to view_count - 1. */
   nir_variable *loop_index_var =
      nir_local_variable_create(entrypoint, glsl_uint_type(), "loop_index");
   nir_deref_instr *loop_index_deref = nir_build_deref_var(&b, loop_index_var);
   nir_store_deref(&b, loop_index_deref, nir_imm_int(&b, 0), 1);

   /* Array of view index values that are active in the loop.  Note that the
    * loop index only matches the view index if there are no gaps in the
    * view_mask: e.g. with view_mask = 0b1010, loop indices 0 and 1 map to
    * view indices 1 and 3.
    */
   nir_variable *view_index_var = nir_local_variable_create(
      entrypoint, glsl_array_type(glsl_uint_type(), view_count, 0), "view_index");
   nir_deref_instr *view_index_deref = nir_build_deref_var(&b, view_index_var);
   {
      int array_position = 0;
      uint32_t view_mask_temp = view_mask;
      while (view_mask_temp) {
         uint32_t view_index = u_bit_scan(&view_mask_temp);
         nir_store_deref(&b, nir_build_deref_array_imm(&b, view_index_deref, array_position),
                         nir_imm_int(&b, view_index), 1);
         array_position++;
      }
   }

   /* Create the equivalent of
    *
    *    while (true):
    *       if (loop_index >= view_count):
    *          break
    *
    *       view_index = active_indices[loop_index]
    *       pos_deref = &pos[loop_index]
    *
    *       # Placeholder for the body to be reinserted.
    *
    *       loop_index += 1
    *
    * Later both `view_index` and `pos_deref` will be used to rewrite the
    * original shader body.
    */

   nir_loop *loop = nir_push_loop(&b);

   nir_ssa_def *loop_index = nir_load_deref(&b, loop_index_deref);
   nir_ssa_def *cmp = nir_ige(&b, loop_index, nir_imm_int(&b, view_count));
   nir_if *loop_check = nir_push_if(&b, cmp);
   nir_jump(&b, nir_jump_break);
   nir_pop_if(&b, loop_check);

   nir_ssa_def *view_index =
      nir_load_deref(&b, nir_build_deref_array(&b, view_index_deref, loop_index));
   nir_deref_instr *pos_deref =
      nir_build_deref_array(&b, nir_build_deref_var(&b, pos_var), loop_index);

   nir_store_deref(&b, loop_index_deref, nir_iadd_imm(&b, loop_index, 1), 1);
   nir_pop_loop(&b, loop);

   /* Reinsert the body. */
   b.cursor = nir_after_instr(&pos_deref->instr);
   nir_cf_reinsert(&body, b.cursor);

   nir_foreach_block(block, entrypoint) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

         switch (intrin->intrinsic) {
         case nir_intrinsic_load_view_index: {
            assert(intrin->dest.is_ssa);
            nir_ssa_def_rewrite_uses(&intrin->dest.ssa, view_index);
            break;
         }

         case nir_intrinsic_store_deref: {
            nir_variable *var = nir_intrinsic_get_var(intrin, 0);
            if (var == pos_var) {
               nir_deref_instr *old_deref = nir_src_as_deref(intrin->src[0]);

               nir_instr_rewrite_src(instr, &intrin->src[0],
                                     nir_src_for_ssa(&pos_deref->dest.ssa));

               /* Remove old deref since it has the wrong type. */
               nir_deref_instr_remove_if_unused(old_deref);
            }
            break;
         }

         case nir_intrinsic_load_deref:
            if (nir_intrinsic_get_var(intrin, 0) == pos_var) {
               unreachable("Should have lowered I/O to temporaries "
                           "so no load_deref on position output is expected.");
            }
            break;

         case nir_intrinsic_copy_deref:
            unreachable("Should have lowered copy_derefs at this point");
            break;

         default:
            /* Do nothing. */
            break;
         }
      }
   }

   nir_metadata_preserve(entrypoint, nir_metadata_none);
   return true;
}
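
/* For illustration, a driver that wants this optimization might use these
 * helpers roughly as follows (hypothetical driver code, not part of this
 * file), satisfying the I/O and copy_deref expectations noted above:
 *
 *    nir_lower_io_to_temporaries(nir, nir_shader_get_entrypoint(nir),
 *                                true, false);
 *    nir_split_var_copies(nir);
 *    nir_lower_var_copies(nir);
 *
 *    if (nir_can_lower_multiview(nir))
 *       nir_lower_multiview(nir, view_mask);
 *
 * falling back to running the shader once per view when the check fails.
 */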