/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"
#include "nir_control_flow.h"
#include "nir_vla.h"

static bool function_ends_in_jump(nir_function_impl *impl)
{
   nir_block *last_block = nir_impl_last_block(impl);
   return nir_block_ends_in_jump(last_block);
}

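/* Inline a copy of "impl" at the builder's current cursor.
 *
 * "params" must hold one SSA def per parameter of impl's function;
 * load_param intrinsics in the callee are rewritten to use these values and
 * then removed.  "shader_var_remap", if non-NULL, maps shader-level
 * variables from the callee's shader to variables in b->shader (missing
 * entries are cloned on first use); pass NULL when inlining within a single
 * shader.  Returns must already have been lowered (see nir_lower_returns).
 */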
void nir_inline_function_impl(struct nir_builder *b,
                              const nir_function_impl *impl,
                              nir_ssa_def **params,
                              struct hash_table *shader_var_remap)
{
   nir_function_impl *copy = nir_function_impl_clone(b->shader, impl);

   exec_list_append(&b->impl->locals, &copy->locals);
   exec_list_append(&b->impl->registers, &copy->registers);

   nir_foreach_block(block, copy) {
      nir_foreach_instr_safe(instr, block) {
         switch (instr->type) {
         case nir_instr_type_deref: {
            nir_deref_instr *deref = nir_instr_as_deref(instr);
            if (deref->deref_type != nir_deref_type_var)
               break;

            /* We don't need to remap function variables.  We already cloned
             * them as part of nir_function_impl_clone and appended them to
             * b->impl->locals.
             */
            if (deref->var->data.mode == nir_var_function_temp)
               break;

            /* If no map is provided, we assume that there are either no
             * shader variables or they already live in b->shader (this is
             * the case for function inlining within a single shader).
             */
            if (shader_var_remap == NULL)
               break;

            struct hash_entry *entry =
               _mesa_hash_table_search(shader_var_remap, deref->var);
            if (entry == NULL) {
               nir_variable *nvar = nir_variable_clone(deref->var, b->shader);
               nir_shader_add_variable(b->shader, nvar);
               entry = _mesa_hash_table_insert(shader_var_remap,
                                               deref->var, nvar);
            }
            deref->var = entry->data;
            break;
         }

         case nir_instr_type_intrinsic: {
            nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);
            if (load->intrinsic != nir_intrinsic_load_param)
               break;

            unsigned param_idx = nir_intrinsic_param_idx(load);
            assert(param_idx < impl->function->num_params);
            assert(load->dest.is_ssa);
            nir_ssa_def_rewrite_uses(&load->dest.ssa,
                                     params[param_idx]);

            /* Remove any left-over load_param intrinsics because they're soon
             * to be in another function and therefore no longer valid.
             */
            nir_instr_remove(&load->instr);
            break;
         }

         case nir_instr_type_jump:
            /* Returns have to be lowered for this to work */
            assert(nir_instr_as_jump(instr)->type != nir_jump_return);
            break;

         default:
            break;
         }
      }
   }

   bool nest_if = function_ends_in_jump(copy);

   /* Pluck the body out of the function and place it here */
   nir_cf_list body;
   nir_cf_list_extract(&body, &copy->body);

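   /* If the inlined body ends in a jump, nothing may follow it in the same
    * block, so there is no valid spot after the body to leave b->cursor (or
    * to insert the tracking nop used below).  Wrapping the body in an
    * if (true) gives the builder a place to continue from: right after the
    * if.
    */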
   if (nest_if) {
      nir_if *cf = nir_push_if(b, nir_imm_bool(b, true));
      nir_cf_reinsert(&body, nir_after_cf_list(&cf->then_list));
      nir_pop_if(b, cf);
   } else {
      /* Insert a nop at the cursor so we can keep track of where things are as
       * we add/remove stuff from the CFG.
       */
      nir_intrinsic_instr *nop = nir_nop(b);
      nir_cf_reinsert(&body, nir_before_instr(&nop->instr));
      b->cursor = nir_instr_remove(&nop->instr);
   }
}

static bool inline_function_impl(nir_function_impl *impl, struct set *inlined);

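/* Inline every call instruction found in "block", making sure that the
 * callee itself has been fully inlined first.  Returns true if any call was
 * replaced.
 */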
static bool
inline_functions_block(nir_block *block, nir_builder *b,
                       struct set *inlined)
{
   bool progress = false;
   /* This is tricky.  We're iterating over instructions in a block but, as
    * we go, the block and its instruction list are being split into
    * pieces.  However, this *should* be safe since foreach_safe always
    * stashes the next thing in the iteration.  That next thing will
    * properly get moved to the next block when it gets split, and we
    * continue iterating there.
    */
   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_call)
         continue;

      progress = true;

      nir_call_instr *call = nir_instr_as_call(instr);
      assert(call->callee->impl);

      /* Make sure that the function we're calling is already inlined */
      inline_function_impl(call->callee->impl, inlined);

      b->cursor = nir_instr_remove(&call->instr);

      /* Rewrite all of the uses of the callee's parameters to use the call
       * instruction's sources.  In order to ensure that the "load" happens
       * here and not later (for register sources), we make sure to convert it
       * to an SSA value first.
       */
      const unsigned num_params = call->num_params;
      NIR_VLA(nir_ssa_def *, params, num_params);
      for (unsigned i = 0; i < num_params; i++) {
         params[i] = nir_ssa_for_src(b, call->params[i],
                                     call->callee->params[i].num_components);
      }

      nir_inline_function_impl(b, call->callee->impl, params, NULL);
   }

   return progress;
}

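/* Inline all calls (transitively) within "impl".  The "inlined" set records
 * impls that have already been processed so that each function is only
 * visited once.
 */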
static bool
inline_function_impl(nir_function_impl *impl, struct set *inlined)
{
   if (_mesa_set_search(inlined, impl))
      return false; /* Already inlined */

   nir_builder b;
   nir_builder_init(&b, impl);

   bool progress = false;
   nir_foreach_block_safe(block, impl) {
      progress |= inline_functions_block(block, &b, inlined);
   }

   if (progress) {
      /* SSA and register indices are completely messed up now */
      nir_index_ssa_defs(impl);
      nir_index_local_regs(impl);

      nir_metadata_preserve(impl, nir_metadata_none);
   } else {
      nir_metadata_preserve(impl, nir_metadata_all);
   }

   _mesa_set_add(inlined, impl);

   return progress;
}

/** A pass to inline all functions in a shader into their callers
 *
 * For most use-cases, function inlining is a multi-step process.  The general
 * pattern employed by SPIR-V consumers and others is as follows:
 *
 *  1. nir_lower_variable_initializers(shader, nir_var_function_temp)
 *
 *     This is needed because local variables from the callee are simply added
 *     to the locals list for the caller and the information about where the
 *     constant initializer logically happens is lost.  If the callee is
 *     called in a loop, this can cause the variable to go from being
 *     initialized once per loop iteration to being initialized once at the
 *     top of the caller and values to persist from one invocation of the
 *     callee to the next.  The simple solution to this problem is to get rid
 *     of constant initializers before function inlining.
 *
 *  2. nir_lower_returns(shader)
 *
 *     nir_inline_functions assumes that all functions end "naturally" by
 *     execution reaching the end of the function without any return
 *     instructions causing instant jumps to the end.  Thanks to NIR being
 *     structured, we can't represent arbitrary jumps to various points in the
 *     program, which is what an early return in the callee would have to
 *     turn into when we inline it into the caller.  Instead, we require
 *     returns to be lowered, which lets us just copy+paste the callee
 *     directly into the caller.
 *
 *  3. nir_inline_functions(shader)
 *
 *     This does the actual function inlining and the resulting shader will
 *     contain no call instructions.
 *
 *  4. nir_opt_deref(shader)
 *
 *     Most functions contain pointer parameters where the result of a deref
 *     instruction is passed in as a parameter, loaded via a load_param
 *     intrinsic, and then turned back into a deref via a cast.  Function
 *     inlining will get rid of the load_param but we are still left with a
 *     cast.  Running nir_opt_deref gets rid of the intermediate cast and
 *     results in a whole deref chain again.  This is currently required by a
 *     number of optimizations and lowering passes at least for certain
 *     variable modes.
 *
 *  5. Loop over the functions and delete all but the main entrypoint.
 *
 *     In the Intel Vulkan driver this looks like this:
 *
 *        foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
 *           if (func != entry_point)
 *              exec_node_remove(&func->node);
 *        }
 *        assert(exec_list_length(&nir->functions) == 1);
 *
 *    While nir_inline_functions does get rid of all call instructions, it
 *    doesn't get rid of any functions because it doesn't know what the "root
 *    function" is.  Instead, it's up to the individual driver to know how to
 *    decide on a root function and delete the rest.  With SPIR-V,
 *    spirv_to_nir returns the root function and so we can just use == whereas
 *    with GL, you may have to look for a function named "main".
 *
 *  6. nir_lower_variable_initializers(shader, ~nir_var_function_temp)
 *
 *     Lowering constant initializers on inputs, outputs, global variables,
 *     etc. requires that we know the main entrypoint so that we know where to
 *     initialize them.  Otherwise, we would have to assume that anything
 *     could be a main entrypoint and initialize them at the start of every
 *     function but that would clearly be wrong if any of those functions were
 *     ever called within another function.  Simply requiring a single-
 *     entrypoint function shader is the best way to make it well-defined.
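 *
 * Putting these steps together, a caller might drive them roughly as
 * follows.  This is only a sketch: "nir" is the shader being compiled and
 * "entry_point" stands for whichever nir_function the driver treats as the
 * root.
 *
 *        nir_lower_variable_initializers(nir, nir_var_function_temp);
 *        nir_lower_returns(nir);
 *        nir_inline_functions(nir);
 *        nir_opt_deref(nir);
 *
 *        foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
 *           if (func != entry_point)
 *              exec_node_remove(&func->node);
 *        }
 *        assert(exec_list_length(&nir->functions) == 1);
 *
 *        nir_lower_variable_initializers(nir, ~nir_var_function_temp);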
 */
bool
nir_inline_functions(nir_shader *shader)
{
   struct set *inlined = _mesa_pointer_set_create(NULL);
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress = inline_function_impl(function->impl, inlined) || progress;
   }

   _mesa_set_destroy(inlined, NULL);

   return progress;
}