/*
 * Copyright © 2018 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "aco_instruction_selection.h"

#include "common/ac_exp_param.h"
#include "common/sid.h"
#include "vulkan/radv_descriptor_set.h"

#include "nir_control_flow.h"

#include <vector>

namespace aco {

namespace {

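/* Whether the given block is the first block of a loop body, i.e. the block
 * that the loop's back-edges branch to.
 */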
bool
is_loop_header_block(nir_block* block)
{
   return block->cf_node.parent->type == nir_cf_node_loop &&
          block == nir_loop_first_block(nir_cf_node_as_loop(block->cf_node.parent));
}

/* similar to nir_block_is_unreachable(), but does not require dominance information */
bool
is_block_reachable(nir_function_impl* impl, nir_block* known_reachable, nir_block* block)
{
   if (block == nir_start_block(impl) || block == known_reachable)
      return true;

   /* skip loop back-edges */
   if (is_loop_header_block(block)) {
      nir_loop* loop = nir_cf_node_as_loop(block->cf_node.parent);
      nir_block* preheader = nir_block_cf_tree_prev(nir_loop_first_block(loop));
      return is_block_reachable(impl, known_reachable, preheader);
   }

   set_foreach (block->predecessors, entry) {
      if (is_block_reachable(impl, known_reachable, (nir_block*)entry->key))
         return true;
   }

   return false;
}

/* Check whether the given SSA def is only used by cross-lane instructions. */
bool
only_used_by_cross_lane_instrs(nir_ssa_def* ssa, bool follow_phis = true)
{
   nir_foreach_use (src, ssa) {
      switch (src->parent_instr->type) {
      case nir_instr_type_alu: {
         nir_alu_instr* alu = nir_instr_as_alu(src->parent_instr);
         if (alu->op != nir_op_unpack_64_2x32_split_x && alu->op != nir_op_unpack_64_2x32_split_y)
            return false;
         if (!only_used_by_cross_lane_instrs(&alu->dest.dest.ssa, follow_phis))
            return false;

         continue;
      }
      case nir_instr_type_intrinsic: {
         nir_intrinsic_instr* intrin = nir_instr_as_intrinsic(src->parent_instr);
         if (intrin->intrinsic != nir_intrinsic_read_invocation &&
             intrin->intrinsic != nir_intrinsic_read_first_invocation &&
             intrin->intrinsic != nir_intrinsic_lane_permute_16_amd)
            return false;

         continue;
      }
         /* Don't follow more than one phi, to avoid infinite loops. */
         if (!follow_phis)
            return false;

         nir_phi_instr* phi = nir_instr_as_phi(src->parent_instr);
         if (!only_used_by_cross_lane_instrs(&phi->dest.ssa, false))
            return false;

         continue;
      }
      default: return false;
      }
   }

   return true;
}

/* If one side of a divergent IF ends in a branch and the other doesn't, we
 * might have to emit the contents of the side without the branch at the merge
 * block instead. This way, any SGPR live-out of the side without the branch
 * can be used without creating a linear phi in the invert or merge block.
 */
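/* For example, when the else-side ends in a jump, the transform is roughly:
 *
 *    if (cond) { A } else { B; break; }  -->  if (cond) { } else { B; break; }  A
 *
 * so that A executes in the merge block.
 */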
bool
sanitize_if(nir_function_impl* impl, nir_if* nif)
{
   // TODO: skip this if the condition is uniform and there are no divergent breaks/continues?

   nir_block* then_block = nir_if_last_then_block(nif);
   nir_block* else_block = nir_if_last_else_block(nif);
   bool then_jump = nir_block_ends_in_jump(then_block) ||
                    !is_block_reachable(impl, nir_if_first_then_block(nif), then_block);
   bool else_jump = nir_block_ends_in_jump(else_block) ||
                    !is_block_reachable(impl, nir_if_first_else_block(nif), else_block);
   if (then_jump == else_jump)
      return false;

   /* If the continue-from block is empty then return, as there is nothing to
    * move.
    */
   if (nir_cf_list_is_empty_block(else_jump ? &nif->then_list : &nif->else_list))
      return false;

   /* Even though this if statement has a jump on one side, we may still have
    * phis afterwards.  Single-source phis can be produced by loop unrolling
    * or dead control-flow passes and are perfectly legal.  Run a quick phi
    * removal on the block after the if to clean up any such phis.
    */
   nir_opt_remove_phis_block(nir_cf_node_as_block(nir_cf_node_next(&nif->cf_node)));

   /* Finally, move the continue-from branch after the if-statement. */
   nir_block* last_continue_from_blk = else_jump ? then_block : else_block;
   nir_block* first_continue_from_blk =
      else_jump ? nir_if_first_then_block(nif) : nir_if_first_else_block(nif);

   nir_cf_list tmp;
   nir_cf_extract(&tmp, nir_before_block(first_continue_from_blk),
                  nir_after_block(last_continue_from_blk));
   nir_cf_reinsert(&tmp, nir_after_cf_node(&nif->cf_node));

   return true;
}

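/* Recursively apply sanitize_if() to every if-statement in the given CF list,
 * innermost lists first. Returns true if any control flow was moved.
 */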
bool
sanitize_cf_list(nir_function_impl* impl, struct exec_list* cf_list)
{
   bool progress = false;
   foreach_list_typed (nir_cf_node, cf_node, node, cf_list) {
      switch (cf_node->type) {
      case nir_cf_node_block: break;
      case nir_cf_node_if: {
         nir_if* nif = nir_cf_node_as_if(cf_node);
         progress |= sanitize_cf_list(impl, &nif->then_list);
         progress |= sanitize_cf_list(impl, &nif->else_list);
         progress |= sanitize_if(impl, nif);
         break;
      }
      case nir_cf_node_loop: {
         nir_loop* loop = nir_cf_node_as_loop(cf_node);
         progress |= sanitize_cf_list(impl, &loop->body);
         break;
      }
      case nir_cf_node_function: unreachable("Invalid cf type");
      }
   }

   return progress;
}

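/* If the given SSA def is an iadd, use NIR's range analysis to check whether
 * the addition can be proven not to wrap in 32 bits, and set the
 * no_unsigned_wrap flag if so. This is mainly useful for offset computations,
 * where a nuw addition can safely be split into a base plus constant offset.
 */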
void
apply_nuw_to_ssa(isel_context* ctx, nir_ssa_def* ssa)
{
   nir_ssa_scalar scalar;
   scalar.def = ssa;
   scalar.comp = 0;

   if (!nir_ssa_scalar_is_alu(scalar) || nir_ssa_scalar_alu_op(scalar) != nir_op_iadd)
      return;

   nir_alu_instr* add = nir_instr_as_alu(ssa->parent_instr);

   if (add->no_unsigned_wrap)
      return;

   nir_ssa_scalar src0 = nir_ssa_scalar_chase_alu_src(scalar, 0);
   nir_ssa_scalar src1 = nir_ssa_scalar_chase_alu_src(scalar, 1);

   if (nir_ssa_scalar_is_const(src0)) {
      nir_ssa_scalar tmp = src0;
      src0 = src1;
      src1 = tmp;
   }

   uint32_t src1_ub = nir_unsigned_upper_bound(ctx->shader, ctx->range_ht, src1, &ctx->ub_config);
   add->no_unsigned_wrap =
      !nir_addition_might_overflow(ctx->shader, ctx->range_ht, src0, src1_ub, &ctx->ub_config);
}

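/* Apply the nuw flag to the uniform offset sources of memory access
 * intrinsics: src[0] for constant/uniform/push-constant loads, src[1] for
 * UBO/SSBO loads and src[2] for SSBO stores.
 */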
void
apply_nuw_to_offsets(isel_context* ctx, nir_function_impl* impl)
{
   nir_foreach_block (block, impl) {
      nir_foreach_instr (instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;
         nir_intrinsic_instr* intrin = nir_instr_as_intrinsic(instr);

         switch (intrin->intrinsic) {
         case nir_intrinsic_load_constant:
         case nir_intrinsic_load_uniform:
         case nir_intrinsic_load_push_constant:
            if (!nir_src_is_divergent(intrin->src[0]))
               apply_nuw_to_ssa(ctx, intrin->src[0].ssa);
            break;
         case nir_intrinsic_load_ubo:
         case nir_intrinsic_load_ssbo:
            if (!nir_src_is_divergent(intrin->src[1]))
               apply_nuw_to_ssa(ctx, intrin->src[1].ssa);
            break;
         case nir_intrinsic_store_ssbo:
            if (!nir_src_is_divergent(intrin->src[2]))
               apply_nuw_to_ssa(ctx, intrin->src[2].ssa);
            break;
         default: break;
         }
      }
   }
}

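/* Map an SSA def's type, component count and bit size to an ACO register
 * class. Booleans (bitsize == 1) are lane masks and get one lane-mask-sized
 * SGPR slot per component (e.g. s2 per component in wave64); everything else
 * is sized in bytes, e.g. a divergent 32-bit vec3 becomes v3.
 */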
RegClass
get_reg_class(isel_context* ctx, RegType type, unsigned components, unsigned bitsize)
{
   if (bitsize == 1)
      return RegClass(RegType::sgpr, ctx->program->lane_mask.size() * components);
   else
      return RegClass::get(type, components * bitsize / 8u);
}

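/* Record clip/cull distance export information and enable GFX10+ early
 * rasterization when the shader has no param exports (see the comment below).
 */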
void
setup_vs_output_info(isel_context* ctx, nir_shader* nir,
                     const radv_vs_output_info* outinfo)
{
   ctx->export_clip_dists = outinfo->export_clip_dists;
   ctx->num_clip_distances = util_bitcount(outinfo->clip_dist_mask);
   ctx->num_cull_distances = util_bitcount(outinfo->cull_dist_mask);

   assert(ctx->num_clip_distances + ctx->num_cull_distances <= 8);

   /* GFX10+ early rasterization:
    * When there are no param exports in an NGG (or legacy VS) shader,
    * RADV sets NO_PC_EXPORT=1, which means the HW will start clipping and rasterization
    * as soon as it encounters a DONE pos export. When this happens, PS waves can launch
    * before the NGG (or VS) waves finish.
    */
   ctx->program->early_rast = ctx->program->chip_class >= GFX10 && outinfo->param_exports == 0;
}

void
setup_vs_variables(isel_context* ctx, nir_shader* nir)
{
   if (ctx->stage == vertex_vs || ctx->stage == vertex_ngg) {
      setup_vs_output_info(ctx, nir, &ctx->program->info->vs.outinfo);

      /* TODO: NGG streamout */
      if (ctx->stage.hw == HWStage::NGG)
         assert(!ctx->args->shader_info->so.num_outputs);
   }

   if (ctx->stage == vertex_ngg) {
      ctx->program->config->lds_size =
         DIV_ROUND_UP(nir->info.shared_size, ctx->program->dev.lds_encoding_granule);
      assert((ctx->program->config->lds_size * ctx->program->dev.lds_encoding_granule) <
             (32 * 1024));
   }
}

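/* GS LDS size setup: legacy merged (VS/TES + GS) shaders take the size from
 * the GS ring info, which is already in units of the allocation granule,
 * while NGG shaders derive it from the NIR shared memory size.
 */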
void
setup_gs_variables(isel_context* ctx, nir_shader* nir)
{
   if (ctx->stage == vertex_geometry_gs || ctx->stage == tess_eval_geometry_gs) {
      ctx->program->config->lds_size =
         ctx->program->info->gs_ring_info.lds_size; /* Already in units of the alloc granularity */
   } else if (ctx->stage == vertex_geometry_ngg || ctx->stage == tess_eval_geometry_ngg) {
      setup_vs_output_info(ctx, nir, &ctx->program->info->vs.outinfo);

      ctx->program->config->lds_size =
         DIV_ROUND_UP(nir->info.shared_size, ctx->program->dev.lds_encoding_granule);
   }
}

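/* Copy TCS-related information from RADV's shader info into the context: the
 * VS->TCS I/O equivalence flag (tcs_in_out_eq), the mask of temp-only TCS
 * inputs, the number of tessellation patches, and the LDS size (in LDS
 * blocks).
 */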
void
setup_tcs_info(isel_context* ctx, nir_shader* nir, nir_shader* vs)
{
   ctx->tcs_in_out_eq = ctx->args->shader_info->vs.tcs_in_out_eq;
   ctx->tcs_temp_only_inputs = ctx->args->shader_info->vs.tcs_temp_only_input_mask;
   ctx->tcs_num_patches = ctx->args->shader_info->num_tess_patches;
   ctx->program->config->lds_size = ctx->args->shader_info->tcs.num_lds_blocks;
}

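/* TES variable setup: like setup_vs_variables(), but using the TES output
 * info, and additionally records the number of tessellation patches.
 */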
void
setup_tes_variables(isel_context* ctx, nir_shader* nir)
{
   ctx->tcs_num_patches = ctx->args->shader_info->num_tess_patches;

   if (ctx->stage == tess_eval_vs || ctx->stage == tess_eval_ngg) {
      setup_vs_output_info(ctx, nir, &ctx->program->info->tes.outinfo);

      /* TODO: NGG streamout */
      if (ctx->stage.hw == HWStage::NGG)
         assert(!ctx->args->shader_info->so.num_outputs);
   }

   if (ctx->stage == tess_eval_ngg) {
      ctx->program->config->lds_size =
         DIV_ROUND_UP(nir->info.shared_size, ctx->program->dev.lds_encoding_granule);
      assert((ctx->program->config->lds_size * ctx->program->dev.lds_encoding_granule) <
             (32 * 1024));
   }
}

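/* Per-stage variable setup. Compute shaders only need their shared memory
 * size converted to LDS encoding granule units; the other stages dispatch to
 * the stage-specific helpers above.
 */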
void
setup_variables(isel_context* ctx, nir_shader* nir)
{
   switch (nir->info.stage) {
   case MESA_SHADER_FRAGMENT: {
      break;
   }
   case MESA_SHADER_COMPUTE: {
      ctx->program->config->lds_size =
         DIV_ROUND_UP(nir->info.shared_size, ctx->program->dev.lds_encoding_granule);
      break;
   }
   case MESA_SHADER_VERTEX: {
      setup_vs_variables(ctx, nir);
      break;
   }
   case MESA_SHADER_GEOMETRY: {
      setup_gs_variables(ctx, nir);
      break;
   }
   case MESA_SHADER_TESS_CTRL: {
      break;
   }
   case MESA_SHADER_TESS_EVAL: {
      setup_tes_variables(ctx, nir);
      break;
   }
   default: unreachable("Unhandled shader stage.");
   }

   /* Make sure we fit the available LDS space. */
   assert((ctx->program->config->lds_size * ctx->program->dev.lds_encoding_granule) <=
          ctx->program->dev.lds_limit);
}

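/* Prepare a NIR shader for instruction selection: set up per-stage variables,
 * convert to LCSSA so that loop live-outs become phis, scalarize phis, and
 * re-index the SSA defs so they can be used as indices into the
 * register-class table built in init_context().
 */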
void
setup_nir(isel_context* ctx, nir_shader* nir)
{
   /* the variable setup has to be done before lower_io / CSE */
   setup_variables(ctx, nir);

   nir_convert_to_lcssa(nir, true, false);
   nir_lower_phis_to_scalar(nir, true);

   nir_function_impl* func = nir_shader_get_entrypoint(nir);
   nir_index_ssa_defs(func);
}

} /* end namespace */

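/* Per-shader context initialization: run the NIR analyses that instruction
 * selection depends on (range and divergence analysis), sanitize divergent
 * control flow, and assign an ACO register class to every NIR SSA def. Phi
 * register classes can depend on blocks visited later, so the assignment
 * below iterates until it reaches a fixed point.
 */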
void
init_context(isel_context* ctx, nir_shader* shader)
{
   nir_function_impl* impl = nir_shader_get_entrypoint(shader);
   ctx->shader = shader;

   /* Init NIR range analysis. */
   ctx->range_ht = _mesa_pointer_hash_table_create(NULL);
   ctx->ub_config.min_subgroup_size = 64;
   ctx->ub_config.max_subgroup_size = 64;
   if (ctx->shader->info.stage == MESA_SHADER_COMPUTE && ctx->args->shader_info->cs.subgroup_size) {
      ctx->ub_config.min_subgroup_size = ctx->args->shader_info->cs.subgroup_size;
      ctx->ub_config.max_subgroup_size = ctx->args->shader_info->cs.subgroup_size;
   }
   ctx->ub_config.max_workgroup_invocations = 2048;
   ctx->ub_config.max_workgroup_count[0] = 65535;
   ctx->ub_config.max_workgroup_count[1] = 65535;
   ctx->ub_config.max_workgroup_count[2] = 65535;
   ctx->ub_config.max_workgroup_size[0] = 2048;
   ctx->ub_config.max_workgroup_size[1] = 2048;
   ctx->ub_config.max_workgroup_size[2] = 2048;
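
   /* Compute the maximum possible value of each vertex attribute for range
    * analysis. For UNORM and for the USCALED cases below, the maximum is the
    * IEEE-754 bit pattern of the largest representable float value
    * (e.g. 0x437f0000 is 255.0f for 8-bit USCALED).
    */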
   for (unsigned i = 0; i < MAX_VERTEX_ATTRIBS; i++) {
      unsigned attrib_format = ctx->options->key.vs.vertex_attribute_formats[i];
      unsigned dfmt = attrib_format & 0xf;
      unsigned nfmt = (attrib_format >> 4) & 0x7;

      uint32_t max = UINT32_MAX;
      if (nfmt == V_008F0C_BUF_NUM_FORMAT_UNORM) {
         max = 0x3f800000u;
      } else if (nfmt == V_008F0C_BUF_NUM_FORMAT_UINT || nfmt == V_008F0C_BUF_NUM_FORMAT_USCALED) {
         bool uscaled = nfmt == V_008F0C_BUF_NUM_FORMAT_USCALED;
         switch (dfmt) {
         case V_008F0C_BUF_DATA_FORMAT_8:
         case V_008F0C_BUF_DATA_FORMAT_8_8:
         case V_008F0C_BUF_DATA_FORMAT_8_8_8_8: max = uscaled ? 0x437f0000u : UINT8_MAX; break;
         case V_008F0C_BUF_DATA_FORMAT_10_10_10_2:
         case V_008F0C_BUF_DATA_FORMAT_2_10_10_10: max = uscaled ? 0x447fc000u : 1023; break;
         case V_008F0C_BUF_DATA_FORMAT_10_11_11:
         case V_008F0C_BUF_DATA_FORMAT_11_11_10: max = uscaled ? 0x44ffe000u : 2047; break;
         case V_008F0C_BUF_DATA_FORMAT_16:
         case V_008F0C_BUF_DATA_FORMAT_16_16:
         case V_008F0C_BUF_DATA_FORMAT_16_16_16_16: max = uscaled ? 0x477fff00u : UINT16_MAX; break;
         case V_008F0C_BUF_DATA_FORMAT_32:
         case V_008F0C_BUF_DATA_FORMAT_32_32:
         case V_008F0C_BUF_DATA_FORMAT_32_32_32:
         case V_008F0C_BUF_DATA_FORMAT_32_32_32_32: max = uscaled ? 0x4f800000u : UINT32_MAX; break;
         }
      }
      ctx->ub_config.vertex_attrib_max[i] = max;
   }

   nir_divergence_analysis(shader);
   nir_opt_uniform_atomics(shader);

   apply_nuw_to_offsets(ctx, impl);

   /* sanitize control flow */
   sanitize_cf_list(impl, &impl->body);
   nir_metadata_preserve(impl, nir_metadata_none);

   /* we'll need these for isel */
   nir_metadata_require(impl, nir_metadata_block_index);

   if (!ctx->stage.has(SWStage::GSCopy) && ctx->options->dump_preoptir) {
      fprintf(stderr, "NIR shader before instruction selection:\n");
      nir_print_shader(shader, stderr);
   }

   ctx->first_temp_id = ctx->program->peekAllocationId();
   ctx->program->allocateRange(impl->ssa_alloc);
   RegClass* regclasses = ctx->program->temp_rc.data() + ctx->first_temp_id;

   std::unique_ptr<unsigned[]> nir_to_aco{new unsigned[impl->num_blocks]()};

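   /* Assign a register class to every SSA def. A phi's class depends on its
    * sources, some of which are defined in blocks visited later, so iterate
    * until no phi's register class changes anymore.
    */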
   /* TODO: make this recursive to improve compile times */
   bool done = false;
   while (!done) {
      done = true;
      nir_foreach_block (block, impl) {
         nir_foreach_instr (instr, block) {
            switch (instr->type) {
            case nir_instr_type_alu: {
               nir_alu_instr* alu_instr = nir_instr_as_alu(instr);
               RegType type =
                  nir_dest_is_divergent(alu_instr->dest.dest) ? RegType::vgpr : RegType::sgpr;
               switch (alu_instr->op) {
               case nir_op_fmul:
               case nir_op_fadd:
               case nir_op_fsub:
               case nir_op_fmax:
               case nir_op_fmin:
               case nir_op_fneg:
               case nir_op_fabs:
               case nir_op_fsat:
               case nir_op_fsign:
               case nir_op_frcp:
               case nir_op_frsq:
               case nir_op_fsqrt:
               case nir_op_fexp2:
               case nir_op_flog2:
               case nir_op_ffract:
               case nir_op_ffloor:
               case nir_op_fceil:
               case nir_op_ftrunc:
               case nir_op_fround_even:
               case nir_op_fsin:
               case nir_op_fcos:
               case nir_op_f2f16:
               case nir_op_f2f16_rtz:
               case nir_op_f2f16_rtne:
               case nir_op_f2f32:
               case nir_op_f2f64:
               case nir_op_u2f16:
               case nir_op_u2f32:
               case nir_op_u2f64:
               case nir_op_i2f16:
               case nir_op_i2f32:
               case nir_op_i2f64:
               case nir_op_pack_half_2x16_split:
               case nir_op_unpack_half_2x16_split_x:
               case nir_op_unpack_half_2x16_split_y:
               case nir_op_fddx:
               case nir_op_fddy:
               case nir_op_fddx_fine:
               case nir_op_fddy_fine:
               case nir_op_fddx_coarse:
               case nir_op_fddy_coarse:
               case nir_op_fquantize2f16:
               case nir_op_ldexp:
               case nir_op_frexp_sig:
               case nir_op_frexp_exp:
               case nir_op_cube_face_index_amd:
               case nir_op_cube_face_coord_amd:
               case nir_op_sad_u8x4:
               case nir_op_iadd_sat:
               case nir_op_udot_4x8_uadd:
               case nir_op_sdot_4x8_iadd:
               case nir_op_udot_4x8_uadd_sat:
               case nir_op_sdot_4x8_iadd_sat:
               case nir_op_udot_2x16_uadd:
               case nir_op_sdot_2x16_iadd:
               case nir_op_udot_2x16_uadd_sat:
               case nir_op_sdot_2x16_iadd_sat: type = RegType::vgpr; break;
               case nir_op_f2i16:
               case nir_op_f2u16:
               case nir_op_f2i32:
               case nir_op_f2u32:
               case nir_op_f2i64:
               case nir_op_f2u64:
               case nir_op_b2i8:
               case nir_op_b2i16:
               case nir_op_b2i32:
               case nir_op_b2i64:
               case nir_op_b2b32:
               case nir_op_b2f16:
               case nir_op_b2f32:
               case nir_op_mov: break;
               case nir_op_iadd:
               case nir_op_isub:
               case nir_op_imul:
               case nir_op_imin:
               case nir_op_imax:
               case nir_op_umin:
               case nir_op_umax:
               case nir_op_ishl:
               case nir_op_ishr:
               case nir_op_ushr:
                  /* packed 16-bit instructions have to be VGPR */
                  type = alu_instr->dest.dest.ssa.num_components == 2 ? RegType::vgpr : type;
                  FALLTHROUGH;
               default:
                  for (unsigned i = 0; i < nir_op_infos[alu_instr->op].num_inputs; i++) {
                     if (regclasses[alu_instr->src[i].src.ssa->index].type() == RegType::vgpr)
                        type = RegType::vgpr;
                  }
                  break;
               }

               RegClass rc = get_reg_class(ctx, type, alu_instr->dest.dest.ssa.num_components,
                                           alu_instr->dest.dest.ssa.bit_size);
               regclasses[alu_instr->dest.dest.ssa.index] = rc;
               break;
            }
            case nir_instr_type_load_const: {
               unsigned num_components = nir_instr_as_load_const(instr)->def.num_components;
               unsigned bit_size = nir_instr_as_load_const(instr)->def.bit_size;
               RegClass rc = get_reg_class(ctx, RegType::sgpr, num_components, bit_size);
               regclasses[nir_instr_as_load_const(instr)->def.index] = rc;
               break;
            }
            case nir_instr_type_intrinsic: {
               nir_intrinsic_instr* intrinsic = nir_instr_as_intrinsic(instr);
               if (!nir_intrinsic_infos[intrinsic->intrinsic].has_dest)
                  break;
               RegType type = RegType::sgpr;
               switch (intrinsic->intrinsic) {
               case nir_intrinsic_load_push_constant:
               case nir_intrinsic_load_workgroup_id:
               case nir_intrinsic_load_num_workgroups:
               case nir_intrinsic_load_ray_launch_size:
               case nir_intrinsic_load_subgroup_id:
               case nir_intrinsic_load_num_subgroups:
               case nir_intrinsic_load_first_vertex:
               case nir_intrinsic_load_base_instance:
               case nir_intrinsic_vote_all:
               case nir_intrinsic_vote_any:
               case nir_intrinsic_read_first_invocation:
               case nir_intrinsic_read_invocation:
               case nir_intrinsic_first_invocation:
               case nir_intrinsic_ballot:
               case nir_intrinsic_load_ring_tess_factors_amd:
               case nir_intrinsic_load_ring_tess_factors_offset_amd:
               case nir_intrinsic_load_ring_tess_offchip_amd:
               case nir_intrinsic_load_ring_tess_offchip_offset_amd:
               case nir_intrinsic_load_ring_esgs_amd:
               case nir_intrinsic_load_ring_es2gs_offset_amd:
               case nir_intrinsic_image_deref_samples:
               case nir_intrinsic_has_input_vertex_amd:
               case nir_intrinsic_has_input_primitive_amd:
               case nir_intrinsic_load_workgroup_num_input_vertices_amd:
               case nir_intrinsic_load_workgroup_num_input_primitives_amd:
               case nir_intrinsic_load_shader_query_enabled_amd:
               case nir_intrinsic_load_cull_front_face_enabled_amd:
               case nir_intrinsic_load_cull_back_face_enabled_amd:
               case nir_intrinsic_load_cull_ccw_amd:
               case nir_intrinsic_load_cull_small_primitives_enabled_amd:
               case nir_intrinsic_load_cull_any_enabled_amd:
               case nir_intrinsic_load_viewport_x_scale:
               case nir_intrinsic_load_viewport_y_scale:
               case nir_intrinsic_load_viewport_x_offset:
               case nir_intrinsic_load_viewport_y_offset: type = RegType::sgpr; break;
               case nir_intrinsic_load_sample_id:
               case nir_intrinsic_load_sample_mask_in:
               case nir_intrinsic_load_input:
               case nir_intrinsic_load_output:
               case nir_intrinsic_load_input_vertex:
               case nir_intrinsic_load_per_vertex_input:
               case nir_intrinsic_load_per_vertex_output:
               case nir_intrinsic_load_vertex_id:
               case nir_intrinsic_load_vertex_id_zero_base:
               case nir_intrinsic_load_barycentric_sample:
               case nir_intrinsic_load_barycentric_pixel:
               case nir_intrinsic_load_barycentric_model:
               case nir_intrinsic_load_barycentric_centroid:
               case nir_intrinsic_load_barycentric_at_sample:
               case nir_intrinsic_load_barycentric_at_offset:
               case nir_intrinsic_load_interpolated_input:
               case nir_intrinsic_load_frag_coord:
               case nir_intrinsic_load_frag_shading_rate:
               case nir_intrinsic_load_sample_pos:
               case nir_intrinsic_load_local_invocation_id:
               case nir_intrinsic_load_local_invocation_index:
               case nir_intrinsic_load_subgroup_invocation:
               case nir_intrinsic_load_tess_coord:
               case nir_intrinsic_write_invocation_amd:
               case nir_intrinsic_mbcnt_amd:
               case nir_intrinsic_byte_permute_amd:
               case nir_intrinsic_lane_permute_16_amd:
               case nir_intrinsic_load_instance_id:
               case nir_intrinsic_ssbo_atomic_add:
               case nir_intrinsic_ssbo_atomic_imin:
               case nir_intrinsic_ssbo_atomic_umin:
               case nir_intrinsic_ssbo_atomic_imax:
               case nir_intrinsic_ssbo_atomic_umax:
               case nir_intrinsic_ssbo_atomic_and:
               case nir_intrinsic_ssbo_atomic_or:
               case nir_intrinsic_ssbo_atomic_xor:
               case nir_intrinsic_ssbo_atomic_exchange:
               case nir_intrinsic_ssbo_atomic_comp_swap:
               case nir_intrinsic_ssbo_atomic_fmin:
               case nir_intrinsic_ssbo_atomic_fmax:
               case nir_intrinsic_global_atomic_add:
               case nir_intrinsic_global_atomic_imin:
               case nir_intrinsic_global_atomic_umin:
               case nir_intrinsic_global_atomic_imax:
               case nir_intrinsic_global_atomic_umax:
               case nir_intrinsic_global_atomic_and:
               case nir_intrinsic_global_atomic_or:
               case nir_intrinsic_global_atomic_xor:
               case nir_intrinsic_global_atomic_exchange:
               case nir_intrinsic_global_atomic_comp_swap:
               case nir_intrinsic_global_atomic_fmin:
               case nir_intrinsic_global_atomic_fmax:
               case nir_intrinsic_image_deref_atomic_add:
               case nir_intrinsic_image_deref_atomic_umin:
               case nir_intrinsic_image_deref_atomic_imin:
               case nir_intrinsic_image_deref_atomic_umax:
               case nir_intrinsic_image_deref_atomic_imax:
               case nir_intrinsic_image_deref_atomic_and:
               case nir_intrinsic_image_deref_atomic_or:
               case nir_intrinsic_image_deref_atomic_xor:
               case nir_intrinsic_image_deref_atomic_exchange:
               case nir_intrinsic_image_deref_atomic_comp_swap:
               case nir_intrinsic_image_deref_atomic_fmin:
               case nir_intrinsic_image_deref_atomic_fmax:
               case nir_intrinsic_image_deref_size:
               case nir_intrinsic_shared_atomic_add:
               case nir_intrinsic_shared_atomic_imin:
               case nir_intrinsic_shared_atomic_umin:
               case nir_intrinsic_shared_atomic_imax:
               case nir_intrinsic_shared_atomic_umax:
               case nir_intrinsic_shared_atomic_and:
               case nir_intrinsic_shared_atomic_or:
               case nir_intrinsic_shared_atomic_xor:
               case nir_intrinsic_shared_atomic_exchange:
               case nir_intrinsic_shared_atomic_comp_swap:
               case nir_intrinsic_shared_atomic_fadd:
               case nir_intrinsic_shared_atomic_fmin:
               case nir_intrinsic_shared_atomic_fmax:
               case nir_intrinsic_load_scratch:
               case nir_intrinsic_load_invocation_id:
               case nir_intrinsic_load_primitive_id:
               case nir_intrinsic_load_buffer_amd:
               case nir_intrinsic_load_tess_rel_patch_id_amd:
               case nir_intrinsic_load_gs_vertex_offset_amd:
               case nir_intrinsic_load_initial_edgeflags_amd:
               case nir_intrinsic_load_packed_passthrough_primitive_amd:
               case nir_intrinsic_gds_atomic_add_amd:
               case nir_intrinsic_bvh64_intersect_ray_amd:
               case nir_intrinsic_load_cull_small_prim_precision_amd: type = RegType::vgpr; break;
               case nir_intrinsic_load_shared:
                  /* When the result of these loads is only used by cross-lane instructions,
                   * it is beneficial to use a VGPR destination. This allows the s_waitcnt
                   * to be placed further down, which decreases latency.
                   */
                  if (only_used_by_cross_lane_instrs(&intrinsic->dest.ssa)) {
                     type = RegType::vgpr;
                     break;
                  }
                  FALLTHROUGH;
               case nir_intrinsic_shuffle:
               case nir_intrinsic_quad_broadcast:
               case nir_intrinsic_quad_swap_horizontal:
               case nir_intrinsic_quad_swap_vertical:
               case nir_intrinsic_quad_swap_diagonal:
               case nir_intrinsic_quad_swizzle_amd:
               case nir_intrinsic_masked_swizzle_amd:
               case nir_intrinsic_inclusive_scan:
               case nir_intrinsic_exclusive_scan:
               case nir_intrinsic_reduce:
               case nir_intrinsic_load_sbt_amd:
               case nir_intrinsic_load_ubo:
               case nir_intrinsic_load_ssbo:
               case nir_intrinsic_load_global:
               case nir_intrinsic_load_global_constant:
               case nir_intrinsic_vulkan_resource_index:
               case nir_intrinsic_get_ssbo_size:
                  type = nir_dest_is_divergent(intrinsic->dest) ? RegType::vgpr : RegType::sgpr;
                  break;
               case nir_intrinsic_load_view_index:
                  type = ctx->stage == fragment_fs ? RegType::vgpr : RegType::sgpr;
                  break;
               default:
                  for (unsigned i = 0; i < nir_intrinsic_infos[intrinsic->intrinsic].num_srcs;
                       i++) {
                     if (regclasses[intrinsic->src[i].ssa->index].type() == RegType::vgpr)
                        type = RegType::vgpr;
                  }
                  break;
               }
               RegClass rc = get_reg_class(ctx, type, intrinsic->dest.ssa.num_components,
                                           intrinsic->dest.ssa.bit_size);
               regclasses[intrinsic->dest.ssa.index] = rc;
               break;
            }
            case nir_instr_type_tex: {
               nir_tex_instr* tex = nir_instr_as_tex(instr);
               RegType type = nir_dest_is_divergent(tex->dest) ? RegType::vgpr : RegType::sgpr;

               if (tex->op == nir_texop_texture_samples) {
                  assert(!tex->dest.ssa.divergent);
               }

               RegClass rc =
                  get_reg_class(ctx, type, tex->dest.ssa.num_components, tex->dest.ssa.bit_size);
               regclasses[tex->dest.ssa.index] = rc;
               break;
            }
            case nir_instr_type_parallel_copy: {
               nir_foreach_parallel_copy_entry (entry, nir_instr_as_parallel_copy(instr)) {
                  regclasses[entry->dest.ssa.index] = regclasses[entry->src.ssa->index];
               }
               break;
            }
            case nir_instr_type_ssa_undef: {
               unsigned num_components = nir_instr_as_ssa_undef(instr)->def.num_components;
               unsigned bit_size = nir_instr_as_ssa_undef(instr)->def.bit_size;
               RegClass rc = get_reg_class(ctx, RegType::sgpr, num_components, bit_size);
               regclasses[nir_instr_as_ssa_undef(instr)->def.index] = rc;
               break;
            }
            case nir_instr_type_phi: {
               nir_phi_instr* phi = nir_instr_as_phi(instr);
               RegType type = RegType::sgpr;
               unsigned num_components = phi->dest.ssa.num_components;
               assert((phi->dest.ssa.bit_size != 1 || num_components == 1) &&
                      "Multiple components not supported on boolean phis.");

               if (nir_dest_is_divergent(phi->dest)) {
                  type = RegType::vgpr;
               } else {
                  nir_foreach_phi_src (src, phi) {
                     if (regclasses[src->src.ssa->index].type() == RegType::vgpr)
                        type = RegType::vgpr;
                  }
               }

               RegClass rc = get_reg_class(ctx, type, num_components, phi->dest.ssa.bit_size);
               if (rc != regclasses[phi->dest.ssa.index])
                  done = false;
               regclasses[phi->dest.ssa.index] = rc;
               break;
            }
            default: break;
            }
         }
      }
   }

   ctx->program->config->spi_ps_input_ena = ctx->args->shader_info->ps.spi_ps_input;
   ctx->program->config->spi_ps_input_addr = ctx->args->shader_info->ps.spi_ps_input;

   ctx->cf_info.nir_to_aco = std::move(nir_to_aco);

   /* align and copy constant data */
   while (ctx->program->constant_data.size() % 4u)
      ctx->program->constant_data.push_back(0);
   ctx->constant_data_offset = ctx->program->constant_data.size();
   ctx->program->constant_data.insert(ctx->program->constant_data.end(),
                                      (uint8_t*)shader->constant_data,
                                      (uint8_t*)shader->constant_data + shader->constant_data_size);
}

void
cleanup_context(isel_context* ctx)
{
   _mesa_hash_table_destroy(ctx->range_ht, NULL);
}

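/* Create the isel_context for a (possibly merged) shader: map the combination
 * of software stages onto the hardware stage that executes it, initialize the
 * program, run per-shader NIR setup and compute the per-wave scratch size.
 */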
isel_context
setup_isel_context(Program* program, unsigned shader_count, struct nir_shader* const* shaders,
                   ac_shader_config* config, const struct radv_shader_args* args, bool is_gs_copy_shader)
{
   SWStage sw_stage = SWStage::None;
   for (unsigned i = 0; i < shader_count; i++) {
      switch (shaders[i]->info.stage) {
      case MESA_SHADER_VERTEX: sw_stage = sw_stage | SWStage::VS; break;
      case MESA_SHADER_TESS_CTRL: sw_stage = sw_stage | SWStage::TCS; break;
      case MESA_SHADER_TESS_EVAL: sw_stage = sw_stage | SWStage::TES; break;
      case MESA_SHADER_GEOMETRY:
         sw_stage = sw_stage | (is_gs_copy_shader ? SWStage::GSCopy : SWStage::GS);
         break;
      case MESA_SHADER_FRAGMENT: sw_stage = sw_stage | SWStage::FS; break;
      case MESA_SHADER_COMPUTE: sw_stage = sw_stage | SWStage::CS; break;
      default: unreachable("Shader stage not implemented");
      }
   }
   bool gfx9_plus = args->options->chip_class >= GFX9;
   bool ngg = args->shader_info->is_ngg && args->options->chip_class >= GFX10;
   HWStage hw_stage{};
   if (sw_stage == SWStage::VS && args->shader_info->vs.as_es && !ngg)
      hw_stage = HWStage::ES;
   else if (sw_stage == SWStage::VS && !args->shader_info->vs.as_ls && !ngg)
      hw_stage = HWStage::VS;
   else if (sw_stage == SWStage::VS && ngg)
      hw_stage = HWStage::NGG; /* GFX10/NGG: VS without GS uses the HW GS stage */
   else if (sw_stage == SWStage::GS)
      hw_stage = HWStage::GS;
   else if (sw_stage == SWStage::FS)
      hw_stage = HWStage::FS;
   else if (sw_stage == SWStage::CS)
      hw_stage = HWStage::CS;
   else if (sw_stage == SWStage::GSCopy)
      hw_stage = HWStage::VS;
   else if (sw_stage == SWStage::VS_GS && gfx9_plus && !ngg)
      hw_stage = HWStage::GS; /* GFX6-9: VS+GS merged into a GS (and GFX10/legacy) */
   else if (sw_stage == SWStage::VS_GS && ngg)
      hw_stage = HWStage::NGG; /* GFX10+: VS+GS merged into an NGG GS */
   else if (sw_stage == SWStage::VS && args->shader_info->vs.as_ls)
      hw_stage = HWStage::LS; /* GFX6-8: VS is a Local Shader, when tessellation is used */
   else if (sw_stage == SWStage::TCS)
      hw_stage = HWStage::HS; /* GFX6-8: TCS is a Hull Shader */
   else if (sw_stage == SWStage::VS_TCS)
      hw_stage = HWStage::HS; /* GFX9-10: VS+TCS merged into a Hull Shader */
   else if (sw_stage == SWStage::TES && !args->shader_info->tes.as_es && !ngg)
      hw_stage = HWStage::VS; /* GFX6-9: TES without GS uses the HW VS stage (and GFX10/legacy) */
   else if (sw_stage == SWStage::TES && !args->shader_info->tes.as_es && ngg)
      hw_stage = HWStage::NGG; /* GFX10/NGG: TES without GS */
   else if (sw_stage == SWStage::TES && args->shader_info->tes.as_es && !ngg)
      hw_stage = HWStage::ES; /* GFX6-8: TES is an Export Shader */
   else if (sw_stage == SWStage::TES_GS && gfx9_plus && !ngg)
      hw_stage = HWStage::GS; /* GFX9: TES+GS merged into a GS (and GFX10/legacy) */
   else if (sw_stage == SWStage::TES_GS && ngg)
      hw_stage = HWStage::NGG; /* GFX10+: TES+GS merged into an NGG GS */
   else
      unreachable("Shader stage not implemented");

   init_program(program, Stage{hw_stage, sw_stage}, args->shader_info, args->options->chip_class,
                args->options->family, args->options->wgp_mode, config);

   isel_context ctx = {};
   ctx.program = program;
   ctx.args = args;
   ctx.options = args->options;
   ctx.stage = program->stage;

   program->workgroup_size = args->shader_info->workgroup_size;
   assert(program->workgroup_size);

   if (ctx.stage == tess_control_hs)
      setup_tcs_info(&ctx, shaders[0], NULL);
   else if (ctx.stage == vertex_tess_control_hs)
      setup_tcs_info(&ctx, shaders[1], shaders[0]);

   calc_min_waves(program);

   unsigned scratch_size = 0;
   if (program->stage == gs_copy_vs) {
      assert(shader_count == 1);
      setup_vs_output_info(&ctx, shaders[0], &args->shader_info->vs.outinfo);
   } else {
      for (unsigned i = 0; i < shader_count; i++) {
         nir_shader* nir = shaders[i];
         setup_nir(&ctx, nir);
      }

      for (unsigned i = 0; i < shader_count; i++)
         scratch_size = std::max(scratch_size, shaders[i]->scratch_size);
   }

   ctx.program->config->scratch_bytes_per_wave = align(scratch_size * ctx.program->wave_size, 1024);

   ctx.block = ctx.program->create_and_insert_block();
   ctx.block->kind = block_kind_top_level;

   return ctx;
}

} // namespace aco