/*
 * Copyright © 2017 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "nir.h"
#include "nir_builder.h"

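/* Older MSVC C runtimes ship _snprintf rather than a C99 snprintf, hence
 * this fallback.
 */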
#if defined(_WIN32) && !defined(snprintf)
#define snprintf _snprintf
#endif

/*
 * Remap atomic counters to SSBOs, starting from the shader's next SSBO slot
 * (info.num_ssbos).
 */
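/*
 * For example (illustrative; assuming the shader declares no other SSBOs,
 * so ssbo_offset == 0), a counter declared as
 *
 *    layout(binding = 0) uniform atomic_uint counter;
 *
 * is replaced by an unsized uint SSBO at binding 0, and an
 * atomicCounterIncrement() on it becomes an ssbo_atomic_add of +1 on that
 * buffer.
 */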

static bool
lower_instr(nir_intrinsic_instr *instr, unsigned ssbo_offset, nir_builder *b)
{
   nir_intrinsic_op op;

   b->cursor = nir_before_instr(&instr->instr);

   switch (instr->intrinsic) {
   case nir_intrinsic_memory_barrier_atomic_counter:
      /* Atomic counters are now SSBOs so memoryBarrierAtomicCounter() is now
       * memoryBarrierBuffer().
       */
      instr->intrinsic = nir_intrinsic_memory_barrier_buffer;
      return true;

   case nir_intrinsic_atomic_counter_inc:
   case nir_intrinsic_atomic_counter_add:
   case nir_intrinsic_atomic_counter_pre_dec:
   case nir_intrinsic_atomic_counter_post_dec:
      /* inc and dec get remapped to add: */
      op = nir_intrinsic_ssbo_atomic_add;
      break;
   case nir_intrinsic_atomic_counter_read:
      op = nir_intrinsic_load_ssbo;
      break;
   case nir_intrinsic_atomic_counter_min:
      op = nir_intrinsic_ssbo_atomic_umin;
      break;
   case nir_intrinsic_atomic_counter_max:
      op = nir_intrinsic_ssbo_atomic_umax;
      break;
   case nir_intrinsic_atomic_counter_and:
      op = nir_intrinsic_ssbo_atomic_and;
      break;
   case nir_intrinsic_atomic_counter_or:
      op = nir_intrinsic_ssbo_atomic_or;
      break;
   case nir_intrinsic_atomic_counter_xor:
      op = nir_intrinsic_ssbo_atomic_xor;
      break;
   case nir_intrinsic_atomic_counter_exchange:
      op = nir_intrinsic_ssbo_atomic_exchange;
      break;
   case nir_intrinsic_atomic_counter_comp_swap:
      op = nir_intrinsic_ssbo_atomic_comp_swap;
      break;
   default:
      return false;
   }

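   /* The SSBO index for the new intrinsic is the counter's buffer binding
    * (carried in the intrinsic's base index), shifted past the SSBOs the
    * shader already declares.  This matches the bindings assigned to the
    * replacement SSBO variables below.
    */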
   nir_ssa_def *buffer = nir_imm_int(b, ssbo_offset + nir_intrinsic_base(instr));
   nir_ssa_def *temp = NULL;
   nir_intrinsic_instr *new_instr =
         nir_intrinsic_instr_create(b->shader, op);

   /* A couple of instructions need special handling, since they don't map
    * 1:1 onto the SSBO atomics:
    */
   switch (instr->intrinsic) {
   case nir_intrinsic_atomic_counter_inc:
      /* remapped to ssbo_atomic_add: { buffer_idx, offset, +1 } */
      temp = nir_imm_int(b, +1);
      new_instr->src[0] = nir_src_for_ssa(buffer);
      nir_src_copy(&new_instr->src[1], &instr->src[0]);
      new_instr->src[2] = nir_src_for_ssa(temp);
      break;
   case nir_intrinsic_atomic_counter_pre_dec:
   case nir_intrinsic_atomic_counter_post_dec:
      /* remapped to ssbo_atomic_add: { buffer_idx, offset, -1 } */
      /* NOTE: pre_dec has different return semantics, so its result is
       * adjusted after the new instruction is emitted below.
       */
      temp = nir_imm_int(b, -1);
      new_instr->src[0] = nir_src_for_ssa(buffer);
      nir_src_copy(&new_instr->src[1], &instr->src[0]);
      new_instr->src[2] = nir_src_for_ssa(temp);
      break;
   case nir_intrinsic_atomic_counter_read:
      /* remapped to load_ssbo: { buffer_idx, offset } */
      new_instr->src[0] = nir_src_for_ssa(buffer);
      nir_src_copy(&new_instr->src[1], &instr->src[0]);
      break;
   default:
      /* remapped to ssbo_atomic_x: { buffer_idx, offset, data, (compare)? } */
      new_instr->src[0] = nir_src_for_ssa(buffer);
      nir_src_copy(&new_instr->src[1], &instr->src[0]);
      nir_src_copy(&new_instr->src[2], &instr->src[1]);
      if (op == nir_intrinsic_ssbo_atomic_comp_swap ||
          op == nir_intrinsic_ssbo_atomic_fcomp_swap)
         nir_src_copy(&new_instr->src[3], &instr->src[2]);
      break;
   }

   if (new_instr->intrinsic == nir_intrinsic_load_ssbo) {
      nir_intrinsic_set_align(new_instr, 4, 0);

      /* We could be replacing an intrinsic with a fixed number of dest
       * components with one that has a variable number, so it's safest to
       * take the count from the dest:
       */
      new_instr->num_components = instr->dest.ssa.num_components;
   }

   nir_ssa_dest_init(&new_instr->instr, &new_instr->dest,
                     instr->dest.ssa.num_components,
                     instr->dest.ssa.bit_size, NULL);
   nir_instr_insert_before(&instr->instr, &new_instr->instr);
   nir_instr_remove(&instr->instr);

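   /* GLSL's atomicCounterDecrement() (pre_dec) returns the value *after*
    * the decrement, while ssbo_atomic_add returns the value *before* the
    * operation, so fold the -1 into the result once more.  post_dec and
    * inc return the pre-op value and need no fixup.
    */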
   if (instr->intrinsic == nir_intrinsic_atomic_counter_pre_dec) {
      b->cursor = nir_after_instr(&new_instr->instr);
      nir_ssa_def *result = nir_iadd(b, &new_instr->dest.ssa, temp);
      nir_ssa_def_rewrite_uses(&instr->dest.ssa, result);
   } else {
      nir_ssa_def_rewrite_uses(&instr->dest.ssa, &new_instr->dest.ssa);
   }

   return true;
}

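/* GLSL allows arrays of atomic counters, e.g.
 *
 *    layout(binding = 0) uniform atomic_uint counters[4];
 *
 * and with arrays-of-arrays support they can nest, so look through array
 * types when checking for atomic_uint.
 */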
static bool
is_atomic_uint(const struct glsl_type *type)
{
   if (glsl_get_base_type(type) == GLSL_TYPE_ARRAY)
      return is_atomic_uint(glsl_get_array_element(type));
   return glsl_get_base_type(type) == GLSL_TYPE_ATOMIC_UINT;
}

bool
nir_lower_atomics_to_ssbo(nir_shader *shader)
{
   unsigned ssbo_offset = shader->info.num_ssbos;
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl) {
         nir_builder builder;
         nir_builder_init(&builder, function->impl);
         nir_foreach_block(block, function->impl) {
            nir_foreach_instr_safe(instr, block) {
               if (instr->type == nir_instr_type_intrinsic)
                  progress |= lower_instr(nir_instr_as_intrinsic(instr),
                                          ssbo_offset, &builder);
            }
         }

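         /* Only instructions were rewritten in place; the CFG is untouched,
          * so block indices and dominance information remain valid.
          */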
         nir_metadata_preserve(function->impl, nir_metadata_block_index |
                                               nir_metadata_dominance);
      }
   }

   if (progress) {
      /* Replace the atomic_uint uniforms with SSBOs: */
      unsigned replaced = 0;
      nir_foreach_uniform_variable_safe(var, shader) {
         if (is_atomic_uint(var->type)) {
            exec_node_remove(&var->node);

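            /* Several counters can share one binding (each at its own
             * offset), so create only a single SSBO per binding, tracked
             * in the 'replaced' bitmask.
             */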
            if (replaced & (1 << var->data.binding))
               continue;

            nir_variable *ssbo;
            char name[16];

            /* A length of 0 is used to denote unsized arrays */
            const struct glsl_type *type = glsl_array_type(glsl_uint_type(), 0, 0);

            snprintf(name, sizeof(name), "counter%d", var->data.binding);

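            /* Keep the counter's binding, shifted past the shader's
             * pre-existing SSBOs; this mirrors the buffer index computed in
             * lower_instr().
             */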
            ssbo = nir_variable_create(shader, nir_var_mem_ssbo, type, name);
            ssbo->data.binding = ssbo_offset + var->data.binding;
            ssbo->data.explicit_binding = var->data.explicit_binding;

            /* We can't use num_abos here, because it only counts the active
             * atomic counters and, unlike SSBOs, they currently aren't
             * compacted, so num_abos isn't a bound on the index passed to
             * nir_intrinsic_atomic_counter_*.  E.g. with a single atomic
             * counter declared like:
             *
             *    layout(binding = 1) uniform atomic_uint counter0;
             *
             * the lowered atomic_counter_* intrinsics will have 1 as the
             * index, but num_abos will still be 1, so it is not a valid
             * upper bound.
             */
            shader->info.num_ssbos = MAX2(shader->info.num_ssbos,
                                          ssbo->data.binding + 1);

            struct glsl_struct_field field = {
                  .type = type,
                  .name = "counters",
                  .location = -1,
            };

            ssbo->interface_type =
                  glsl_interface_type(&field, 1, GLSL_INTERFACE_PACKING_STD430,
                                      false, "counters");

            replaced |= (1 << var->data.binding);
         }
      }

      shader->info.num_abos = 0;
   }

   return progress;
}
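
/* Typical driver usage (illustrative):
 *
 *    NIR_PASS(progress, nir, nir_lower_atomics_to_ssbo);
 */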