/*
 * Copyright © 2020 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

/*
 * Optimizes atomics (with uniform offsets) using subgroup operations to ensure
 * only one atomic operation is done per subgroup. So res = atomicAdd(addr, 1)
 * would become something like:
 *
 * uint tmp = subgroupAdd(1);
 * uint res;
 * if (subgroupElect())
 *    res = atomicAdd(addr, tmp);
 * res = subgroupBroadcastFirst(res) + subgroupExclusiveAdd(1);
 *
 * This pass requires and preserves LCSSA and divergence information.
 */

#include "nir/nir.h"
#include "nir/nir_builder.h"

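/* Maps a supported atomic intrinsic to the ALU opcode that performs the same
 * reduction and reports which sources hold the offset/coordinate and the data.
 * Returns nir_num_opcodes for intrinsics this pass does not handle.
 */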
static nir_op
parse_atomic_op(nir_intrinsic_op op, unsigned *offset_src, unsigned *data_src)
{
   switch (op) {
   #define OP_NOIMG(intrin, alu) \
   case nir_intrinsic_ssbo_atomic_##intrin: \
      *offset_src = 1; \
      *data_src = 2; \
      return nir_op_##alu; \
   case nir_intrinsic_shared_atomic_##intrin: \
   case nir_intrinsic_global_atomic_##intrin: \
   case nir_intrinsic_deref_atomic_##intrin: \
      *offset_src = 0; \
      *data_src = 1; \
      return nir_op_##alu;
   #define OP(intrin, alu) \
   OP_NOIMG(intrin, alu) \
   case nir_intrinsic_image_deref_atomic_##intrin: \
   case nir_intrinsic_image_atomic_##intrin: \
   case nir_intrinsic_bindless_image_atomic_##intrin: \
      *offset_src = 1; \
      *data_src = 3; \
      return nir_op_##alu;
   OP(add, iadd)
   OP(imin, imin)
   OP(umin, umin)
   OP(imax, imax)
   OP(umax, umax)
   OP(and, iand)
   OP(or, ior)
   OP(xor, ixor)
   OP(fadd, fadd)
   OP_NOIMG(fmin, fmin)
   OP_NOIMG(fmax, fmax)
   #undef OP_NOIMG
   #undef OP
   default:
      return nir_num_opcodes;
   }
}

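/* Returns a bitmask describing which invocation-index dimensions a divergent
 * value is derived from: bits 0-2 for the x/y/z components of the local or
 * global invocation ID and bit 3 (0x8) for the subgroup invocation index.
 * Returns 0 if the value cannot be traced back to such a source.
 */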
static unsigned
get_dim(nir_ssa_scalar scalar)
{
   if (!scalar.def->divergent)
      return 0;

   if (scalar.def->parent_instr->type == nir_instr_type_intrinsic) {
      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(scalar.def->parent_instr);
      if (intrin->intrinsic == nir_intrinsic_load_subgroup_invocation)
         return 0x8;
      else if (intrin->intrinsic == nir_intrinsic_load_local_invocation_index)
         return 0x7;
      else if (intrin->intrinsic == nir_intrinsic_load_local_invocation_id)
         return 1 << scalar.comp;
      else if (intrin->intrinsic == nir_intrinsic_load_global_invocation_index)
         return 0x7;
      else if (intrin->intrinsic == nir_intrinsic_load_global_invocation_id)
         return 1 << scalar.comp;
   } else if (nir_ssa_scalar_is_alu(scalar)) {
      if (nir_ssa_scalar_alu_op(scalar) == nir_op_iadd ||
          nir_ssa_scalar_alu_op(scalar) == nir_op_imul) {
         nir_ssa_scalar src0 = nir_ssa_scalar_chase_alu_src(scalar, 0);
         nir_ssa_scalar src1 = nir_ssa_scalar_chase_alu_src(scalar, 1);

         unsigned src0_dim = get_dim(src0);
         if (!src0_dim && src0.def->divergent)
            return 0;
         unsigned src1_dim = get_dim(src1);
         if (!src1_dim && src1.def->divergent)
            return 0;

         return src0_dim | src1_dim;
      } else if (nir_ssa_scalar_alu_op(scalar) == nir_op_ishl) {
         nir_ssa_scalar src0 = nir_ssa_scalar_chase_alu_src(scalar, 0);
         nir_ssa_scalar src1 = nir_ssa_scalar_chase_alu_src(scalar, 1);
         return src1.def->divergent ? 0 : get_dim(src0);
      }
   }

   return 0;
}

/* Returns a bitmask of invocation indices that are compared against a subgroup
 * uniform value.
 */
static unsigned
match_invocation_comparison(nir_ssa_scalar scalar)
{
   bool is_alu = nir_ssa_scalar_is_alu(scalar);
   if (is_alu && nir_ssa_scalar_alu_op(scalar) == nir_op_iand) {
      return match_invocation_comparison(nir_ssa_scalar_chase_alu_src(scalar, 0)) |
             match_invocation_comparison(nir_ssa_scalar_chase_alu_src(scalar, 1));
   } else if (is_alu && nir_ssa_scalar_alu_op(scalar) == nir_op_ieq) {
      if (!nir_ssa_scalar_chase_alu_src(scalar, 0).def->divergent)
         return get_dim(nir_ssa_scalar_chase_alu_src(scalar, 1));
      if (!nir_ssa_scalar_chase_alu_src(scalar, 1).def->divergent)
         return get_dim(nir_ssa_scalar_chase_alu_src(scalar, 0));
   } else if (scalar.def->parent_instr->type == nir_instr_type_intrinsic) {
      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(scalar.def->parent_instr);
      if (intrin->intrinsic == nir_intrinsic_elect)
         return 0x8;
   }

   return 0;
}

/* Returns true if the intrinsic is already conditional so that at most one
 * invocation in the subgroup does the atomic.
 */
static bool
is_atomic_already_optimized(nir_shader *shader, nir_intrinsic_instr *instr)
{
   unsigned dims = 0;
   for (nir_cf_node *cf = &instr->instr.block->cf_node; cf; cf = cf->parent) {
      if (cf->type == nir_cf_node_if) {
         nir_block *first_then = nir_if_first_then_block(nir_cf_node_as_if(cf));
         nir_block *last_then = nir_if_last_then_block(nir_cf_node_as_if(cf));
         bool within_then = instr->instr.block->index >= first_then->index;
         within_then = within_then && instr->instr.block->index <= last_then->index;
         if (!within_then)
            continue;

         nir_ssa_scalar cond = {nir_cf_node_as_if(cf)->condition.ssa, 0};
         dims |= match_invocation_comparison(cond);
      }
   }

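   /* Determine which invocation-ID dimensions actually need to be covered:
    * a dimension of size 1 never diverges.
    */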
   unsigned dims_needed = 0;
   for (unsigned i = 0; i < 3; i++)
      dims_needed |= (shader->info.workgroup_size[i] > 1) << i;

   return (dims & dims_needed) == dims_needed || dims & 0x8;
}

/* Perform a reduction and/or exclusive scan. */
static void
reduce_data(nir_builder *b, nir_op op, nir_ssa_def *data,
            nir_ssa_def **reduce, nir_ssa_def **scan)
{
   if (scan) {
      *scan = nir_exclusive_scan(b, data, .reduction_op=op);
      if (reduce) {
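         /* Derive the reduction from the scan: the last invocation's
          * exclusive-scan value combined with its own data is the subgroup
          * total, read back from that invocation.
          */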
         nir_ssa_def *last_lane = nir_last_invocation(b);
         nir_ssa_def *res = nir_build_alu(b, op, *scan, data, NULL, NULL);
         *reduce = nir_read_invocation(b, res, last_lane);
      }
   } else {
      *reduce = nir_reduce(b, data, .reduction_op=op);
   }
}

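/* Rewrites the atomic so that the data is first combined across the subgroup
 * and a single elected invocation performs the atomic with the combined value.
 * If return_prev is set, returns the value each invocation's original atomic
 * would have returned; otherwise returns NULL.
 */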
static nir_ssa_def *
optimize_atomic(nir_builder *b, nir_intrinsic_instr *intrin, bool return_prev)
{
   unsigned offset_src, data_src;
   nir_op op = parse_atomic_op(intrin->intrinsic, &offset_src, &data_src);
   nir_ssa_def *data = intrin->src[data_src].ssa;

   /* For uniform data, a separate reduction and scan is faster than a combined
    * scan+reduce, so only combine them when the data is divergent and the
    * previous value is needed.
    */
   bool combined_scan_reduce = return_prev && data->divergent;
   nir_ssa_def *reduce = NULL, *scan = NULL;
   reduce_data(b, op, data, &reduce, combined_scan_reduce ? &scan : NULL);

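   /* Feed the subgroup-wide combined value into the atomic instead of the
    * per-invocation data.
    */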
   nir_instr_rewrite_src(&intrin->instr, &intrin->src[data_src], nir_src_for_ssa(reduce));
   nir_update_instr_divergence(b->shader, &intrin->instr);

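   /* Move the atomic into an if so that only one elected invocation per
    * subgroup executes it.
    */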
   nir_ssa_def *cond = nir_elect(b, 1);

   nir_if *nif = nir_push_if(b, cond);

   nir_instr_remove(&intrin->instr);
   nir_builder_instr_insert(b, &intrin->instr);

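   /* Reconstruct each invocation's result: broadcast the value returned to the
    * elected invocation and combine it with this invocation's exclusive scan of
    * the original data.
    */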
   if (return_prev) {
      nir_push_else(b, nif);

      nir_ssa_def *undef = nir_ssa_undef(b, 1, intrin->dest.ssa.bit_size);

      nir_pop_if(b, nif);
      nir_ssa_def *result = nir_if_phi(b, &intrin->dest.ssa, undef);
      result = nir_read_first_invocation(b, result);

      if (!combined_scan_reduce)
         reduce_data(b, op, data, NULL, &scan);

      return nir_build_alu(b, op, result, scan, NULL, NULL);
   } else {
      nir_pop_if(b, nif);
      return NULL;
   }
}

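/* Wraps optimize_atomic(): keeps fragment-shader helper invocations out of the
 * transformation, gives the intrinsic a fresh destination and rewrites all uses
 * of the original result with the reconstructed per-invocation value.
 */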
static void
optimize_and_rewrite_atomic(nir_builder *b, nir_intrinsic_instr *intrin)
{
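   /* Helper invocations must not affect memory or contribute to the combined
    * value, so guard the whole transformation behind !is_helper_invocation.
    */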
   nir_if *helper_nif = NULL;
   if (b->shader->info.stage == MESA_SHADER_FRAGMENT) {
      nir_ssa_def *helper = nir_is_helper_invocation(b, 1);
      helper_nif = nir_push_if(b, nir_inot(b, helper));
   }

   ASSERTED bool original_result_divergent = intrin->dest.ssa.divergent;
   bool return_prev = !nir_ssa_def_is_unused(&intrin->dest.ssa);

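   /* Detach the uses of the original destination and give the intrinsic a
    * fresh one; the reconstructed result computed below replaces the old uses.
    */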
   nir_ssa_def old_result = intrin->dest.ssa;
   list_replace(&intrin->dest.ssa.uses, &old_result.uses);
   list_replace(&intrin->dest.ssa.if_uses, &old_result.if_uses);
   nir_ssa_dest_init(&intrin->instr, &intrin->dest, 1, intrin->dest.ssa.bit_size, NULL);

   nir_ssa_def *result = optimize_atomic(b, intrin, return_prev);

   if (helper_nif) {
      nir_push_else(b, helper_nif);
      nir_ssa_def *undef = result ? nir_ssa_undef(b, 1, result->bit_size) : NULL;
      nir_pop_if(b, helper_nif);
      if (result)
         result = nir_if_phi(b, result, undef);
   }

   if (result) {
      assert(result->divergent == original_result_divergent);
      nir_ssa_def_rewrite_uses(&old_result, result);
   }
}

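/* Rewrites every supported atomic in the function whose offset is
 * subgroup-uniform and that is not already guarded to a single invocation.
 */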
static bool
opt_uniform_atomics(nir_function_impl *impl)
{
   bool progress = false;
   nir_builder b;
   nir_builder_init(&b, impl);
   b.update_divergence = true;

   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         unsigned offset_src, data_src;
         if (parse_atomic_op(intrin->intrinsic, &offset_src, &data_src) == nir_num_opcodes)
            continue;

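         /* The optimization is only correct when every invocation in the
          * subgroup targets the same offset.
          */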
         if (nir_src_is_divergent(intrin->src[offset_src]))
            continue;

         if (is_atomic_already_optimized(b.shader, intrin))
            continue;

         b.cursor = nir_before_instr(instr);
         optimize_and_rewrite_atomic(&b, intrin);
         progress = true;
      }
   }

   return progress;
}

bool
nir_opt_uniform_atomics(nir_shader *shader)
{
   bool progress = false;

   /* A 1x1x1 workgroup only ever has one active lane, so there's no point in
    * optimizing any atomics.
    */
   if (gl_shader_stage_uses_workgroup(shader->info.stage) &&
       !shader->info.workgroup_size_variable &&
       shader->info.workgroup_size[0] == 1 && shader->info.workgroup_size[1] == 1 &&
       shader->info.workgroup_size[2] == 1)
      return false;

   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      if (opt_uniform_atomics(function->impl)) {
         progress = true;
         nir_metadata_preserve(function->impl, nir_metadata_none);
      } else {
         nir_metadata_preserve(function->impl, nir_metadata_all);
      }
   }

   return progress;
}