/*
 * Copyright © 2019 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "util/u_math.h"
#include "ir3_compiler.h"
#include "ir3_nir.h"

static inline bool
get_ubo_load_range(nir_shader *nir, nir_intrinsic_instr *instr,
                   uint32_t alignment, struct ir3_ubo_range *r)
{
   uint32_t offset = nir_intrinsic_range_base(instr);
   uint32_t size = nir_intrinsic_range(instr);

   /* If the offset is constant, the range is trivial (and NIR may not have
    * figured it out).
    */
   if (nir_src_is_const(instr->src[1])) {
      offset = nir_src_as_uint(instr->src[1]);
      size = nir_intrinsic_dest_components(instr) * 4;
   }

   /* If we haven't figured out the range accessed in the UBO, bail. */
   if (size == ~0)
      return false;

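   /* Worked example (hypothetical numbers): with alignment == 1 the upload
    * granule is one vec4 (16 bytes), so a load at byte offset 20 with size 8
    * gives start = ROUND_DOWN_TO(20, 16) = 16 and end = ALIGN(28, 16) = 32.
    */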
   r->start = ROUND_DOWN_TO(offset, alignment * 16);
   r->end = ALIGN(offset + size, alignment * 16);

   return true;
}

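/* Determines which UBO block a load reads from, handling both the direct
 * constant-index case and the bindless case.  Returns false if the block
 * can't be identified at compile time.
 */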
static bool
get_ubo_info(nir_intrinsic_instr *instr, struct ir3_ubo_info *ubo)
{
   if (nir_src_is_const(instr->src[0])) {
      ubo->block = nir_src_as_uint(instr->src[0]);
      ubo->bindless_base = 0;
      ubo->bindless = false;
      return true;
   } else {
      nir_intrinsic_instr *rsrc = ir3_bindless_resource(instr->src[0]);
      if (rsrc && nir_src_is_const(rsrc->src[0])) {
         ubo->block = nir_src_as_uint(rsrc->src[0]);
         ubo->bindless_base = nir_intrinsic_desc_set(rsrc);
         ubo->bindless = true;
         return true;
      }
   }
   return false;
}

/**
 * Finds the given instruction's UBO load in the UBO upload plan, if any.
 */
static const struct ir3_ubo_range *
get_existing_range(nir_intrinsic_instr *instr,
                   const struct ir3_ubo_analysis_state *state,
                   struct ir3_ubo_range *r)
{
   struct ir3_ubo_info ubo = {};

   if (!get_ubo_info(instr, &ubo))
      return NULL;

   for (int i = 0; i < state->num_enabled; i++) {
      const struct ir3_ubo_range *range = &state->range[i];
      if (!memcmp(&range->ubo, &ubo, sizeof(ubo)) && r->start >= range->start &&
          r->end <= range->end) {
         return range;
      }
   }

   return NULL;
}

/**
 * Merges together neighboring/overlapping ranges in the range plan with a
 * newly updated range.
 */
static void
merge_neighbors(struct ir3_ubo_analysis_state *state, int index)
{
   struct ir3_ubo_range *a = &state->range[index];

   /* index is always the first slot that would have neighbored/overlapped with
    * the new range.
    */
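   /* For example (hypothetical numbers): if range[index] was just grown to
    * [16, 80) and a later range for the same UBO covers [80, 96), the loop
    * below folds it in to give [16, 96), then compacts the array by moving
    * the last enabled range into the freed slot.
    */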
   for (int i = index + 1; i < state->num_enabled; i++) {
      struct ir3_ubo_range *b = &state->range[i];
      if (memcmp(&a->ubo, &b->ubo, sizeof(a->ubo)))
         continue;

      if (a->start > b->end || a->end < b->start)
         continue;

      /* Merge B into A. */
      a->start = MIN2(a->start, b->start);
      a->end = MAX2(a->end, b->end);

      /* Swap the last enabled range into B's now unused slot */
      *b = state->range[--state->num_enabled];
   }
}

/**
 * During the first pass over the shader, makes the plan of which UBO upload
 * should include the range covering this UBO load.
 *
 * We are passed in an upload_remaining of how much space is left for us in
 * the const file, and we make sure our plan doesn't exceed that.
 */
static void
gather_ubo_ranges(nir_shader *nir, nir_intrinsic_instr *instr,
                  struct ir3_ubo_analysis_state *state, uint32_t alignment,
                  uint32_t *upload_remaining)
{
   if (ir3_shader_debug & IR3_DBG_NOUBOOPT)
      return;

   struct ir3_ubo_info ubo = {};
   if (!get_ubo_info(instr, &ubo))
      return;

   struct ir3_ubo_range r;
   if (!get_ubo_load_range(nir, instr, alignment, &r))
      return;

   /* See if there's an existing range for this UBO we want to merge into. */
   for (int i = 0; i < state->num_enabled; i++) {
      struct ir3_ubo_range *plan_r = &state->range[i];
      if (memcmp(&plan_r->ubo, &ubo, sizeof(ubo)))
         continue;

      /* Don't extend existing uploads unless they're
       * neighboring/overlapping.
       */
      if (r.start > plan_r->end || r.end < plan_r->start)
         continue;

      r.start = MIN2(r.start, plan_r->start);
      r.end = MAX2(r.end, plan_r->end);

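      /* Only the growth is charged against the budget.  E.g. (hypothetical
       * numbers), extending an existing [16, 48) plan to cover a new
       * [48, 80) load adds (16 - 16) + (80 - 48) = 32 bytes.
       */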
      uint32_t added = (plan_r->start - r.start) + (r.end - plan_r->end);
      if (added >= *upload_remaining)
         return;

      plan_r->start = r.start;
      plan_r->end = r.end;
      *upload_remaining -= added;

      merge_neighbors(state, i);
      return;
   }

   if (state->num_enabled == ARRAY_SIZE(state->range))
      return;

   uint32_t added = r.end - r.start;
   if (added >= *upload_remaining)
      return;

   struct ir3_ubo_range *plan_r = &state->range[state->num_enabled++];
   plan_r->ubo = ubo;
   plan_r->start = r.start;
   plan_r->end = r.end;
   *upload_remaining -= added;
}

/* For indirect offset, it is common to see a pattern of multiple
 * loads with the same base, but different constant offset, ie:
 *
 *    vec1 32 ssa_33 = iadd ssa_base, const_offset
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_33) (base=N, 0, 0)
 *
 * Detect this, and peel out the const_offset part, to end up with:
 *
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_base) (base=N+const_offset, 0, 0)
 *
 * Or similarly:
 *
 *    vec1 32 ssa_33 = imad24_ir3 a, b, const_offset
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_33) (base=N, 0, 0)
 *
 * Can be converted to:
 *
 *    vec1 32 ssa_base = imul24 a, b
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_base) (base=N+const_offset, 0, 0)
 *
 * This gives the other opt passes something much easier to work
 * with (ie. not requiring value range tracking)
 */
static void
handle_partial_const(nir_builder *b, nir_ssa_def **srcp, int *offp)
{
   if ((*srcp)->parent_instr->type != nir_instr_type_alu)
      return;

   nir_alu_instr *alu = nir_instr_as_alu((*srcp)->parent_instr);

   if (alu->op == nir_op_imad24_ir3) {
      /* This case is slightly more complicated as we need to
       * replace the imad24_ir3 with an imul24:
       */
      if (!nir_src_is_const(alu->src[2].src))
         return;

      *offp += nir_src_as_uint(alu->src[2].src);
      *srcp = nir_imul24(b, nir_ssa_for_alu_src(b, alu, 0),
                         nir_ssa_for_alu_src(b, alu, 1));

      return;
   }

   if (alu->op != nir_op_iadd)
      return;

   if (!(alu->src[0].src.is_ssa && alu->src[1].src.is_ssa))
      return;

   if (nir_src_is_const(alu->src[0].src)) {
      *offp += nir_src_as_uint(alu->src[0].src);
      *srcp = alu->src[1].src.ssa;
   } else if (nir_src_is_const(alu->src[1].src)) {
      *srcp = alu->src[0].src.ssa;
      *offp += nir_src_as_uint(alu->src[1].src);
   }
}

/* Tracks the maximum bindful UBO accessed so that we reduce the UBO
 * descriptors emitted in the fast path for GL.
 */
static void
track_ubo_use(nir_intrinsic_instr *instr, nir_builder *b, int *num_ubos)
{
   if (ir3_bindless_resource(instr->src[0])) {
      assert(!b->shader->info.first_ubo_is_default_ubo); /* only set for GL */
      return;
   }

   if (nir_src_is_const(instr->src[0])) {
      int block = nir_src_as_uint(instr->src[0]);
      *num_ubos = MAX2(*num_ubos, block + 1);
   } else {
      *num_ubos = b->shader->info.num_ubos;
   }
}

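/* Rewrites a load_ubo into a load_uniform from the const file if its range
 * made it into the upload plan, and otherwise leaves it as a UBO access,
 * just tracking the use for GL's descriptor accounting.
 */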
static bool
lower_ubo_load_to_uniform(nir_intrinsic_instr *instr, nir_builder *b,
                          const struct ir3_ubo_analysis_state *state,
                          int *num_ubos, uint32_t alignment)
{
   b->cursor = nir_before_instr(&instr->instr);

   struct ir3_ubo_range r;
   if (!get_ubo_load_range(b->shader, instr, alignment, &r)) {
      track_ubo_use(instr, b, num_ubos);
      return false;
   }

   /* We don't lower dynamic block index UBO loads to load_uniform, but we
    * could probably with some effort determine a block stride in number of
    * registers.
    */
   const struct ir3_ubo_range *range = get_existing_range(instr, state, &r);
   if (!range) {
      track_ubo_use(instr, b, num_ubos);
      return false;
   }

   nir_ssa_def *ubo_offset = nir_ssa_for_src(b, instr->src[1], 1);
   int const_offset = 0;

   handle_partial_const(b, &ubo_offset, &const_offset);

   /* UBO offset is in bytes, but uniform offset is in units of
    * dwords, so we need to divide by 4 (right-shift by 2). For ldc the
    * offset is in units of 16 bytes, so we need to multiply by 4. And
    * also the same for the constant part of the offset:
    */
   const int shift = -2;
   nir_ssa_def *new_offset =
      ir3_nir_try_propagate_bit_shift(b, ubo_offset, shift);
   nir_ssa_def *uniform_offset = NULL;
   if (new_offset) {
      uniform_offset = new_offset;
   } else {
      uniform_offset = shift > 0
                          ? nir_ishl(b, ubo_offset, nir_imm_int(b, shift))
                          : nir_ushr(b, ubo_offset, nir_imm_int(b, -shift));
   }

   debug_assert(!(const_offset & 0x3));
   const_offset >>= 2;

   const int range_offset = ((int)range->offset - (int)range->start) / 4;
   const_offset += range_offset;
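
   /* E.g. (hypothetical numbers): a range covering bytes [64, 192) of the
    * UBO that was planned at const-file byte offset 128 gives range_offset =
    * (128 - 64) / 4 = 16 dwords, folded into the load_uniform base below.
    */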

   /* The range_offset can be negative: if only part of the UBO block is
    * accessed, range->start can be greater than range->offset.  But we
    * can't underflow const_offset, so if necessary we insert nir
    * instructions to compensate (which can hopefully be optimized away).
    */
   if (const_offset < 0) {
      uniform_offset = nir_iadd_imm(b, uniform_offset, const_offset);
      const_offset = 0;
   }

   nir_ssa_def *uniform =
      nir_load_uniform(b, instr->num_components, instr->dest.ssa.bit_size,
                       uniform_offset, .base = const_offset);

   nir_ssa_def_rewrite_uses(&instr->dest.ssa, uniform);

   nir_instr_remove(&instr->instr);

   return true;
}

static bool
instr_is_load_ubo(nir_instr *instr)
{
   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_op op = nir_instr_as_intrinsic(instr)->intrinsic;

   /* nir_lower_ubo_vec4 happens after this pass. */
   assert(op != nir_intrinsic_load_ubo_vec4);

   return op == nir_intrinsic_load_ubo;
}

void
ir3_nir_analyze_ubo_ranges(nir_shader *nir, struct ir3_shader_variant *v)
{
   struct ir3_const_state *const_state = ir3_const_state(v);
   struct ir3_ubo_analysis_state *state = &const_state->ubo_state;
   struct ir3_compiler *compiler = v->shader->compiler;

   /* Limit our uploads to the amount of constant buffer space available in
    * the hardware, minus what the shader compiler may need for various
    * driver params.  We do this UBO-to-push-constant before the real
    * allocation of the driver params' const space, because UBO pointers can
    * be driver params but this pass usually eliminates them.
    */
   struct ir3_const_state worst_case_const_state = {};
   ir3_setup_const_state(nir, v, &worst_case_const_state);
   const uint32_t max_upload =
      (ir3_max_const(v) - worst_case_const_state.offsets.immediate) * 16;

   memset(state, 0, sizeof(*state));

   uint32_t upload_remaining = max_upload;
   nir_foreach_function (function, nir) {
      if (function->impl) {
         nir_foreach_block (block, function->impl) {
            nir_foreach_instr (instr, block) {
               if (instr_is_load_ubo(instr))
                  gather_ubo_ranges(nir, nir_instr_as_intrinsic(instr), state,
                                    compiler->const_upload_unit,
                                    &upload_remaining);
            }
         }
      }
   }

   /* For now, everything we upload is accessed statically and thus will be
    * used by the shader. Once we can upload dynamically indexed data, we may
    * upload sparsely accessed arrays, at which point we probably want to
    * give priority to smaller UBOs, on the assumption that big UBOs will be
    * accessed dynamically.  Alternatively, we can track statically and
    * dynamically accessed ranges separately and upload static ranges
    * first.
    */

   uint32_t offset = v->shader->num_reserved_user_consts * 16;
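
   /* E.g. (hypothetical numbers): with 8 vec4s of reserved user consts the
    * first range lands at byte offset 128; if range[0] spans 64 bytes,
    * range[1] then starts at offset 192, and so on, packed back to back.
    */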
   for (uint32_t i = 0; i < state->num_enabled; i++) {
      uint32_t range_size = state->range[i].end - state->range[i].start;

      debug_assert(offset <= max_upload);
      state->range[i].offset = offset;
      offset += range_size;
   }
   state->size = offset;
}

bool
ir3_nir_lower_ubo_loads(nir_shader *nir, struct ir3_shader_variant *v)
{
   struct ir3_compiler *compiler = v->shader->compiler;
   /* For the binning-pass variant, we re-use the corresponding draw-pass
    * variant's const_state and UBO state.  To make this clear, in this
    * pass they are const (read-only).
    */
   const struct ir3_const_state *const_state = ir3_const_state(v);
   const struct ir3_ubo_analysis_state *state = &const_state->ubo_state;

   int num_ubos = 0;
   bool progress = false;
   nir_foreach_function (function, nir) {
      if (function->impl) {
         nir_builder builder;
         nir_builder_init(&builder, function->impl);
         nir_foreach_block (block, function->impl) {
            nir_foreach_instr_safe (instr, block) {
               if (!instr_is_load_ubo(instr))
                  continue;
               progress |= lower_ubo_load_to_uniform(
                  nir_instr_as_intrinsic(instr), &builder, state, &num_ubos,
                  compiler->const_upload_unit);
            }
         }

         nir_metadata_preserve(
            function->impl, nir_metadata_block_index | nir_metadata_dominance);
      }
   }

   /* Update the num_ubos field for GL (first_ubo_is_default_ubo).  With
    * Vulkan's bindless, we don't use the num_ubos field, so we can leave it
    * incremented.
    */
   if (nir->info.first_ubo_is_default_ubo)
      nir->info.num_ubos = num_ubos;

   return progress;
}

static bool
fixup_load_uniform_filter(const nir_instr *instr, const void *arg)
{
   if (instr->type != nir_instr_type_intrinsic)
      return false;
   return nir_instr_as_intrinsic(instr)->intrinsic ==
          nir_intrinsic_load_uniform;
}

static nir_ssa_def *
fixup_load_uniform_instr(struct nir_builder *b, nir_instr *instr, void *arg)
{
   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

   /* We don't need to worry about the non-indirect case: */
   if (nir_src_is_const(intr->src[0]))
      return NULL;

   const unsigned base_offset_limit = (1 << 9); /* 9 bits */
   unsigned base_offset = nir_intrinsic_base(intr);

   /* Nor cases where the base offset is below the hw limit: */
   if (base_offset < base_offset_limit)
      return NULL;

   b->cursor = nir_before_instr(instr);

   nir_ssa_def *offset = nir_ssa_for_src(b, intr->src[0], 1);

   /* We'd like to avoid a sequence like:
    *
    *   vec4 32 ssa_18 = intrinsic load_uniform (ssa_4) (1024, 0, 0)
    *   vec4 32 ssa_19 = intrinsic load_uniform (ssa_4) (1072, 0, 0)
    *   vec4 32 ssa_20 = intrinsic load_uniform (ssa_4) (1120, 0, 0)
    *
    * from turning into a unique offset value (which requires reloading
    * a0.x for each instruction).  So instead of just adding the constant
    * base_offset to the non-const offset, be a bit more clever and only
    * extract the part that cannot be encoded.  Afterwards CSE should
    * turn the result into:
    *
    *   vec1 32 ssa_5 = load_const (1024)
    *   vec1 32 ssa_6 = iadd ssa_4, ssa_5
    *   vec4 32 ssa_18 = intrinsic load_uniform (ssa_6) (0, 0, 0)
    *   vec4 32 ssa_19 = intrinsic load_uniform (ssa_6) (48, 0, 0)
    *   vec4 32 ssa_20 = intrinsic load_uniform (ssa_6) (96, 0, 0)
    */
   unsigned new_base_offset = base_offset % base_offset_limit;

   nir_intrinsic_set_base(intr, new_base_offset);
   offset = nir_iadd_imm(b, offset, base_offset - new_base_offset);

   nir_instr_rewrite_src(instr, &intr->src[0], nir_src_for_ssa(offset));

   return NIR_LOWER_INSTR_PROGRESS;
}

/**
 * For relative CONST file access, we can only encode 10b worth of fixed offset,
 * so in cases where the base offset is larger, we need to peel it out into
 * ALU instructions.
 *
 * This should run late, after constant folding has had a chance to do its
 * thing, so we can actually know if it is an indirect uniform offset or not.
 */
bool
ir3_nir_fixup_load_uniform(nir_shader *nir)
{
   return nir_shader_lower_instructions(nir, fixup_load_uniform_filter,
                                        fixup_load_uniform_instr, NULL);
}

static nir_ssa_def *
ir3_nir_lower_load_const_instr(nir_builder *b, nir_instr *in_instr, void *data)
{
   struct ir3_const_state *const_state = data;
   nir_intrinsic_instr *instr = nir_instr_as_intrinsic(in_instr);

   /* Pick a UBO index to use as our constant data.  Skip UBO 0 since that's
    * reserved for gallium's cb0.
    */
   if (const_state->constant_data_ubo == -1) {
      if (b->shader->info.num_ubos == 0)
         b->shader->info.num_ubos++;
      const_state->constant_data_ubo = b->shader->info.num_ubos++;
   }

   unsigned num_components = instr->num_components;
   if (nir_dest_bit_size(instr->dest) == 16) {
      /* We can't do 16b loads -- either from LDC (32-bit only in any of our
       * traces, and disasm that doesn't look like it really supports it) or
       * from the constant file (where CONSTANT_DEMOTION_ENABLE means we get
       * automatic 32b-to-16b conversions when we ask for 16b from it).
       * Instead, we'll load 32b from a UBO and unpack from there.
       */
      num_components = DIV_ROUND_UP(num_components, 2);
   }
   unsigned base = nir_intrinsic_base(instr);
   nir_ssa_def *index = nir_imm_int(b, const_state->constant_data_ubo);
   nir_ssa_def *offset =
      nir_iadd_imm(b, nir_ssa_for_src(b, instr->src[0], 1), base);

   nir_ssa_def *result =
      nir_load_ubo(b, num_components, 32, index, offset,
                   .align_mul = nir_intrinsic_align_mul(instr),
                   .align_offset = nir_intrinsic_align_offset(instr),
                   .range_base = base, .range = nir_intrinsic_range(instr));

   if (nir_dest_bit_size(instr->dest) == 16) {
      result = nir_bitcast_vector(b, result, 16);
      result = nir_channels(b, result, BITSET_MASK(instr->num_components));
   }

   return result;
}

static bool
ir3_lower_load_const_filter(const nir_instr *instr, const void *data)
{
   return (instr->type == nir_instr_type_intrinsic &&
           nir_instr_as_intrinsic(instr)->intrinsic ==
              nir_intrinsic_load_constant);
}

/* Lowers load_constant intrinsics to UBO accesses so we can run them through
 * the general "upload to const file or leave as UBO access" code.
 */
bool
ir3_nir_lower_load_constant(nir_shader *nir, struct ir3_shader_variant *v)
{
   struct ir3_const_state *const_state = ir3_const_state(v);

   const_state->constant_data_ubo = -1;

   bool progress = nir_shader_lower_instructions(
      nir, ir3_lower_load_const_filter, ir3_nir_lower_load_const_instr,
      const_state);

   if (progress) {
      struct ir3_compiler *compiler = v->shader->compiler;

      /* Save a copy of the NIR constant data to the variant for
       * inclusion in the final assembly.
       */
      v->constant_data_size =
         align(nir->constant_data_size,
               compiler->const_upload_unit * 4 * sizeof(uint32_t));
      v->constant_data = rzalloc_size(v, v->constant_data_size);
      memcpy(v->constant_data, nir->constant_data, nir->constant_data_size);
   }

   return progress;
}