/*
 * Copyright © 2014 Connor Abbott
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir_instr_set.h"
#include "nir_vla.h"
#include "util/half_float.h"

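/* Callbacks for nir_foreach_src()/nir_foreach_dest() below; each simply
 * reports whether the given source or destination is in SSA form.
 */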
static bool
src_is_ssa(nir_src *src, void *data)
{
   (void) data;
   return src->is_ssa;
}

static bool
dest_is_ssa(nir_dest *dest, void *data)
{
   (void) data;
   return dest->is_ssa;
}

static inline bool
instr_each_src_and_dest_is_ssa(const nir_instr *instr)
{
   if (!nir_foreach_dest((nir_instr *)instr, dest_is_ssa, NULL) ||
       !nir_foreach_src((nir_instr *)instr, src_is_ssa, NULL))
      return false;

   return true;
}

/* This function determines if uses of an instruction can safely be rewritten
 * to use another identical instruction instead. Note that this function must
 * be kept in sync with hash_instr() and nir_instrs_equal() -- only
 * instructions that pass this test will be handed on to those functions, and
 * conversely they must handle everything that this function returns true for.
 */
static bool
instr_can_rewrite(const nir_instr *instr)
{
   /* We only handle SSA. */
   assert(instr_each_src_and_dest_is_ssa(instr));

   switch (instr->type) {
   case nir_instr_type_alu:
   case nir_instr_type_deref:
   case nir_instr_type_tex:
   case nir_instr_type_load_const:
   case nir_instr_type_phi:
      return true;
   case nir_instr_type_intrinsic:
      return nir_intrinsic_can_reorder(nir_instr_as_intrinsic(instr));
   case nir_instr_type_call:
   case nir_instr_type_jump:
   case nir_instr_type_ssa_undef:
      return false;
   case nir_instr_type_parallel_copy:
   default:
      unreachable("Invalid instruction type");
   }

   return false;
}


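/* Accumulates the bytes of its second argument into the hash using FNV-1a.
 * Note that hashing a pointer (e.g. an SSA def) hashes the pointer value
 * itself, which is what we want here: two SSA sources are equal exactly when
 * they point to the same def.
 */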
#define HASH(hash, data) _mesa_fnv32_1a_accumulate((hash), (data))

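/* Hashes an SSA source by the identity of the def it points to. */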
static uint32_t
hash_src(uint32_t hash, const nir_src *src)
{
   assert(src->is_ssa);
   hash = HASH(hash, src->ssa);
   return hash;
}

static uint32_t
hash_alu_src(uint32_t hash, const nir_alu_src *src, unsigned num_components)
{
   hash = HASH(hash, src->abs);
   hash = HASH(hash, src->negate);

   for (unsigned i = 0; i < num_components; i++)
      hash = HASH(hash, src->swizzle[i]);

   hash = hash_src(hash, &src->src);
   return hash;
}

static uint32_t
hash_alu(uint32_t hash, const nir_alu_instr *instr)
{
   hash = HASH(hash, instr->op);
   hash = HASH(hash, instr->dest.dest.ssa.num_components);
   hash = HASH(hash, instr->dest.dest.ssa.bit_size);
   /* We explicitly don't hash instr->exact. */

   if (nir_op_infos[instr->op].algebraic_properties & NIR_OP_IS_COMMUTATIVE) {
      assert(nir_op_infos[instr->op].num_inputs == 2);
      uint32_t hash0 = hash_alu_src(hash, &instr->src[0],
                                    nir_ssa_alu_instr_src_components(instr, 0));
      uint32_t hash1 = hash_alu_src(hash, &instr->src[1],
                                    nir_ssa_alu_instr_src_components(instr, 1));
      /* For commutative operations, we need some commutative way of
       * combining the hashes.  One option would be to XOR them but that
       * means that anything with two identical sources will hash to 0 and
       * that's common enough we probably don't want the guaranteed
       * collision.  Either addition or multiplication will also work.
       */
      hash = hash0 * hash1;
   } else {
      for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
         hash = hash_alu_src(hash, &instr->src[i],
                             nir_ssa_alu_instr_src_components(instr, i));
      }
   }

   return hash;
}

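/* Hashes a deref instruction.  The deref type, mode, and result type always
 * participate; var derefs then hash the variable itself, and every other
 * deref hashes its parent source plus variant-specific data (struct index,
 * array index, or cast stride).  Everything hashed here is also compared in
 * nir_instrs_equal().
 */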
static uint32_t
hash_deref(uint32_t hash, const nir_deref_instr *instr)
{
   hash = HASH(hash, instr->deref_type);
   hash = HASH(hash, instr->mode);
   hash = HASH(hash, instr->type);

   if (instr->deref_type == nir_deref_type_var)
      return HASH(hash, instr->var);

   hash = hash_src(hash, &instr->parent);

   switch (instr->deref_type) {
   case nir_deref_type_struct:
      hash = HASH(hash, instr->strct.index);
      break;

   case nir_deref_type_array:
   case nir_deref_type_ptr_as_array:
      hash = hash_src(hash, &instr->arr.index);
      break;

   case nir_deref_type_cast:
      hash = HASH(hash, instr->cast.ptr_stride);
      break;

   case nir_deref_type_var:
   case nir_deref_type_array_wildcard:
      /* Nothing to do */
      break;

   default:
      unreachable("Invalid instruction deref type");
   }

   return hash;
}

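/* Hashes a load_const.  1-bit booleans are hashed one component at a time
 * because only the `b` member of each nir_const_value is meaningful for
 * them; the remaining bytes of the union may be undefined and must not
 * reach the hash.
 */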
static uint32_t
hash_load_const(uint32_t hash, const nir_load_const_instr *instr)
{
   hash = HASH(hash, instr->def.num_components);

   if (instr->def.bit_size == 1) {
      for (unsigned i = 0; i < instr->def.num_components; i++) {
         uint8_t b = instr->value[i].b;
         hash = HASH(hash, b);
      }
   } else {
      unsigned size = instr->def.num_components * sizeof(*instr->value);
      hash = _mesa_fnv32_1a_accumulate_block(hash, instr->value, size);
   }

   return hash;
}

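/* qsort() comparator that orders phi sources by their predecessor block,
 * giving hash_phi() a source order that is independent of how the sources
 * happen to be listed on the instruction.
 */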
static int
cmp_phi_src(const void *data1, const void *data2)
{
   nir_phi_src *src1 = *(nir_phi_src **)data1;
   nir_phi_src *src2 = *(nir_phi_src **)data2;
   /* Compare the pointers as integers: subtracting them directly could
    * overflow the int return type and break the ordering.
    */
   uintptr_t pred1 = (uintptr_t) src1->pred;
   uintptr_t pred2 = (uintptr_t) src2->pred;
   return (pred1 > pred2) - (pred1 < pred2);
}

static uint32_t
hash_phi(uint32_t hash, const nir_phi_instr *instr)
{
   hash = HASH(hash, instr->instr.block);

   /* sort sources by predecessor, since the order shouldn't matter */
   unsigned num_preds = instr->instr.block->predecessors->entries;
   NIR_VLA(nir_phi_src *, srcs, num_preds);
   unsigned i = 0;
   nir_foreach_phi_src(src, instr) {
      srcs[i++] = src;
   }

   qsort(srcs, num_preds, sizeof(nir_phi_src *), cmp_phi_src);

   for (i = 0; i < num_preds; i++) {
      hash = hash_src(hash, &srcs[i]->src);
      hash = HASH(hash, srcs[i]->pred);
   }

   return hash;
}

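/* Hashes a reorderable intrinsic.  The sources are left out of the hash;
 * that is fine because a hash may be coarser than nir_instrs_equal() as long
 * as equal instructions always hash identically, and the equality check
 * still compares every source.
 */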
static uint32_t
hash_intrinsic(uint32_t hash, const nir_intrinsic_instr *instr)
{
   const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
   hash = HASH(hash, instr->intrinsic);

   if (info->has_dest) {
      hash = HASH(hash, instr->dest.ssa.num_components);
      hash = HASH(hash, instr->dest.ssa.bit_size);
   }

   hash = _mesa_fnv32_1a_accumulate_block(hash, instr->const_index,
                                          info->num_indices
                                             * sizeof(instr->const_index[0]));
   return hash;
}

static uint32_t
hash_tex(uint32_t hash, const nir_tex_instr *instr)
{
   hash = HASH(hash, instr->op);
   hash = HASH(hash, instr->num_srcs);

   for (unsigned i = 0; i < instr->num_srcs; i++) {
      hash = HASH(hash, instr->src[i].src_type);
      hash = hash_src(hash, &instr->src[i].src);
   }

   hash = HASH(hash, instr->coord_components);
   hash = HASH(hash, instr->sampler_dim);
   hash = HASH(hash, instr->is_array);
   hash = HASH(hash, instr->is_shadow);
   hash = HASH(hash, instr->is_new_style_shadow);
   unsigned component = instr->component;
   hash = HASH(hash, component);
   for (unsigned i = 0; i < 4; ++i)
      for (unsigned j = 0; j < 2; ++j)
         hash = HASH(hash, instr->tg4_offsets[i][j]);
   hash = HASH(hash, instr->texture_index);
   hash = HASH(hash, instr->texture_array_size);
   hash = HASH(hash, instr->sampler_index);

   return hash;
}

/* Computes a hash of an instruction for use in a hash table. Note that this
 * will only work for instructions where instr_can_rewrite() returns true, and
 * it should return identical hashes for two instructions that are the same
 * according to nir_instrs_equal().
 */

static uint32_t
hash_instr(const void *data)
{
   const nir_instr *instr = data;
   uint32_t hash = _mesa_fnv32_1a_offset_bias;

   switch (instr->type) {
   case nir_instr_type_alu:
      hash = hash_alu(hash, nir_instr_as_alu(instr));
      break;
   case nir_instr_type_deref:
      hash = hash_deref(hash, nir_instr_as_deref(instr));
      break;
   case nir_instr_type_load_const:
      hash = hash_load_const(hash, nir_instr_as_load_const(instr));
      break;
   case nir_instr_type_phi:
      hash = hash_phi(hash, nir_instr_as_phi(instr));
      break;
   case nir_instr_type_intrinsic:
      hash = hash_intrinsic(hash, nir_instr_as_intrinsic(instr));
      break;
   case nir_instr_type_tex:
      hash = hash_tex(hash, nir_instr_as_tex(instr));
      break;
   default:
      unreachable("Invalid instruction type");
   }

   return hash;
}

bool
nir_srcs_equal(nir_src src1, nir_src src2)
{
   if (src1.is_ssa != src2.is_ssa)
      return false;

   if (src1.is_ssa)
      return src1.ssa == src2.ssa;

   if ((src1.reg.indirect == NULL) != (src2.reg.indirect == NULL))
      return false;

   if (src1.reg.indirect &&
       !nir_srcs_equal(*src1.reg.indirect, *src2.reg.indirect))
      return false;

   return src1.reg.reg == src2.reg.reg &&
          src1.reg.base_offset == src2.reg.base_offset;
}

/**
 * If \p s is an SSA value that was generated by a negation instruction,
 * that instruction is returned as a \c nir_alu_instr.  Otherwise \c NULL is
 * returned.
 */
static nir_alu_instr *
get_neg_instr(nir_src s)
{
   nir_alu_instr *alu = nir_src_as_alu_instr(s);

   return alu != NULL && (alu->op == nir_op_fneg || alu->op == nir_op_ineg)
          ? alu : NULL;
}

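/**
 * Compare two nir_const_value arrays component-wise, returning true if every
 * component of \p c1 is the negation of the corresponding component of
 * \p c2.  \p base_type must be a base type (see
 * nir_alu_type_get_base_type()).  1-bit Booleans always compare false since
 * negation is not meaningful for them.
 */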
bool
nir_const_value_negative_equal(const nir_const_value *c1,
                               const nir_const_value *c2,
                               unsigned components,
                               nir_alu_type base_type,
                               unsigned bits)
{
   assert(base_type == nir_alu_type_get_base_type(base_type));
   assert(base_type != nir_type_invalid);

   /* 1-bit Boolean values have no meaningful negation. */
   if (bits == 1)
      return false;

   switch (base_type) {
   case nir_type_float:
      switch (bits) {
      case 16:
         for (unsigned i = 0; i < components; i++) {
            if (_mesa_half_to_float(c1[i].u16) !=
                -_mesa_half_to_float(c2[i].u16)) {
               return false;
            }
         }

         return true;

      case 32:
         for (unsigned i = 0; i < components; i++) {
            if (c1[i].f32 != -c2[i].f32)
               return false;
         }

         return true;

      case 64:
         for (unsigned i = 0; i < components; i++) {
            if (c1[i].f64 != -c2[i].f64)
               return false;
         }

         return true;

      default:
         unreachable("unknown bit size");
      }

      break;

   case nir_type_int:
   case nir_type_uint:
      switch (bits) {
      case 8:
         for (unsigned i = 0; i < components; i++) {
            if (c1[i].i8 != -c2[i].i8)
               return false;
         }

         return true;

      case 16:
         for (unsigned i = 0; i < components; i++) {
            if (c1[i].i16 != -c2[i].i16)
               return false;
         }

         return true;

      case 32:
         for (unsigned i = 0; i < components; i++) {
            if (c1[i].i32 != -c2[i].i32)
               return false;
         }

         return true;

      case 64:
         for (unsigned i = 0; i < components; i++) {
            if (c1[i].i64 != -c2[i].i64)
               return false;
         }

         return true;

      default:
         unreachable("unknown bit size");
      }

      break;

   case nir_type_bool:
      return false;

   default:
      break;
   }

   return false;
}

/**
 * Shallow compare of ALU srcs to determine if one is the negation of the other
 *
 * This function detects cases where \p alu1 is a constant and \p alu2 is a
 * constant that is its negation.  It will also detect cases where \p alu2 is
 * an SSA value that is a \c nir_op_fneg applied to \p alu1 (and vice versa).
 *
 * This function does not detect the general case when \p alu1 and \p alu2 are
 * SSA values that are the negations of each other (e.g., \p alu1 represents
 * (a * b) and \p alu2 represents (-a * b)).
 */
bool
nir_alu_srcs_negative_equal(const nir_alu_instr *alu1,
                            const nir_alu_instr *alu2,
                            unsigned src1, unsigned src2)
{
   if (alu1->src[src1].abs != alu2->src[src2].abs)
      return false;

   bool parity = alu1->src[src1].negate != alu2->src[src2].negate;

   /* Handling load_const instructions is tricky. */

   const nir_const_value *const const1 =
      nir_src_as_const_value(alu1->src[src1].src);

   if (const1 != NULL) {
      /* Assume that constant folding will eliminate source mods and unary
       * ops.
       */
      if (parity)
         return false;

      const nir_const_value *const const2 =
         nir_src_as_const_value(alu2->src[src2].src);

      if (const2 == NULL)
         return false;

      if (nir_src_bit_size(alu1->src[src1].src) !=
          nir_src_bit_size(alu2->src[src2].src))
         return false;

      /* FINISHME: Apply the swizzle? */
      nir_alu_type base_type =
         nir_alu_type_get_base_type(nir_op_infos[alu1->op].input_types[src1]);

      return nir_const_value_negative_equal(const1, const2,
                                            nir_ssa_alu_instr_src_components(alu1, src1),
                                            base_type,
                                            nir_src_bit_size(alu1->src[src1].src));
   }

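   /* Neither source is a constant.  Strip one level of fneg/ineg off of each
    * source (tracking the change in `parity`) and record a swizzle for it:
    * the negation's own swizzle if one was stripped, the identity otherwise.
    * Composing these with the outer ALU swizzles below lets us compare the
    * sources component-by-component through the negations.
    */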
   uint8_t alu1_swizzle[4] = {0};
   nir_src alu1_actual_src;
   nir_alu_instr *neg1 = get_neg_instr(alu1->src[src1].src);

   if (neg1) {
      parity = !parity;
      alu1_actual_src = neg1->src[0].src;

      for (unsigned i = 0; i < nir_ssa_alu_instr_src_components(neg1, 0); i++)
         alu1_swizzle[i] = neg1->src[0].swizzle[i];
   } else {
      alu1_actual_src = alu1->src[src1].src;

      for (unsigned i = 0; i < nir_ssa_alu_instr_src_components(alu1, src1); i++)
         alu1_swizzle[i] = i;
   }

   uint8_t alu2_swizzle[4] = {0};
   nir_src alu2_actual_src;
   nir_alu_instr *neg2 = get_neg_instr(alu2->src[src2].src);

   if (neg2) {
      parity = !parity;
      alu2_actual_src = neg2->src[0].src;

      for (unsigned i = 0; i < nir_ssa_alu_instr_src_components(neg2, 0); i++)
         alu2_swizzle[i] = neg2->src[0].swizzle[i];
   } else {
      alu2_actual_src = alu2->src[src2].src;

      for (unsigned i = 0; i < nir_ssa_alu_instr_src_components(alu2, src2); i++)
         alu2_swizzle[i] = i;
   }

   for (unsigned i = 0; i < nir_ssa_alu_instr_src_components(alu1, src1); i++) {
      if (alu1_swizzle[alu1->src[src1].swizzle[i]] !=
          alu2_swizzle[alu2->src[src2].swizzle[i]])
         return false;
   }

   return parity && nir_srcs_equal(alu1_actual_src, alu2_actual_src);
}

bool
nir_alu_srcs_equal(const nir_alu_instr *alu1, const nir_alu_instr *alu2,
                   unsigned src1, unsigned src2)
{
   if (alu1->src[src1].abs != alu2->src[src2].abs ||
       alu1->src[src1].negate != alu2->src[src2].negate)
      return false;

   for (unsigned i = 0; i < nir_ssa_alu_instr_src_components(alu1, src1); i++) {
      if (alu1->src[src1].swizzle[i] != alu2->src[src2].swizzle[i])
         return false;
   }

   return nir_srcs_equal(alu1->src[src1].src, alu2->src[src2].src);
}

/* Returns "true" if two instructions are equal. Note that this will only
 * work for the subset of instructions defined by instr_can_rewrite(). Also,
 * it should only return "true" for instructions that hash_instr() will return
 * the same hash for (ignoring collisions, of course).
 */

bool
nir_instrs_equal(const nir_instr *instr1, const nir_instr *instr2)
{
   assert(instr_can_rewrite(instr1) && instr_can_rewrite(instr2));

   if (instr1->type != instr2->type)
      return false;

   switch (instr1->type) {
   case nir_instr_type_alu: {
      nir_alu_instr *alu1 = nir_instr_as_alu(instr1);
      nir_alu_instr *alu2 = nir_instr_as_alu(instr2);

      if (alu1->op != alu2->op)
         return false;

      /* TODO: We can probably actually do something more intelligent here,
       * such as allowing different numbers of components and taking the
       * maximum.
       */
      if (alu1->dest.dest.ssa.num_components != alu2->dest.dest.ssa.num_components)
         return false;

      if (alu1->dest.dest.ssa.bit_size != alu2->dest.dest.ssa.bit_size)
         return false;

      /* We explicitly don't compare instr->exact. */

      if (nir_op_infos[alu1->op].algebraic_properties & NIR_OP_IS_COMMUTATIVE) {
         assert(nir_op_infos[alu1->op].num_inputs == 2);
         return (nir_alu_srcs_equal(alu1, alu2, 0, 0) &&
                 nir_alu_srcs_equal(alu1, alu2, 1, 1)) ||
                (nir_alu_srcs_equal(alu1, alu2, 0, 1) &&
                 nir_alu_srcs_equal(alu1, alu2, 1, 0));
      } else {
         for (unsigned i = 0; i < nir_op_infos[alu1->op].num_inputs; i++) {
            if (!nir_alu_srcs_equal(alu1, alu2, i, i))
               return false;
         }
      }
      return true;
   }
   case nir_instr_type_deref: {
      nir_deref_instr *deref1 = nir_instr_as_deref(instr1);
      nir_deref_instr *deref2 = nir_instr_as_deref(instr2);

      if (deref1->deref_type != deref2->deref_type ||
          deref1->mode != deref2->mode ||
          deref1->type != deref2->type)
         return false;

      if (deref1->deref_type == nir_deref_type_var)
         return deref1->var == deref2->var;

      if (!nir_srcs_equal(deref1->parent, deref2->parent))
         return false;

      switch (deref1->deref_type) {
      case nir_deref_type_struct:
         if (deref1->strct.index != deref2->strct.index)
            return false;
         break;

      case nir_deref_type_array:
      case nir_deref_type_ptr_as_array:
         if (!nir_srcs_equal(deref1->arr.index, deref2->arr.index))
            return false;
         break;

      case nir_deref_type_cast:
         if (deref1->cast.ptr_stride != deref2->cast.ptr_stride)
            return false;
         break;

      case nir_deref_type_var:
      case nir_deref_type_array_wildcard:
         /* Nothing to do */
         break;

      default:
         unreachable("Invalid instruction deref type");
      }
      return true;
   }
   case nir_instr_type_tex: {
      nir_tex_instr *tex1 = nir_instr_as_tex(instr1);
      nir_tex_instr *tex2 = nir_instr_as_tex(instr2);

      if (tex1->op != tex2->op)
         return false;

      if (tex1->num_srcs != tex2->num_srcs)
         return false;
      for (unsigned i = 0; i < tex1->num_srcs; i++) {
         if (tex1->src[i].src_type != tex2->src[i].src_type ||
             !nir_srcs_equal(tex1->src[i].src, tex2->src[i].src)) {
            return false;
         }
      }

      if (tex1->coord_components != tex2->coord_components ||
          tex1->sampler_dim != tex2->sampler_dim ||
          tex1->is_array != tex2->is_array ||
          tex1->is_shadow != tex2->is_shadow ||
          tex1->is_new_style_shadow != tex2->is_new_style_shadow ||
          tex1->component != tex2->component ||
          tex1->texture_index != tex2->texture_index ||
          tex1->texture_array_size != tex2->texture_array_size ||
          tex1->sampler_index != tex2->sampler_index) {
         return false;
      }

      if (memcmp(tex1->tg4_offsets, tex2->tg4_offsets,
                 sizeof(tex1->tg4_offsets)))
         return false;

      return true;
   }
   case nir_instr_type_load_const: {
      nir_load_const_instr *load1 = nir_instr_as_load_const(instr1);
      nir_load_const_instr *load2 = nir_instr_as_load_const(instr2);

      if (load1->def.num_components != load2->def.num_components)
         return false;

      if (load1->def.bit_size != load2->def.bit_size)
         return false;

      if (load1->def.bit_size == 1) {
         for (unsigned i = 0; i < load1->def.num_components; ++i) {
            if (load1->value[i].b != load2->value[i].b)
               return false;
         }
      } else {
         unsigned size = load1->def.num_components * sizeof(*load1->value);
         if (memcmp(load1->value, load2->value, size) != 0)
            return false;
      }
      return true;
   }
   case nir_instr_type_phi: {
      nir_phi_instr *phi1 = nir_instr_as_phi(instr1);
      nir_phi_instr *phi2 = nir_instr_as_phi(instr2);

      if (phi1->instr.block != phi2->instr.block)
         return false;

      nir_foreach_phi_src(src1, phi1) {
         nir_foreach_phi_src(src2, phi2) {
            if (src1->pred == src2->pred) {
               if (!nir_srcs_equal(src1->src, src2->src))
                  return false;

               break;
            }
         }
      }

      return true;
   }
   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *intrinsic1 = nir_instr_as_intrinsic(instr1);
      nir_intrinsic_instr *intrinsic2 = nir_instr_as_intrinsic(instr2);
      const nir_intrinsic_info *info =
         &nir_intrinsic_infos[intrinsic1->intrinsic];

      if (intrinsic1->intrinsic != intrinsic2->intrinsic ||
          intrinsic1->num_components != intrinsic2->num_components)
         return false;

      if (info->has_dest && intrinsic1->dest.ssa.num_components !=
                            intrinsic2->dest.ssa.num_components)
         return false;

      if (info->has_dest && intrinsic1->dest.ssa.bit_size !=
                            intrinsic2->dest.ssa.bit_size)
         return false;

      for (unsigned i = 0; i < info->num_srcs; i++) {
         if (!nir_srcs_equal(intrinsic1->src[i], intrinsic2->src[i]))
            return false;
      }

      for (unsigned i = 0; i < info->num_indices; i++) {
         if (intrinsic1->const_index[i] != intrinsic2->const_index[i])
            return false;
      }

      return true;
   }
   case nir_instr_type_call:
   case nir_instr_type_jump:
   case nir_instr_type_ssa_undef:
   case nir_instr_type_parallel_copy:
   default:
      unreachable("Invalid instruction type");
   }

   unreachable("All cases in the above switch should return");
}

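/* Returns the SSA destination of one of the instruction types handled by
 * instr_can_rewrite().
 */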
static nir_ssa_def *
nir_instr_get_dest_ssa_def(nir_instr *instr)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      assert(nir_instr_as_alu(instr)->dest.dest.is_ssa);
      return &nir_instr_as_alu(instr)->dest.dest.ssa;
   case nir_instr_type_deref:
      assert(nir_instr_as_deref(instr)->dest.is_ssa);
      return &nir_instr_as_deref(instr)->dest.ssa;
   case nir_instr_type_load_const:
      return &nir_instr_as_load_const(instr)->def;
   case nir_instr_type_phi:
      assert(nir_instr_as_phi(instr)->dest.is_ssa);
      return &nir_instr_as_phi(instr)->dest.ssa;
   case nir_instr_type_intrinsic:
      assert(nir_instr_as_intrinsic(instr)->dest.is_ssa);
      return &nir_instr_as_intrinsic(instr)->dest.ssa;
   case nir_instr_type_tex:
      assert(nir_instr_as_tex(instr)->dest.is_ssa);
      return &nir_instr_as_tex(instr)->dest.ssa;
   default:
      unreachable("We never ask for any of these");
   }
}

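/* Equality callback for the set; keys are nir_instr pointers. */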
static bool
cmp_func(const void *data1, const void *data2)
{
   return nir_instrs_equal(data1, data2);
}

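/* Creates a set keyed on instruction equivalence.  A minimal sketch of one
 * intended use (the visiting order is the caller's responsibility; for
 * CSE-style rewrites, blocks should be visited in dominance order so that
 * any match already in the set dominates the new instruction; `block` here
 * is a hypothetical nir_block being visited):
 *
 *    struct set *instr_set = nir_instr_set_create(NULL);
 *    nir_foreach_instr_safe(instr, block) {
 *       if (nir_instr_set_add_or_rewrite(instr_set, instr))
 *          nir_instr_remove(instr);
 *    }
 *    nir_instr_set_destroy(instr_set);
 */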
struct set *
nir_instr_set_create(void *mem_ctx)
{
   return _mesa_set_create(mem_ctx, hash_instr, cmp_func);
}

void
nir_instr_set_destroy(struct set *instr_set)
{
   _mesa_set_destroy(instr_set, NULL);
}

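/* Adds the instruction to the set if no equivalent instruction is already
 * present.  If a match is found instead, all uses of the instruction's
 * destination are rewritten to use the match's destination and true is
 * returned; the caller is then free to remove the now-dead instruction.
 */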
bool
nir_instr_set_add_or_rewrite(struct set *instr_set, nir_instr *instr)
{
   if (!instr_can_rewrite(instr))
      return false;

   uint32_t hash = hash_instr(instr);
   struct set_entry *e = _mesa_set_search_pre_hashed(instr_set, hash, instr);
   if (e) {
      nir_ssa_def *def = nir_instr_get_dest_ssa_def(instr);
      nir_instr *match = (nir_instr *) e->key;
      nir_ssa_def *new_def = nir_instr_get_dest_ssa_def(match);

      /* It's safe to replace an exact instruction with an inexact one as
       * long as we make it exact.  If we got here, the two instructions are
       * exactly identical in every other way so, once we've set the exact
       * bit, they are the same.
       */
      if (instr->type == nir_instr_type_alu && nir_instr_as_alu(instr)->exact)
         nir_instr_as_alu(match)->exact = true;

      nir_ssa_def_rewrite_uses(def, nir_src_for_ssa(new_def));
      return true;
   }

   _mesa_set_add_pre_hashed(instr_set, hash, instr);
   return false;
}

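/* Removes the instruction from the set if it is present.  Lookup hashes the
 * instruction itself, so this must be called before the instruction or its
 * sources are modified.
 */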
void
nir_instr_set_remove(struct set *instr_set, nir_instr *instr)
{
   if (!instr_can_rewrite(instr))
      return;

   struct set_entry *entry = _mesa_set_search(instr_set, instr);
   if (entry)
      _mesa_set_remove(instr_set, entry);
}