1b8e80941Smrg/*
2b8e80941Smrg * Copyright © 2018 Intel Corporation
3b8e80941Smrg *
4b8e80941Smrg * Permission is hereby granted, free of charge, to any person obtaining a
5b8e80941Smrg * copy of this software and associated documentation files (the "Software"),
6b8e80941Smrg * to deal in the Software without restriction, including without limitation
7b8e80941Smrg * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8b8e80941Smrg * and/or sell copies of the Software, and to permit persons to whom the
9b8e80941Smrg * Software is furnished to do so, subject to the following conditions:
10b8e80941Smrg *
11b8e80941Smrg * The above copyright notice and this permission notice (including the next
12b8e80941Smrg * paragraph) shall be included in all copies or substantial portions of the
13b8e80941Smrg * Software.
14b8e80941Smrg *
15b8e80941Smrg * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16b8e80941Smrg * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17b8e80941Smrg * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18b8e80941Smrg * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19b8e80941Smrg * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20b8e80941Smrg * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21b8e80941Smrg * DEALINGS IN THE SOFTWARE.
22b8e80941Smrg */
23b8e80941Smrg
24b8e80941Smrg#include <gtest/gtest.h>
25b8e80941Smrg
26b8e80941Smrg#include "nir.h"
27b8e80941Smrg#include "nir_builder.h"
28b8e80941Smrg
29b8e80941Smrgnamespace {
30b8e80941Smrg
31b8e80941Smrgclass nir_vars_test : public ::testing::Test {
32b8e80941Smrgprotected:
33b8e80941Smrg   nir_vars_test();
34b8e80941Smrg   ~nir_vars_test();
35b8e80941Smrg
36b8e80941Smrg   nir_variable *create_var(nir_variable_mode mode, const glsl_type *type,
37b8e80941Smrg                            const char *name) {
38b8e80941Smrg      if (mode == nir_var_function_temp)
39b8e80941Smrg         return nir_local_variable_create(b->impl, type, name);
40b8e80941Smrg      else
41b8e80941Smrg         return nir_variable_create(b->shader, mode, type, name);
42b8e80941Smrg   }
43b8e80941Smrg
44b8e80941Smrg   nir_variable *create_int(nir_variable_mode mode, const char *name) {
45b8e80941Smrg      return create_var(mode, glsl_int_type(), name);
46b8e80941Smrg   }
47b8e80941Smrg
48b8e80941Smrg   nir_variable *create_ivec2(nir_variable_mode mode, const char *name) {
49b8e80941Smrg      return create_var(mode, glsl_vector_type(GLSL_TYPE_INT, 2), name);
50b8e80941Smrg   }
51b8e80941Smrg
52b8e80941Smrg   nir_variable *create_ivec4(nir_variable_mode mode, const char *name) {
53b8e80941Smrg      return create_var(mode, glsl_vector_type(GLSL_TYPE_INT, 4), name);
54b8e80941Smrg   }
55b8e80941Smrg
56b8e80941Smrg   nir_variable **create_many_int(nir_variable_mode mode, const char *prefix, unsigned count) {
57b8e80941Smrg      nir_variable **result = (nir_variable **)linear_alloc_child(lin_ctx, sizeof(nir_variable *) * count);
58b8e80941Smrg      for (unsigned i = 0; i < count; i++)
59b8e80941Smrg         result[i] = create_int(mode, linear_asprintf(lin_ctx, "%s%u", prefix, i));
60b8e80941Smrg      return result;
61b8e80941Smrg   }
62b8e80941Smrg
63b8e80941Smrg   nir_variable **create_many_ivec2(nir_variable_mode mode, const char *prefix, unsigned count) {
64b8e80941Smrg      nir_variable **result = (nir_variable **)linear_alloc_child(lin_ctx, sizeof(nir_variable *) * count);
65b8e80941Smrg      for (unsigned i = 0; i < count; i++)
66b8e80941Smrg         result[i] = create_ivec2(mode, linear_asprintf(lin_ctx, "%s%u", prefix, i));
67b8e80941Smrg      return result;
68b8e80941Smrg   }
69b8e80941Smrg
70b8e80941Smrg   nir_variable **create_many_ivec4(nir_variable_mode mode, const char *prefix, unsigned count) {
71b8e80941Smrg      nir_variable **result = (nir_variable **)linear_alloc_child(lin_ctx, sizeof(nir_variable *) * count);
72b8e80941Smrg      for (unsigned i = 0; i < count; i++)
73b8e80941Smrg         result[i] = create_ivec4(mode, linear_asprintf(lin_ctx, "%s%u", prefix, i));
74b8e80941Smrg      return result;
75b8e80941Smrg   }
76b8e80941Smrg
77b8e80941Smrg   unsigned count_intrinsics(nir_intrinsic_op intrinsic);
78b8e80941Smrg
79b8e80941Smrg   nir_intrinsic_instr *get_intrinsic(nir_intrinsic_op intrinsic,
80b8e80941Smrg                                      unsigned index);
81b8e80941Smrg
82b8e80941Smrg   void *mem_ctx;
83b8e80941Smrg   void *lin_ctx;
84b8e80941Smrg
85b8e80941Smrg   nir_builder *b;
86b8e80941Smrg};
87b8e80941Smrg
/* Set up one fresh fragment shader per test.  All allocations hang off
 * mem_ctx so teardown is a single ralloc_free.
 */
nir_vars_test::nir_vars_test()
{
   mem_ctx = ralloc_context(NULL);
   lin_ctx = linear_alloc_parent(mem_ctx, 0);
   /* Zero-initialized options: no lowering flags enabled. */
   static const nir_shader_compiler_options options = { };
   b = rzalloc(mem_ctx, nir_builder);
   nir_builder_init_simple_shader(b, mem_ctx, MESA_SHADER_FRAGMENT, &options);
}
96b8e80941Smrg
/* On failure, dump the shader under test to aid debugging; then release the
 * whole allocation tree (shader, builder, scratch arrays) in one call.
 */
nir_vars_test::~nir_vars_test()
{
   if (HasFailure()) {
      printf("\nShader from the failed test:\n\n");
      nir_print_shader(b->shader, stdout);
   }

   ralloc_free(mem_ctx);
}
106b8e80941Smrg
107b8e80941Smrgunsigned
108b8e80941Smrgnir_vars_test::count_intrinsics(nir_intrinsic_op intrinsic)
109b8e80941Smrg{
110b8e80941Smrg   unsigned count = 0;
111b8e80941Smrg   nir_foreach_block(block, b->impl) {
112b8e80941Smrg      nir_foreach_instr(instr, block) {
113b8e80941Smrg         if (instr->type != nir_instr_type_intrinsic)
114b8e80941Smrg            continue;
115b8e80941Smrg         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
116b8e80941Smrg         if (intrin->intrinsic == intrinsic)
117b8e80941Smrg            count++;
118b8e80941Smrg      }
119b8e80941Smrg   }
120b8e80941Smrg   return count;
121b8e80941Smrg}
122b8e80941Smrg
123b8e80941Smrgnir_intrinsic_instr *
124b8e80941Smrgnir_vars_test::get_intrinsic(nir_intrinsic_op intrinsic,
125b8e80941Smrg                             unsigned index)
126b8e80941Smrg{
127b8e80941Smrg   nir_foreach_block(block, b->impl) {
128b8e80941Smrg      nir_foreach_instr(instr, block) {
129b8e80941Smrg         if (instr->type != nir_instr_type_intrinsic)
130b8e80941Smrg            continue;
131b8e80941Smrg         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
132b8e80941Smrg         if (intrin->intrinsic == intrinsic) {
133b8e80941Smrg            if (index == 0)
134b8e80941Smrg               return intrin;
135b8e80941Smrg            index--;
136b8e80941Smrg         }
137b8e80941Smrg      }
138b8e80941Smrg   }
139b8e80941Smrg   return NULL;
140b8e80941Smrg}
141b8e80941Smrg
/* Empty subclasses so related tests group under distinct gtest suite names
 * while still sharing the nir_vars_test helpers.
 */
class nir_redundant_load_vars_test : public nir_vars_test {};
class nir_copy_prop_vars_test : public nir_vars_test {};
class nir_dead_write_vars_test : public nir_vars_test {};
class nir_combine_stores_test : public nir_vars_test {};

} // namespace
149b8e80941Smrg
TEST_F(nir_redundant_load_vars_test, duplicated_load)
{
   /* Load a variable twice in the same block.  One should be removed. */

   nir_variable *in = create_int(nir_var_shader_in, "in");
   nir_variable **out = create_many_int(nir_var_shader_out, "out", 2);

   nir_store_var(b, out[0], nir_load_var(b, in), 1);
   nir_store_var(b, out[1], nir_load_var(b, in), 1);

   nir_validate_shader(b->shader, NULL);

   /* Sanity-check the setup: two loads before the pass runs. */
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 2);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   /* The second load was redundant and must be gone. */
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1);
}
171b8e80941Smrg
TEST_F(nir_redundant_load_vars_test, duplicated_load_in_two_blocks)
{
   /* Load a variable twice in different blocks.  One should be removed:
    * nothing between the two loads can invalidate the cached value.
    */

   nir_variable *in = create_int(nir_var_shader_in, "in");
   nir_variable **out = create_many_int(nir_var_shader_out, "out", 2);

   nir_store_var(b, out[0], nir_load_var(b, in), 1);

   /* Forces the stores to be in different blocks. */
   nir_pop_if(b, nir_push_if(b, nir_imm_int(b, 0)));

   nir_store_var(b, out[1], nir_load_var(b, in), 1);

   nir_validate_shader(b->shader, NULL);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 2);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1);
}
197b8e80941Smrg
198b8e80941SmrgTEST_F(nir_redundant_load_vars_test, invalidate_inside_if_block)
199b8e80941Smrg{
200b8e80941Smrg   /* Load variables, then write to some of then in different branches of the
201b8e80941Smrg    * if statement.  They should be invalidated accordingly.
202b8e80941Smrg    */
203b8e80941Smrg
204b8e80941Smrg   nir_variable **g = create_many_int(nir_var_shader_temp, "g", 3);
205b8e80941Smrg   nir_variable **out = create_many_int(nir_var_shader_out, "out", 3);
206b8e80941Smrg
207b8e80941Smrg   nir_load_var(b, g[0]);
208b8e80941Smrg   nir_load_var(b, g[1]);
209b8e80941Smrg   nir_load_var(b, g[2]);
210b8e80941Smrg
211b8e80941Smrg   nir_if *if_stmt = nir_push_if(b, nir_imm_int(b, 0));
212b8e80941Smrg   nir_store_var(b, g[0], nir_imm_int(b, 10), 1);
213b8e80941Smrg
214b8e80941Smrg   nir_push_else(b, if_stmt);
215b8e80941Smrg   nir_store_var(b, g[1], nir_imm_int(b, 20), 1);
216b8e80941Smrg
217b8e80941Smrg   nir_pop_if(b, if_stmt);
218b8e80941Smrg
219b8e80941Smrg   nir_store_var(b, out[0], nir_load_var(b, g[0]), 1);
220b8e80941Smrg   nir_store_var(b, out[1], nir_load_var(b, g[1]), 1);
221b8e80941Smrg   nir_store_var(b, out[2], nir_load_var(b, g[2]), 1);
222b8e80941Smrg
223b8e80941Smrg   nir_validate_shader(b->shader, NULL);
224b8e80941Smrg
225b8e80941Smrg   bool progress = nir_opt_copy_prop_vars(b->shader);
226b8e80941Smrg   EXPECT_TRUE(progress);
227b8e80941Smrg
228b8e80941Smrg   /* There are 3 initial loads, plus 2 loads for the values invalidated
229b8e80941Smrg    * inside the if statement.
230b8e80941Smrg    */
231b8e80941Smrg   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 5);
232b8e80941Smrg
233b8e80941Smrg   /* We only load g[2] once. */
234b8e80941Smrg   unsigned g2_load_count = 0;
235b8e80941Smrg   for (int i = 0; i < 5; i++) {
236b8e80941Smrg         nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_deref, i);
237b8e80941Smrg         if (nir_intrinsic_get_var(load, 0) == g[2])
238b8e80941Smrg            g2_load_count++;
239b8e80941Smrg   }
240b8e80941Smrg   EXPECT_EQ(g2_load_count, 1);
241b8e80941Smrg}
242b8e80941Smrg
243b8e80941SmrgTEST_F(nir_redundant_load_vars_test, invalidate_live_load_in_the_end_of_loop)
244b8e80941Smrg{
245b8e80941Smrg   /* Invalidating a load in the end of loop body will apply to the whole loop
246b8e80941Smrg    * body.
247b8e80941Smrg    */
248b8e80941Smrg
249b8e80941Smrg   nir_variable *v = create_int(nir_var_mem_ssbo, "v");
250b8e80941Smrg
251b8e80941Smrg   nir_load_var(b, v);
252b8e80941Smrg
253b8e80941Smrg   nir_loop *loop = nir_push_loop(b);
254b8e80941Smrg
255b8e80941Smrg   nir_if *if_stmt = nir_push_if(b, nir_imm_int(b, 0));
256b8e80941Smrg   nir_jump(b, nir_jump_break);
257b8e80941Smrg   nir_pop_if(b, if_stmt);
258b8e80941Smrg
259b8e80941Smrg   nir_load_var(b, v);
260b8e80941Smrg   nir_store_var(b, v, nir_imm_int(b, 10), 1);
261b8e80941Smrg
262b8e80941Smrg   nir_pop_loop(b, loop);
263b8e80941Smrg
264b8e80941Smrg   bool progress = nir_opt_copy_prop_vars(b->shader);
265b8e80941Smrg   ASSERT_FALSE(progress);
266b8e80941Smrg}
267b8e80941Smrg
268b8e80941SmrgTEST_F(nir_copy_prop_vars_test, simple_copies)
269b8e80941Smrg{
270b8e80941Smrg   nir_variable *in   = create_int(nir_var_shader_in,     "in");
271b8e80941Smrg   nir_variable *temp = create_int(nir_var_function_temp, "temp");
272b8e80941Smrg   nir_variable *out  = create_int(nir_var_shader_out,    "out");
273b8e80941Smrg
274b8e80941Smrg   nir_copy_var(b, temp, in);
275b8e80941Smrg   nir_copy_var(b, out, temp);
276b8e80941Smrg
277b8e80941Smrg   nir_validate_shader(b->shader, NULL);
278b8e80941Smrg
279b8e80941Smrg   bool progress = nir_opt_copy_prop_vars(b->shader);
280b8e80941Smrg   EXPECT_TRUE(progress);
281b8e80941Smrg
282b8e80941Smrg   nir_validate_shader(b->shader, NULL);
283b8e80941Smrg
284b8e80941Smrg   ASSERT_EQ(count_intrinsics(nir_intrinsic_copy_deref), 2);
285b8e80941Smrg
286b8e80941Smrg   nir_intrinsic_instr *first_copy = get_intrinsic(nir_intrinsic_copy_deref, 0);
287b8e80941Smrg   ASSERT_TRUE(first_copy->src[1].is_ssa);
288b8e80941Smrg
289b8e80941Smrg   nir_intrinsic_instr *second_copy = get_intrinsic(nir_intrinsic_copy_deref, 1);
290b8e80941Smrg   ASSERT_TRUE(second_copy->src[1].is_ssa);
291b8e80941Smrg
292b8e80941Smrg   EXPECT_EQ(first_copy->src[1].ssa, second_copy->src[1].ssa);
293b8e80941Smrg}
294b8e80941Smrg
TEST_F(nir_copy_prop_vars_test, simple_store_load)
{
   /* Store a full ivec2, load it back, and store that into a second
    * variable.  The load should be propagated away so both stores use the
    * original SSA value directly.
    */
   nir_variable **v = create_many_ivec2(nir_var_function_temp, "v", 2);
   unsigned mask = 1 | 2;  /* write both components */

   nir_ssa_def *stored_value = nir_imm_ivec2(b, 10, 20);
   nir_store_var(b, v[0], stored_value, mask);

   nir_ssa_def *read_value = nir_load_var(b, v[0]);
   nir_store_var(b, v[1], read_value, mask);

   nir_validate_shader(b->shader, NULL);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 2);

   for (int i = 0; i < 2; i++) {
      nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, i);
      ASSERT_TRUE(store->src[1].is_ssa);
      EXPECT_EQ(store->src[1].ssa, stored_value);
   }
}
321b8e80941Smrg
TEST_F(nir_copy_prop_vars_test, store_store_load)
{
   /* Two full stores to the same variable: the second completely overwrites
    * the first, so a following load must see the second value.
    */
   nir_variable **v = create_many_ivec2(nir_var_function_temp, "v", 2);
   unsigned mask = 1 | 2;  /* write both components */

   nir_ssa_def *first_value = nir_imm_ivec2(b, 10, 20);
   nir_store_var(b, v[0], first_value, mask);

   nir_ssa_def *second_value = nir_imm_ivec2(b, 30, 40);
   nir_store_var(b, v[0], second_value, mask);

   nir_ssa_def *read_value = nir_load_var(b, v[0]);
   nir_store_var(b, v[1], read_value, mask);

   nir_validate_shader(b->shader, NULL);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);

   /* Store to v[1] should use second_value directly. */
   nir_intrinsic_instr *store_to_v1 = get_intrinsic(nir_intrinsic_store_deref, 2);
   ASSERT_EQ(nir_intrinsic_get_var(store_to_v1, 0), v[1]);
   ASSERT_TRUE(store_to_v1->src[1].is_ssa);
   EXPECT_EQ(store_to_v1->src[1].ssa, second_value);
}
351b8e80941Smrg
TEST_F(nir_copy_prop_vars_test, store_store_load_different_components)
{
   /* Two partial stores to different components of the same vector: the
    * second store must not invalidate the component written by the first.
    */
   nir_variable **v = create_many_ivec2(nir_var_function_temp, "v", 2);

   nir_ssa_def *first_value = nir_imm_ivec2(b, 10, 20);
   nir_store_var(b, v[0], first_value, 1 << 1);   /* writes only .y */

   nir_ssa_def *second_value = nir_imm_ivec2(b, 30, 40);
   nir_store_var(b, v[0], second_value, 1 << 0);  /* writes only .x */

   nir_ssa_def *read_value = nir_load_var(b, v[0]);
   nir_store_var(b, v[1], read_value, 1 << 1);    /* consumes .y */

   nir_validate_shader(b->shader, NULL);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   /* Fold the constants so the component value can be inspected below. */
   nir_opt_constant_folding(b->shader);
   nir_validate_shader(b->shader, NULL);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);

   /* Store to v[1] should use first_value directly.  The write of
    * second_value did not overwrite the component it uses.
    */
   nir_intrinsic_instr *store_to_v1 = get_intrinsic(nir_intrinsic_store_deref, 2);
   ASSERT_EQ(nir_intrinsic_get_var(store_to_v1, 0), v[1]);
   ASSERT_EQ(nir_src_comp_as_uint(store_to_v1->src[1], 1), 20);
}
384b8e80941Smrg
TEST_F(nir_copy_prop_vars_test, store_store_load_different_components_in_many_blocks)
{
   /* Same as store_store_load_different_components, but with block
    * boundaries between the stores and the load: partial-component
    * knowledge must survive across blocks.
    */
   nir_variable **v = create_many_ivec2(nir_var_function_temp, "v", 2);

   nir_ssa_def *first_value = nir_imm_ivec2(b, 10, 20);
   nir_store_var(b, v[0], first_value, 1 << 1);   /* writes only .y */

   /* Adding an if statement will cause blocks to be created. */
   nir_pop_if(b, nir_push_if(b, nir_imm_int(b, 0)));

   nir_ssa_def *second_value = nir_imm_ivec2(b, 30, 40);
   nir_store_var(b, v[0], second_value, 1 << 0);  /* writes only .x */

   /* Adding an if statement will cause blocks to be created. */
   nir_pop_if(b, nir_push_if(b, nir_imm_int(b, 0)));

   nir_ssa_def *read_value = nir_load_var(b, v[0]);
   nir_store_var(b, v[1], read_value, 1 << 1);    /* consumes .y */

   nir_validate_shader(b->shader, NULL);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   nir_opt_constant_folding(b->shader);
   nir_validate_shader(b->shader, NULL);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);

   /* Store to v[1] should use first_value directly.  The write of
    * second_value did not overwrite the component it uses.
    */
   nir_intrinsic_instr *store_to_v1 = get_intrinsic(nir_intrinsic_store_deref, 2);
   ASSERT_EQ(nir_intrinsic_get_var(store_to_v1, 0), v[1]);
   ASSERT_EQ(nir_src_comp_as_uint(store_to_v1->src[1], 1), 20);
}
423b8e80941Smrg
424b8e80941SmrgTEST_F(nir_copy_prop_vars_test, memory_barrier_in_two_blocks)
425b8e80941Smrg{
426b8e80941Smrg   nir_variable **v = create_many_int(nir_var_mem_ssbo, "v", 4);
427b8e80941Smrg
428b8e80941Smrg   nir_store_var(b, v[0], nir_imm_int(b, 1), 1);
429b8e80941Smrg   nir_store_var(b, v[1], nir_imm_int(b, 2), 1);
430b8e80941Smrg
431b8e80941Smrg   /* Split into many blocks. */
432b8e80941Smrg   nir_pop_if(b, nir_push_if(b, nir_imm_int(b, 0)));
433b8e80941Smrg
434b8e80941Smrg   nir_store_var(b, v[2], nir_load_var(b, v[0]), 1);
435b8e80941Smrg
436b8e80941Smrg   nir_builder_instr_insert(b, &nir_intrinsic_instr_create(b->shader, nir_intrinsic_memory_barrier)->instr);
437b8e80941Smrg
438b8e80941Smrg   nir_store_var(b, v[3], nir_load_var(b, v[1]), 1);
439b8e80941Smrg
440b8e80941Smrg   bool progress = nir_opt_copy_prop_vars(b->shader);
441b8e80941Smrg   ASSERT_TRUE(progress);
442b8e80941Smrg
443b8e80941Smrg   /* Only the second load will remain after the optimization. */
444b8e80941Smrg   ASSERT_EQ(1, count_intrinsics(nir_intrinsic_load_deref));
445b8e80941Smrg   nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_deref, 0);
446b8e80941Smrg   ASSERT_EQ(nir_intrinsic_get_var(load, 0), v[1]);
447b8e80941Smrg}
448b8e80941Smrg
TEST_F(nir_copy_prop_vars_test, simple_store_load_in_two_blocks)
{
   /* Store a value, cross a block boundary, then load it back: the stored
    * value must still be propagated into the second store.
    */
   nir_variable **v = create_many_ivec2(nir_var_function_temp, "v", 2);
   unsigned mask = 1 | 2;  /* write both components */

   nir_ssa_def *stored_value = nir_imm_ivec2(b, 10, 20);
   nir_store_var(b, v[0], stored_value, mask);

   /* Adding an if statement will cause blocks to be created. */
   nir_pop_if(b, nir_push_if(b, nir_imm_int(b, 0)));

   nir_ssa_def *read_value = nir_load_var(b, v[0]);
   nir_store_var(b, v[1], read_value, mask);

   nir_validate_shader(b->shader, NULL);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);

   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 2);

   for (int i = 0; i < 2; i++) {
      nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, i);
      ASSERT_TRUE(store->src[1].is_ssa);
      EXPECT_EQ(store->src[1].ssa, stored_value);
   }
}
478b8e80941Smrg
TEST_F(nir_copy_prop_vars_test, load_direct_array_deref_on_vector_reuses_previous_load)
{
   /* After storing both components of a vector from earlier loads, a direct
    * array deref load of one component should be satisfied from the cached
    * values instead of re-loading.
    */
   nir_variable *in0 = create_ivec2(nir_var_mem_ssbo, "in0");
   nir_variable *in1 = create_ivec2(nir_var_mem_ssbo, "in1");
   nir_variable *vec = create_ivec2(nir_var_mem_ssbo, "vec");
   nir_variable *out = create_int(nir_var_mem_ssbo, "out");

   nir_store_var(b, vec, nir_load_var(b, in0), 1 << 0);
   nir_store_var(b, vec, nir_load_var(b, in1), 1 << 1);

   /* This load will be dropped, as vec.y (or vec[1]) is already known. */
   nir_deref_instr *deref =
      nir_build_deref_array_imm(b, nir_build_deref_var(b, vec), 1);
   nir_ssa_def *loaded_from_deref = nir_load_deref(b, deref);

   /* This store should use the value loaded from in1. */
   nir_store_var(b, out, loaded_from_deref, 1 << 0);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 3);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 2);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);

   nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, 2);
   ASSERT_TRUE(store->src[1].is_ssa);

   /* NOTE: The ALU instruction is how we get the vec.y. */
   ASSERT_TRUE(nir_src_as_alu_instr(store->src[1]));
}
514b8e80941Smrg
TEST_F(nir_copy_prop_vars_test, load_direct_array_deref_on_vector_reuses_previous_copy)
{
   /* After copy_deref from in0 to vec, a direct array deref load of a vec
    * component should be rewritten to load from in0 instead.
    */
   nir_variable *in0 = create_ivec2(nir_var_mem_ssbo, "in0");
   nir_variable *vec = create_ivec2(nir_var_mem_ssbo, "vec");

   nir_copy_var(b, vec, in0);

   /* This load will be replaced with one from in0. */
   nir_deref_instr *deref =
      nir_build_deref_array_imm(b, nir_build_deref_var(b, vec), 1);
   nir_load_deref(b, deref);

   nir_validate_shader(b->shader, NULL);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1);

   /* The remaining load must now read the copy's source variable. */
   nir_intrinsic_instr *load = get_intrinsic(nir_intrinsic_load_deref, 0);
   ASSERT_EQ(nir_intrinsic_get_var(load, 0), in0);
}
538b8e80941Smrg
TEST_F(nir_copy_prop_vars_test, load_direct_array_deref_on_vector_gets_reused)
{
   /* A component loaded through a direct array deref should be remembered,
    * so a later whole-vector load can be assembled from known components.
    */
   nir_variable *in0 = create_ivec2(nir_var_mem_ssbo, "in0");
   nir_variable *vec = create_ivec2(nir_var_mem_ssbo, "vec");
   nir_variable *out = create_ivec2(nir_var_mem_ssbo, "out");

   /* Loading "vec[1]" deref will save the information about vec.y. */
   nir_deref_instr *deref =
      nir_build_deref_array_imm(b, nir_build_deref_var(b, vec), 1);
   nir_load_deref(b, deref);

   /* Store to vec.x. */
   nir_store_var(b, vec, nir_load_var(b, in0), 1 << 0);

   /* This load will be dropped, since both vec.x and vec.y are known. */
   nir_ssa_def *loaded_from_vec = nir_load_var(b, vec);
   nir_store_var(b, out, loaded_from_vec, 0x3);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 3);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 2);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 2);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 2);

   /* The final store's value is assembled (via ALU) from the known parts. */
   nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, 1);
   ASSERT_TRUE(store->src[1].is_ssa);
   ASSERT_TRUE(nir_src_as_alu_instr(store->src[1]));
}
572b8e80941Smrg
TEST_F(nir_copy_prop_vars_test, store_load_direct_array_deref_on_vector)
{
   /* Per-component stores through direct array derefs should make both a
    * single-component load and a whole-vector load droppable.
    */
   nir_variable *vec = create_ivec2(nir_var_mem_ssbo, "vec");
   nir_variable *out0 = create_int(nir_var_mem_ssbo, "out0");
   nir_variable *out1 = create_ivec2(nir_var_mem_ssbo, "out1");

   /* Store to "vec[1]" and "vec[0]". */
   nir_deref_instr *store_deref_y =
      nir_build_deref_array_imm(b, nir_build_deref_var(b, vec), 1);
   nir_store_deref(b, store_deref_y, nir_imm_int(b, 20), 1);

   nir_deref_instr *store_deref_x =
      nir_build_deref_array_imm(b, nir_build_deref_var(b, vec), 0);
   nir_store_deref(b, store_deref_x, nir_imm_int(b, 10), 1);

   /* Both loads below will be dropped, because the values are already known. */
   nir_deref_instr *load_deref_y =
      nir_build_deref_array_imm(b, nir_build_deref_var(b, vec), 1);
   nir_store_var(b, out0, nir_load_deref(b, load_deref_y), 1);

   nir_store_var(b, out1, nir_load_var(b, vec), 1);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 2);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 4);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 0);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 4);

   /* Third store will just use the value from first store. */
   nir_intrinsic_instr *first_store = get_intrinsic(nir_intrinsic_store_deref, 0);
   nir_intrinsic_instr *third_store = get_intrinsic(nir_intrinsic_store_deref, 2);
   ASSERT_TRUE(third_store->src[1].is_ssa);
   EXPECT_EQ(third_store->src[1].ssa, first_store->src[1].ssa);

   /* Fourth store will compose first and second store values. */
   nir_intrinsic_instr *fourth_store = get_intrinsic(nir_intrinsic_store_deref, 3);
   ASSERT_TRUE(fourth_store->src[1].is_ssa);
   EXPECT_TRUE(nir_src_as_alu_instr(fourth_store->src[1]));
}
617b8e80941Smrg
TEST_F(nir_copy_prop_vars_test, store_load_indirect_array_deref_on_vector)
{
   /* A store and a load through the SAME indirect (vec[idx]) deref: the
    * load should be dropped and the stored value propagated.
    */
   nir_variable *vec = create_ivec2(nir_var_mem_ssbo, "vec");
   nir_variable *idx = create_int(nir_var_mem_ssbo, "idx");
   nir_variable *out = create_int(nir_var_mem_ssbo, "out");

   nir_ssa_def *idx_ssa = nir_load_var(b, idx);

   /* Store to vec[idx]. */
   nir_deref_instr *store_deref =
      nir_build_deref_array(b, nir_build_deref_var(b, vec), idx_ssa);
   nir_store_deref(b, store_deref, nir_imm_int(b, 20), 1);

   /* Load from vec[idx] to store in out. This load should be dropped. */
   nir_deref_instr *load_deref =
      nir_build_deref_array(b, nir_build_deref_var(b, vec), idx_ssa);
   nir_store_var(b, out, nir_load_deref(b, load_deref), 1);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 2);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 2);

   bool progress = nir_opt_copy_prop_vars(b->shader);
   EXPECT_TRUE(progress);

   nir_validate_shader(b->shader, NULL);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1);
   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 2);

   /* Store to vec[idx] propagated to out. */
   nir_intrinsic_instr *first = get_intrinsic(nir_intrinsic_store_deref, 0);
   nir_intrinsic_instr *second = get_intrinsic(nir_intrinsic_store_deref, 1);
   ASSERT_TRUE(first->src[1].is_ssa);
   ASSERT_TRUE(second->src[1].is_ssa);
   EXPECT_EQ(first->src[1].ssa, second->src[1].ssa);
}
654b8e80941Smrg
655b8e80941SmrgTEST_F(nir_copy_prop_vars_test, store_load_direct_and_indirect_array_deref_on_vector)
656b8e80941Smrg{
657b8e80941Smrg   nir_variable *vec = create_ivec2(nir_var_mem_ssbo, "vec");
658b8e80941Smrg   nir_variable *idx = create_int(nir_var_mem_ssbo, "idx");
659b8e80941Smrg   nir_variable **out = create_many_int(nir_var_mem_ssbo, "out", 2);
660b8e80941Smrg
661b8e80941Smrg   nir_ssa_def *idx_ssa = nir_load_var(b, idx);
662b8e80941Smrg
663b8e80941Smrg   /* Store to vec. */
664b8e80941Smrg   nir_store_var(b, vec, nir_imm_ivec2(b, 10, 10), 1 | 2);
665b8e80941Smrg
666b8e80941Smrg   /* Load from vec[idx]. This load is currently not dropped. */
667b8e80941Smrg   nir_deref_instr *indirect =
668b8e80941Smrg      nir_build_deref_array(b, nir_build_deref_var(b, vec), idx_ssa);
669b8e80941Smrg   nir_store_var(b, out[0], nir_load_deref(b, indirect), 1);
670b8e80941Smrg
671b8e80941Smrg   /* Load from vec[idx] again. This load should be dropped. */
672b8e80941Smrg   nir_store_var(b, out[1], nir_load_deref(b, indirect), 1);
673b8e80941Smrg
674b8e80941Smrg   nir_validate_shader(b->shader, NULL);
675b8e80941Smrg   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 3);
676b8e80941Smrg   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);
677b8e80941Smrg
678b8e80941Smrg   bool progress = nir_opt_copy_prop_vars(b->shader);
679b8e80941Smrg   EXPECT_TRUE(progress);
680b8e80941Smrg
681b8e80941Smrg   nir_validate_shader(b->shader, NULL);
682b8e80941Smrg   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 2);
683b8e80941Smrg   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);
684b8e80941Smrg
685b8e80941Smrg   /* Store to vec[idx] propagated to out. */
686b8e80941Smrg   nir_intrinsic_instr *second = get_intrinsic(nir_intrinsic_store_deref, 1);
687b8e80941Smrg   nir_intrinsic_instr *third = get_intrinsic(nir_intrinsic_store_deref, 2);
688b8e80941Smrg   ASSERT_TRUE(second->src[1].is_ssa);
689b8e80941Smrg   ASSERT_TRUE(third->src[1].is_ssa);
690b8e80941Smrg   EXPECT_EQ(second->src[1].ssa, third->src[1].ssa);
691b8e80941Smrg}
692b8e80941Smrg
693b8e80941SmrgTEST_F(nir_copy_prop_vars_test, store_load_indirect_array_deref)
694b8e80941Smrg{
695b8e80941Smrg   nir_variable *arr = create_var(nir_var_mem_ssbo,
696b8e80941Smrg                                  glsl_array_type(glsl_int_type(), 10, 0),
697b8e80941Smrg                                  "arr");
698b8e80941Smrg   nir_variable *idx = create_int(nir_var_mem_ssbo, "idx");
699b8e80941Smrg   nir_variable *out = create_int(nir_var_mem_ssbo, "out");
700b8e80941Smrg
701b8e80941Smrg   nir_ssa_def *idx_ssa = nir_load_var(b, idx);
702b8e80941Smrg
703b8e80941Smrg   /* Store to arr[idx]. */
704b8e80941Smrg   nir_deref_instr *store_deref =
705b8e80941Smrg      nir_build_deref_array(b, nir_build_deref_var(b, arr), idx_ssa);
706b8e80941Smrg   nir_store_deref(b, store_deref, nir_imm_int(b, 20), 1);
707b8e80941Smrg
708b8e80941Smrg   /* Load from arr[idx] to store in out. This load should be dropped. */
709b8e80941Smrg   nir_deref_instr *load_deref =
710b8e80941Smrg      nir_build_deref_array(b, nir_build_deref_var(b, arr), idx_ssa);
711b8e80941Smrg   nir_store_var(b, out, nir_load_deref(b, load_deref), 1);
712b8e80941Smrg
713b8e80941Smrg   nir_validate_shader(b->shader, NULL);
714b8e80941Smrg   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 2);
715b8e80941Smrg   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 2);
716b8e80941Smrg
717b8e80941Smrg   bool progress = nir_opt_copy_prop_vars(b->shader);
718b8e80941Smrg   EXPECT_TRUE(progress);
719b8e80941Smrg
720b8e80941Smrg   nir_validate_shader(b->shader, NULL);
721b8e80941Smrg   ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 1);
722b8e80941Smrg   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 2);
723b8e80941Smrg
724b8e80941Smrg   /* Store to arr[idx] propagated to out. */
725b8e80941Smrg   nir_intrinsic_instr *first = get_intrinsic(nir_intrinsic_store_deref, 0);
726b8e80941Smrg   nir_intrinsic_instr *second = get_intrinsic(nir_intrinsic_store_deref, 1);
727b8e80941Smrg   ASSERT_TRUE(first->src[1].is_ssa);
728b8e80941Smrg   ASSERT_TRUE(second->src[1].is_ssa);
729b8e80941Smrg   EXPECT_EQ(first->src[1].ssa, second->src[1].ssa);
730b8e80941Smrg}
731b8e80941Smrg
732b8e80941SmrgTEST_F(nir_dead_write_vars_test, no_dead_writes_in_block)
733b8e80941Smrg{
734b8e80941Smrg   nir_variable **v = create_many_int(nir_var_mem_ssbo, "v", 2);
735b8e80941Smrg
736b8e80941Smrg   nir_store_var(b, v[0], nir_load_var(b, v[1]), 1);
737b8e80941Smrg
738b8e80941Smrg   bool progress = nir_opt_dead_write_vars(b->shader);
739b8e80941Smrg   ASSERT_FALSE(progress);
740b8e80941Smrg}
741b8e80941Smrg
742b8e80941SmrgTEST_F(nir_dead_write_vars_test, no_dead_writes_different_components_in_block)
743b8e80941Smrg{
744b8e80941Smrg   nir_variable **v = create_many_ivec2(nir_var_mem_ssbo, "v", 3);
745b8e80941Smrg
746b8e80941Smrg   nir_store_var(b, v[0], nir_load_var(b, v[1]), 1 << 0);
747b8e80941Smrg   nir_store_var(b, v[0], nir_load_var(b, v[2]), 1 << 1);
748b8e80941Smrg
749b8e80941Smrg   bool progress = nir_opt_dead_write_vars(b->shader);
750b8e80941Smrg   ASSERT_FALSE(progress);
751b8e80941Smrg}
752b8e80941Smrg
753b8e80941SmrgTEST_F(nir_dead_write_vars_test, no_dead_writes_in_if_statement)
754b8e80941Smrg{
755b8e80941Smrg   nir_variable **v = create_many_int(nir_var_mem_ssbo, "v", 6);
756b8e80941Smrg
757b8e80941Smrg   nir_store_var(b, v[2], nir_load_var(b, v[0]), 1);
758b8e80941Smrg   nir_store_var(b, v[3], nir_load_var(b, v[1]), 1);
759b8e80941Smrg
760b8e80941Smrg   /* Each arm of the if statement will overwrite one store. */
761b8e80941Smrg   nir_if *if_stmt = nir_push_if(b, nir_imm_int(b, 0));
762b8e80941Smrg   nir_store_var(b, v[2], nir_load_var(b, v[4]), 1);
763b8e80941Smrg
764b8e80941Smrg   nir_push_else(b, if_stmt);
765b8e80941Smrg   nir_store_var(b, v[3], nir_load_var(b, v[5]), 1);
766b8e80941Smrg
767b8e80941Smrg   nir_pop_if(b, if_stmt);
768b8e80941Smrg
769b8e80941Smrg   bool progress = nir_opt_dead_write_vars(b->shader);
770b8e80941Smrg   ASSERT_FALSE(progress);
771b8e80941Smrg}
772b8e80941Smrg
773b8e80941SmrgTEST_F(nir_dead_write_vars_test, no_dead_writes_in_loop_statement)
774b8e80941Smrg{
775b8e80941Smrg   nir_variable **v = create_many_int(nir_var_mem_ssbo, "v", 3);
776b8e80941Smrg
777b8e80941Smrg   nir_store_var(b, v[0], nir_load_var(b, v[1]), 1);
778b8e80941Smrg
779b8e80941Smrg   /* Loop will write other value.  Since it might not be executed, it doesn't
780b8e80941Smrg    * kill the first write.
781b8e80941Smrg    */
782b8e80941Smrg   nir_loop *loop = nir_push_loop(b);
783b8e80941Smrg
784b8e80941Smrg   nir_if *if_stmt = nir_push_if(b, nir_imm_int(b, 0));
785b8e80941Smrg   nir_jump(b, nir_jump_break);
786b8e80941Smrg   nir_pop_if(b, if_stmt);
787b8e80941Smrg
788b8e80941Smrg   nir_store_var(b, v[0], nir_load_var(b, v[2]), 1);
789b8e80941Smrg   nir_pop_loop(b, loop);
790b8e80941Smrg
791b8e80941Smrg   bool progress = nir_opt_dead_write_vars(b->shader);
792b8e80941Smrg   ASSERT_FALSE(progress);
793b8e80941Smrg}
794b8e80941Smrg
795b8e80941SmrgTEST_F(nir_dead_write_vars_test, dead_write_in_block)
796b8e80941Smrg{
797b8e80941Smrg   nir_variable **v = create_many_int(nir_var_mem_ssbo, "v", 3);
798b8e80941Smrg
799b8e80941Smrg   nir_store_var(b, v[0], nir_load_var(b, v[1]), 1);
800b8e80941Smrg   nir_ssa_def *load_v2 = nir_load_var(b, v[2]);
801b8e80941Smrg   nir_store_var(b, v[0], load_v2, 1);
802b8e80941Smrg
803b8e80941Smrg   bool progress = nir_opt_dead_write_vars(b->shader);
804b8e80941Smrg   ASSERT_TRUE(progress);
805b8e80941Smrg
806b8e80941Smrg   EXPECT_EQ(1, count_intrinsics(nir_intrinsic_store_deref));
807b8e80941Smrg
808b8e80941Smrg   nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, 0);
809b8e80941Smrg   ASSERT_TRUE(store->src[1].is_ssa);
810b8e80941Smrg   EXPECT_EQ(store->src[1].ssa, load_v2);
811b8e80941Smrg}
812b8e80941Smrg
813b8e80941SmrgTEST_F(nir_dead_write_vars_test, dead_write_components_in_block)
814b8e80941Smrg{
815b8e80941Smrg   nir_variable **v = create_many_ivec2(nir_var_mem_ssbo, "v", 3);
816b8e80941Smrg
817b8e80941Smrg   nir_store_var(b, v[0], nir_load_var(b, v[1]), 1 << 0);
818b8e80941Smrg   nir_ssa_def *load_v2 = nir_load_var(b, v[2]);
819b8e80941Smrg   nir_store_var(b, v[0], load_v2, 1 << 0);
820b8e80941Smrg
821b8e80941Smrg   bool progress = nir_opt_dead_write_vars(b->shader);
822b8e80941Smrg   ASSERT_TRUE(progress);
823b8e80941Smrg
824b8e80941Smrg   EXPECT_EQ(1, count_intrinsics(nir_intrinsic_store_deref));
825b8e80941Smrg
826b8e80941Smrg   nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, 0);
827b8e80941Smrg   ASSERT_TRUE(store->src[1].is_ssa);
828b8e80941Smrg   EXPECT_EQ(store->src[1].ssa, load_v2);
829b8e80941Smrg}
830b8e80941Smrg
831b8e80941Smrg
/* TODO: The DISABLED tests below depend on the dead-write removal being able
 * to identify dead writes across multiple blocks.  That is not implemented
 * yet.
 */
836b8e80941Smrg
837b8e80941SmrgTEST_F(nir_dead_write_vars_test, DISABLED_dead_write_in_two_blocks)
838b8e80941Smrg{
839b8e80941Smrg   nir_variable **v = create_many_int(nir_var_mem_ssbo, "v", 3);
840b8e80941Smrg
841b8e80941Smrg   nir_store_var(b, v[0], nir_load_var(b, v[1]), 1);
842b8e80941Smrg   nir_ssa_def *load_v2 = nir_load_var(b, v[2]);
843b8e80941Smrg
844b8e80941Smrg   /* Causes the stores to be in different blocks. */
845b8e80941Smrg   nir_pop_if(b, nir_push_if(b, nir_imm_int(b, 0)));
846b8e80941Smrg
847b8e80941Smrg   nir_store_var(b, v[0], load_v2, 1);
848b8e80941Smrg
849b8e80941Smrg   bool progress = nir_opt_dead_write_vars(b->shader);
850b8e80941Smrg   ASSERT_TRUE(progress);
851b8e80941Smrg
852b8e80941Smrg   EXPECT_EQ(1, count_intrinsics(nir_intrinsic_store_deref));
853b8e80941Smrg
854b8e80941Smrg   nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, 0);
855b8e80941Smrg   ASSERT_TRUE(store->src[1].is_ssa);
856b8e80941Smrg   EXPECT_EQ(store->src[1].ssa, load_v2);
857b8e80941Smrg}
858b8e80941Smrg
859b8e80941SmrgTEST_F(nir_dead_write_vars_test, DISABLED_dead_write_components_in_two_blocks)
860b8e80941Smrg{
861b8e80941Smrg   nir_variable **v = create_many_ivec2(nir_var_mem_ssbo, "v", 3);
862b8e80941Smrg
863b8e80941Smrg   nir_store_var(b, v[0], nir_load_var(b, v[1]), 1 << 0);
864b8e80941Smrg
865b8e80941Smrg   /* Causes the stores to be in different blocks. */
866b8e80941Smrg   nir_pop_if(b, nir_push_if(b, nir_imm_int(b, 0)));
867b8e80941Smrg
868b8e80941Smrg   nir_ssa_def *load_v2 = nir_load_var(b, v[2]);
869b8e80941Smrg   nir_store_var(b, v[0], load_v2, 1 << 0);
870b8e80941Smrg
871b8e80941Smrg   bool progress = nir_opt_dead_write_vars(b->shader);
872b8e80941Smrg   ASSERT_TRUE(progress);
873b8e80941Smrg
874b8e80941Smrg   EXPECT_EQ(1, count_intrinsics(nir_intrinsic_store_deref));
875b8e80941Smrg
876b8e80941Smrg   nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, 0);
877b8e80941Smrg   ASSERT_TRUE(store->src[1].is_ssa);
878b8e80941Smrg   EXPECT_EQ(store->src[1].ssa, load_v2);
879b8e80941Smrg}
880b8e80941Smrg
881b8e80941SmrgTEST_F(nir_dead_write_vars_test, DISABLED_dead_writes_in_if_statement)
882b8e80941Smrg{
883b8e80941Smrg   nir_variable **v = create_many_int(nir_var_mem_ssbo, "v", 4);
884b8e80941Smrg
885b8e80941Smrg   /* Both branches will overwrite, making the previous store dead. */
886b8e80941Smrg   nir_store_var(b, v[0], nir_load_var(b, v[1]), 1);
887b8e80941Smrg
888b8e80941Smrg   nir_if *if_stmt = nir_push_if(b, nir_imm_int(b, 0));
889b8e80941Smrg   nir_ssa_def *load_v2 = nir_load_var(b, v[2]);
890b8e80941Smrg   nir_store_var(b, v[0], load_v2, 1);
891b8e80941Smrg
892b8e80941Smrg   nir_push_else(b, if_stmt);
893b8e80941Smrg   nir_ssa_def *load_v3 = nir_load_var(b, v[3]);
894b8e80941Smrg   nir_store_var(b, v[0], load_v3, 1);
895b8e80941Smrg
896b8e80941Smrg   nir_pop_if(b, if_stmt);
897b8e80941Smrg
898b8e80941Smrg   bool progress = nir_opt_dead_write_vars(b->shader);
899b8e80941Smrg   ASSERT_TRUE(progress);
900b8e80941Smrg   EXPECT_EQ(2, count_intrinsics(nir_intrinsic_store_deref));
901b8e80941Smrg
902b8e80941Smrg   nir_intrinsic_instr *first_store = get_intrinsic(nir_intrinsic_store_deref, 0);
903b8e80941Smrg   ASSERT_TRUE(first_store->src[1].is_ssa);
904b8e80941Smrg   EXPECT_EQ(first_store->src[1].ssa, load_v2);
905b8e80941Smrg
906b8e80941Smrg   nir_intrinsic_instr *second_store = get_intrinsic(nir_intrinsic_store_deref, 1);
907b8e80941Smrg   ASSERT_TRUE(second_store->src[1].is_ssa);
908b8e80941Smrg   EXPECT_EQ(second_store->src[1].ssa, load_v3);
909b8e80941Smrg}
910b8e80941Smrg
911b8e80941SmrgTEST_F(nir_dead_write_vars_test, DISABLED_memory_barrier_in_two_blocks)
912b8e80941Smrg{
913b8e80941Smrg   nir_variable **v = create_many_int(nir_var_mem_ssbo, "v", 2);
914b8e80941Smrg
915b8e80941Smrg   nir_store_var(b, v[0], nir_imm_int(b, 1), 1);
916b8e80941Smrg   nir_store_var(b, v[1], nir_imm_int(b, 2), 1);
917b8e80941Smrg
918b8e80941Smrg   /* Split into many blocks. */
919b8e80941Smrg   nir_pop_if(b, nir_push_if(b, nir_imm_int(b, 0)));
920b8e80941Smrg
921b8e80941Smrg   /* Because it is before the barrier, this will kill the previous store to that target. */
922b8e80941Smrg   nir_store_var(b, v[0], nir_imm_int(b, 3), 1);
923b8e80941Smrg
924b8e80941Smrg   nir_builder_instr_insert(b, &nir_intrinsic_instr_create(b->shader, nir_intrinsic_memory_barrier)->instr);
925b8e80941Smrg
926b8e80941Smrg   nir_store_var(b, v[1], nir_imm_int(b, 4), 1);
927b8e80941Smrg
928b8e80941Smrg   bool progress = nir_opt_dead_write_vars(b->shader);
929b8e80941Smrg   ASSERT_TRUE(progress);
930b8e80941Smrg
931b8e80941Smrg   EXPECT_EQ(3, count_intrinsics(nir_intrinsic_store_deref));
932b8e80941Smrg}
933b8e80941Smrg
934b8e80941SmrgTEST_F(nir_dead_write_vars_test, DISABLED_unrelated_barrier_in_two_blocks)
935b8e80941Smrg{
936b8e80941Smrg   nir_variable **v = create_many_int(nir_var_mem_ssbo, "v", 3);
937b8e80941Smrg   nir_variable *out = create_int(nir_var_shader_out, "out");
938b8e80941Smrg
939b8e80941Smrg   nir_store_var(b, out, nir_load_var(b, v[1]), 1);
940b8e80941Smrg   nir_store_var(b, v[0], nir_load_var(b, v[1]), 1);
941b8e80941Smrg
942b8e80941Smrg   /* Split into many blocks. */
943b8e80941Smrg   nir_pop_if(b, nir_push_if(b, nir_imm_int(b, 0)));
944b8e80941Smrg
945b8e80941Smrg   /* Emit vertex will ensure writes to output variables are considered used,
946b8e80941Smrg    * but should not affect other types of variables. */
947b8e80941Smrg
948b8e80941Smrg   nir_builder_instr_insert(b, &nir_intrinsic_instr_create(b->shader, nir_intrinsic_emit_vertex)->instr);
949b8e80941Smrg
950b8e80941Smrg   nir_store_var(b, out, nir_load_var(b, v[2]), 1);
951b8e80941Smrg   nir_store_var(b, v[0], nir_load_var(b, v[2]), 1);
952b8e80941Smrg
953b8e80941Smrg   bool progress = nir_opt_dead_write_vars(b->shader);
954b8e80941Smrg   ASSERT_TRUE(progress);
955b8e80941Smrg
956b8e80941Smrg   /* Verify the first write to v[0] was removed. */
957b8e80941Smrg   EXPECT_EQ(3, count_intrinsics(nir_intrinsic_store_deref));
958b8e80941Smrg
959b8e80941Smrg   nir_intrinsic_instr *first_store = get_intrinsic(nir_intrinsic_store_deref, 0);
960b8e80941Smrg   EXPECT_EQ(nir_intrinsic_get_var(first_store, 0), out);
961b8e80941Smrg
962b8e80941Smrg   nir_intrinsic_instr *second_store = get_intrinsic(nir_intrinsic_store_deref, 1);
963b8e80941Smrg   EXPECT_EQ(nir_intrinsic_get_var(second_store, 0), out);
964b8e80941Smrg
965b8e80941Smrg   nir_intrinsic_instr *third_store = get_intrinsic(nir_intrinsic_store_deref, 2);
966b8e80941Smrg   EXPECT_EQ(nir_intrinsic_get_var(third_store, 0), v[0]);
967b8e80941Smrg}
968b8e80941Smrg
969b8e80941SmrgTEST_F(nir_combine_stores_test, non_overlapping_stores)
970b8e80941Smrg{
971b8e80941Smrg   nir_variable **v = create_many_ivec4(nir_var_mem_ssbo, "v", 4);
972b8e80941Smrg   nir_variable *out = create_ivec4(nir_var_shader_out, "out");
973b8e80941Smrg
974b8e80941Smrg   for (int i = 0; i < 4; i++)
975b8e80941Smrg      nir_store_var(b, out, nir_load_var(b, v[i]), 1 << i);
976b8e80941Smrg
977b8e80941Smrg   nir_validate_shader(b->shader, NULL);
978b8e80941Smrg
979b8e80941Smrg   bool progress = nir_opt_combine_stores(b->shader, nir_var_shader_out);
980b8e80941Smrg   ASSERT_TRUE(progress);
981b8e80941Smrg
982b8e80941Smrg   nir_validate_shader(b->shader, NULL);
983b8e80941Smrg
984b8e80941Smrg   /* Clean up to verify from where the values in combined store are coming. */
985b8e80941Smrg   nir_copy_prop(b->shader);
986b8e80941Smrg   nir_opt_dce(b->shader);
987b8e80941Smrg
988b8e80941Smrg   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 1);
989b8e80941Smrg   nir_intrinsic_instr *combined = get_intrinsic(nir_intrinsic_store_deref, 0);
990b8e80941Smrg   ASSERT_EQ(nir_intrinsic_write_mask(combined), 0xf);
991b8e80941Smrg   ASSERT_EQ(nir_intrinsic_get_var(combined, 0), out);
992b8e80941Smrg
993b8e80941Smrg   nir_alu_instr *vec = nir_src_as_alu_instr(combined->src[1]);
994b8e80941Smrg   ASSERT_TRUE(vec);
995b8e80941Smrg   for (int i = 0; i < 4; i++) {
996b8e80941Smrg      nir_intrinsic_instr *load = nir_src_as_intrinsic(vec->src[i].src);
997b8e80941Smrg      ASSERT_EQ(load->intrinsic, nir_intrinsic_load_deref);
998b8e80941Smrg      ASSERT_EQ(nir_intrinsic_get_var(load, 0), v[i])
999b8e80941Smrg         << "Source value for component " << i << " of store is wrong";
1000b8e80941Smrg      ASSERT_EQ(vec->src[i].swizzle[0], i)
1001b8e80941Smrg         << "Source component for component " << i << " of store is wrong";
1002b8e80941Smrg   }
1003b8e80941Smrg}
1004b8e80941Smrg
1005b8e80941SmrgTEST_F(nir_combine_stores_test, overlapping_stores)
1006b8e80941Smrg{
1007b8e80941Smrg   nir_variable **v = create_many_ivec4(nir_var_mem_ssbo, "v", 3);
1008b8e80941Smrg   nir_variable *out = create_ivec4(nir_var_shader_out, "out");
1009b8e80941Smrg
1010b8e80941Smrg   /* Make stores with xy, yz and zw masks. */
1011b8e80941Smrg   for (int i = 0; i < 3; i++) {
1012b8e80941Smrg      nir_component_mask_t mask = (1 << i) | (1 << (i + 1));
1013b8e80941Smrg      nir_store_var(b, out, nir_load_var(b, v[i]), mask);
1014b8e80941Smrg   }
1015b8e80941Smrg
1016b8e80941Smrg   nir_validate_shader(b->shader, NULL);
1017b8e80941Smrg
1018b8e80941Smrg   bool progress = nir_opt_combine_stores(b->shader, nir_var_shader_out);
1019b8e80941Smrg   ASSERT_TRUE(progress);
1020b8e80941Smrg
1021b8e80941Smrg   nir_validate_shader(b->shader, NULL);
1022b8e80941Smrg
1023b8e80941Smrg   /* Clean up to verify from where the values in combined store are coming. */
1024b8e80941Smrg   nir_copy_prop(b->shader);
1025b8e80941Smrg   nir_opt_dce(b->shader);
1026b8e80941Smrg
1027b8e80941Smrg   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 1);
1028b8e80941Smrg   nir_intrinsic_instr *combined = get_intrinsic(nir_intrinsic_store_deref, 0);
1029b8e80941Smrg   ASSERT_EQ(nir_intrinsic_write_mask(combined), 0xf);
1030b8e80941Smrg   ASSERT_EQ(nir_intrinsic_get_var(combined, 0), out);
1031b8e80941Smrg
1032b8e80941Smrg   nir_alu_instr *vec = nir_src_as_alu_instr(combined->src[1]);
1033b8e80941Smrg   ASSERT_TRUE(vec);
1034b8e80941Smrg
1035b8e80941Smrg   /* Component x comes from v[0]. */
1036b8e80941Smrg   nir_intrinsic_instr *load_for_x = nir_src_as_intrinsic(vec->src[0].src);
1037b8e80941Smrg   ASSERT_EQ(nir_intrinsic_get_var(load_for_x, 0), v[0]);
1038b8e80941Smrg   ASSERT_EQ(vec->src[0].swizzle[0], 0);
1039b8e80941Smrg
1040b8e80941Smrg   /* Component y comes from v[1]. */
1041b8e80941Smrg   nir_intrinsic_instr *load_for_y = nir_src_as_intrinsic(vec->src[1].src);
1042b8e80941Smrg   ASSERT_EQ(nir_intrinsic_get_var(load_for_y, 0), v[1]);
1043b8e80941Smrg   ASSERT_EQ(vec->src[1].swizzle[0], 1);
1044b8e80941Smrg
1045b8e80941Smrg   /* Components z and w come from v[2]. */
1046b8e80941Smrg   nir_intrinsic_instr *load_for_z = nir_src_as_intrinsic(vec->src[2].src);
1047b8e80941Smrg   nir_intrinsic_instr *load_for_w = nir_src_as_intrinsic(vec->src[3].src);
1048b8e80941Smrg   ASSERT_EQ(load_for_z, load_for_w);
1049b8e80941Smrg   ASSERT_EQ(nir_intrinsic_get_var(load_for_z, 0), v[2]);
1050b8e80941Smrg   ASSERT_EQ(vec->src[2].swizzle[0], 2);
1051b8e80941Smrg   ASSERT_EQ(vec->src[3].swizzle[0], 3);
1052b8e80941Smrg}
1053b8e80941Smrg
1054b8e80941SmrgTEST_F(nir_combine_stores_test, direct_array_derefs)
1055b8e80941Smrg{
1056b8e80941Smrg   nir_variable **v = create_many_ivec4(nir_var_mem_ssbo, "vec", 2);
1057b8e80941Smrg   nir_variable **s = create_many_int(nir_var_mem_ssbo, "scalar", 2);
1058b8e80941Smrg   nir_variable *out = create_ivec4(nir_var_mem_ssbo, "out");
1059b8e80941Smrg
1060b8e80941Smrg   nir_deref_instr *out_deref = nir_build_deref_var(b, out);
1061b8e80941Smrg
1062b8e80941Smrg   /* Store to vector with mask x. */
1063b8e80941Smrg   nir_store_deref(b, out_deref, nir_load_var(b, v[0]),
1064b8e80941Smrg                   1 << 0);
1065b8e80941Smrg
1066b8e80941Smrg   /* Store to vector with mask yz. */
1067b8e80941Smrg   nir_store_deref(b, out_deref, nir_load_var(b, v[1]),
1068b8e80941Smrg                   (1 << 2) | (1 << 1));
1069b8e80941Smrg
1070b8e80941Smrg   /* Store to vector[2], overlapping with previous store. */
1071b8e80941Smrg   nir_store_deref(b,
1072b8e80941Smrg                   nir_build_deref_array_imm(b, out_deref, 2),
1073b8e80941Smrg                   nir_load_var(b, s[0]),
1074b8e80941Smrg                   1 << 0);
1075b8e80941Smrg
1076b8e80941Smrg   /* Store to vector[3], no overlap. */
1077b8e80941Smrg   nir_store_deref(b,
1078b8e80941Smrg                   nir_build_deref_array_imm(b, out_deref, 3),
1079b8e80941Smrg                   nir_load_var(b, s[1]),
1080b8e80941Smrg                   1 << 0);
1081b8e80941Smrg
1082b8e80941Smrg   nir_validate_shader(b->shader, NULL);
1083b8e80941Smrg
1084b8e80941Smrg   bool progress = nir_opt_combine_stores(b->shader, nir_var_mem_ssbo);
1085b8e80941Smrg   ASSERT_TRUE(progress);
1086b8e80941Smrg
1087b8e80941Smrg   nir_validate_shader(b->shader, NULL);
1088b8e80941Smrg
1089b8e80941Smrg   /* Clean up to verify from where the values in combined store are coming. */
1090b8e80941Smrg   nir_copy_prop(b->shader);
1091b8e80941Smrg   nir_opt_dce(b->shader);
1092b8e80941Smrg
1093b8e80941Smrg   ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 1);
1094b8e80941Smrg   nir_intrinsic_instr *combined = get_intrinsic(nir_intrinsic_store_deref, 0);
1095b8e80941Smrg   ASSERT_EQ(nir_intrinsic_write_mask(combined), 0xf);
1096b8e80941Smrg   ASSERT_EQ(nir_intrinsic_get_var(combined, 0), out);
1097b8e80941Smrg
1098b8e80941Smrg   nir_alu_instr *vec = nir_src_as_alu_instr(combined->src[1]);
1099b8e80941Smrg   ASSERT_TRUE(vec);
1100b8e80941Smrg
1101b8e80941Smrg   /* Component x comes from v[0]. */
1102b8e80941Smrg   nir_intrinsic_instr *load_for_x = nir_src_as_intrinsic(vec->src[0].src);
1103b8e80941Smrg   ASSERT_EQ(nir_intrinsic_get_var(load_for_x, 0), v[0]);
1104b8e80941Smrg   ASSERT_EQ(vec->src[0].swizzle[0], 0);
1105b8e80941Smrg
1106b8e80941Smrg   /* Component y comes from v[1]. */
1107b8e80941Smrg   nir_intrinsic_instr *load_for_y = nir_src_as_intrinsic(vec->src[1].src);
1108b8e80941Smrg   ASSERT_EQ(nir_intrinsic_get_var(load_for_y, 0), v[1]);
1109b8e80941Smrg   ASSERT_EQ(vec->src[1].swizzle[0], 1);
1110b8e80941Smrg
1111b8e80941Smrg   /* Components z comes from s[0]. */
1112b8e80941Smrg   nir_intrinsic_instr *load_for_z = nir_src_as_intrinsic(vec->src[2].src);
1113b8e80941Smrg   ASSERT_EQ(nir_intrinsic_get_var(load_for_z, 0), s[0]);
1114b8e80941Smrg   ASSERT_EQ(vec->src[2].swizzle[0], 0);
1115b8e80941Smrg
1116b8e80941Smrg   /* Component w comes from s[1]. */
1117b8e80941Smrg   nir_intrinsic_instr *load_for_w = nir_src_as_intrinsic(vec->src[3].src);
1118b8e80941Smrg   ASSERT_EQ(nir_intrinsic_get_var(load_for_w, 0), s[1]);
1119b8e80941Smrg   ASSERT_EQ(vec->src[3].swizzle[0], 0);
1120b8e80941Smrg}
1121