1b8e80941Smrg/*
2b8e80941Smrg * Copyright © 2016 Red Hat.
3b8e80941Smrg * Copyright © 2016 Bas Nieuwenhuizen
4b8e80941Smrg *
5b8e80941Smrg * based in part on anv driver which is:
6b8e80941Smrg * Copyright © 2015 Intel Corporation
7b8e80941Smrg *
8b8e80941Smrg * Permission is hereby granted, free of charge, to any person obtaining a
9b8e80941Smrg * copy of this software and associated documentation files (the "Software"),
10b8e80941Smrg * to deal in the Software without restriction, including without limitation
11b8e80941Smrg * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12b8e80941Smrg * and/or sell copies of the Software, and to permit persons to whom the
13b8e80941Smrg * Software is furnished to do so, subject to the following conditions:
14b8e80941Smrg *
15b8e80941Smrg * The above copyright notice and this permission notice (including the next
16b8e80941Smrg * paragraph) shall be included in all copies or substantial portions of the
17b8e80941Smrg * Software.
18b8e80941Smrg *
19b8e80941Smrg * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20b8e80941Smrg * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21b8e80941Smrg * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
22b8e80941Smrg * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23b8e80941Smrg * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24b8e80941Smrg * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25b8e80941Smrg * IN THE SOFTWARE.
26b8e80941Smrg */
27b8e80941Smrg
28b8e80941Smrg#include "util/mesa-sha1.h"
29b8e80941Smrg#include "util/u_atomic.h"
30b8e80941Smrg#include "radv_debug.h"
31b8e80941Smrg#include "radv_private.h"
32b8e80941Smrg#include "radv_shader.h"
33b8e80941Smrg#include "radv_shader_helper.h"
34b8e80941Smrg#include "nir/nir.h"
35b8e80941Smrg#include "nir/nir_builder.h"
36b8e80941Smrg#include "spirv/nir_spirv.h"
37b8e80941Smrg
38b8e80941Smrg#include <llvm-c/Core.h>
39b8e80941Smrg#include <llvm-c/TargetMachine.h>
40b8e80941Smrg#include <llvm-c/Support.h>
41b8e80941Smrg
42b8e80941Smrg#include "sid.h"
43b8e80941Smrg#include "gfx9d.h"
44b8e80941Smrg#include "ac_binary.h"
45b8e80941Smrg#include "ac_llvm_util.h"
46b8e80941Smrg#include "ac_nir_to_llvm.h"
47b8e80941Smrg#include "vk_format.h"
48b8e80941Smrg#include "util/debug.h"
49b8e80941Smrg#include "ac_exp_param.h"
50b8e80941Smrg
51b8e80941Smrg#include "util/string_buffer.h"
52b8e80941Smrg
53b8e80941Smrgstatic const struct nir_shader_compiler_options nir_options = {
54b8e80941Smrg	.vertex_id_zero_based = true,
55b8e80941Smrg	.lower_scmp = true,
56b8e80941Smrg	.lower_flrp16 = true,
57b8e80941Smrg	.lower_flrp32 = true,
58b8e80941Smrg	.lower_flrp64 = true,
59b8e80941Smrg	.lower_device_index_to_zero = true,
60b8e80941Smrg	.lower_fsat = true,
61b8e80941Smrg	.lower_fdiv = true,
62b8e80941Smrg	.lower_sub = true,
63b8e80941Smrg	.lower_pack_snorm_2x16 = true,
64b8e80941Smrg	.lower_pack_snorm_4x8 = true,
65b8e80941Smrg	.lower_pack_unorm_2x16 = true,
66b8e80941Smrg	.lower_pack_unorm_4x8 = true,
67b8e80941Smrg	.lower_unpack_snorm_2x16 = true,
68b8e80941Smrg	.lower_unpack_snorm_4x8 = true,
69b8e80941Smrg	.lower_unpack_unorm_2x16 = true,
70b8e80941Smrg	.lower_unpack_unorm_4x8 = true,
71b8e80941Smrg	.lower_extract_byte = true,
72b8e80941Smrg	.lower_extract_word = true,
73b8e80941Smrg	.lower_ffma = true,
74b8e80941Smrg	.lower_fpow = true,
75b8e80941Smrg	.lower_mul_2x32_64 = true,
76b8e80941Smrg	.max_unroll_iterations = 32
77b8e80941Smrg};
78b8e80941Smrg
79b8e80941SmrgVkResult radv_CreateShaderModule(
80b8e80941Smrg	VkDevice                                    _device,
81b8e80941Smrg	const VkShaderModuleCreateInfo*             pCreateInfo,
82b8e80941Smrg	const VkAllocationCallbacks*                pAllocator,
83b8e80941Smrg	VkShaderModule*                             pShaderModule)
84b8e80941Smrg{
85b8e80941Smrg	RADV_FROM_HANDLE(radv_device, device, _device);
86b8e80941Smrg	struct radv_shader_module *module;
87b8e80941Smrg
88b8e80941Smrg	assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
89b8e80941Smrg	assert(pCreateInfo->flags == 0);
90b8e80941Smrg
91b8e80941Smrg	module = vk_alloc2(&device->alloc, pAllocator,
92b8e80941Smrg			     sizeof(*module) + pCreateInfo->codeSize, 8,
93b8e80941Smrg			     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
94b8e80941Smrg	if (module == NULL)
95b8e80941Smrg		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
96b8e80941Smrg
97b8e80941Smrg	module->nir = NULL;
98b8e80941Smrg	module->size = pCreateInfo->codeSize;
99b8e80941Smrg	memcpy(module->data, pCreateInfo->pCode, module->size);
100b8e80941Smrg
101b8e80941Smrg	_mesa_sha1_compute(module->data, module->size, module->sha1);
102b8e80941Smrg
103b8e80941Smrg	*pShaderModule = radv_shader_module_to_handle(module);
104b8e80941Smrg
105b8e80941Smrg	return VK_SUCCESS;
106b8e80941Smrg}
107b8e80941Smrg
108b8e80941Smrgvoid radv_DestroyShaderModule(
109b8e80941Smrg	VkDevice                                    _device,
110b8e80941Smrg	VkShaderModule                              _module,
111b8e80941Smrg	const VkAllocationCallbacks*                pAllocator)
112b8e80941Smrg{
113b8e80941Smrg	RADV_FROM_HANDLE(radv_device, device, _device);
114b8e80941Smrg	RADV_FROM_HANDLE(radv_shader_module, module, _module);
115b8e80941Smrg
116b8e80941Smrg	if (!module)
117b8e80941Smrg		return;
118b8e80941Smrg
119b8e80941Smrg	vk_free2(&device->alloc, pAllocator, module);
120b8e80941Smrg}
121b8e80941Smrg
122b8e80941Smrgvoid
123b8e80941Smrgradv_optimize_nir(struct nir_shader *shader, bool optimize_conservatively,
124b8e80941Smrg                  bool allow_copies)
125b8e80941Smrg{
126b8e80941Smrg        bool progress;
127b8e80941Smrg
128b8e80941Smrg        do {
129b8e80941Smrg                progress = false;
130b8e80941Smrg
131b8e80941Smrg		NIR_PASS(progress, shader, nir_split_array_vars, nir_var_function_temp);
132b8e80941Smrg		NIR_PASS(progress, shader, nir_shrink_vec_array_vars, nir_var_function_temp);
133b8e80941Smrg
134b8e80941Smrg                NIR_PASS_V(shader, nir_lower_vars_to_ssa);
135b8e80941Smrg		NIR_PASS_V(shader, nir_lower_pack);
136b8e80941Smrg
137b8e80941Smrg		if (allow_copies) {
138b8e80941Smrg			/* Only run this pass in the first call to
139b8e80941Smrg			 * radv_optimize_nir.  Later calls assume that we've
140b8e80941Smrg			 * lowered away any copy_deref instructions and we
141b8e80941Smrg			 *  don't want to introduce any more.
142b8e80941Smrg			*/
143b8e80941Smrg			NIR_PASS(progress, shader, nir_opt_find_array_copies);
144b8e80941Smrg		}
145b8e80941Smrg
146b8e80941Smrg		NIR_PASS(progress, shader, nir_opt_copy_prop_vars);
147b8e80941Smrg		NIR_PASS(progress, shader, nir_opt_dead_write_vars);
148b8e80941Smrg
149b8e80941Smrg                NIR_PASS_V(shader, nir_lower_alu_to_scalar);
150b8e80941Smrg                NIR_PASS_V(shader, nir_lower_phis_to_scalar);
151b8e80941Smrg
152b8e80941Smrg                NIR_PASS(progress, shader, nir_copy_prop);
153b8e80941Smrg                NIR_PASS(progress, shader, nir_opt_remove_phis);
154b8e80941Smrg                NIR_PASS(progress, shader, nir_opt_dce);
155b8e80941Smrg                if (nir_opt_trivial_continues(shader)) {
156b8e80941Smrg                        progress = true;
157b8e80941Smrg                        NIR_PASS(progress, shader, nir_copy_prop);
158b8e80941Smrg			NIR_PASS(progress, shader, nir_opt_remove_phis);
159b8e80941Smrg                        NIR_PASS(progress, shader, nir_opt_dce);
160b8e80941Smrg                }
161b8e80941Smrg                NIR_PASS(progress, shader, nir_opt_if, true);
162b8e80941Smrg                NIR_PASS(progress, shader, nir_opt_dead_cf);
163b8e80941Smrg                NIR_PASS(progress, shader, nir_opt_cse);
164b8e80941Smrg                NIR_PASS(progress, shader, nir_opt_peephole_select, 8, true, true);
165b8e80941Smrg                NIR_PASS(progress, shader, nir_opt_algebraic);
166b8e80941Smrg                NIR_PASS(progress, shader, nir_opt_constant_folding);
167b8e80941Smrg                NIR_PASS(progress, shader, nir_opt_undef);
168b8e80941Smrg                NIR_PASS(progress, shader, nir_opt_conditional_discard);
169b8e80941Smrg                if (shader->options->max_unroll_iterations) {
170b8e80941Smrg                        NIR_PASS(progress, shader, nir_opt_loop_unroll, 0);
171b8e80941Smrg                }
172b8e80941Smrg        } while (progress && !optimize_conservatively);
173b8e80941Smrg
174b8e80941Smrg        NIR_PASS(progress, shader, nir_opt_shrink_load);
175b8e80941Smrg        NIR_PASS(progress, shader, nir_opt_move_load_ubo);
176b8e80941Smrg}
177b8e80941Smrg
178b8e80941Smrgnir_shader *
179b8e80941Smrgradv_shader_compile_to_nir(struct radv_device *device,
180b8e80941Smrg			   struct radv_shader_module *module,
181b8e80941Smrg			   const char *entrypoint_name,
182b8e80941Smrg			   gl_shader_stage stage,
183b8e80941Smrg			   const VkSpecializationInfo *spec_info,
184b8e80941Smrg			   const VkPipelineCreateFlags flags,
185b8e80941Smrg			   const struct radv_pipeline_layout *layout)
186b8e80941Smrg{
187b8e80941Smrg	nir_shader *nir;
188b8e80941Smrg	nir_function *entry_point;
189b8e80941Smrg	if (module->nir) {
190b8e80941Smrg		/* Some things such as our meta clear/blit code will give us a NIR
191b8e80941Smrg		 * shader directly.  In that case, we just ignore the SPIR-V entirely
192b8e80941Smrg		 * and just use the NIR shader */
193b8e80941Smrg		nir = module->nir;
194b8e80941Smrg		nir->options = &nir_options;
195b8e80941Smrg		nir_validate_shader(nir, "in internal shader");
196b8e80941Smrg
197b8e80941Smrg		assert(exec_list_length(&nir->functions) == 1);
198b8e80941Smrg		struct exec_node *node = exec_list_get_head(&nir->functions);
199b8e80941Smrg		entry_point = exec_node_data(nir_function, node, node);
200b8e80941Smrg	} else {
201b8e80941Smrg		uint32_t *spirv = (uint32_t *) module->data;
202b8e80941Smrg		assert(module->size % 4 == 0);
203b8e80941Smrg
204b8e80941Smrg		if (device->instance->debug_flags & RADV_DEBUG_DUMP_SPIRV)
205b8e80941Smrg			radv_print_spirv(spirv, module->size, stderr);
206b8e80941Smrg
207b8e80941Smrg		uint32_t num_spec_entries = 0;
208b8e80941Smrg		struct nir_spirv_specialization *spec_entries = NULL;
209b8e80941Smrg		if (spec_info && spec_info->mapEntryCount > 0) {
210b8e80941Smrg			num_spec_entries = spec_info->mapEntryCount;
211b8e80941Smrg			spec_entries = malloc(num_spec_entries * sizeof(*spec_entries));
212b8e80941Smrg			for (uint32_t i = 0; i < num_spec_entries; i++) {
213b8e80941Smrg				VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
214b8e80941Smrg				const void *data = spec_info->pData + entry.offset;
215b8e80941Smrg				assert(data + entry.size <= spec_info->pData + spec_info->dataSize);
216b8e80941Smrg
217b8e80941Smrg				spec_entries[i].id = spec_info->pMapEntries[i].constantID;
218b8e80941Smrg				if (spec_info->dataSize == 8)
219b8e80941Smrg					spec_entries[i].data64 = *(const uint64_t *)data;
220b8e80941Smrg				else
221b8e80941Smrg					spec_entries[i].data32 = *(const uint32_t *)data;
222b8e80941Smrg			}
223b8e80941Smrg		}
224b8e80941Smrg		const struct spirv_to_nir_options spirv_options = {
225b8e80941Smrg			.lower_ubo_ssbo_access_to_offsets = true,
226b8e80941Smrg			.caps = {
227b8e80941Smrg				.derivative_group = true,
228b8e80941Smrg				.descriptor_array_dynamic_indexing = true,
229b8e80941Smrg				.descriptor_array_non_uniform_indexing = true,
230b8e80941Smrg				.descriptor_indexing = true,
231b8e80941Smrg				.device_group = true,
232b8e80941Smrg				.draw_parameters = true,
233b8e80941Smrg				.float16 = true,
234b8e80941Smrg				.float64 = true,
235b8e80941Smrg				.gcn_shader = true,
236b8e80941Smrg				.geometry_streams = true,
237b8e80941Smrg				.image_read_without_format = true,
238b8e80941Smrg				.image_write_without_format = true,
239b8e80941Smrg				.int8 = true,
240b8e80941Smrg				.int16 = true,
241b8e80941Smrg				.int64 = true,
242b8e80941Smrg				.int64_atomics = true,
243b8e80941Smrg				.multiview = true,
244b8e80941Smrg				.physical_storage_buffer_address = true,
245b8e80941Smrg				.runtime_descriptor_array = true,
246b8e80941Smrg				.shader_viewport_index_layer = true,
247b8e80941Smrg				.stencil_export = true,
248b8e80941Smrg				.storage_8bit = true,
249b8e80941Smrg				.storage_16bit = true,
250b8e80941Smrg				.storage_image_ms = true,
251b8e80941Smrg				.subgroup_arithmetic = true,
252b8e80941Smrg				.subgroup_ballot = true,
253b8e80941Smrg				.subgroup_basic = true,
254b8e80941Smrg				.subgroup_quad = true,
255b8e80941Smrg				.subgroup_shuffle = true,
256b8e80941Smrg				.subgroup_vote = true,
257b8e80941Smrg				.tessellation = true,
258b8e80941Smrg				.transform_feedback = true,
259b8e80941Smrg				.trinary_minmax = true,
260b8e80941Smrg				.variable_pointers = true,
261b8e80941Smrg			},
262b8e80941Smrg			.ubo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT, 2),
263b8e80941Smrg			.ssbo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT, 2),
264b8e80941Smrg			.phys_ssbo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT64, 1),
265b8e80941Smrg			.push_const_ptr_type = glsl_uint_type(),
266b8e80941Smrg			.shared_ptr_type = glsl_uint_type(),
267b8e80941Smrg		};
268b8e80941Smrg		entry_point = spirv_to_nir(spirv, module->size / 4,
269b8e80941Smrg					   spec_entries, num_spec_entries,
270b8e80941Smrg					   stage, entrypoint_name,
271b8e80941Smrg					   &spirv_options, &nir_options);
272b8e80941Smrg		nir = entry_point->shader;
273b8e80941Smrg		assert(nir->info.stage == stage);
274b8e80941Smrg		nir_validate_shader(nir, "after spirv_to_nir");
275b8e80941Smrg
276b8e80941Smrg		free(spec_entries);
277b8e80941Smrg
278b8e80941Smrg		/* We have to lower away local constant initializers right before we
279b8e80941Smrg		 * inline functions.  That way they get properly initialized at the top
280b8e80941Smrg		 * of the function and not at the top of its caller.
281b8e80941Smrg		 */
282b8e80941Smrg		NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_function_temp);
283b8e80941Smrg		NIR_PASS_V(nir, nir_lower_returns);
284b8e80941Smrg		NIR_PASS_V(nir, nir_inline_functions);
285b8e80941Smrg		NIR_PASS_V(nir, nir_opt_deref);
286b8e80941Smrg
287b8e80941Smrg		/* Pick off the single entrypoint that we want */
288b8e80941Smrg		foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
289b8e80941Smrg			if (func != entry_point)
290b8e80941Smrg				exec_node_remove(&func->node);
291b8e80941Smrg		}
292b8e80941Smrg		assert(exec_list_length(&nir->functions) == 1);
293b8e80941Smrg		entry_point->name = ralloc_strdup(entry_point, "main");
294b8e80941Smrg
295b8e80941Smrg		/* Make sure we lower constant initializers on output variables so that
296b8e80941Smrg		 * nir_remove_dead_variables below sees the corresponding stores
297b8e80941Smrg		 */
298b8e80941Smrg		NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_shader_out);
299b8e80941Smrg
300b8e80941Smrg		/* Now that we've deleted all but the main function, we can go ahead and
301b8e80941Smrg		 * lower the rest of the constant initializers.
302b8e80941Smrg		 */
303b8e80941Smrg		NIR_PASS_V(nir, nir_lower_constant_initializers, ~0);
304b8e80941Smrg
305b8e80941Smrg		/* Split member structs.  We do this before lower_io_to_temporaries so that
306b8e80941Smrg		 * it doesn't lower system values to temporaries by accident.
307b8e80941Smrg		 */
308b8e80941Smrg		NIR_PASS_V(nir, nir_split_var_copies);
309b8e80941Smrg		NIR_PASS_V(nir, nir_split_per_member_structs);
310b8e80941Smrg
311b8e80941Smrg		NIR_PASS_V(nir, nir_remove_dead_variables,
312b8e80941Smrg		           nir_var_shader_in | nir_var_shader_out | nir_var_system_value);
313b8e80941Smrg
314b8e80941Smrg		NIR_PASS_V(nir, nir_propagate_invariant);
315b8e80941Smrg
316b8e80941Smrg		NIR_PASS_V(nir, nir_lower_system_values);
317b8e80941Smrg		NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);
318b8e80941Smrg		NIR_PASS_V(nir, radv_nir_lower_ycbcr_textures, layout);
319b8e80941Smrg	}
320b8e80941Smrg
321b8e80941Smrg	/* Vulkan uses the separate-shader linking model */
322b8e80941Smrg	nir->info.separate_shader = true;
323b8e80941Smrg
324b8e80941Smrg	nir_shader_gather_info(nir, entry_point->impl);
325b8e80941Smrg
326b8e80941Smrg	static const nir_lower_tex_options tex_options = {
327b8e80941Smrg	  .lower_txp = ~0,
328b8e80941Smrg	  .lower_tg4_offsets = true,
329b8e80941Smrg	};
330b8e80941Smrg
331b8e80941Smrg	nir_lower_tex(nir, &tex_options);
332b8e80941Smrg
333b8e80941Smrg	nir_lower_vars_to_ssa(nir);
334b8e80941Smrg
335b8e80941Smrg	if (nir->info.stage == MESA_SHADER_VERTEX ||
336b8e80941Smrg	    nir->info.stage == MESA_SHADER_GEOMETRY) {
337b8e80941Smrg		NIR_PASS_V(nir, nir_lower_io_to_temporaries,
338b8e80941Smrg			   nir_shader_get_entrypoint(nir), true, true);
339b8e80941Smrg	} else if (nir->info.stage == MESA_SHADER_TESS_EVAL||
340b8e80941Smrg		   nir->info.stage == MESA_SHADER_FRAGMENT) {
341b8e80941Smrg		NIR_PASS_V(nir, nir_lower_io_to_temporaries,
342b8e80941Smrg			   nir_shader_get_entrypoint(nir), true, false);
343b8e80941Smrg	}
344b8e80941Smrg
345b8e80941Smrg	nir_split_var_copies(nir);
346b8e80941Smrg
347b8e80941Smrg	nir_lower_global_vars_to_local(nir);
348b8e80941Smrg	nir_remove_dead_variables(nir, nir_var_function_temp);
349b8e80941Smrg	nir_lower_subgroups(nir, &(struct nir_lower_subgroups_options) {
350b8e80941Smrg			.subgroup_size = 64,
351b8e80941Smrg			.ballot_bit_size = 64,
352b8e80941Smrg			.lower_to_scalar = 1,
353b8e80941Smrg			.lower_subgroup_masks = 1,
354b8e80941Smrg			.lower_shuffle = 1,
355b8e80941Smrg			.lower_shuffle_to_32bit = 1,
356b8e80941Smrg			.lower_vote_eq_to_ballot = 1,
357b8e80941Smrg		});
358b8e80941Smrg
359b8e80941Smrg	nir_lower_load_const_to_scalar(nir);
360b8e80941Smrg
361b8e80941Smrg	if (!(flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT))
362b8e80941Smrg		radv_optimize_nir(nir, false, true);
363b8e80941Smrg
364b8e80941Smrg	/* We call nir_lower_var_copies() after the first radv_optimize_nir()
365b8e80941Smrg	 * to remove any copies introduced by nir_opt_find_array_copies().
366b8e80941Smrg	 */
367b8e80941Smrg	nir_lower_var_copies(nir);
368b8e80941Smrg
369b8e80941Smrg	/* Indirect lowering must be called after the radv_optimize_nir() loop
370b8e80941Smrg	 * has been called at least once. Otherwise indirect lowering can
371b8e80941Smrg	 * bloat the instruction count of the loop and cause it to be
372b8e80941Smrg	 * considered too large for unrolling.
373b8e80941Smrg	 */
374b8e80941Smrg	ac_lower_indirect_derefs(nir, device->physical_device->rad_info.chip_class);
375b8e80941Smrg	radv_optimize_nir(nir, flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT, false);
376b8e80941Smrg
377b8e80941Smrg	return nir;
378b8e80941Smrg}
379b8e80941Smrg
380b8e80941Smrgvoid *
381b8e80941Smrgradv_alloc_shader_memory(struct radv_device *device,
382b8e80941Smrg			 struct radv_shader_variant *shader)
383b8e80941Smrg{
384b8e80941Smrg	mtx_lock(&device->shader_slab_mutex);
385b8e80941Smrg	list_for_each_entry(struct radv_shader_slab, slab, &device->shader_slabs, slabs) {
386b8e80941Smrg		uint64_t offset = 0;
387b8e80941Smrg		list_for_each_entry(struct radv_shader_variant, s, &slab->shaders, slab_list) {
388b8e80941Smrg			if (s->bo_offset - offset >= shader->code_size) {
389b8e80941Smrg				shader->bo = slab->bo;
390b8e80941Smrg				shader->bo_offset = offset;
391b8e80941Smrg				list_addtail(&shader->slab_list, &s->slab_list);
392b8e80941Smrg				mtx_unlock(&device->shader_slab_mutex);
393b8e80941Smrg				return slab->ptr + offset;
394b8e80941Smrg			}
395b8e80941Smrg			offset = align_u64(s->bo_offset + s->code_size, 256);
396b8e80941Smrg		}
397b8e80941Smrg		if (slab->size - offset >= shader->code_size) {
398b8e80941Smrg			shader->bo = slab->bo;
399b8e80941Smrg			shader->bo_offset = offset;
400b8e80941Smrg			list_addtail(&shader->slab_list, &slab->shaders);
401b8e80941Smrg			mtx_unlock(&device->shader_slab_mutex);
402b8e80941Smrg			return slab->ptr + offset;
403b8e80941Smrg		}
404b8e80941Smrg	}
405b8e80941Smrg
406b8e80941Smrg	mtx_unlock(&device->shader_slab_mutex);
407b8e80941Smrg	struct radv_shader_slab *slab = calloc(1, sizeof(struct radv_shader_slab));
408b8e80941Smrg
409b8e80941Smrg	slab->size = 256 * 1024;
410b8e80941Smrg	slab->bo = device->ws->buffer_create(device->ws, slab->size, 256,
411b8e80941Smrg	                                     RADEON_DOMAIN_VRAM,
412b8e80941Smrg					     RADEON_FLAG_NO_INTERPROCESS_SHARING |
413b8e80941Smrg					     (device->physical_device->cpdma_prefetch_writes_memory ?
414b8e80941Smrg					             0 : RADEON_FLAG_READ_ONLY),
415b8e80941Smrg					     RADV_BO_PRIORITY_SHADER);
416b8e80941Smrg	slab->ptr = (char*)device->ws->buffer_map(slab->bo);
417b8e80941Smrg	list_inithead(&slab->shaders);
418b8e80941Smrg
419b8e80941Smrg	mtx_lock(&device->shader_slab_mutex);
420b8e80941Smrg	list_add(&slab->slabs, &device->shader_slabs);
421b8e80941Smrg
422b8e80941Smrg	shader->bo = slab->bo;
423b8e80941Smrg	shader->bo_offset = 0;
424b8e80941Smrg	list_add(&shader->slab_list, &slab->shaders);
425b8e80941Smrg	mtx_unlock(&device->shader_slab_mutex);
426b8e80941Smrg	return slab->ptr;
427b8e80941Smrg}
428b8e80941Smrg
429b8e80941Smrgvoid
430b8e80941Smrgradv_destroy_shader_slabs(struct radv_device *device)
431b8e80941Smrg{
432b8e80941Smrg	list_for_each_entry_safe(struct radv_shader_slab, slab, &device->shader_slabs, slabs) {
433b8e80941Smrg		device->ws->buffer_destroy(slab->bo);
434b8e80941Smrg		free(slab);
435b8e80941Smrg	}
436b8e80941Smrg	mtx_destroy(&device->shader_slab_mutex);
437b8e80941Smrg}
438b8e80941Smrg
439b8e80941Smrg/* For the UMR disassembler. */
440b8e80941Smrg#define DEBUGGER_END_OF_CODE_MARKER    0xbf9f0000 /* invalid instruction */
441b8e80941Smrg#define DEBUGGER_NUM_MARKERS           5
442b8e80941Smrg
443b8e80941Smrgstatic unsigned
444b8e80941Smrgradv_get_shader_binary_size(struct ac_shader_binary *binary)
445b8e80941Smrg{
446b8e80941Smrg	return binary->code_size + DEBUGGER_NUM_MARKERS * 4;
447b8e80941Smrg}
448b8e80941Smrg
449b8e80941Smrgstatic void
450b8e80941Smrgradv_fill_shader_variant(struct radv_device *device,
451b8e80941Smrg			 struct radv_shader_variant *variant,
452b8e80941Smrg			 struct ac_shader_binary *binary,
453b8e80941Smrg			 gl_shader_stage stage)
454b8e80941Smrg{
455b8e80941Smrg	bool scratch_enabled = variant->config.scratch_bytes_per_wave > 0;
456b8e80941Smrg	struct radv_shader_info *info = &variant->info.info;
457b8e80941Smrg	unsigned vgpr_comp_cnt = 0;
458b8e80941Smrg
459b8e80941Smrg	variant->code_size = radv_get_shader_binary_size(binary);
460b8e80941Smrg	variant->rsrc2 = S_00B12C_USER_SGPR(variant->info.num_user_sgprs) |
461b8e80941Smrg			 S_00B12C_USER_SGPR_MSB(variant->info.num_user_sgprs >> 5) |
462b8e80941Smrg			 S_00B12C_SCRATCH_EN(scratch_enabled) |
463b8e80941Smrg			 S_00B12C_SO_BASE0_EN(!!info->so.strides[0]) |
464b8e80941Smrg			 S_00B12C_SO_BASE1_EN(!!info->so.strides[1]) |
465b8e80941Smrg			 S_00B12C_SO_BASE2_EN(!!info->so.strides[2]) |
466b8e80941Smrg			 S_00B12C_SO_BASE3_EN(!!info->so.strides[3]) |
467b8e80941Smrg			 S_00B12C_SO_EN(!!info->so.num_outputs);
468b8e80941Smrg
469b8e80941Smrg	variant->rsrc1 = S_00B848_VGPRS((variant->config.num_vgprs - 1) / 4) |
470b8e80941Smrg		S_00B848_SGPRS((variant->config.num_sgprs - 1) / 8) |
471b8e80941Smrg		S_00B848_DX10_CLAMP(1) |
472b8e80941Smrg		S_00B848_FLOAT_MODE(variant->config.float_mode);
473b8e80941Smrg
474b8e80941Smrg	switch (stage) {
475b8e80941Smrg	case MESA_SHADER_TESS_EVAL:
476b8e80941Smrg		vgpr_comp_cnt = 3;
477b8e80941Smrg		variant->rsrc2 |= S_00B12C_OC_LDS_EN(1);
478b8e80941Smrg		break;
479b8e80941Smrg	case MESA_SHADER_TESS_CTRL:
480b8e80941Smrg		if (device->physical_device->rad_info.chip_class >= GFX9) {
481b8e80941Smrg			vgpr_comp_cnt = variant->info.vs.vgpr_comp_cnt;
482b8e80941Smrg		} else {
483b8e80941Smrg			variant->rsrc2 |= S_00B12C_OC_LDS_EN(1);
484b8e80941Smrg		}
485b8e80941Smrg		break;
486b8e80941Smrg	case MESA_SHADER_VERTEX:
487b8e80941Smrg	case MESA_SHADER_GEOMETRY:
488b8e80941Smrg		vgpr_comp_cnt = variant->info.vs.vgpr_comp_cnt;
489b8e80941Smrg		break;
490b8e80941Smrg	case MESA_SHADER_FRAGMENT:
491b8e80941Smrg		break;
492b8e80941Smrg	case MESA_SHADER_COMPUTE:
493b8e80941Smrg		variant->rsrc2 |=
494b8e80941Smrg			S_00B84C_TGID_X_EN(info->cs.uses_block_id[0]) |
495b8e80941Smrg			S_00B84C_TGID_Y_EN(info->cs.uses_block_id[1]) |
496b8e80941Smrg			S_00B84C_TGID_Z_EN(info->cs.uses_block_id[2]) |
497b8e80941Smrg			S_00B84C_TIDIG_COMP_CNT(info->cs.uses_thread_id[2] ? 2 :
498b8e80941Smrg						info->cs.uses_thread_id[1] ? 1 : 0) |
499b8e80941Smrg			S_00B84C_TG_SIZE_EN(info->cs.uses_local_invocation_idx) |
500b8e80941Smrg			S_00B84C_LDS_SIZE(variant->config.lds_size);
501b8e80941Smrg		break;
502b8e80941Smrg	default:
503b8e80941Smrg		unreachable("unsupported shader type");
504b8e80941Smrg		break;
505b8e80941Smrg	}
506b8e80941Smrg
507b8e80941Smrg	if (device->physical_device->rad_info.chip_class >= GFX9 &&
508b8e80941Smrg	    stage == MESA_SHADER_GEOMETRY) {
509b8e80941Smrg		unsigned es_type = variant->info.gs.es_type;
510b8e80941Smrg		unsigned gs_vgpr_comp_cnt, es_vgpr_comp_cnt;
511b8e80941Smrg
512b8e80941Smrg		if (es_type == MESA_SHADER_VERTEX) {
513b8e80941Smrg			es_vgpr_comp_cnt = variant->info.vs.vgpr_comp_cnt;
514b8e80941Smrg		} else if (es_type == MESA_SHADER_TESS_EVAL) {
515b8e80941Smrg			es_vgpr_comp_cnt = 3;
516b8e80941Smrg		} else {
517b8e80941Smrg			unreachable("invalid shader ES type");
518b8e80941Smrg		}
519b8e80941Smrg
520b8e80941Smrg		/* If offsets 4, 5 are used, GS_VGPR_COMP_CNT is ignored and
521b8e80941Smrg		 * VGPR[0:4] are always loaded.
522b8e80941Smrg		 */
523b8e80941Smrg		if (info->uses_invocation_id) {
524b8e80941Smrg			gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID. */
525b8e80941Smrg		} else if (info->uses_prim_id) {
526b8e80941Smrg			gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */
527b8e80941Smrg		} else if (variant->info.gs.vertices_in >= 3) {
528b8e80941Smrg			gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */
529b8e80941Smrg		} else {
530b8e80941Smrg			gs_vgpr_comp_cnt = 0; /* VGPR0 contains offsets 0, 1 */
531b8e80941Smrg		}
532b8e80941Smrg
533b8e80941Smrg		variant->rsrc1 |= S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt);
534b8e80941Smrg		variant->rsrc2 |= S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) |
535b8e80941Smrg		                  S_00B22C_OC_LDS_EN(es_type == MESA_SHADER_TESS_EVAL);
536b8e80941Smrg	} else if (device->physical_device->rad_info.chip_class >= GFX9 &&
537b8e80941Smrg		   stage == MESA_SHADER_TESS_CTRL) {
538b8e80941Smrg		variant->rsrc1 |= S_00B428_LS_VGPR_COMP_CNT(vgpr_comp_cnt);
539b8e80941Smrg	} else {
540b8e80941Smrg		variant->rsrc1 |= S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt);
541b8e80941Smrg	}
542b8e80941Smrg
543b8e80941Smrg	void *ptr = radv_alloc_shader_memory(device, variant);
544b8e80941Smrg	memcpy(ptr, binary->code, binary->code_size);
545b8e80941Smrg
546b8e80941Smrg	/* Add end-of-code markers for the UMR disassembler. */
547b8e80941Smrg       uint32_t *ptr32 = (uint32_t *)ptr + binary->code_size / 4;
548b8e80941Smrg       for (unsigned i = 0; i < DEBUGGER_NUM_MARKERS; i++)
549b8e80941Smrg		ptr32[i] = DEBUGGER_END_OF_CODE_MARKER;
550b8e80941Smrg
551b8e80941Smrg}
552b8e80941Smrg
553b8e80941Smrgstatic void radv_init_llvm_target()
554b8e80941Smrg{
555b8e80941Smrg	LLVMInitializeAMDGPUTargetInfo();
556b8e80941Smrg	LLVMInitializeAMDGPUTarget();
557b8e80941Smrg	LLVMInitializeAMDGPUTargetMC();
558b8e80941Smrg	LLVMInitializeAMDGPUAsmPrinter();
559b8e80941Smrg
560b8e80941Smrg	/* For inline assembly. */
561b8e80941Smrg	LLVMInitializeAMDGPUAsmParser();
562b8e80941Smrg
563b8e80941Smrg	/* Workaround for bug in llvm 4.0 that causes image intrinsics
564b8e80941Smrg	 * to disappear.
565b8e80941Smrg	 * https://reviews.llvm.org/D26348
566b8e80941Smrg	 *
567b8e80941Smrg	 * Workaround for bug in llvm that causes the GPU to hang in presence
568b8e80941Smrg	 * of nested loops because there is an exec mask issue. The proper
569b8e80941Smrg	 * solution is to fix LLVM but this might require a bunch of work.
570b8e80941Smrg	 * https://bugs.llvm.org/show_bug.cgi?id=37744
571b8e80941Smrg	 *
572b8e80941Smrg	 * "mesa" is the prefix for error messages.
573b8e80941Smrg	 */
574b8e80941Smrg	if (HAVE_LLVM >= 0x0800) {
575b8e80941Smrg		const char *argv[2] = { "mesa", "-simplifycfg-sink-common=false" };
576b8e80941Smrg		LLVMParseCommandLineOptions(2, argv, NULL);
577b8e80941Smrg
578b8e80941Smrg	} else {
579b8e80941Smrg		const char *argv[3] = { "mesa", "-simplifycfg-sink-common=false",
580b8e80941Smrg					"-amdgpu-skip-threshold=1" };
581b8e80941Smrg		LLVMParseCommandLineOptions(3, argv, NULL);
582b8e80941Smrg	}
583b8e80941Smrg}
584b8e80941Smrg
585b8e80941Smrgstatic once_flag radv_init_llvm_target_once_flag = ONCE_FLAG_INIT;
586b8e80941Smrg
587b8e80941Smrgstatic void radv_init_llvm_once(void)
588b8e80941Smrg{
589b8e80941Smrg	call_once(&radv_init_llvm_target_once_flag, radv_init_llvm_target);
590b8e80941Smrg}
591b8e80941Smrg
592b8e80941Smrgstatic struct radv_shader_variant *
593b8e80941Smrgshader_variant_create(struct radv_device *device,
594b8e80941Smrg		      struct radv_shader_module *module,
595b8e80941Smrg		      struct nir_shader * const *shaders,
596b8e80941Smrg		      int shader_count,
597b8e80941Smrg		      gl_shader_stage stage,
598b8e80941Smrg		      struct radv_nir_compiler_options *options,
599b8e80941Smrg		      bool gs_copy_shader,
600b8e80941Smrg		      void **code_out,
601b8e80941Smrg		      unsigned *code_size_out)
602b8e80941Smrg{
603b8e80941Smrg	enum radeon_family chip_family = device->physical_device->rad_info.family;
604b8e80941Smrg	enum ac_target_machine_options tm_options = 0;
605b8e80941Smrg	struct radv_shader_variant *variant;
606b8e80941Smrg	struct ac_shader_binary binary;
607b8e80941Smrg	struct ac_llvm_compiler ac_llvm;
608b8e80941Smrg	bool thread_compiler;
609b8e80941Smrg	variant = calloc(1, sizeof(struct radv_shader_variant));
610b8e80941Smrg	if (!variant)
611b8e80941Smrg		return NULL;
612b8e80941Smrg
613b8e80941Smrg	options->family = chip_family;
614b8e80941Smrg	options->chip_class = device->physical_device->rad_info.chip_class;
615b8e80941Smrg	options->dump_shader = radv_can_dump_shader(device, module, gs_copy_shader);
616b8e80941Smrg	options->dump_preoptir = options->dump_shader &&
617b8e80941Smrg				 device->instance->debug_flags & RADV_DEBUG_PREOPTIR;
618b8e80941Smrg	options->record_llvm_ir = device->keep_shader_info;
619b8e80941Smrg	options->check_ir = device->instance->debug_flags & RADV_DEBUG_CHECKIR;
620b8e80941Smrg	options->tess_offchip_block_dw_size = device->tess_offchip_block_dw_size;
621b8e80941Smrg	options->address32_hi = device->physical_device->rad_info.address32_hi;
622b8e80941Smrg
623b8e80941Smrg	if (options->supports_spill)
624b8e80941Smrg		tm_options |= AC_TM_SUPPORTS_SPILL;
625b8e80941Smrg	if (device->instance->perftest_flags & RADV_PERFTEST_SISCHED)
626b8e80941Smrg		tm_options |= AC_TM_SISCHED;
627b8e80941Smrg	if (options->check_ir)
628b8e80941Smrg		tm_options |= AC_TM_CHECK_IR;
629b8e80941Smrg	if (device->instance->debug_flags & RADV_DEBUG_NO_LOAD_STORE_OPT)
630b8e80941Smrg		tm_options |= AC_TM_NO_LOAD_STORE_OPT;
631b8e80941Smrg
632b8e80941Smrg	thread_compiler = !(device->instance->debug_flags & RADV_DEBUG_NOTHREADLLVM);
633b8e80941Smrg	radv_init_llvm_once();
634b8e80941Smrg	radv_init_llvm_compiler(&ac_llvm,
635b8e80941Smrg				thread_compiler,
636b8e80941Smrg				chip_family, tm_options);
637b8e80941Smrg	if (gs_copy_shader) {
638b8e80941Smrg		assert(shader_count == 1);
639b8e80941Smrg		radv_compile_gs_copy_shader(&ac_llvm, *shaders, &binary,
640b8e80941Smrg					    &variant->config, &variant->info,
641b8e80941Smrg					    options);
642b8e80941Smrg	} else {
643b8e80941Smrg		radv_compile_nir_shader(&ac_llvm, &binary, &variant->config,
644b8e80941Smrg					&variant->info, shaders, shader_count,
645b8e80941Smrg					options);
646b8e80941Smrg	}
647b8e80941Smrg
648b8e80941Smrg	radv_destroy_llvm_compiler(&ac_llvm, thread_compiler);
649b8e80941Smrg
650b8e80941Smrg	radv_fill_shader_variant(device, variant, &binary, stage);
651b8e80941Smrg
652b8e80941Smrg	if (code_out) {
653b8e80941Smrg		*code_out = binary.code;
654b8e80941Smrg		*code_size_out = binary.code_size;
655b8e80941Smrg	} else
656b8e80941Smrg		free(binary.code);
657b8e80941Smrg	free(binary.config);
658b8e80941Smrg	free(binary.rodata);
659b8e80941Smrg	free(binary.global_symbol_offsets);
660b8e80941Smrg	free(binary.relocs);
661b8e80941Smrg	variant->ref_count = 1;
662b8e80941Smrg
663b8e80941Smrg	if (device->keep_shader_info) {
664b8e80941Smrg		variant->disasm_string = binary.disasm_string;
665b8e80941Smrg		variant->llvm_ir_string = binary.llvm_ir_string;
666b8e80941Smrg		if (!gs_copy_shader && !module->nir) {
667b8e80941Smrg			variant->nir = *shaders;
668b8e80941Smrg			variant->spirv = (uint32_t *)module->data;
669b8e80941Smrg			variant->spirv_size = module->size;
670b8e80941Smrg		}
671b8e80941Smrg	} else {
672b8e80941Smrg		free(binary.disasm_string);
673b8e80941Smrg	}
674b8e80941Smrg
675b8e80941Smrg	return variant;
676b8e80941Smrg}
677b8e80941Smrg
678b8e80941Smrgstruct radv_shader_variant *
679b8e80941Smrgradv_shader_variant_create(struct radv_device *device,
680b8e80941Smrg			   struct radv_shader_module *module,
681b8e80941Smrg			   struct nir_shader *const *shaders,
682b8e80941Smrg			   int shader_count,
683b8e80941Smrg			   struct radv_pipeline_layout *layout,
684b8e80941Smrg			   const struct radv_shader_variant_key *key,
685b8e80941Smrg			   void **code_out,
686b8e80941Smrg			   unsigned *code_size_out)
687b8e80941Smrg{
688b8e80941Smrg	struct radv_nir_compiler_options options = {0};
689b8e80941Smrg
690b8e80941Smrg	options.layout = layout;
691b8e80941Smrg	if (key)
692b8e80941Smrg		options.key = *key;
693b8e80941Smrg
694b8e80941Smrg	options.unsafe_math = !!(device->instance->debug_flags & RADV_DEBUG_UNSAFE_MATH);
695b8e80941Smrg	options.supports_spill = true;
696b8e80941Smrg
697b8e80941Smrg	return shader_variant_create(device, module, shaders, shader_count, shaders[shader_count - 1]->info.stage,
698b8e80941Smrg				     &options, false, code_out, code_size_out);
699b8e80941Smrg}
700b8e80941Smrg
701b8e80941Smrgstruct radv_shader_variant *
702b8e80941Smrgradv_create_gs_copy_shader(struct radv_device *device,
703b8e80941Smrg			   struct nir_shader *shader,
704b8e80941Smrg			   void **code_out,
705b8e80941Smrg			   unsigned *code_size_out,
706b8e80941Smrg			   bool multiview)
707b8e80941Smrg{
708b8e80941Smrg	struct radv_nir_compiler_options options = {0};
709b8e80941Smrg
710b8e80941Smrg	options.key.has_multiview_view_index = multiview;
711b8e80941Smrg
712b8e80941Smrg	return shader_variant_create(device, NULL, &shader, 1, MESA_SHADER_VERTEX,
713b8e80941Smrg				     &options, true, code_out, code_size_out);
714b8e80941Smrg}
715b8e80941Smrg
716b8e80941Smrgvoid
717b8e80941Smrgradv_shader_variant_destroy(struct radv_device *device,
718b8e80941Smrg			    struct radv_shader_variant *variant)
719b8e80941Smrg{
720b8e80941Smrg	if (!p_atomic_dec_zero(&variant->ref_count))
721b8e80941Smrg		return;
722b8e80941Smrg
723b8e80941Smrg	mtx_lock(&device->shader_slab_mutex);
724b8e80941Smrg	list_del(&variant->slab_list);
725b8e80941Smrg	mtx_unlock(&device->shader_slab_mutex);
726b8e80941Smrg
727b8e80941Smrg	ralloc_free(variant->nir);
728b8e80941Smrg	free(variant->disasm_string);
729b8e80941Smrg	free(variant->llvm_ir_string);
730b8e80941Smrg	free(variant);
731b8e80941Smrg}
732b8e80941Smrg
733b8e80941Smrgconst char *
734b8e80941Smrgradv_get_shader_name(struct radv_shader_variant *var, gl_shader_stage stage)
735b8e80941Smrg{
736b8e80941Smrg	switch (stage) {
737b8e80941Smrg	case MESA_SHADER_VERTEX: return var->info.vs.as_ls ? "Vertex Shader as LS" : var->info.vs.as_es ? "Vertex Shader as ES" : "Vertex Shader as VS";
738b8e80941Smrg	case MESA_SHADER_GEOMETRY: return "Geometry Shader";
739b8e80941Smrg	case MESA_SHADER_FRAGMENT: return "Pixel Shader";
740b8e80941Smrg	case MESA_SHADER_COMPUTE: return "Compute Shader";
741b8e80941Smrg	case MESA_SHADER_TESS_CTRL: return "Tessellation Control Shader";
742b8e80941Smrg	case MESA_SHADER_TESS_EVAL: return var->info.tes.as_es ? "Tessellation Evaluation Shader as ES" : "Tessellation Evaluation Shader as VS";
743b8e80941Smrg	default:
744b8e80941Smrg		return "Unknown shader";
745b8e80941Smrg	};
746b8e80941Smrg}
747b8e80941Smrg
748b8e80941Smrgstatic void
749b8e80941Smrggenerate_shader_stats(struct radv_device *device,
750b8e80941Smrg		      struct radv_shader_variant *variant,
751b8e80941Smrg		      gl_shader_stage stage,
752b8e80941Smrg		      struct _mesa_string_buffer *buf)
753b8e80941Smrg{
754b8e80941Smrg	enum chip_class chip_class = device->physical_device->rad_info.chip_class;
755b8e80941Smrg	unsigned lds_increment = chip_class >= CIK ? 512 : 256;
756b8e80941Smrg	struct ac_shader_config *conf;
757b8e80941Smrg	unsigned max_simd_waves;
758b8e80941Smrg	unsigned lds_per_wave = 0;
759b8e80941Smrg
760b8e80941Smrg	max_simd_waves = ac_get_max_simd_waves(device->physical_device->rad_info.family);
761b8e80941Smrg
762b8e80941Smrg	conf = &variant->config;
763b8e80941Smrg
764b8e80941Smrg	if (stage == MESA_SHADER_FRAGMENT) {
765b8e80941Smrg		lds_per_wave = conf->lds_size * lds_increment +
766b8e80941Smrg			       align(variant->info.fs.num_interp * 48,
767b8e80941Smrg				     lds_increment);
768b8e80941Smrg	} else if (stage == MESA_SHADER_COMPUTE) {
769b8e80941Smrg		unsigned max_workgroup_size =
770b8e80941Smrg			radv_nir_get_max_workgroup_size(chip_class, stage, variant->nir);
771b8e80941Smrg		lds_per_wave = (conf->lds_size * lds_increment) /
772b8e80941Smrg			       DIV_ROUND_UP(max_workgroup_size, 64);
773b8e80941Smrg	}
774b8e80941Smrg
775b8e80941Smrg	if (conf->num_sgprs)
776b8e80941Smrg		max_simd_waves =
777b8e80941Smrg			MIN2(max_simd_waves,
778b8e80941Smrg			     ac_get_num_physical_sgprs(chip_class) / conf->num_sgprs);
779b8e80941Smrg
780b8e80941Smrg	if (conf->num_vgprs)
781b8e80941Smrg		max_simd_waves =
782b8e80941Smrg			MIN2(max_simd_waves,
783b8e80941Smrg			     RADV_NUM_PHYSICAL_VGPRS / conf->num_vgprs);
784b8e80941Smrg
785b8e80941Smrg	/* LDS is 64KB per CU (4 SIMDs), divided into 16KB blocks per SIMD
786b8e80941Smrg	 * that PS can use.
787b8e80941Smrg	 */
788b8e80941Smrg	if (lds_per_wave)
789b8e80941Smrg		max_simd_waves = MIN2(max_simd_waves, 16384 / lds_per_wave);
790b8e80941Smrg
791b8e80941Smrg	if (stage == MESA_SHADER_FRAGMENT) {
792b8e80941Smrg		_mesa_string_buffer_printf(buf, "*** SHADER CONFIG ***\n"
793b8e80941Smrg					   "SPI_PS_INPUT_ADDR = 0x%04x\n"
794b8e80941Smrg					   "SPI_PS_INPUT_ENA  = 0x%04x\n",
795b8e80941Smrg					   conf->spi_ps_input_addr, conf->spi_ps_input_ena);
796b8e80941Smrg	}
797b8e80941Smrg
798b8e80941Smrg	_mesa_string_buffer_printf(buf, "*** SHADER STATS ***\n"
799b8e80941Smrg				   "SGPRS: %d\n"
800b8e80941Smrg				   "VGPRS: %d\n"
801b8e80941Smrg				   "Spilled SGPRs: %d\n"
802b8e80941Smrg				   "Spilled VGPRs: %d\n"
803b8e80941Smrg				   "PrivMem VGPRS: %d\n"
804b8e80941Smrg				   "Code Size: %d bytes\n"
805b8e80941Smrg				   "LDS: %d blocks\n"
806b8e80941Smrg				   "Scratch: %d bytes per wave\n"
807b8e80941Smrg				   "Max Waves: %d\n"
808b8e80941Smrg				   "********************\n\n\n",
809b8e80941Smrg				   conf->num_sgprs, conf->num_vgprs,
810b8e80941Smrg				   conf->spilled_sgprs, conf->spilled_vgprs,
811b8e80941Smrg				   variant->info.private_mem_vgprs, variant->code_size,
812b8e80941Smrg				   conf->lds_size, conf->scratch_bytes_per_wave,
813b8e80941Smrg				   max_simd_waves);
814b8e80941Smrg}
815b8e80941Smrg
816b8e80941Smrgvoid
817b8e80941Smrgradv_shader_dump_stats(struct radv_device *device,
818b8e80941Smrg		       struct radv_shader_variant *variant,
819b8e80941Smrg		       gl_shader_stage stage,
820b8e80941Smrg		       FILE *file)
821b8e80941Smrg{
822b8e80941Smrg	struct _mesa_string_buffer *buf = _mesa_string_buffer_create(NULL, 256);
823b8e80941Smrg
824b8e80941Smrg	generate_shader_stats(device, variant, stage, buf);
825b8e80941Smrg
826b8e80941Smrg	fprintf(file, "\n%s:\n", radv_get_shader_name(variant, stage));
827b8e80941Smrg	fprintf(file, "%s", buf->buf);
828b8e80941Smrg
829b8e80941Smrg	_mesa_string_buffer_destroy(buf);
830b8e80941Smrg}
831b8e80941Smrg
/* Entry point for VK_AMD_shader_info: return statistics or disassembly for
 * one shader stage of a pipeline.  Follows the standard Vulkan two-call
 * idiom — when pInfo is NULL only *pInfoSize is written; when the caller's
 * buffer is smaller than the data, the copy is truncated and VK_INCOMPLETE
 * is returned.
 */
VkResult
radv_GetShaderInfoAMD(VkDevice _device,
		      VkPipeline _pipeline,
		      VkShaderStageFlagBits shaderStage,
		      VkShaderInfoTypeAMD infoType,
		      size_t* pInfoSize,
		      void* pInfo)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
	gl_shader_stage stage = vk_to_mesa_shader_stage(shaderStage);
	struct radv_shader_variant *variant = pipeline->shaders[stage];
	struct _mesa_string_buffer *buf;
	VkResult result = VK_SUCCESS;

	/* Spec doesn't indicate what to do if the stage is invalid, so just
	 * return no info for this. */
	if (!variant)
		return vk_error(device->instance, VK_ERROR_FEATURE_NOT_PRESENT);

	switch (infoType) {
	case VK_SHADER_INFO_TYPE_STATISTICS_AMD:
		if (!pInfo) {
			/* Size query only. */
			*pInfoSize = sizeof(VkShaderStatisticsInfoAMD);
		} else {
			/* LDS allocation granularity: 512 dwords on CIK+, 256 before. */
			unsigned lds_multiplier = device->physical_device->rad_info.chip_class >= CIK ? 512 : 256;
			struct ac_shader_config *conf = &variant->config;

			VkShaderStatisticsInfoAMD statistics = {};
			statistics.shaderStageMask = shaderStage;
			statistics.numPhysicalVgprs = RADV_NUM_PHYSICAL_VGPRS;
			statistics.numPhysicalSgprs = ac_get_num_physical_sgprs(device->physical_device->rad_info.chip_class);
			statistics.numAvailableSgprs = statistics.numPhysicalSgprs;

			if (stage == MESA_SHADER_COMPUTE) {
				/* NOTE(review): assumes variant->nir is retained for compute
				 * pipelines here — confirm it is non-NULL on this path. */
				unsigned *local_size = variant->nir->info.cs.local_size;
				unsigned workgroup_size = local_size[0] * local_size[1] * local_size[2];

				/* Scale available VGPRs down by how many wave-sized chunks
				 * the workgroup occupies. */
				statistics.numAvailableVgprs = statistics.numPhysicalVgprs /
							       ceil((double)workgroup_size / statistics.numPhysicalVgprs);

				statistics.computeWorkGroupSize[0] = local_size[0];
				statistics.computeWorkGroupSize[1] = local_size[1];
				statistics.computeWorkGroupSize[2] = local_size[2];
			} else {
				statistics.numAvailableVgprs = statistics.numPhysicalVgprs;
			}

			statistics.resourceUsage.numUsedVgprs = conf->num_vgprs;
			statistics.resourceUsage.numUsedSgprs = conf->num_sgprs;
			statistics.resourceUsage.ldsSizePerLocalWorkGroup = 32768;
			statistics.resourceUsage.ldsUsageSizeInBytes = conf->lds_size * lds_multiplier;
			statistics.resourceUsage.scratchMemUsageInBytes = conf->scratch_bytes_per_wave;

			/* Copy as much as fits; report VK_INCOMPLETE on truncation. */
			size_t size = *pInfoSize;
			*pInfoSize = sizeof(statistics);

			memcpy(pInfo, &statistics, MIN2(size, *pInfoSize));

			if (size < *pInfoSize)
				result = VK_INCOMPLETE;
		}

		break;
	case VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD:
		/* NOTE(review): llvm_ir_string/disasm_string appear to be kept only
		 * when device->keep_shader_info is set (see the create path above in
		 * this file) — confirm they are non-NULL before printing with %s. */
		buf = _mesa_string_buffer_create(NULL, 1024);

		_mesa_string_buffer_printf(buf, "%s:\n", radv_get_shader_name(variant, stage));
		_mesa_string_buffer_printf(buf, "%s\n\n", variant->llvm_ir_string);
		_mesa_string_buffer_printf(buf, "%s\n\n", variant->disasm_string);
		generate_shader_stats(device, variant, stage, buf);

		/* Need to include the null terminator. */
		size_t length = buf->length + 1;

		if (!pInfo) {
			/* Size query only. */
			*pInfoSize = length;
		} else {
			/* Copy as much as fits; report VK_INCOMPLETE on truncation. */
			size_t size = *pInfoSize;
			*pInfoSize = length;

			memcpy(pInfo, buf->buf, MIN2(size, length));

			if (size < length)
				result = VK_INCOMPLETE;
		}

		_mesa_string_buffer_destroy(buf);
		break;
	default:
		/* VK_SHADER_INFO_TYPE_BINARY_AMD unimplemented for now. */
		result = VK_ERROR_FEATURE_NOT_PRESENT;
		break;
	}

	return result;
}
929