radv_shader.c revision ed98bd31
1/*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 * IN THE SOFTWARE.
26 */
27
28#include "util/mesa-sha1.h"
29#include "util/u_atomic.h"
30#include "radv_debug.h"
31#include "radv_private.h"
32#include "radv_shader.h"
33#include "radv_shader_helper.h"
34#include "nir/nir.h"
35#include "nir/nir_builder.h"
36#include "spirv/nir_spirv.h"
37
38#include <llvm-c/Core.h>
39#include <llvm-c/TargetMachine.h>
40#include <llvm-c/Support.h>
41
42#include "sid.h"
43#include "gfx9d.h"
44#include "ac_binary.h"
45#include "ac_llvm_util.h"
46#include "ac_nir_to_llvm.h"
47#include "vk_format.h"
48#include "util/debug.h"
49#include "ac_exp_param.h"
50
51#include "util/string_buffer.h"
52
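/* NIR compiler options shared by every shader stage.  These are handed to
 * spirv_to_nir and consulted by the optimization loop below (for example,
 * max_unroll_iterations gates nir_opt_loop_unroll in radv_optimize_nir).
 */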
53static const struct nir_shader_compiler_options nir_options = {
54	.vertex_id_zero_based = true,
55	.lower_scmp = true,
56	.lower_flrp16 = true,
57	.lower_flrp32 = true,
58	.lower_flrp64 = true,
59	.lower_device_index_to_zero = true,
60	.lower_fsat = true,
61	.lower_fdiv = true,
62	.lower_sub = true,
63	.lower_pack_snorm_2x16 = true,
64	.lower_pack_snorm_4x8 = true,
65	.lower_pack_unorm_2x16 = true,
66	.lower_pack_unorm_4x8 = true,
67	.lower_unpack_snorm_2x16 = true,
68	.lower_unpack_snorm_4x8 = true,
69	.lower_unpack_unorm_2x16 = true,
70	.lower_unpack_unorm_4x8 = true,
71	.lower_extract_byte = true,
72	.lower_extract_word = true,
73	.lower_ffma = true,
74	.lower_fpow = true,
75	.lower_mul_2x32_64 = true,
76	.max_unroll_iterations = 32
77};
78
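/* vkCreateShaderModule: no compilation happens here.  The module only keeps
 * a copy of the SPIR-V words plus a SHA-1 of the code; the actual compile is
 * deferred until pipeline creation.
 */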
79VkResult radv_CreateShaderModule(
80	VkDevice                                    _device,
81	const VkShaderModuleCreateInfo*             pCreateInfo,
82	const VkAllocationCallbacks*                pAllocator,
83	VkShaderModule*                             pShaderModule)
84{
85	RADV_FROM_HANDLE(radv_device, device, _device);
86	struct radv_shader_module *module;
87
88	assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
89	assert(pCreateInfo->flags == 0);
90
91	module = vk_alloc2(&device->alloc, pAllocator,
92			     sizeof(*module) + pCreateInfo->codeSize, 8,
93			     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
94	if (module == NULL)
95		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
96
97	module->nir = NULL;
98	module->size = pCreateInfo->codeSize;
99	memcpy(module->data, pCreateInfo->pCode, module->size);
100
101	_mesa_sha1_compute(module->data, module->size, module->sha1);
102
103	*pShaderModule = radv_shader_module_to_handle(module);
104
105	return VK_SUCCESS;
106}
107
108void radv_DestroyShaderModule(
109	VkDevice                                    _device,
110	VkShaderModule                              _module,
111	const VkAllocationCallbacks*                pAllocator)
112{
113	RADV_FROM_HANDLE(radv_device, device, _device);
114	RADV_FROM_HANDLE(radv_shader_module, module, _module);
115
116	if (!module)
117		return;
118
119	vk_free2(&device->alloc, pAllocator, module);
120}
121
122void
123radv_optimize_nir(struct nir_shader *shader, bool optimize_conservatively,
124                  bool allow_copies)
125{
126	bool progress;
127
128	do {
129		progress = false;
130
131		NIR_PASS(progress, shader, nir_split_array_vars, nir_var_function_temp);
132		NIR_PASS(progress, shader, nir_shrink_vec_array_vars, nir_var_function_temp);
133
134		NIR_PASS_V(shader, nir_lower_vars_to_ssa);
135		NIR_PASS_V(shader, nir_lower_pack);
136
137		if (allow_copies) {
138			/* Only run this pass in the first call to
139			 * radv_optimize_nir.  Later calls assume that we've
140			 * lowered away any copy_deref instructions and we
141			 * don't want to introduce any more.
142			 */
143			NIR_PASS(progress, shader, nir_opt_find_array_copies);
144		}
145
146		NIR_PASS(progress, shader, nir_opt_copy_prop_vars);
147		NIR_PASS(progress, shader, nir_opt_dead_write_vars);
148
149		NIR_PASS_V(shader, nir_lower_alu_to_scalar);
150		NIR_PASS_V(shader, nir_lower_phis_to_scalar);
151
152		NIR_PASS(progress, shader, nir_copy_prop);
153		NIR_PASS(progress, shader, nir_opt_remove_phis);
154		NIR_PASS(progress, shader, nir_opt_dce);
155		if (nir_opt_trivial_continues(shader)) {
156			progress = true;
157			NIR_PASS(progress, shader, nir_copy_prop);
158			NIR_PASS(progress, shader, nir_opt_remove_phis);
159			NIR_PASS(progress, shader, nir_opt_dce);
160		}
161		NIR_PASS(progress, shader, nir_opt_if, true);
162		NIR_PASS(progress, shader, nir_opt_dead_cf);
163		NIR_PASS(progress, shader, nir_opt_cse);
164		NIR_PASS(progress, shader, nir_opt_peephole_select, 8, true, true);
165		NIR_PASS(progress, shader, nir_opt_algebraic);
166		NIR_PASS(progress, shader, nir_opt_constant_folding);
167		NIR_PASS(progress, shader, nir_opt_undef);
168		NIR_PASS(progress, shader, nir_opt_conditional_discard);
169		if (shader->options->max_unroll_iterations) {
170			NIR_PASS(progress, shader, nir_opt_loop_unroll, 0);
171		}
172	} while (progress && !optimize_conservatively);
173
174	NIR_PASS(progress, shader, nir_opt_shrink_load);
175	NIR_PASS(progress, shader, nir_opt_move_load_ubo);
176}
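/* Illustrative call pattern (see radv_shader_compile_to_nir below): the first
 * optimization run passes allow_copies=true so nir_opt_find_array_copies may
 * introduce copy_deref instructions; those are then lowered away and later
 * runs pass allow_copies=false, e.g.
 *
 *	radv_optimize_nir(nir, false, true);
 *	nir_lower_var_copies(nir);
 *	radv_optimize_nir(nir, disable_opts, false);
 */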
177
178nir_shader *
179radv_shader_compile_to_nir(struct radv_device *device,
180			   struct radv_shader_module *module,
181			   const char *entrypoint_name,
182			   gl_shader_stage stage,
183			   const VkSpecializationInfo *spec_info,
184			   const VkPipelineCreateFlags flags,
185			   const struct radv_pipeline_layout *layout)
186{
187	nir_shader *nir;
188	nir_function *entry_point;
189	if (module->nir) {
190		/* Some things such as our meta clear/blit code will give us a NIR
191		 * shader directly.  In that case, we ignore the SPIR-V entirely and
192		 * just use the NIR shader. */
193		nir = module->nir;
194		nir->options = &nir_options;
195		nir_validate_shader(nir, "in internal shader");
196
197		assert(exec_list_length(&nir->functions) == 1);
198		struct exec_node *node = exec_list_get_head(&nir->functions);
199		entry_point = exec_node_data(nir_function, node, node);
200	} else {
201		uint32_t *spirv = (uint32_t *) module->data;
202		assert(module->size % 4 == 0);
203
204		if (device->instance->debug_flags & RADV_DEBUG_DUMP_SPIRV)
205			radv_print_spirv(spirv, module->size, stderr);
206
207		uint32_t num_spec_entries = 0;
208		struct nir_spirv_specialization *spec_entries = NULL;
209		if (spec_info && spec_info->mapEntryCount > 0) {
210			num_spec_entries = spec_info->mapEntryCount;
211			spec_entries = malloc(num_spec_entries * sizeof(*spec_entries));
212			for (uint32_t i = 0; i < num_spec_entries; i++) {
213				VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
214				const void *data = spec_info->pData + entry.offset;
215				assert(data + entry.size <= spec_info->pData + spec_info->dataSize);
216
217				spec_entries[i].id = spec_info->pMapEntries[i].constantID;
218				if (entry.size == 8)
219					spec_entries[i].data64 = *(const uint64_t *)data;
220				else
221					spec_entries[i].data32 = *(const uint32_t *)data;
222			}
223		}
224		const struct spirv_to_nir_options spirv_options = {
225			.lower_ubo_ssbo_access_to_offsets = true,
226			.caps = {
227				.derivative_group = true,
228				.descriptor_array_dynamic_indexing = true,
229				.descriptor_array_non_uniform_indexing = true,
230				.descriptor_indexing = true,
231				.device_group = true,
232				.draw_parameters = true,
233				.float16 = true,
234				.float64 = true,
235				.gcn_shader = true,
236				.geometry_streams = true,
237				.image_read_without_format = true,
238				.image_write_without_format = true,
239				.int8 = true,
240				.int16 = true,
241				.int64 = true,
242				.int64_atomics = true,
243				.multiview = true,
244				.physical_storage_buffer_address = true,
245				.runtime_descriptor_array = true,
246				.shader_viewport_index_layer = true,
247				.stencil_export = true,
248				.storage_8bit = true,
249				.storage_16bit = true,
250				.storage_image_ms = true,
251				.subgroup_arithmetic = true,
252				.subgroup_ballot = true,
253				.subgroup_basic = true,
254				.subgroup_quad = true,
255				.subgroup_shuffle = true,
256				.subgroup_vote = true,
257				.tessellation = true,
258				.transform_feedback = true,
259				.trinary_minmax = true,
260				.variable_pointers = true,
261			},
262			.ubo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT, 2),
263			.ssbo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT, 2),
264			.phys_ssbo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT64, 1),
265			.push_const_ptr_type = glsl_uint_type(),
266			.shared_ptr_type = glsl_uint_type(),
267		};
268		entry_point = spirv_to_nir(spirv, module->size / 4,
269					   spec_entries, num_spec_entries,
270					   stage, entrypoint_name,
271					   &spirv_options, &nir_options);
272		nir = entry_point->shader;
273		assert(nir->info.stage == stage);
274		nir_validate_shader(nir, "after spirv_to_nir");
275
276		free(spec_entries);
277
278		/* We have to lower away local constant initializers right before we
279		 * inline functions.  That way they get properly initialized at the top
280		 * of the function and not at the top of its caller.
281		 */
282		NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_function_temp);
283		NIR_PASS_V(nir, nir_lower_returns);
284		NIR_PASS_V(nir, nir_inline_functions);
285		NIR_PASS_V(nir, nir_opt_deref);
286
287		/* Pick off the single entrypoint that we want */
288		foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
289			if (func != entry_point)
290				exec_node_remove(&func->node);
291		}
292		assert(exec_list_length(&nir->functions) == 1);
293		entry_point->name = ralloc_strdup(entry_point, "main");
294
295		/* Make sure we lower constant initializers on output variables so that
296		 * nir_remove_dead_variables below sees the corresponding stores
297		 */
298		NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_shader_out);
299
300		/* Now that we've deleted all but the main function, we can go ahead and
301		 * lower the rest of the constant initializers.
302		 */
303		NIR_PASS_V(nir, nir_lower_constant_initializers, ~0);
304
305		/* Split member structs.  We do this before lower_io_to_temporaries so that
306		 * it doesn't lower system values to temporaries by accident.
307		 */
308		NIR_PASS_V(nir, nir_split_var_copies);
309		NIR_PASS_V(nir, nir_split_per_member_structs);
310
311		NIR_PASS_V(nir, nir_remove_dead_variables,
312		           nir_var_shader_in | nir_var_shader_out | nir_var_system_value);
313
314		NIR_PASS_V(nir, nir_propagate_invariant);
315
316		NIR_PASS_V(nir, nir_lower_system_values);
317		NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);
318		NIR_PASS_V(nir, radv_nir_lower_ycbcr_textures, layout);
319	}
320
321	/* Vulkan uses the separate-shader linking model */
322	nir->info.separate_shader = true;
323
324	nir_shader_gather_info(nir, entry_point->impl);
325
326	static const nir_lower_tex_options tex_options = {
327	  .lower_txp = ~0,
328	  .lower_tg4_offsets = true,
329	};
330
331	nir_lower_tex(nir, &tex_options);
332
333	nir_lower_vars_to_ssa(nir);
334
335	if (nir->info.stage == MESA_SHADER_VERTEX ||
336	    nir->info.stage == MESA_SHADER_GEOMETRY) {
337		NIR_PASS_V(nir, nir_lower_io_to_temporaries,
338			   nir_shader_get_entrypoint(nir), true, true);
	} else if (nir->info.stage == MESA_SHADER_TESS_EVAL ||
340		   nir->info.stage == MESA_SHADER_FRAGMENT) {
341		NIR_PASS_V(nir, nir_lower_io_to_temporaries,
342			   nir_shader_get_entrypoint(nir), true, false);
343	}
344
345	nir_split_var_copies(nir);
346
347	nir_lower_global_vars_to_local(nir);
348	nir_remove_dead_variables(nir, nir_var_function_temp);
349	nir_lower_subgroups(nir, &(struct nir_lower_subgroups_options) {
350			.subgroup_size = 64,
351			.ballot_bit_size = 64,
352			.lower_to_scalar = 1,
353			.lower_subgroup_masks = 1,
354			.lower_shuffle = 1,
355			.lower_shuffle_to_32bit = 1,
356			.lower_vote_eq_to_ballot = 1,
357		});
358
359	nir_lower_load_const_to_scalar(nir);
360
361	if (!(flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT))
362		radv_optimize_nir(nir, false, true);
363
364	/* We call nir_lower_var_copies() after the first radv_optimize_nir()
365	 * to remove any copies introduced by nir_opt_find_array_copies().
366	 */
367	nir_lower_var_copies(nir);
368
369	/* Indirect lowering must be called after the radv_optimize_nir() loop
370	 * has been called at least once. Otherwise indirect lowering can
371	 * bloat the instruction count of the loop and cause it to be
372	 * considered too large for unrolling.
373	 */
374	ac_lower_indirect_derefs(nir, device->physical_device->rad_info.chip_class);
375	radv_optimize_nir(nir, flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT, false);
376
377	return nir;
378}
379
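/* Sub-allocates GPU memory for shader code.  Existing 256 KiB slabs are
 * scanned first-fit, keeping allocations aligned to 256 bytes; if no slab
 * has room, a new VRAM slab (read-only where the hardware allows it) is
 * created and mapped.  The slab list is protected by shader_slab_mutex.
 */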
380void *
381radv_alloc_shader_memory(struct radv_device *device,
382			 struct radv_shader_variant *shader)
383{
384	mtx_lock(&device->shader_slab_mutex);
385	list_for_each_entry(struct radv_shader_slab, slab, &device->shader_slabs, slabs) {
386		uint64_t offset = 0;
387		list_for_each_entry(struct radv_shader_variant, s, &slab->shaders, slab_list) {
388			if (s->bo_offset - offset >= shader->code_size) {
389				shader->bo = slab->bo;
390				shader->bo_offset = offset;
391				list_addtail(&shader->slab_list, &s->slab_list);
392				mtx_unlock(&device->shader_slab_mutex);
393				return slab->ptr + offset;
394			}
395			offset = align_u64(s->bo_offset + s->code_size, 256);
396		}
397		if (slab->size - offset >= shader->code_size) {
398			shader->bo = slab->bo;
399			shader->bo_offset = offset;
400			list_addtail(&shader->slab_list, &slab->shaders);
401			mtx_unlock(&device->shader_slab_mutex);
402			return slab->ptr + offset;
403		}
404	}
405
406	mtx_unlock(&device->shader_slab_mutex);
407	struct radv_shader_slab *slab = calloc(1, sizeof(struct radv_shader_slab));
408
409	slab->size = 256 * 1024;
410	slab->bo = device->ws->buffer_create(device->ws, slab->size, 256,
411	                                     RADEON_DOMAIN_VRAM,
412					     RADEON_FLAG_NO_INTERPROCESS_SHARING |
413					     (device->physical_device->cpdma_prefetch_writes_memory ?
414					             0 : RADEON_FLAG_READ_ONLY),
415					     RADV_BO_PRIORITY_SHADER);
416	slab->ptr = (char*)device->ws->buffer_map(slab->bo);
417	list_inithead(&slab->shaders);
418
419	mtx_lock(&device->shader_slab_mutex);
420	list_add(&slab->slabs, &device->shader_slabs);
421
422	shader->bo = slab->bo;
423	shader->bo_offset = 0;
424	list_add(&shader->slab_list, &slab->shaders);
425	mtx_unlock(&device->shader_slab_mutex);
426	return slab->ptr;
427}
428
429void
430radv_destroy_shader_slabs(struct radv_device *device)
431{
432	list_for_each_entry_safe(struct radv_shader_slab, slab, &device->shader_slabs, slabs) {
433		device->ws->buffer_destroy(slab->bo);
434		free(slab);
435	}
436	mtx_destroy(&device->shader_slab_mutex);
437}
438
439/* For the UMR disassembler. */
440#define DEBUGGER_END_OF_CODE_MARKER    0xbf9f0000 /* invalid instruction */
441#define DEBUGGER_NUM_MARKERS           5
442
443static unsigned
444radv_get_shader_binary_size(struct ac_shader_binary *binary)
445{
446	return binary->code_size + DEBUGGER_NUM_MARKERS * 4;
447}
448
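/* Uploads the compiled binary into slab memory and derives the SPI resource
 * registers (rsrc1/rsrc2) from the shader config and stage.  The allocation
 * is radv_get_shader_binary_size() bytes, i.e. code_size plus room for the
 * DEBUGGER_NUM_MARKERS end-of-code words appended for UMR below.
 */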
449static void
450radv_fill_shader_variant(struct radv_device *device,
451			 struct radv_shader_variant *variant,
452			 struct ac_shader_binary *binary,
453			 gl_shader_stage stage)
454{
455	bool scratch_enabled = variant->config.scratch_bytes_per_wave > 0;
456	struct radv_shader_info *info = &variant->info.info;
457	unsigned vgpr_comp_cnt = 0;
458
459	variant->code_size = radv_get_shader_binary_size(binary);
460	variant->rsrc2 = S_00B12C_USER_SGPR(variant->info.num_user_sgprs) |
461			 S_00B12C_USER_SGPR_MSB(variant->info.num_user_sgprs >> 5) |
462			 S_00B12C_SCRATCH_EN(scratch_enabled) |
463			 S_00B12C_SO_BASE0_EN(!!info->so.strides[0]) |
464			 S_00B12C_SO_BASE1_EN(!!info->so.strides[1]) |
465			 S_00B12C_SO_BASE2_EN(!!info->so.strides[2]) |
466			 S_00B12C_SO_BASE3_EN(!!info->so.strides[3]) |
467			 S_00B12C_SO_EN(!!info->so.num_outputs);
468
469	variant->rsrc1 = S_00B848_VGPRS((variant->config.num_vgprs - 1) / 4) |
470		S_00B848_SGPRS((variant->config.num_sgprs - 1) / 8) |
471		S_00B848_DX10_CLAMP(1) |
472		S_00B848_FLOAT_MODE(variant->config.float_mode);
473
474	switch (stage) {
475	case MESA_SHADER_TESS_EVAL:
476		vgpr_comp_cnt = 3;
477		variant->rsrc2 |= S_00B12C_OC_LDS_EN(1);
478		break;
479	case MESA_SHADER_TESS_CTRL:
480		if (device->physical_device->rad_info.chip_class >= GFX9) {
481			vgpr_comp_cnt = variant->info.vs.vgpr_comp_cnt;
482		} else {
483			variant->rsrc2 |= S_00B12C_OC_LDS_EN(1);
484		}
485		break;
486	case MESA_SHADER_VERTEX:
487	case MESA_SHADER_GEOMETRY:
488		vgpr_comp_cnt = variant->info.vs.vgpr_comp_cnt;
489		break;
490	case MESA_SHADER_FRAGMENT:
491		break;
492	case MESA_SHADER_COMPUTE:
493		variant->rsrc2 |=
494			S_00B84C_TGID_X_EN(info->cs.uses_block_id[0]) |
495			S_00B84C_TGID_Y_EN(info->cs.uses_block_id[1]) |
496			S_00B84C_TGID_Z_EN(info->cs.uses_block_id[2]) |
497			S_00B84C_TIDIG_COMP_CNT(info->cs.uses_thread_id[2] ? 2 :
498						info->cs.uses_thread_id[1] ? 1 : 0) |
499			S_00B84C_TG_SIZE_EN(info->cs.uses_local_invocation_idx) |
500			S_00B84C_LDS_SIZE(variant->config.lds_size);
501		break;
502	default:
503		unreachable("unsupported shader type");
504		break;
505	}
506
507	if (device->physical_device->rad_info.chip_class >= GFX9 &&
508	    stage == MESA_SHADER_GEOMETRY) {
509		unsigned es_type = variant->info.gs.es_type;
510		unsigned gs_vgpr_comp_cnt, es_vgpr_comp_cnt;
511
512		if (es_type == MESA_SHADER_VERTEX) {
513			es_vgpr_comp_cnt = variant->info.vs.vgpr_comp_cnt;
514		} else if (es_type == MESA_SHADER_TESS_EVAL) {
515			es_vgpr_comp_cnt = 3;
516		} else {
517			unreachable("invalid shader ES type");
518		}
519
520		/* If offsets 4, 5 are used, GS_VGPR_COMP_CNT is ignored and
521		 * VGPR[0:4] are always loaded.
522		 */
523		if (info->uses_invocation_id) {
524			gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID. */
525		} else if (info->uses_prim_id) {
526			gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */
527		} else if (variant->info.gs.vertices_in >= 3) {
528			gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */
529		} else {
530			gs_vgpr_comp_cnt = 0; /* VGPR0 contains offsets 0, 1 */
531		}
532
533		variant->rsrc1 |= S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt);
534		variant->rsrc2 |= S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) |
535		                  S_00B22C_OC_LDS_EN(es_type == MESA_SHADER_TESS_EVAL);
536	} else if (device->physical_device->rad_info.chip_class >= GFX9 &&
537		   stage == MESA_SHADER_TESS_CTRL) {
538		variant->rsrc1 |= S_00B428_LS_VGPR_COMP_CNT(vgpr_comp_cnt);
539	} else {
540		variant->rsrc1 |= S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt);
541	}
542
543	void *ptr = radv_alloc_shader_memory(device, variant);
544	memcpy(ptr, binary->code, binary->code_size);
545
546	/* Add end-of-code markers for the UMR disassembler. */
547	uint32_t *ptr32 = (uint32_t *)ptr + binary->code_size / 4;
548	for (unsigned i = 0; i < DEBUGGER_NUM_MARKERS; i++)
549		ptr32[i] = DEBUGGER_END_OF_CODE_MARKER;
550
551}
552
553static void radv_init_llvm_target(void)
554{
555	LLVMInitializeAMDGPUTargetInfo();
556	LLVMInitializeAMDGPUTarget();
557	LLVMInitializeAMDGPUTargetMC();
558	LLVMInitializeAMDGPUAsmPrinter();
559
560	/* For inline assembly. */
561	LLVMInitializeAMDGPUAsmParser();
562
563	/* Workaround for a bug in LLVM 4.0 that causes image intrinsics
564	 * to disappear.
565	 * https://reviews.llvm.org/D26348
566	 *
567	 * Workaround for a bug in LLVM that causes the GPU to hang in the
568	 * presence of nested loops because of an exec mask issue. The proper
569	 * solution is to fix LLVM, but that might require a bunch of work.
570	 * https://bugs.llvm.org/show_bug.cgi?id=37744
571	 *
572	 * "mesa" is the prefix for error messages.
573	 */
574	if (HAVE_LLVM >= 0x0800) {
575		const char *argv[2] = { "mesa", "-simplifycfg-sink-common=false" };
576		LLVMParseCommandLineOptions(2, argv, NULL);
577
578	} else {
579		const char *argv[3] = { "mesa", "-simplifycfg-sink-common=false",
580					"-amdgpu-skip-threshold=1" };
581		LLVMParseCommandLineOptions(3, argv, NULL);
582	}
583}
584
585static once_flag radv_init_llvm_target_once_flag = ONCE_FLAG_INIT;
586
587static void radv_init_llvm_once(void)
588{
589	call_once(&radv_init_llvm_target_once_flag, radv_init_llvm_target);
590}
591
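/* Common path for both regular shaders and the GS copy shader: fills in the
 * per-device compiler options, initializes LLVM once per process, runs the
 * NIR -> LLVM -> machine-code compile, then uploads the result with
 * radv_fill_shader_variant().  The returned variant starts with ref_count 1.
 */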
592static struct radv_shader_variant *
593shader_variant_create(struct radv_device *device,
594		      struct radv_shader_module *module,
595		      struct nir_shader * const *shaders,
596		      int shader_count,
597		      gl_shader_stage stage,
598		      struct radv_nir_compiler_options *options,
599		      bool gs_copy_shader,
600		      void **code_out,
601		      unsigned *code_size_out)
602{
603	enum radeon_family chip_family = device->physical_device->rad_info.family;
604	enum ac_target_machine_options tm_options = 0;
605	struct radv_shader_variant *variant;
606	struct ac_shader_binary binary;
607	struct ac_llvm_compiler ac_llvm;
608	bool thread_compiler;
609	variant = calloc(1, sizeof(struct radv_shader_variant));
610	if (!variant)
611		return NULL;
612
613	options->family = chip_family;
614	options->chip_class = device->physical_device->rad_info.chip_class;
615	options->dump_shader = radv_can_dump_shader(device, module, gs_copy_shader);
616	options->dump_preoptir = options->dump_shader &&
617				 device->instance->debug_flags & RADV_DEBUG_PREOPTIR;
618	options->record_llvm_ir = device->keep_shader_info;
619	options->check_ir = device->instance->debug_flags & RADV_DEBUG_CHECKIR;
620	options->tess_offchip_block_dw_size = device->tess_offchip_block_dw_size;
621	options->address32_hi = device->physical_device->rad_info.address32_hi;
622
623	if (options->supports_spill)
624		tm_options |= AC_TM_SUPPORTS_SPILL;
625	if (device->instance->perftest_flags & RADV_PERFTEST_SISCHED)
626		tm_options |= AC_TM_SISCHED;
627	if (options->check_ir)
628		tm_options |= AC_TM_CHECK_IR;
629	if (device->instance->debug_flags & RADV_DEBUG_NO_LOAD_STORE_OPT)
630		tm_options |= AC_TM_NO_LOAD_STORE_OPT;
631
632	thread_compiler = !(device->instance->debug_flags & RADV_DEBUG_NOTHREADLLVM);
633	radv_init_llvm_once();
634	radv_init_llvm_compiler(&ac_llvm,
635				thread_compiler,
636				chip_family, tm_options);
637	if (gs_copy_shader) {
638		assert(shader_count == 1);
639		radv_compile_gs_copy_shader(&ac_llvm, *shaders, &binary,
640					    &variant->config, &variant->info,
641					    options);
642	} else {
643		radv_compile_nir_shader(&ac_llvm, &binary, &variant->config,
644					&variant->info, shaders, shader_count,
645					options);
646	}
647
648	radv_destroy_llvm_compiler(&ac_llvm, thread_compiler);
649
650	radv_fill_shader_variant(device, variant, &binary, stage);
651
652	if (code_out) {
653		*code_out = binary.code;
654		*code_size_out = binary.code_size;
655	} else
656		free(binary.code);
657	free(binary.config);
658	free(binary.rodata);
659	free(binary.global_symbol_offsets);
660	free(binary.relocs);
661	variant->ref_count = 1;
662
663	if (device->keep_shader_info) {
664		variant->disasm_string = binary.disasm_string;
665		variant->llvm_ir_string = binary.llvm_ir_string;
666		if (!gs_copy_shader && !module->nir) {
667			variant->nir = *shaders;
668			variant->spirv = (uint32_t *)module->data;
669			variant->spirv_size = module->size;
670		}
671	} else {
672		free(binary.disasm_string);
673	}
674
675	return variant;
676}
677
678struct radv_shader_variant *
679radv_shader_variant_create(struct radv_device *device,
680			   struct radv_shader_module *module,
681			   struct nir_shader *const *shaders,
682			   int shader_count,
683			   struct radv_pipeline_layout *layout,
684			   const struct radv_shader_variant_key *key,
685			   void **code_out,
686			   unsigned *code_size_out)
687{
688	struct radv_nir_compiler_options options = {0};
689
690	options.layout = layout;
691	if (key)
692		options.key = *key;
693
694	options.unsafe_math = !!(device->instance->debug_flags & RADV_DEBUG_UNSAFE_MATH);
695	options.supports_spill = true;
696
697	return shader_variant_create(device, module, shaders, shader_count, shaders[shader_count - 1]->info.stage,
698				     &options, false, code_out, code_size_out);
699}
700
701struct radv_shader_variant *
702radv_create_gs_copy_shader(struct radv_device *device,
703			   struct nir_shader *shader,
704			   void **code_out,
705			   unsigned *code_size_out,
706			   bool multiview)
707{
708	struct radv_nir_compiler_options options = {0};
709
710	options.key.has_multiview_view_index = multiview;
711
712	return shader_variant_create(device, NULL, &shader, 1, MESA_SHADER_VERTEX,
713				     &options, true, code_out, code_size_out);
714}
715
716void
717radv_shader_variant_destroy(struct radv_device *device,
718			    struct radv_shader_variant *variant)
719{
720	if (!p_atomic_dec_zero(&variant->ref_count))
721		return;
722
723	mtx_lock(&device->shader_slab_mutex);
724	list_del(&variant->slab_list);
725	mtx_unlock(&device->shader_slab_mutex);
726
727	ralloc_free(variant->nir);
728	free(variant->disasm_string);
729	free(variant->llvm_ir_string);
730	free(variant);
731}
732
733const char *
734radv_get_shader_name(struct radv_shader_variant *var, gl_shader_stage stage)
735{
736	switch (stage) {
737	case MESA_SHADER_VERTEX: return var->info.vs.as_ls ? "Vertex Shader as LS" : var->info.vs.as_es ? "Vertex Shader as ES" : "Vertex Shader as VS";
738	case MESA_SHADER_GEOMETRY: return "Geometry Shader";
739	case MESA_SHADER_FRAGMENT: return "Pixel Shader";
740	case MESA_SHADER_COMPUTE: return "Compute Shader";
741	case MESA_SHADER_TESS_CTRL: return "Tessellation Control Shader";
742	case MESA_SHADER_TESS_EVAL: return var->info.tes.as_es ? "Tessellation Evaluation Shader as ES" : "Tessellation Evaluation Shader as VS";
743	default:
744		return "Unknown shader";
745	};
746}
747
748static void
749generate_shader_stats(struct radv_device *device,
750		      struct radv_shader_variant *variant,
751		      gl_shader_stage stage,
752		      struct _mesa_string_buffer *buf)
753{
754	enum chip_class chip_class = device->physical_device->rad_info.chip_class;
755	unsigned lds_increment = chip_class >= CIK ? 512 : 256;
756	struct ac_shader_config *conf;
757	unsigned max_simd_waves;
758	unsigned lds_per_wave = 0;
759
760	max_simd_waves = ac_get_max_simd_waves(device->physical_device->rad_info.family);
761
762	conf = &variant->config;
763
764	if (stage == MESA_SHADER_FRAGMENT) {
765		lds_per_wave = conf->lds_size * lds_increment +
766			       align(variant->info.fs.num_interp * 48,
767				     lds_increment);
768	} else if (stage == MESA_SHADER_COMPUTE) {
769		unsigned max_workgroup_size =
770			radv_nir_get_max_workgroup_size(chip_class, stage, variant->nir);
771		lds_per_wave = (conf->lds_size * lds_increment) /
772			       DIV_ROUND_UP(max_workgroup_size, 64);
773	}
774
775	if (conf->num_sgprs)
776		max_simd_waves =
777			MIN2(max_simd_waves,
778			     ac_get_num_physical_sgprs(chip_class) / conf->num_sgprs);
779
780	if (conf->num_vgprs)
781		max_simd_waves =
782			MIN2(max_simd_waves,
783			     RADV_NUM_PHYSICAL_VGPRS / conf->num_vgprs);
784
785	/* LDS is 64KB per CU (4 SIMDs), divided into 16KB blocks per SIMD
786	 * that PS can use.
787	 */
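	/* For example, a wave needing 4096 bytes of LDS is limited to
	 * 16384 / 4096 = 4 waves per SIMD by the clamp below.
	 */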
788	if (lds_per_wave)
789		max_simd_waves = MIN2(max_simd_waves, 16384 / lds_per_wave);
790
791	if (stage == MESA_SHADER_FRAGMENT) {
792		_mesa_string_buffer_printf(buf, "*** SHADER CONFIG ***\n"
793					   "SPI_PS_INPUT_ADDR = 0x%04x\n"
794					   "SPI_PS_INPUT_ENA  = 0x%04x\n",
795					   conf->spi_ps_input_addr, conf->spi_ps_input_ena);
796	}
797
798	_mesa_string_buffer_printf(buf, "*** SHADER STATS ***\n"
799				   "SGPRS: %d\n"
800				   "VGPRS: %d\n"
801				   "Spilled SGPRs: %d\n"
802				   "Spilled VGPRs: %d\n"
803				   "PrivMem VGPRS: %d\n"
804				   "Code Size: %d bytes\n"
805				   "LDS: %d blocks\n"
806				   "Scratch: %d bytes per wave\n"
807				   "Max Waves: %d\n"
808				   "********************\n\n\n",
809				   conf->num_sgprs, conf->num_vgprs,
810				   conf->spilled_sgprs, conf->spilled_vgprs,
811				   variant->info.private_mem_vgprs, variant->code_size,
812				   conf->lds_size, conf->scratch_bytes_per_wave,
813				   max_simd_waves);
814}
815
816void
817radv_shader_dump_stats(struct radv_device *device,
818		       struct radv_shader_variant *variant,
819		       gl_shader_stage stage,
820		       FILE *file)
821{
822	struct _mesa_string_buffer *buf = _mesa_string_buffer_create(NULL, 256);
823
824	generate_shader_stats(device, variant, stage, buf);
825
826	fprintf(file, "\n%s:\n", radv_get_shader_name(variant, stage));
827	fprintf(file, "%s", buf->buf);
828
829	_mesa_string_buffer_destroy(buf);
830}
831
832VkResult
833radv_GetShaderInfoAMD(VkDevice _device,
834		      VkPipeline _pipeline,
835		      VkShaderStageFlagBits shaderStage,
836		      VkShaderInfoTypeAMD infoType,
837		      size_t* pInfoSize,
838		      void* pInfo)
839{
840	RADV_FROM_HANDLE(radv_device, device, _device);
841	RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
842	gl_shader_stage stage = vk_to_mesa_shader_stage(shaderStage);
843	struct radv_shader_variant *variant = pipeline->shaders[stage];
844	struct _mesa_string_buffer *buf;
845	VkResult result = VK_SUCCESS;
846
847	/* Spec doesn't indicate what to do if the stage is invalid, so just
848	 * return no info for this. */
849	if (!variant)
850		return vk_error(device->instance, VK_ERROR_FEATURE_NOT_PRESENT);
851
852	switch (infoType) {
853	case VK_SHADER_INFO_TYPE_STATISTICS_AMD:
854		if (!pInfo) {
855			*pInfoSize = sizeof(VkShaderStatisticsInfoAMD);
856		} else {
857			unsigned lds_multiplier = device->physical_device->rad_info.chip_class >= CIK ? 512 : 256;
858			struct ac_shader_config *conf = &variant->config;
859
860			VkShaderStatisticsInfoAMD statistics = {};
861			statistics.shaderStageMask = shaderStage;
862			statistics.numPhysicalVgprs = RADV_NUM_PHYSICAL_VGPRS;
863			statistics.numPhysicalSgprs = ac_get_num_physical_sgprs(device->physical_device->rad_info.chip_class);
864			statistics.numAvailableSgprs = statistics.numPhysicalSgprs;
865
866			if (stage == MESA_SHADER_COMPUTE) {
867				unsigned *local_size = variant->nir->info.cs.local_size;
868				unsigned workgroup_size = local_size[0] * local_size[1] * local_size[2];
869
870				statistics.numAvailableVgprs = statistics.numPhysicalVgprs /
871							       ceil((double)workgroup_size / statistics.numPhysicalVgprs);
872
873				statistics.computeWorkGroupSize[0] = local_size[0];
874				statistics.computeWorkGroupSize[1] = local_size[1];
875				statistics.computeWorkGroupSize[2] = local_size[2];
876			} else {
877				statistics.numAvailableVgprs = statistics.numPhysicalVgprs;
878			}
879
880			statistics.resourceUsage.numUsedVgprs = conf->num_vgprs;
881			statistics.resourceUsage.numUsedSgprs = conf->num_sgprs;
882			statistics.resourceUsage.ldsSizePerLocalWorkGroup = 32768;
883			statistics.resourceUsage.ldsUsageSizeInBytes = conf->lds_size * lds_multiplier;
884			statistics.resourceUsage.scratchMemUsageInBytes = conf->scratch_bytes_per_wave;
885
886			size_t size = *pInfoSize;
887			*pInfoSize = sizeof(statistics);
888
889			memcpy(pInfo, &statistics, MIN2(size, *pInfoSize));
890
891			if (size < *pInfoSize)
892				result = VK_INCOMPLETE;
893		}
894
895		break;
896	case VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD:
897		buf = _mesa_string_buffer_create(NULL, 1024);
898
899		_mesa_string_buffer_printf(buf, "%s:\n", radv_get_shader_name(variant, stage));
900		_mesa_string_buffer_printf(buf, "%s\n\n", variant->llvm_ir_string);
901		_mesa_string_buffer_printf(buf, "%s\n\n", variant->disasm_string);
902		generate_shader_stats(device, variant, stage, buf);
903
904		/* Need to include the null terminator. */
905		size_t length = buf->length + 1;
906
907		if (!pInfo) {
908			*pInfoSize = length;
909		} else {
910			size_t size = *pInfoSize;
911			*pInfoSize = length;
912
913			memcpy(pInfo, buf->buf, MIN2(size, length));
914
915			if (size < length)
916				result = VK_INCOMPLETE;
917		}
918
919		_mesa_string_buffer_destroy(buf);
920		break;
921	default:
922		/* VK_SHADER_INFO_TYPE_BINARY_AMD unimplemented for now. */
923		result = VK_ERROR_FEATURE_NOT_PRESENT;
924		break;
925	}
926
927	return result;
928}
929
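/* Example use of the VK_AMD_shader_info query above (illustrative sketch, not
 * part of the driver): applications use the usual two-call idiom, first asking
 * for the size and then for the data, e.g.
 *
 *	size_t size = 0;
 *	vkGetShaderInfoAMD(device, pipeline, VK_SHADER_STAGE_FRAGMENT_BIT,
 *			   VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD, &size, NULL);
 *	char *disasm = malloc(size);
 *	vkGetShaderInfoAMD(device, pipeline, VK_SHADER_STAGE_FRAGMENT_BIT,
 *			   VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD, &size, disasm);
 */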