/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "tgsi/tgsi_parse.h"
#include "util/u_async_debug.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"

#include "amd_kernel_code_t.h"
#include "si_build_pm4.h"
#include "si_compute.h"

#define COMPUTE_DBG(sscreen, fmt, args...) \
	do { \
		if ((sscreen->debug_flags & DBG(COMPUTE))) fprintf(stderr, fmt, ##args); \
	} while (0)

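/* Mirrors the layout of the HSA kernel dispatch packet
 * (hsa_kernel_dispatch_packet_t). A copy is uploaded for kernels that
 * request a dispatch pointer via AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR.
 */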
struct dispatch_packet {
	uint16_t header;
	uint16_t setup;
	uint16_t workgroup_size_x;
	uint16_t workgroup_size_y;
	uint16_t workgroup_size_z;
	uint16_t reserved0;
	uint32_t grid_size_x;
	uint32_t grid_size_y;
	uint32_t grid_size_z;
	uint32_t private_segment_size;
	uint32_t group_segment_size;
	uint64_t kernel_object;
	uint64_t kernarg_address;
	uint64_t reserved2;
};

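/* Return the amd_kernel_code_t header that precedes the kernel machine code
 * in a code object v2 binary, or NULL for other binary types. symbol_offset
 * selects the kernel entry point within the binary. */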
static const amd_kernel_code_t *si_compute_get_code_object(
	const struct si_compute *program,
	uint64_t symbol_offset)
{
	if (!program->use_code_object_v2) {
		return NULL;
	}
	return (const amd_kernel_code_t*)
		(program->shader.binary.code + symbol_offset);
}

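/* Translate amd_kernel_code_t metadata into the driver's shader config.
 * compute_pgm_resource_registers packs COMPUTE_PGM_RSRC1 in the low 32 bits
 * and COMPUTE_PGM_RSRC2 in the high 32 bits. Scratch is sized for all
 * 64 lanes of a wave and aligned to 1 KiB. */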
static void code_object_to_config(const amd_kernel_code_t *code_object,
				  struct si_shader_config *out_config)
{
	uint32_t rsrc1 = code_object->compute_pgm_resource_registers;
	uint32_t rsrc2 = code_object->compute_pgm_resource_registers >> 32;
	out_config->num_sgprs = code_object->wavefront_sgpr_count;
	out_config->num_vgprs = code_object->workitem_vgpr_count;
	out_config->float_mode = G_00B028_FLOAT_MODE(rsrc1);
	out_config->rsrc1 = rsrc1;
	out_config->lds_size = MAX2(out_config->lds_size, G_00B84C_LDS_SIZE(rsrc2));
	out_config->rsrc2 = rsrc2;
	out_config->scratch_bytes_per_wave =
		align(code_object->workitem_private_segment_byte_size * 64, 1024);
}

/* Asynchronous compute shader compilation. */
static void si_create_compute_state_async(void *job, int thread_index)
{
	struct si_compute *program = (struct si_compute *)job;
	struct si_shader *shader = &program->shader;
	struct si_shader_selector sel;
	struct ac_llvm_compiler *compiler;
	struct pipe_debug_callback *debug = &program->compiler_ctx_state.debug;
	struct si_screen *sscreen = program->screen;

	assert(!debug->debug_message || debug->async);
	assert(thread_index >= 0);
	assert(thread_index < ARRAY_SIZE(sscreen->compiler));
	compiler = &sscreen->compiler[thread_index];

	memset(&sel, 0, sizeof(sel));

	sel.screen = sscreen;

	if (program->ir_type == PIPE_SHADER_IR_TGSI) {
		tgsi_scan_shader(program->ir.tgsi, &sel.info);
		sel.tokens = program->ir.tgsi;
	} else {
		assert(program->ir_type == PIPE_SHADER_IR_NIR);
		sel.nir = program->ir.nir;

		si_nir_opts(sel.nir);
		si_nir_scan_shader(sel.nir, &sel.info);
		si_lower_nir(&sel);
	}

	/* Store the declared LDS size into tgsi_shader_info for the shader
	 * cache to include it.
	 */
	sel.info.properties[TGSI_PROPERTY_CS_LOCAL_SIZE] = program->local_size;

	sel.type = PIPE_SHADER_COMPUTE;
	si_get_active_slot_masks(&sel.info,
				 &program->active_const_and_shader_buffers,
				 &program->active_samplers_and_images);

	program->shader.selector = &sel;
	program->shader.is_monolithic = true;
	program->uses_grid_size = sel.info.uses_grid_size;
	program->uses_bindless_samplers = sel.info.uses_bindless_samplers;
	program->uses_bindless_images = sel.info.uses_bindless_images;
	program->reads_variable_block_size =
		sel.info.uses_block_size &&
		sel.info.properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] == 0;
	program->num_cs_user_data_dwords =
		sel.info.properties[TGSI_PROPERTY_CS_USER_DATA_DWORDS];

	void *ir_binary = si_get_ir_binary(&sel);

	/* Try to load the shader from the shader cache. */
	mtx_lock(&sscreen->shader_cache_mutex);

	if (ir_binary &&
	    si_shader_cache_load_shader(sscreen, ir_binary, shader)) {
		mtx_unlock(&sscreen->shader_cache_mutex);

		si_shader_dump_stats_for_shader_db(shader, debug);
		si_shader_dump(sscreen, shader, debug, PIPE_SHADER_COMPUTE,
			       stderr, true);

		if (si_shader_binary_upload(sscreen, shader))
			program->shader.compilation_failed = true;
	} else {
		mtx_unlock(&sscreen->shader_cache_mutex);

		if (si_shader_create(sscreen, compiler, &program->shader, debug)) {
			program->shader.compilation_failed = true;

			if (program->ir_type == PIPE_SHADER_IR_TGSI)
				FREE(program->ir.tgsi);
			program->shader.selector = NULL;
			return;
		}

		bool scratch_enabled = shader->config.scratch_bytes_per_wave > 0;
		unsigned user_sgprs = SI_NUM_RESOURCE_SGPRS +
				      (sel.info.uses_grid_size ? 3 : 0) +
				      (program->reads_variable_block_size ? 3 : 0) +
				      program->num_cs_user_data_dwords;

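		/* Build RSRC1/RSRC2 by hand. The hardware allocates VGPRs in
		 * units of 4 and SGPRs in units of 8, which is why the fields
		 * below encode (count - 1) / granularity. */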
		shader->config.rsrc1 =
			S_00B848_VGPRS((shader->config.num_vgprs - 1) / 4) |
			S_00B848_SGPRS((shader->config.num_sgprs - 1) / 8) |
			S_00B848_DX10_CLAMP(1) |
			S_00B848_FLOAT_MODE(shader->config.float_mode);

		shader->config.rsrc2 =
			S_00B84C_USER_SGPR(user_sgprs) |
			S_00B84C_SCRATCH_EN(scratch_enabled) |
			S_00B84C_TGID_X_EN(sel.info.uses_block_id[0]) |
			S_00B84C_TGID_Y_EN(sel.info.uses_block_id[1]) |
			S_00B84C_TGID_Z_EN(sel.info.uses_block_id[2]) |
			S_00B84C_TIDIG_COMP_CNT(sel.info.uses_thread_id[2] ? 2 :
						sel.info.uses_thread_id[1] ? 1 : 0) |
			S_00B84C_LDS_SIZE(shader->config.lds_size);

		if (ir_binary) {
			mtx_lock(&sscreen->shader_cache_mutex);
			if (!si_shader_cache_insert_shader(sscreen, ir_binary, shader, true))
				FREE(ir_binary);
			mtx_unlock(&sscreen->shader_cache_mutex);
		}
	}

	if (program->ir_type == PIPE_SHADER_IR_TGSI)
		FREE(program->ir.tgsi);

	program->shader.selector = NULL;
}

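/* Create a compute state object. TGSI and NIR shaders are compiled
 * asynchronously on the screen's compiler queue; PIPE_SHADER_IR_NATIVE is a
 * precompiled ELF binary (typically from the OpenCL state tracker) that is
 * parsed and uploaded here directly. */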
static void *si_create_compute_state(
	struct pipe_context *ctx,
	const struct pipe_compute_state *cso)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_screen *sscreen = (struct si_screen *)ctx->screen;
	struct si_compute *program = CALLOC_STRUCT(si_compute);

	if (!program)
		return NULL;

	pipe_reference_init(&program->reference, 1);
	program->screen = (struct si_screen *)ctx->screen;
	program->ir_type = cso->ir_type;
	program->local_size = cso->req_local_mem;
	program->private_size = cso->req_private_mem;
	program->input_size = cso->req_input_mem;
	program->use_code_object_v2 = cso->ir_type == PIPE_SHADER_IR_NATIVE;

	if (cso->ir_type != PIPE_SHADER_IR_NATIVE) {
		if (cso->ir_type == PIPE_SHADER_IR_TGSI) {
			program->ir.tgsi = tgsi_dup_tokens(cso->prog);
			if (!program->ir.tgsi) {
				FREE(program);
				return NULL;
			}
		} else {
			assert(cso->ir_type == PIPE_SHADER_IR_NIR);
			program->ir.nir = (struct nir_shader *) cso->prog;
		}

		program->compiler_ctx_state.debug = sctx->debug;
		program->compiler_ctx_state.is_debug_context = sctx->is_debug;
		p_atomic_inc(&sscreen->num_shaders_created);

		si_schedule_initial_compile(sctx, PIPE_SHADER_COMPUTE,
					    &program->ready,
					    &program->compiler_ctx_state,
					    program, si_create_compute_state_async);
	} else {
		const struct pipe_llvm_program_header *header;
		const char *code;
		header = cso->prog;
		code = cso->prog + sizeof(struct pipe_llvm_program_header);

		ac_elf_read(code, header->num_bytes, &program->shader.binary);
		if (program->use_code_object_v2) {
			const amd_kernel_code_t *code_object =
				si_compute_get_code_object(program, 0);
			code_object_to_config(code_object, &program->shader.config);
			if (program->shader.binary.reloc_count != 0) {
				fprintf(stderr, "Error: %d unsupported relocations\n",
					program->shader.binary.reloc_count);
				FREE(program);
				return NULL;
			}
		} else {
			si_shader_binary_read_config(&program->shader.binary,
				     &program->shader.config, 0);
		}
		si_shader_dump(sctx->screen, &program->shader, &sctx->debug,
			       PIPE_SHADER_COMPUTE, stderr, true);
		if (si_shader_binary_upload(sctx->screen, &program->shader) < 0) {
			fprintf(stderr, "LLVM failed to upload shader\n");
			FREE(program);
			return NULL;
		}
	}

	return program;
}

static void si_bind_compute_state(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct si_compute *program = (struct si_compute*)state;

	sctx->cs_shader_state.program = program;
	if (!program)
		return;

	/* Wait because we need active slot usage masks. */
	if (program->ir_type != PIPE_SHADER_IR_NATIVE)
		util_queue_fence_wait(&program->ready);

	si_set_active_descriptors(sctx,
				  SI_DESCS_FIRST_COMPUTE +
				  SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS,
				  program->active_const_and_shader_buffers);
	si_set_active_descriptors(sctx,
				  SI_DESCS_FIRST_COMPUTE +
				  SI_SHADER_DESCS_SAMPLERS_AND_IMAGES,
				  program->active_samplers_and_images);
}

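/* Bind global buffers for compute kernels. On input, each handle contains a
 * 32-bit offset into the corresponding buffer; it is overwritten in place
 * with the little-endian 64-bit GPU address of that location. */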
static void si_set_global_binding(
	struct pipe_context *ctx, unsigned first, unsigned n,
	struct pipe_resource **resources,
	uint32_t **handles)
{
	unsigned i;
	struct si_context *sctx = (struct si_context*)ctx;
	struct si_compute *program = sctx->cs_shader_state.program;

	assert(first + n <= MAX_GLOBAL_BUFFERS);

	if (!resources) {
		for (i = 0; i < n; i++) {
			pipe_resource_reference(&program->global_buffers[first + i], NULL);
		}
		return;
	}

	for (i = 0; i < n; i++) {
		uint64_t va;
		uint32_t offset;
		pipe_resource_reference(&program->global_buffers[first + i], resources[i]);
		va = si_resource(resources[i])->gpu_address;
		offset = util_le32_to_cpu(*handles[i]);
		va += offset;
		va = util_cpu_to_le64(va);
		memcpy(handles[i], &va, sizeof(va));
	}
}

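/* Emit one-time compute register state: enable all CUs for compute,
 * program COMPUTE_MAX_WAVE_ID on SI, and point TA_CS_BC_BASE_ADDR at the
 * border color buffer. */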
static void si_initialize_compute(struct si_context *sctx)
{
	struct radeon_cmdbuf *cs = sctx->gfx_cs;
	uint64_t bc_va;

	radeon_set_sh_reg_seq(cs, R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0, 2);
	/* R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0 / SE1 */
	radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));
	radeon_emit(cs, S_00B85C_SH0_CU_EN(0xffff) | S_00B85C_SH1_CU_EN(0xffff));

	if (sctx->chip_class >= CIK) {
		/* Also set R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE2 / SE3 */
		radeon_set_sh_reg_seq(cs,
		                      R_00B864_COMPUTE_STATIC_THREAD_MGMT_SE2, 2);
		radeon_emit(cs, S_00B864_SH0_CU_EN(0xffff) |
		                S_00B864_SH1_CU_EN(0xffff));
		radeon_emit(cs, S_00B868_SH0_CU_EN(0xffff) |
		                S_00B868_SH1_CU_EN(0xffff));
	}

	/* This register has been moved to R_00CD20_COMPUTE_MAX_WAVE_ID
	 * and is now per pipe, so it should be handled in the
	 * kernel if we want to use something other than the default value,
	 * which is now 0x22f.
	 */
	if (sctx->chip_class <= SI) {
		/* XXX: This should be:
		 * (number of compute units) * 4 * (waves per simd) - 1 */

		radeon_set_sh_reg(cs, R_00B82C_COMPUTE_MAX_WAVE_ID,
		                  0x190 /* Default value */);
	}

	/* Set the pointer to border colors. */
	bc_va = sctx->border_color_buffer->gpu_address;

	if (sctx->chip_class >= CIK) {
		radeon_set_uconfig_reg_seq(cs, R_030E00_TA_CS_BC_BASE_ADDR, 2);
		radeon_emit(cs, bc_va >> 8);  /* R_030E00_TA_CS_BC_BASE_ADDR */
		radeon_emit(cs, S_030E04_ADDRESS(bc_va >> 40)); /* R_030E04_TA_CS_BC_BASE_ADDR_HI */
	} else {
		if (sctx->screen->info.si_TA_CS_BC_BASE_ADDR_allowed) {
			radeon_set_config_reg(cs, R_00950C_TA_CS_BC_BASE_ADDR,
					      bc_va >> 8);
		}
	}

	sctx->cs_shader_state.emitted_program = NULL;
	sctx->cs_shader_state.initialized = true;
}

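/* Make sure the compute scratch buffer can hold
 * scratch_bytes_per_wave * scratch_waves, growing (never shrinking) it as
 * needed. If the shader was linked against a different scratch buffer,
 * patch its scratch relocations and upload it again. */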
static bool si_setup_compute_scratch_buffer(struct si_context *sctx,
                                            struct si_shader *shader,
                                            struct si_shader_config *config)
{
	uint64_t scratch_bo_size, scratch_needed;
	scratch_bo_size = 0;
	scratch_needed = config->scratch_bytes_per_wave * sctx->scratch_waves;
	if (sctx->compute_scratch_buffer)
		scratch_bo_size = sctx->compute_scratch_buffer->b.b.width0;

	if (scratch_bo_size < scratch_needed) {
		si_resource_reference(&sctx->compute_scratch_buffer, NULL);

		sctx->compute_scratch_buffer =
			si_aligned_buffer_create(&sctx->screen->b,
						 SI_RESOURCE_FLAG_UNMAPPABLE,
						 PIPE_USAGE_DEFAULT,
						 scratch_needed, 256);

		if (!sctx->compute_scratch_buffer)
			return false;
	}

	if (sctx->compute_scratch_buffer != shader->scratch_bo && scratch_needed) {
		uint64_t scratch_va = sctx->compute_scratch_buffer->gpu_address;

		si_shader_apply_scratch_relocs(shader, scratch_va);

		if (si_shader_binary_upload(sctx->screen, shader))
			return false;

		si_resource_reference(&shader->scratch_bo,
		                      sctx->compute_scratch_buffer);
	}

	return true;
}

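/* Emit the shader for the next dispatch: program COMPUTE_PGM_LO/HI and
 * COMPUTE_PGM_RSRC1/RSRC2, skipping the work when the same program and
 * offset are already bound. For native kernels, the config is read from the
 * binary and the LDS size is augmented by the state-tracker-declared local
 * size. */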
static bool si_switch_compute_shader(struct si_context *sctx,
                                     struct si_compute *program,
				     struct si_shader *shader,
				     const amd_kernel_code_t *code_object,
				     unsigned offset)
{
	struct radeon_cmdbuf *cs = sctx->gfx_cs;
	struct si_shader_config inline_config = {0};
	struct si_shader_config *config;
	uint64_t shader_va;

	if (sctx->cs_shader_state.emitted_program == program &&
	    sctx->cs_shader_state.offset == offset)
		return true;

	if (program->ir_type != PIPE_SHADER_IR_NATIVE) {
		config = &shader->config;
	} else {
		unsigned lds_blocks;

		config = &inline_config;
		if (code_object) {
			code_object_to_config(code_object, config);
		} else {
			si_shader_binary_read_config(&shader->binary, config, offset);
		}

		lds_blocks = config->lds_size;
		/* XXX: We are overallocating LDS. For SI, the shader reports
		 * LDS in blocks of 256 bytes, so if there are 4 bytes of LDS
		 * allocated in the shader and 4 bytes allocated by the state
		 * tracker, then we will set LDS_SIZE to 512 bytes rather
		 * than 256.
		 */
		if (sctx->chip_class <= SI) {
			lds_blocks += align(program->local_size, 256) >> 8;
		} else {
			lds_blocks += align(program->local_size, 512) >> 9;
		}

		/* TODO: use si_multiwave_lds_size_workaround */
		assert(lds_blocks <= 0xFF);

		config->rsrc2 &= C_00B84C_LDS_SIZE;
		config->rsrc2 |= S_00B84C_LDS_SIZE(lds_blocks);
	}

	if (!si_setup_compute_scratch_buffer(sctx, shader, config))
		return false;

	if (shader->scratch_bo) {
		COMPUTE_DBG(sctx->screen, "Waves: %u; Scratch per wave: %u bytes; "
		            "Total Scratch: %u bytes\n", sctx->scratch_waves,
			    config->scratch_bytes_per_wave,
			    config->scratch_bytes_per_wave *
			    sctx->scratch_waves);

		radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
			      shader->scratch_bo, RADEON_USAGE_READWRITE,
			      RADEON_PRIO_SCRATCH_BUFFER);
	}

	/* Prefetch the compute shader to TC L2.
	 *
	 * We should also prefetch graphics shaders if a compute dispatch was
	 * the last command, and the compute shader if a draw call was the last
	 * command. However, that would add more complexity and we're likely
	 * to get a shader state change in that case anyway.
	 */
	if (sctx->chip_class >= CIK) {
		cik_prefetch_TC_L2_async(sctx, &program->shader.bo->b.b,
					 0, program->shader.bo->b.b.width0);
	}

	shader_va = shader->bo->gpu_address + offset;
	if (program->use_code_object_v2) {
		/* Shader code is placed after the amd_kernel_code_t
		 * struct. */
		shader_va += sizeof(amd_kernel_code_t);
	}

	radeon_add_to_buffer_list(sctx, sctx->gfx_cs, shader->bo,
	                          RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);

	radeon_set_sh_reg_seq(cs, R_00B830_COMPUTE_PGM_LO, 2);
	radeon_emit(cs, shader_va >> 8);
	radeon_emit(cs, S_00B834_DATA(shader_va >> 40));

	radeon_set_sh_reg_seq(cs, R_00B848_COMPUTE_PGM_RSRC1, 2);
	radeon_emit(cs, config->rsrc1);
	radeon_emit(cs, config->rsrc2);

	COMPUTE_DBG(sctx->screen, "COMPUTE_PGM_RSRC1: 0x%08x "
		"COMPUTE_PGM_RSRC2: 0x%08x\n", config->rsrc1, config->rsrc2);

	sctx->max_seen_compute_scratch_bytes_per_wave =
		MAX2(sctx->max_seen_compute_scratch_bytes_per_wave,
		     config->scratch_bytes_per_wave);

	radeon_set_sh_reg(cs, R_00B860_COMPUTE_TMPRING_SIZE,
	          S_00B860_WAVES(sctx->scratch_waves)
	             | S_00B860_WAVESIZE(sctx->max_seen_compute_scratch_bytes_per_wave >> 10));

	sctx->cs_shader_state.emitted_program = program;
	sctx->cs_shader_state.offset = offset;
	sctx->cs_shader_state.uses_scratch =
		config->scratch_bytes_per_wave != 0;

	return true;
}

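/* Write a 4-dword buffer resource descriptor for the scratch (private
 * segment) buffer into user SGPRs. The descriptor is swizzled with
 * ADD_TID_ENABLE so each lane addresses its own slice of the buffer, and
 * dword2 = ~0 disables address clamping. */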
static void setup_scratch_rsrc_user_sgprs(struct si_context *sctx,
					  const amd_kernel_code_t *code_object,
					  unsigned user_sgpr)
{
	struct radeon_cmdbuf *cs = sctx->gfx_cs;
	uint64_t scratch_va = sctx->compute_scratch_buffer->gpu_address;

	unsigned max_private_element_size = AMD_HSA_BITS_GET(
			code_object->code_properties,
			AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE);

	uint32_t scratch_dword0 = scratch_va & 0xffffffff;
	uint32_t scratch_dword1 =
		S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) |
		S_008F04_SWIZZLE_ENABLE(1);

	/* Disable address clamping */
	uint32_t scratch_dword2 = 0xffffffff;
	uint32_t scratch_dword3 =
		S_008F0C_INDEX_STRIDE(3) |
		S_008F0C_ADD_TID_ENABLE(1);

	if (sctx->chip_class >= GFX9) {
		assert(max_private_element_size == 1); /* always 4 bytes on GFX9 */
	} else {
		scratch_dword3 |= S_008F0C_ELEMENT_SIZE(max_private_element_size);

		if (sctx->chip_class < VI) {
			/* BUF_DATA_FORMAT is ignored, but it cannot be
			 * BUF_DATA_FORMAT_INVALID. */
			scratch_dword3 |=
				S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_8);
		}
	}

	radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
						(user_sgpr * 4), 4);
	radeon_emit(cs, scratch_dword0);
	radeon_emit(cs, scratch_dword1);
	radeon_emit(cs, scratch_dword2);
	radeon_emit(cs, scratch_dword3);
}

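/* Fill user SGPRs according to the code object v2 ABI. The enable bits in
 * code_properties handled here select, in this order: the private segment
 * (scratch) resource, a pointer to an uploaded dispatch packet, the kernarg
 * segment pointer, and the grid workgroup counts. */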
static void si_setup_user_sgprs_co_v2(struct si_context *sctx,
                                      const amd_kernel_code_t *code_object,
				      const struct pipe_grid_info *info,
				      uint64_t kernel_args_va)
{
	struct si_compute *program = sctx->cs_shader_state.program;
	struct radeon_cmdbuf *cs = sctx->gfx_cs;

	static const enum amd_code_property_mask_t workgroup_count_masks [] = {
		AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X,
		AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y,
		AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z
	};

	unsigned i, user_sgpr = 0;
	if (AMD_HSA_BITS_GET(code_object->code_properties,
			AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER)) {
		if (code_object->workitem_private_segment_byte_size > 0) {
			setup_scratch_rsrc_user_sgprs(sctx, code_object,
								user_sgpr);
		}
		user_sgpr += 4;
	}

	if (AMD_HSA_BITS_GET(code_object->code_properties,
			AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR)) {
		struct dispatch_packet dispatch;
		unsigned dispatch_offset;
		struct si_resource *dispatch_buf = NULL;
		uint64_t dispatch_va;

		/* Upload dispatch ptr */
		memset(&dispatch, 0, sizeof(dispatch));

		dispatch.workgroup_size_x = util_cpu_to_le16(info->block[0]);
		dispatch.workgroup_size_y = util_cpu_to_le16(info->block[1]);
		dispatch.workgroup_size_z = util_cpu_to_le16(info->block[2]);

		dispatch.grid_size_x = util_cpu_to_le32(info->grid[0] * info->block[0]);
		dispatch.grid_size_y = util_cpu_to_le32(info->grid[1] * info->block[1]);
		dispatch.grid_size_z = util_cpu_to_le32(info->grid[2] * info->block[2]);

		dispatch.private_segment_size = util_cpu_to_le32(program->private_size);
		dispatch.group_segment_size = util_cpu_to_le32(program->local_size);

		dispatch.kernarg_address = util_cpu_to_le64(kernel_args_va);

		u_upload_data(sctx->b.const_uploader, 0, sizeof(dispatch),
			      256, &dispatch, &dispatch_offset,
			      (struct pipe_resource**)&dispatch_buf);

		if (!dispatch_buf) {
			fprintf(stderr, "Error: Failed to allocate dispatch "
					"packet.\n");
			return;
		}
		radeon_add_to_buffer_list(sctx, sctx->gfx_cs, dispatch_buf,
				  RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);

		dispatch_va = dispatch_buf->gpu_address + dispatch_offset;

		radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
							(user_sgpr * 4), 2);
		radeon_emit(cs, dispatch_va);
		radeon_emit(cs, S_008F04_BASE_ADDRESS_HI(dispatch_va >> 32) |
		                S_008F04_STRIDE(0));

		si_resource_reference(&dispatch_buf, NULL);
		user_sgpr += 2;
	}

	if (AMD_HSA_BITS_GET(code_object->code_properties,
			AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR)) {
		radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
							(user_sgpr * 4), 2);
		radeon_emit(cs, kernel_args_va);
		radeon_emit(cs, S_008F04_BASE_ADDRESS_HI(kernel_args_va >> 32) |
		                S_008F04_STRIDE(0));
		user_sgpr += 2;
	}

	for (i = 0; i < 3 && user_sgpr < 16; i++) {
		if (code_object->code_properties & workgroup_count_masks[i]) {
			radeon_set_sh_reg_seq(cs,
				R_00B900_COMPUTE_USER_DATA_0 +
				(user_sgpr * 4), 1);
			radeon_emit(cs, info->grid[i]);
			user_sgpr += 1;
		}
	}
}

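/* Upload kernel arguments into a constant buffer and point the shader at
 * them. For kernels that don't use code object v2, 36 bytes are prepended:
 * the grid size in blocks, the global size in threads and the block size,
 * 3 dwords each. */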
static bool si_upload_compute_input(struct si_context *sctx,
				    const amd_kernel_code_t *code_object,
				    const struct pipe_grid_info *info)
{
	struct radeon_cmdbuf *cs = sctx->gfx_cs;
	struct si_compute *program = sctx->cs_shader_state.program;
	struct si_resource *input_buffer = NULL;
	unsigned kernel_args_size;
	unsigned num_work_size_bytes = program->use_code_object_v2 ? 0 : 36;
	uint32_t kernel_args_offset = 0;
	uint32_t *kernel_args;
	void *kernel_args_ptr;
	uint64_t kernel_args_va;
	unsigned i;

	/* The extra num_work_size_bytes are for work group / work item size information */
	kernel_args_size = program->input_size + num_work_size_bytes;

	u_upload_alloc(sctx->b.const_uploader, 0, kernel_args_size,
		       sctx->screen->info.tcc_cache_line_size,
		       &kernel_args_offset,
		       (struct pipe_resource**)&input_buffer, &kernel_args_ptr);

	if (unlikely(!kernel_args_ptr))
		return false;

	kernel_args = (uint32_t*)kernel_args_ptr;
	kernel_args_va = input_buffer->gpu_address + kernel_args_offset;

	if (!code_object) {
		for (i = 0; i < 3; i++) {
			kernel_args[i] = util_cpu_to_le32(info->grid[i]);
			kernel_args[i + 3] = util_cpu_to_le32(info->grid[i] * info->block[i]);
			kernel_args[i + 6] = util_cpu_to_le32(info->block[i]);
		}
	}

	memcpy(kernel_args + (num_work_size_bytes / 4), info->input,
	       program->input_size);

	for (i = 0; i < (kernel_args_size / 4); i++) {
		COMPUTE_DBG(sctx->screen, "input %u : %u\n", i,
			kernel_args[i]);
	}

	radeon_add_to_buffer_list(sctx, sctx->gfx_cs, input_buffer,
				  RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);

	if (code_object) {
		si_setup_user_sgprs_co_v2(sctx, code_object, info, kernel_args_va);
	} else {
		radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0, 2);
		radeon_emit(cs, kernel_args_va);
		radeon_emit(cs, S_008F04_BASE_ADDRESS_HI(kernel_args_va >> 32) |
		                S_008F04_STRIDE(0));
	}

	si_resource_reference(&input_buffer, NULL);

	return true;
}

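/* Write the grid size, the variable block size and application-supplied
 * user data into the user SGPRs that follow the resource SGPRs. For
 * indirect dispatches, the grid size is copied from the indirect buffer
 * into the registers with CP COPY_DATA packets. */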
static void si_setup_tgsi_user_data(struct si_context *sctx,
                                    const struct pipe_grid_info *info)
{
	struct si_compute *program = sctx->cs_shader_state.program;
	struct radeon_cmdbuf *cs = sctx->gfx_cs;
	unsigned grid_size_reg = R_00B900_COMPUTE_USER_DATA_0 +
				 4 * SI_NUM_RESOURCE_SGPRS;
	unsigned block_size_reg = grid_size_reg +
				  /* 12 bytes = 3 dwords. */
				  12 * program->uses_grid_size;
	unsigned cs_user_data_reg = block_size_reg +
				    12 * program->reads_variable_block_size;

	if (info->indirect) {
		if (program->uses_grid_size) {
			for (unsigned i = 0; i < 3; ++i) {
				si_cp_copy_data(sctx,
						COPY_DATA_REG, NULL, (grid_size_reg >> 2) + i,
						COPY_DATA_SRC_MEM, si_resource(info->indirect),
						info->indirect_offset + 4 * i);
			}
		}
	} else {
		if (program->uses_grid_size) {
			radeon_set_sh_reg_seq(cs, grid_size_reg, 3);
			radeon_emit(cs, info->grid[0]);
			radeon_emit(cs, info->grid[1]);
			radeon_emit(cs, info->grid[2]);
		}
		if (program->reads_variable_block_size) {
			radeon_set_sh_reg_seq(cs, block_size_reg, 3);
			radeon_emit(cs, info->block[0]);
			radeon_emit(cs, info->block[1]);
			radeon_emit(cs, info->block[2]);
		}
	}

	if (program->num_cs_user_data_dwords) {
		radeon_set_sh_reg_seq(cs, cs_user_data_reg, program->num_cs_user_data_dwords);
		radeon_emit_array(cs, sctx->cs_user_data, program->num_cs_user_data_dwords);
	}
}

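/* Program COMPUTE_RESOURCE_LIMITS and the threadgroup dimensions, then emit
 * the DISPATCH_DIRECT or DISPATCH_INDIRECT packet. PARTIAL_TG_EN handles
 * grids whose size isn't a multiple of the block size. */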
static void si_emit_dispatch_packets(struct si_context *sctx,
                                     const struct pipe_grid_info *info)
{
	struct si_screen *sscreen = sctx->screen;
	struct radeon_cmdbuf *cs = sctx->gfx_cs;
	bool render_cond_bit = sctx->render_cond && !sctx->render_cond_force_off;
	unsigned waves_per_threadgroup =
		DIV_ROUND_UP(info->block[0] * info->block[1] * info->block[2], 64);
	unsigned compute_resource_limits =
		S_00B854_SIMD_DEST_CNTL(waves_per_threadgroup % 4 == 0);

	if (sctx->chip_class >= CIK) {
		unsigned num_cu_per_se = sscreen->info.num_good_compute_units /
					 sscreen->info.max_se;

		/* Force even distribution on all SIMDs in CU if the workgroup
		 * size is 64. This has shown some good improvements if # of CUs
		 * per SE is not a multiple of 4.
		 */
		if (num_cu_per_se % 4 && waves_per_threadgroup == 1)
			compute_resource_limits |= S_00B854_FORCE_SIMD_DIST(1);

		compute_resource_limits |= S_00B854_WAVES_PER_SH(sctx->cs_max_waves_per_sh);
	} else {
		/* SI */
		if (sctx->cs_max_waves_per_sh) {
			unsigned limit_div16 = DIV_ROUND_UP(sctx->cs_max_waves_per_sh, 16);
			compute_resource_limits |= S_00B854_WAVES_PER_SH_SI(limit_div16);
		}
	}

	radeon_set_sh_reg(cs, R_00B854_COMPUTE_RESOURCE_LIMITS,
			  compute_resource_limits);

	unsigned dispatch_initiator =
		S_00B800_COMPUTE_SHADER_EN(1) |
		S_00B800_FORCE_START_AT_000(1) |
		/* If the KMD allows it (there is a KMD hw register for it),
		 * allow launching waves out-of-order. (same as Vulkan) */
		S_00B800_ORDER_MODE(sctx->chip_class >= CIK);

	const uint *last_block = info->last_block;
	bool partial_block_en = last_block[0] || last_block[1] || last_block[2];

	radeon_set_sh_reg_seq(cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);

	if (partial_block_en) {
		unsigned partial[3];

		/* If no partial_block, these should be an entire block size, not 0. */
		partial[0] = last_block[0] ? last_block[0] : info->block[0];
		partial[1] = last_block[1] ? last_block[1] : info->block[1];
		partial[2] = last_block[2] ? last_block[2] : info->block[2];

		radeon_emit(cs, S_00B81C_NUM_THREAD_FULL(info->block[0]) |
				S_00B81C_NUM_THREAD_PARTIAL(partial[0]));
		radeon_emit(cs, S_00B820_NUM_THREAD_FULL(info->block[1]) |
				S_00B820_NUM_THREAD_PARTIAL(partial[1]));
		radeon_emit(cs, S_00B824_NUM_THREAD_FULL(info->block[2]) |
				S_00B824_NUM_THREAD_PARTIAL(partial[2]));

		dispatch_initiator |= S_00B800_PARTIAL_TG_EN(1);
	} else {
		radeon_emit(cs, S_00B81C_NUM_THREAD_FULL(info->block[0]));
		radeon_emit(cs, S_00B820_NUM_THREAD_FULL(info->block[1]));
		radeon_emit(cs, S_00B824_NUM_THREAD_FULL(info->block[2]));
	}

	if (info->indirect) {
		uint64_t base_va = si_resource(info->indirect)->gpu_address;

		radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
		                          si_resource(info->indirect),
		                          RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);

		radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0) |
		                PKT3_SHADER_TYPE_S(1));
		radeon_emit(cs, 1);
		radeon_emit(cs, base_va);
		radeon_emit(cs, base_va >> 32);

		radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 1, render_cond_bit) |
		                PKT3_SHADER_TYPE_S(1));
		radeon_emit(cs, info->indirect_offset);
		radeon_emit(cs, dispatch_initiator);
	} else {
		radeon_emit(cs, PKT3(PKT3_DISPATCH_DIRECT, 3, render_cond_bit) |
		                PKT3_SHADER_TYPE_S(1));
		radeon_emit(cs, info->grid[0]);
		radeon_emit(cs, info->grid[1]);
		radeon_emit(cs, info->grid[2]);
		radeon_emit(cs, dispatch_initiator);
	}
}

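/* Top-level compute dispatch: flush caches and decompress resources as
 * needed, bind the shader, upload descriptors and inputs, and emit the
 * dispatch packet. */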
static void si_launch_grid(
		struct pipe_context *ctx, const struct pipe_grid_info *info)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct si_compute *program = sctx->cs_shader_state.program;
	const amd_kernel_code_t *code_object =
		si_compute_get_code_object(program, info->pc);
	int i;
	/* HW bug workaround when CS threadgroups > 256 threads and async
	 * compute isn't used, i.e. only one compute job can run at a time.
	 * If async compute is possible, the threadgroup size must be limited
	 * to 256 threads on all queues to avoid the bug.
	 * Only SI and certain CIK chips are affected.
	 */
	bool cs_regalloc_hang =
		(sctx->chip_class == SI ||
		 sctx->family == CHIP_BONAIRE ||
		 sctx->family == CHIP_KABINI) &&
		info->block[0] * info->block[1] * info->block[2] > 256;

	if (cs_regalloc_hang)
		sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
			       SI_CONTEXT_CS_PARTIAL_FLUSH;

	if (program->ir_type != PIPE_SHADER_IR_NATIVE &&
	    program->shader.compilation_failed)
		return;

	if (sctx->has_graphics) {
		if (sctx->last_num_draw_calls != sctx->num_draw_calls) {
			si_update_fb_dirtiness_after_rendering(sctx);
			sctx->last_num_draw_calls = sctx->num_draw_calls;
		}

		si_decompress_textures(sctx, 1 << PIPE_SHADER_COMPUTE);
	}

	/* Add buffer sizes for memory checking in need_cs_space. */
	si_context_add_resource_size(sctx, &program->shader.bo->b.b);
	/* TODO: add the scratch buffer */

	if (info->indirect) {
		si_context_add_resource_size(sctx, info->indirect);

		/* Indirect buffers use TC L2 on GFX9, but not older hw. */
		if (sctx->chip_class <= VI &&
		    si_resource(info->indirect)->TC_L2_dirty) {
			sctx->flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
			si_resource(info->indirect)->TC_L2_dirty = false;
		}
	}

	si_need_gfx_cs_space(sctx);

	if (sctx->bo_list_add_all_compute_resources)
		si_compute_resources_add_all_to_bo_list(sctx);

	if (!sctx->cs_shader_state.initialized)
		si_initialize_compute(sctx);

	if (sctx->flags)
		si_emit_cache_flush(sctx);

	if (!si_switch_compute_shader(sctx, program, &program->shader,
				      code_object, info->pc))
		return;

	si_upload_compute_shader_descriptors(sctx);
	si_emit_compute_shader_pointers(sctx);

	if (sctx->has_graphics &&
	    si_is_atom_dirty(sctx, &sctx->atoms.s.render_cond)) {
		sctx->atoms.s.render_cond.emit(sctx);
		si_set_atom_dirty(sctx, &sctx->atoms.s.render_cond, false);
	}

	if ((program->input_size ||
	     program->ir_type == PIPE_SHADER_IR_NATIVE) &&
	    unlikely(!si_upload_compute_input(sctx, code_object, info))) {
		return;
	}

	/* Global buffers */
	for (i = 0; i < MAX_GLOBAL_BUFFERS; i++) {
		struct si_resource *buffer =
			si_resource(program->global_buffers[i]);
		if (!buffer) {
			continue;
		}
		radeon_add_to_buffer_list(sctx, sctx->gfx_cs, buffer,
					  RADEON_USAGE_READWRITE,
					  RADEON_PRIO_COMPUTE_GLOBAL);
	}

	if (program->ir_type != PIPE_SHADER_IR_NATIVE)
		si_setup_tgsi_user_data(sctx, info);

	si_emit_dispatch_packets(sctx, info);

	if (unlikely(sctx->current_saved_cs)) {
		si_trace_emit(sctx);
		si_log_compute_state(sctx, sctx->log);
	}

	sctx->compute_is_busy = true;
	sctx->num_compute_calls++;
	if (sctx->cs_shader_state.uses_scratch)
		sctx->num_spill_compute_calls++;

	if (cs_regalloc_hang)
		sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
}

void si_destroy_compute(struct si_compute *program)
{
	if (program->ir_type != PIPE_SHADER_IR_NATIVE) {
		util_queue_drop_job(&program->screen->shader_compiler_queue,
				    &program->ready);
		util_queue_fence_destroy(&program->ready);
	}

	si_shader_destroy(&program->shader);
	FREE(program);
}

static void si_delete_compute_state(struct pipe_context *ctx, void *state)
{
	struct si_compute *program = (struct si_compute *)state;
	struct si_context *sctx = (struct si_context*)ctx;

	if (!state)
		return;

	if (program == sctx->cs_shader_state.program)
		sctx->cs_shader_state.program = NULL;

	if (program == sctx->cs_shader_state.emitted_program)
		sctx->cs_shader_state.emitted_program = NULL;

	si_compute_reference(&program, NULL);
}

static void si_set_compute_resources(struct pipe_context *ctx_,
		unsigned start, unsigned count,
		struct pipe_surface **surfaces) { }

void si_init_compute_functions(struct si_context *sctx)
{
	sctx->b.create_compute_state = si_create_compute_state;
	sctx->b.delete_compute_state = si_delete_compute_state;
	sctx->b.bind_compute_state = si_bind_compute_state;
	sctx->b.set_compute_resources = si_set_compute_resources;
	sctx->b.set_global_binding = si_set_global_binding;
	sctx->b.launch_grid = si_launch_grid;
}