/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "tgsi/tgsi_parse.h"
#include "util/u_async_debug.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"

#include "amd_kernel_code_t.h"
#include "si_build_pm4.h"
#include "si_compute.h"
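/* Debug output helper, gated by the "compute" debug flag. The do/while (0)
 * wrapper makes the macro expand to a single statement, so it stays safe
 * inside unbraced if/else bodies.
 */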
#define COMPUTE_DBG(sscreen, fmt, args...) \
	do { \
		if ((sscreen->debug_flags & DBG(COMPUTE))) \
			fprintf(stderr, fmt, ##args); \
	} while (0)

struct dispatch_packet {
	uint16_t header;
	uint16_t setup;
	uint16_t workgroup_size_x;
	uint16_t workgroup_size_y;
	uint16_t workgroup_size_z;
	uint16_t reserved0;
	uint32_t grid_size_x;
	uint32_t grid_size_y;
	uint32_t grid_size_z;
	uint32_t private_segment_size;
	uint32_t group_segment_size;
	uint64_t kernel_object;
	uint64_t kernarg_address;
	uint64_t reserved2;
};

static const amd_kernel_code_t *si_compute_get_code_object(
	const struct si_compute *program,
	uint64_t symbol_offset)
{
	if (!program->use_code_object_v2) {
		return NULL;
	}
	return (const amd_kernel_code_t*)
		(program->shader.binary.code + symbol_offset);
}

static void code_object_to_config(const amd_kernel_code_t *code_object,
				  struct si_shader_config *out_config)
{
	uint32_t rsrc1 = code_object->compute_pgm_resource_registers;
	uint32_t rsrc2 = code_object->compute_pgm_resource_registers >> 32;
	out_config->num_sgprs = code_object->wavefront_sgpr_count;
	out_config->num_vgprs = code_object->workitem_vgpr_count;
	out_config->float_mode = G_00B028_FLOAT_MODE(rsrc1);
	out_config->rsrc1 = rsrc1;
	out_config->lds_size = MAX2(out_config->lds_size, G_00B84C_LDS_SIZE(rsrc2));
	out_config->rsrc2 = rsrc2;
	out_config->scratch_bytes_per_wave =
		align(code_object->workitem_private_segment_byte_size * 64, 1024);
}
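/* Worked example for the scratch computation above: 16 bytes of private
 * segment per work-item times 64 lanes per wave is 1024 bytes per wave,
 * which align(1024, 1024) leaves unchanged; 4 bytes per work-item would
 * give 256 bytes and get rounded up to 1024.
 */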
/* Asynchronous compute shader compilation. */
static void si_create_compute_state_async(void *job, int thread_index)
{
	struct si_compute *program = (struct si_compute *)job;
	struct si_shader *shader = &program->shader;
	struct si_shader_selector sel;
	struct ac_llvm_compiler *compiler;
	struct pipe_debug_callback *debug = &program->compiler_ctx_state.debug;
	struct si_screen *sscreen = program->screen;

	assert(!debug->debug_message || debug->async);
	assert(thread_index >= 0);
	assert(thread_index < ARRAY_SIZE(sscreen->compiler));
	compiler = &sscreen->compiler[thread_index];

	memset(&sel, 0, sizeof(sel));

	sel.screen = sscreen;

	if (program->ir_type == PIPE_SHADER_IR_TGSI) {
		tgsi_scan_shader(program->ir.tgsi, &sel.info);
		sel.tokens = program->ir.tgsi;
	} else {
		assert(program->ir_type == PIPE_SHADER_IR_NIR);
		sel.nir = program->ir.nir;

		si_nir_opts(sel.nir);
		si_nir_scan_shader(sel.nir, &sel.info);
		si_lower_nir(&sel);
	}

	/* Store the declared LDS size into tgsi_shader_info for the shader
	 * cache to include it.
	 */
	sel.info.properties[TGSI_PROPERTY_CS_LOCAL_SIZE] = program->local_size;

	sel.type = PIPE_SHADER_COMPUTE;
	si_get_active_slot_masks(&sel.info,
				 &program->active_const_and_shader_buffers,
				 &program->active_samplers_and_images);

	program->shader.selector = &sel;
	program->shader.is_monolithic = true;
	program->uses_grid_size = sel.info.uses_grid_size;
	program->uses_bindless_samplers = sel.info.uses_bindless_samplers;
	program->uses_bindless_images = sel.info.uses_bindless_images;
	program->reads_variable_block_size =
		sel.info.uses_block_size &&
		sel.info.properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] == 0;
	program->num_cs_user_data_dwords =
		sel.info.properties[TGSI_PROPERTY_CS_USER_DATA_DWORDS];

	void *ir_binary = si_get_ir_binary(&sel);

	/* Try to load the shader from the shader cache. */
	mtx_lock(&sscreen->shader_cache_mutex);

	if (ir_binary &&
	    si_shader_cache_load_shader(sscreen, ir_binary, shader)) {
		mtx_unlock(&sscreen->shader_cache_mutex);

		si_shader_dump_stats_for_shader_db(shader, debug);
		si_shader_dump(sscreen, shader, debug, PIPE_SHADER_COMPUTE,
			       stderr, true);

		if (si_shader_binary_upload(sscreen, shader))
			program->shader.compilation_failed = true;
	} else {
		mtx_unlock(&sscreen->shader_cache_mutex);

		if (si_shader_create(sscreen, compiler, &program->shader, debug)) {
			program->shader.compilation_failed = true;

			if (program->ir_type == PIPE_SHADER_IR_TGSI)
				FREE(program->ir.tgsi);
			program->shader.selector = NULL;
			return;
		}

		bool scratch_enabled = shader->config.scratch_bytes_per_wave > 0;
		unsigned user_sgprs = SI_NUM_RESOURCE_SGPRS +
				      (sel.info.uses_grid_size ? 3 : 0) +
				      (program->reads_variable_block_size ? 3 : 0) +
				      program->num_cs_user_data_dwords;

		shader->config.rsrc1 =
			S_00B848_VGPRS((shader->config.num_vgprs - 1) / 4) |
			S_00B848_SGPRS((shader->config.num_sgprs - 1) / 8) |
			S_00B848_DX10_CLAMP(1) |
			S_00B848_FLOAT_MODE(shader->config.float_mode);

		shader->config.rsrc2 =
			S_00B84C_USER_SGPR(user_sgprs) |
			S_00B84C_SCRATCH_EN(scratch_enabled) |
			S_00B84C_TGID_X_EN(sel.info.uses_block_id[0]) |
			S_00B84C_TGID_Y_EN(sel.info.uses_block_id[1]) |
			S_00B84C_TGID_Z_EN(sel.info.uses_block_id[2]) |
			S_00B84C_TIDIG_COMP_CNT(sel.info.uses_thread_id[2] ? 2 :
						sel.info.uses_thread_id[1] ? 1 : 0) |
			S_00B84C_LDS_SIZE(shader->config.lds_size);

		if (ir_binary) {
			mtx_lock(&sscreen->shader_cache_mutex);
			if (!si_shader_cache_insert_shader(sscreen, ir_binary, shader, true))
				FREE(ir_binary);
			mtx_unlock(&sscreen->shader_cache_mutex);
		}
	}

	if (program->ir_type == PIPE_SHADER_IR_TGSI)
		FREE(program->ir.tgsi);

	program->shader.selector = NULL;
}
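/* Create the compute CSO. TGSI token streams are duplicated, so the caller
 * keeps ownership of cso->prog; NIR shaders are adopted directly; native
 * (code object) binaries are parsed, configured and uploaded immediately.
 */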
static void *si_create_compute_state(
	struct pipe_context *ctx,
	const struct pipe_compute_state *cso)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_screen *sscreen = (struct si_screen *)ctx->screen;
	struct si_compute *program = CALLOC_STRUCT(si_compute);

	/* Guard against allocation failure before taking a reference. */
	if (!program)
		return NULL;

	pipe_reference_init(&program->reference, 1);
	program->screen = (struct si_screen *)ctx->screen;
	program->ir_type = cso->ir_type;
	program->local_size = cso->req_local_mem;
	program->private_size = cso->req_private_mem;
	program->input_size = cso->req_input_mem;
	program->use_code_object_v2 = cso->ir_type == PIPE_SHADER_IR_NATIVE;

	if (cso->ir_type != PIPE_SHADER_IR_NATIVE) {
		if (cso->ir_type == PIPE_SHADER_IR_TGSI) {
			program->ir.tgsi = tgsi_dup_tokens(cso->prog);
			if (!program->ir.tgsi) {
				FREE(program);
				return NULL;
			}
		} else {
			assert(cso->ir_type == PIPE_SHADER_IR_NIR);
			program->ir.nir = (struct nir_shader *) cso->prog;
		}

		program->compiler_ctx_state.debug = sctx->debug;
		program->compiler_ctx_state.is_debug_context = sctx->is_debug;
		p_atomic_inc(&sscreen->num_shaders_created);

		si_schedule_initial_compile(sctx, PIPE_SHADER_COMPUTE,
					    &program->ready,
					    &program->compiler_ctx_state,
					    program, si_create_compute_state_async);
	} else {
		const struct pipe_llvm_program_header *header;
		const char *code;
		header = cso->prog;
		code = cso->prog + sizeof(struct pipe_llvm_program_header);

		ac_elf_read(code, header->num_bytes, &program->shader.binary);
		if (program->use_code_object_v2) {
			const amd_kernel_code_t *code_object =
				si_compute_get_code_object(program, 0);
			code_object_to_config(code_object, &program->shader.config);
			if (program->shader.binary.reloc_count != 0) {
				fprintf(stderr, "Error: %d unsupported relocations\n",
					program->shader.binary.reloc_count);
				FREE(program);
				return NULL;
			}
		} else {
			si_shader_binary_read_config(&program->shader.binary,
						     &program->shader.config, 0);
		}
		si_shader_dump(sctx->screen, &program->shader, &sctx->debug,
			       PIPE_SHADER_COMPUTE, stderr, true);
		if (si_shader_binary_upload(sctx->screen, &program->shader) < 0) {
			fprintf(stderr, "LLVM failed to upload shader\n");
			FREE(program);
			return NULL;
		}
	}

	return program;
}
static void si_bind_compute_state(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct si_compute *program = (struct si_compute*)state;

	sctx->cs_shader_state.program = program;
	if (!program)
		return;

	/* Wait because we need active slot usage masks. */
	if (program->ir_type != PIPE_SHADER_IR_NATIVE)
		util_queue_fence_wait(&program->ready);

	si_set_active_descriptors(sctx,
				  SI_DESCS_FIRST_COMPUTE +
				  SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS,
				  program->active_const_and_shader_buffers);
	si_set_active_descriptors(sctx,
				  SI_DESCS_FIRST_COMPUTE +
				  SI_SHADER_DESCS_SAMPLERS_AND_IMAGES,
				  program->active_samplers_and_images);
}
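/* Bind global buffers for compute kernels. Each handles[i] slot initially
 * holds a 32-bit little-endian offset into resources[i]; it is rewritten
 * in place with the 64-bit little-endian GPU address of buffer + offset.
 */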
static void si_set_global_binding(
	struct pipe_context *ctx, unsigned first, unsigned n,
	struct pipe_resource **resources,
	uint32_t **handles)
{
	unsigned i;
	struct si_context *sctx = (struct si_context*)ctx;
	struct si_compute *program = sctx->cs_shader_state.program;

	assert(first + n <= MAX_GLOBAL_BUFFERS);

	if (!resources) {
		for (i = 0; i < n; i++) {
			pipe_resource_reference(&program->global_buffers[first + i], NULL);
		}
		return;
	}

	for (i = 0; i < n; i++) {
		uint64_t va;
		uint32_t offset;
		pipe_resource_reference(&program->global_buffers[first + i], resources[i]);
		va = si_resource(resources[i])->gpu_address;
		offset = util_le32_to_cpu(*handles[i]);
		va += offset;
		va = util_cpu_to_le64(va);
		memcpy(handles[i], &va, sizeof(va));
	}
}

static void si_initialize_compute(struct si_context *sctx)
{
	struct radeon_cmdbuf *cs = sctx->gfx_cs;
	uint64_t bc_va;

	radeon_set_sh_reg_seq(cs, R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0, 2);
	/* R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0 / SE1 */
	radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));
	radeon_emit(cs, S_00B85C_SH0_CU_EN(0xffff) | S_00B85C_SH1_CU_EN(0xffff));

	if (sctx->chip_class >= CIK) {
		/* Also set R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE2 / SE3 */
		radeon_set_sh_reg_seq(cs,
				      R_00B864_COMPUTE_STATIC_THREAD_MGMT_SE2, 2);
		radeon_emit(cs, S_00B864_SH0_CU_EN(0xffff) |
				S_00B864_SH1_CU_EN(0xffff));
		radeon_emit(cs, S_00B868_SH0_CU_EN(0xffff) |
				S_00B868_SH1_CU_EN(0xffff));
	}

	/* This register has been moved to R_00CD20_COMPUTE_MAX_WAVE_ID
	 * and is now per pipe, so it should be handled in the
	 * kernel if we want to use something other than the default value,
	 * which is now 0x22f.
	 */
	if (sctx->chip_class <= SI) {
		/* XXX: This should be:
		 * (number of compute units) * 4 * (waves per simd) - 1 */
		radeon_set_sh_reg(cs, R_00B82C_COMPUTE_MAX_WAVE_ID,
				  0x190 /* Default value */);
	}

	/* Set the pointer to border colors. */
	bc_va = sctx->border_color_buffer->gpu_address;

	if (sctx->chip_class >= CIK) {
		radeon_set_uconfig_reg_seq(cs, R_030E00_TA_CS_BC_BASE_ADDR, 2);
		radeon_emit(cs, bc_va >> 8);  /* R_030E00_TA_CS_BC_BASE_ADDR */
		radeon_emit(cs, S_030E04_ADDRESS(bc_va >> 40)); /* R_030E04_TA_CS_BC_BASE_ADDR_HI */
	} else {
		if (sctx->screen->info.si_TA_CS_BC_BASE_ADDR_allowed) {
			radeon_set_config_reg(cs, R_00950C_TA_CS_BC_BASE_ADDR,
					      bc_va >> 8);
		}
	}

	sctx->cs_shader_state.emitted_program = NULL;
	sctx->cs_shader_state.initialized = true;
}

static bool si_setup_compute_scratch_buffer(struct si_context *sctx,
					    struct si_shader *shader,
					    struct si_shader_config *config)
{
	uint64_t scratch_bo_size, scratch_needed;
	scratch_bo_size = 0;
	scratch_needed = config->scratch_bytes_per_wave * sctx->scratch_waves;
	if (sctx->compute_scratch_buffer)
		scratch_bo_size = sctx->compute_scratch_buffer->b.b.width0;

	if (scratch_bo_size < scratch_needed) {
		si_resource_reference(&sctx->compute_scratch_buffer, NULL);

		sctx->compute_scratch_buffer =
			si_aligned_buffer_create(&sctx->screen->b,
						 SI_RESOURCE_FLAG_UNMAPPABLE,
						 PIPE_USAGE_DEFAULT,
						 scratch_needed, 256);

		if (!sctx->compute_scratch_buffer)
			return false;
	}

	if (sctx->compute_scratch_buffer != shader->scratch_bo && scratch_needed) {
		uint64_t scratch_va = sctx->compute_scratch_buffer->gpu_address;

		si_shader_apply_scratch_relocs(shader, scratch_va);

		if (si_shader_binary_upload(sctx->screen, shader))
			return false;

		si_resource_reference(&shader->scratch_bo,
				      sctx->compute_scratch_buffer);
	}

	return true;
}
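/* LDS granularity example for the over-allocation note below: on SI the
 * shader-declared LDS and the state-tracker-declared LDS are each rounded
 * up to 256-byte blocks separately, so 4 + 4 bytes end up as 512 bytes of
 * LDS_SIZE instead of one shared 256-byte block.
 */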
static bool si_switch_compute_shader(struct si_context *sctx,
				     struct si_compute *program,
				     struct si_shader *shader,
				     const amd_kernel_code_t *code_object,
				     unsigned offset)
{
	struct radeon_cmdbuf *cs = sctx->gfx_cs;
	struct si_shader_config inline_config = {0};
	struct si_shader_config *config;
	uint64_t shader_va;

	if (sctx->cs_shader_state.emitted_program == program &&
	    sctx->cs_shader_state.offset == offset)
		return true;

	if (program->ir_type != PIPE_SHADER_IR_NATIVE) {
		config = &shader->config;
	} else {
		unsigned lds_blocks;

		config = &inline_config;
		if (code_object) {
			code_object_to_config(code_object, config);
		} else {
			si_shader_binary_read_config(&shader->binary, config, offset);
		}

		lds_blocks = config->lds_size;
		/* XXX: We are over allocating LDS. For SI, the shader reports
		 * LDS in blocks of 256 bytes, so if there are 4 bytes lds
		 * allocated in the shader and 4 bytes allocated by the state
		 * tracker, then we will set LDS_SIZE to 512 bytes rather than 256.
		 */
		if (sctx->chip_class <= SI) {
			lds_blocks += align(program->local_size, 256) >> 8;
		} else {
			lds_blocks += align(program->local_size, 512) >> 9;
		}

		/* TODO: use si_multiwave_lds_size_workaround */
		assert(lds_blocks <= 0xFF);

		config->rsrc2 &= C_00B84C_LDS_SIZE;
		config->rsrc2 |= S_00B84C_LDS_SIZE(lds_blocks);
	}

	if (!si_setup_compute_scratch_buffer(sctx, shader, config))
		return false;

	if (shader->scratch_bo) {
		COMPUTE_DBG(sctx->screen, "Waves: %u; Scratch per wave: %u bytes; "
			    "Total Scratch: %u bytes\n", sctx->scratch_waves,
			    config->scratch_bytes_per_wave,
			    config->scratch_bytes_per_wave *
			    sctx->scratch_waves);

		radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
					  shader->scratch_bo, RADEON_USAGE_READWRITE,
					  RADEON_PRIO_SCRATCH_BUFFER);
	}

	/* Prefetch the compute shader to TC L2.
	 *
	 * We should also prefetch graphics shaders if a compute dispatch was
	 * the last command, and the compute shader if a draw call was the last
	 * command. However, that would add more complexity and we're likely
	 * to get a shader state change in that case anyway.
	 */
	if (sctx->chip_class >= CIK) {
		cik_prefetch_TC_L2_async(sctx, &program->shader.bo->b.b,
					 0, program->shader.bo->b.b.width0);
	}

	shader_va = shader->bo->gpu_address + offset;
	if (program->use_code_object_v2) {
		/* Shader code is placed after the amd_kernel_code_t
		 * struct. */
		shader_va += sizeof(amd_kernel_code_t);
	}

	radeon_add_to_buffer_list(sctx, sctx->gfx_cs, shader->bo,
				  RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);

	radeon_set_sh_reg_seq(cs, R_00B830_COMPUTE_PGM_LO, 2);
	radeon_emit(cs, shader_va >> 8);
	radeon_emit(cs, S_00B834_DATA(shader_va >> 40));

	radeon_set_sh_reg_seq(cs, R_00B848_COMPUTE_PGM_RSRC1, 2);
	radeon_emit(cs, config->rsrc1);
	radeon_emit(cs, config->rsrc2);

	COMPUTE_DBG(sctx->screen, "COMPUTE_PGM_RSRC1: 0x%08x "
		    "COMPUTE_PGM_RSRC2: 0x%08x\n", config->rsrc1, config->rsrc2);

	sctx->max_seen_compute_scratch_bytes_per_wave =
		MAX2(sctx->max_seen_compute_scratch_bytes_per_wave,
		     config->scratch_bytes_per_wave);

	radeon_set_sh_reg(cs, R_00B860_COMPUTE_TMPRING_SIZE,
			  S_00B860_WAVES(sctx->scratch_waves)
			  | S_00B860_WAVESIZE(sctx->max_seen_compute_scratch_bytes_per_wave >> 10));

	sctx->cs_shader_state.emitted_program = program;
	sctx->cs_shader_state.offset = offset;
	sctx->cs_shader_state.uses_scratch =
		config->scratch_bytes_per_wave != 0;

	return true;
}
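/* The four dwords emitted below form the buffer resource descriptor for
 * scratch: base address plus swizzle bits, num_records = 0xffffffff (which
 * effectively disables address clamping), and format/index-stride bits
 * with ADD_TID_ENABLE so each lane addresses its own scratch slice.
 */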
static void setup_scratch_rsrc_user_sgprs(struct si_context *sctx,
					  const amd_kernel_code_t *code_object,
					  unsigned user_sgpr)
{
	struct radeon_cmdbuf *cs = sctx->gfx_cs;
	uint64_t scratch_va = sctx->compute_scratch_buffer->gpu_address;

	unsigned max_private_element_size = AMD_HSA_BITS_GET(
		code_object->code_properties,
		AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE);

	uint32_t scratch_dword0 = scratch_va & 0xffffffff;
	uint32_t scratch_dword1 =
		S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) |
		S_008F04_SWIZZLE_ENABLE(1);

	/* Disable address clamping */
	uint32_t scratch_dword2 = 0xffffffff;
	uint32_t scratch_dword3 =
		S_008F0C_INDEX_STRIDE(3) |
		S_008F0C_ADD_TID_ENABLE(1);

	if (sctx->chip_class >= GFX9) {
		assert(max_private_element_size == 1); /* always 4 bytes on GFX9 */
	} else {
		scratch_dword3 |= S_008F0C_ELEMENT_SIZE(max_private_element_size);

		if (sctx->chip_class < VI) {
			/* BUF_DATA_FORMAT is ignored, but it cannot be
			 * BUF_DATA_FORMAT_INVALID. */
			scratch_dword3 |=
				S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_8);
		}
	}

	radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
			      (user_sgpr * 4), 4);
	radeon_emit(cs, scratch_dword0);
	radeon_emit(cs, scratch_dword1);
	radeon_emit(cs, scratch_dword2);
	radeon_emit(cs, scratch_dword3);
}
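/* Set up the ABI user SGPRs for code object v2 kernels. The uploaded
 * struct dispatch_packet mirrors the HSA/AQL kernel dispatch packet
 * layout that such kernels expect to find behind the dispatch pointer.
 */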
static void si_setup_user_sgprs_co_v2(struct si_context *sctx,
				      const amd_kernel_code_t *code_object,
				      const struct pipe_grid_info *info,
				      uint64_t kernel_args_va)
{
	struct si_compute *program = sctx->cs_shader_state.program;
	struct radeon_cmdbuf *cs = sctx->gfx_cs;

	static const enum amd_code_property_mask_t workgroup_count_masks [] = {
		AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X,
		AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y,
		AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z
	};

	unsigned i, user_sgpr = 0;
	if (AMD_HSA_BITS_GET(code_object->code_properties,
			AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER)) {
		if (code_object->workitem_private_segment_byte_size > 0) {
			setup_scratch_rsrc_user_sgprs(sctx, code_object,
						      user_sgpr);
		}
		user_sgpr += 4;
	}

	if (AMD_HSA_BITS_GET(code_object->code_properties,
			AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR)) {
		struct dispatch_packet dispatch;
		unsigned dispatch_offset;
		struct si_resource *dispatch_buf = NULL;
		uint64_t dispatch_va;

		/* Upload dispatch ptr */
		memset(&dispatch, 0, sizeof(dispatch));

		dispatch.workgroup_size_x = util_cpu_to_le16(info->block[0]);
		dispatch.workgroup_size_y = util_cpu_to_le16(info->block[1]);
		dispatch.workgroup_size_z = util_cpu_to_le16(info->block[2]);

		dispatch.grid_size_x = util_cpu_to_le32(info->grid[0] * info->block[0]);
		dispatch.grid_size_y = util_cpu_to_le32(info->grid[1] * info->block[1]);
		dispatch.grid_size_z = util_cpu_to_le32(info->grid[2] * info->block[2]);

		dispatch.private_segment_size = util_cpu_to_le32(program->private_size);
		dispatch.group_segment_size = util_cpu_to_le32(program->local_size);

		dispatch.kernarg_address = util_cpu_to_le64(kernel_args_va);

		u_upload_data(sctx->b.const_uploader, 0, sizeof(dispatch),
			      256, &dispatch, &dispatch_offset,
			      (struct pipe_resource**)&dispatch_buf);

		if (!dispatch_buf) {
			fprintf(stderr, "Error: Failed to allocate dispatch "
					"packet.\n");
		}
		radeon_add_to_buffer_list(sctx, sctx->gfx_cs, dispatch_buf,
					  RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);

		dispatch_va = dispatch_buf->gpu_address + dispatch_offset;

		radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
				      (user_sgpr * 4), 2);
		radeon_emit(cs, dispatch_va);
		radeon_emit(cs, S_008F04_BASE_ADDRESS_HI(dispatch_va >> 32) |
				S_008F04_STRIDE(0));

		si_resource_reference(&dispatch_buf, NULL);
		user_sgpr += 2;
	}

	if (AMD_HSA_BITS_GET(code_object->code_properties,
			AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR)) {
		radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
				      (user_sgpr * 4), 2);
		radeon_emit(cs, kernel_args_va);
		radeon_emit(cs, S_008F04_BASE_ADDRESS_HI(kernel_args_va >> 32) |
				S_008F04_STRIDE(0));
		user_sgpr += 2;
	}

	for (i = 0; i < 3 && user_sgpr < 16; i++) {
		if (code_object->code_properties & workgroup_count_masks[i]) {
			radeon_set_sh_reg_seq(cs,
					      R_00B900_COMPUTE_USER_DATA_0 +
					      (user_sgpr * 4), 1);
			radeon_emit(cs, info->grid[i]);
			user_sgpr += 1;
		}
	}
}
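/* Kernel argument buffer layout without a v2 code object: 36 bytes
 * (9 dwords) of work-size information precede the user arguments;
 * dwords 0-2 hold the grid size, 3-5 the global size (grid * block)
 * and 6-8 the block size, all little-endian.
 */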
static bool si_upload_compute_input(struct si_context *sctx,
				    const amd_kernel_code_t *code_object,
				    const struct pipe_grid_info *info)
{
	struct radeon_cmdbuf *cs = sctx->gfx_cs;
	struct si_compute *program = sctx->cs_shader_state.program;
	struct si_resource *input_buffer = NULL;
	unsigned kernel_args_size;
	unsigned num_work_size_bytes = program->use_code_object_v2 ? 0 : 36;
	uint32_t kernel_args_offset = 0;
	uint32_t *kernel_args;
	void *kernel_args_ptr;
	uint64_t kernel_args_va;
	unsigned i;

	/* The extra num_work_size_bytes are for work group / work item size information */
	kernel_args_size = program->input_size + num_work_size_bytes;

	u_upload_alloc(sctx->b.const_uploader, 0, kernel_args_size,
		       sctx->screen->info.tcc_cache_line_size,
		       &kernel_args_offset,
		       (struct pipe_resource**)&input_buffer, &kernel_args_ptr);

	if (unlikely(!kernel_args_ptr))
		return false;

	kernel_args = (uint32_t*)kernel_args_ptr;
	kernel_args_va = input_buffer->gpu_address + kernel_args_offset;

	if (!code_object) {
		for (i = 0; i < 3; i++) {
			kernel_args[i] = util_cpu_to_le32(info->grid[i]);
			kernel_args[i + 3] = util_cpu_to_le32(info->grid[i] * info->block[i]);
			kernel_args[i + 6] = util_cpu_to_le32(info->block[i]);
		}
	}

	memcpy(kernel_args + (num_work_size_bytes / 4), info->input,
	       program->input_size);

	for (i = 0; i < (kernel_args_size / 4); i++) {
		COMPUTE_DBG(sctx->screen, "input %u : %u\n", i,
			    kernel_args[i]);
	}

	radeon_add_to_buffer_list(sctx, sctx->gfx_cs, input_buffer,
				  RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);

	if (code_object) {
		si_setup_user_sgprs_co_v2(sctx, code_object, info, kernel_args_va);
	} else {
		radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0, 2);
		radeon_emit(cs, kernel_args_va);
		radeon_emit(cs, S_008F04_BASE_ADDRESS_HI(kernel_args_va >> 32) |
				S_008F04_STRIDE(0));
	}

	si_resource_reference(&input_buffer, NULL);

	return true;
}
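/* User SGPR layout for TGSI/NIR compute shaders: the grid size (3 dwords,
 * if used) follows the resource SGPRs, then the variable block size
 * (3 dwords, if read), then any cs_user_data dwords.
 */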
static void si_setup_tgsi_user_data(struct si_context *sctx,
				    const struct pipe_grid_info *info)
{
	struct si_compute *program = sctx->cs_shader_state.program;
	struct radeon_cmdbuf *cs = sctx->gfx_cs;
	unsigned grid_size_reg = R_00B900_COMPUTE_USER_DATA_0 +
				 4 * SI_NUM_RESOURCE_SGPRS;
	unsigned block_size_reg = grid_size_reg +
				  /* 12 bytes = 3 dwords. */
				  12 * program->uses_grid_size;
	unsigned cs_user_data_reg = block_size_reg +
				    12 * program->reads_variable_block_size;

	if (info->indirect) {
		if (program->uses_grid_size) {
			for (unsigned i = 0; i < 3; ++i) {
				si_cp_copy_data(sctx,
						COPY_DATA_REG, NULL, (grid_size_reg >> 2) + i,
						COPY_DATA_SRC_MEM, si_resource(info->indirect),
						info->indirect_offset + 4 * i);
			}
		}
	} else {
		if (program->uses_grid_size) {
			radeon_set_sh_reg_seq(cs, grid_size_reg, 3);
			radeon_emit(cs, info->grid[0]);
			radeon_emit(cs, info->grid[1]);
			radeon_emit(cs, info->grid[2]);
		}
		if (program->reads_variable_block_size) {
			radeon_set_sh_reg_seq(cs, block_size_reg, 3);
			radeon_emit(cs, info->block[0]);
			radeon_emit(cs, info->block[1]);
			radeon_emit(cs, info->block[2]);
		}
	}

	if (program->num_cs_user_data_dwords) {
		radeon_set_sh_reg_seq(cs, cs_user_data_reg, program->num_cs_user_data_dwords);
		radeon_emit_array(cs, sctx->cs_user_data, program->num_cs_user_data_dwords);
	}
}
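/* Example for the wave math below: an 8x8x4 block is 256 threads, i.e.
 * DIV_ROUND_UP(256, 64) = 4 waves per threadgroup at a wave size of 64,
 * so SIMD_DEST_CNTL is set because 4 % 4 == 0.
 */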
static void si_emit_dispatch_packets(struct si_context *sctx,
				     const struct pipe_grid_info *info)
{
	struct si_screen *sscreen = sctx->screen;
	struct radeon_cmdbuf *cs = sctx->gfx_cs;
	bool render_cond_bit = sctx->render_cond && !sctx->render_cond_force_off;
	unsigned waves_per_threadgroup =
		DIV_ROUND_UP(info->block[0] * info->block[1] * info->block[2], 64);
	unsigned compute_resource_limits =
		S_00B854_SIMD_DEST_CNTL(waves_per_threadgroup % 4 == 0);

	if (sctx->chip_class >= CIK) {
		unsigned num_cu_per_se = sscreen->info.num_good_compute_units /
					 sscreen->info.max_se;

		/* Force even distribution on all SIMDs in CU if the workgroup
		 * size is 64. This has shown some good improvements if # of CUs
		 * per SE is not a multiple of 4.
		 */
		if (num_cu_per_se % 4 && waves_per_threadgroup == 1)
			compute_resource_limits |= S_00B854_FORCE_SIMD_DIST(1);

		compute_resource_limits |= S_00B854_WAVES_PER_SH(sctx->cs_max_waves_per_sh);
	} else {
		/* SI */
		if (sctx->cs_max_waves_per_sh) {
			unsigned limit_div16 = DIV_ROUND_UP(sctx->cs_max_waves_per_sh, 16);
			compute_resource_limits |= S_00B854_WAVES_PER_SH_SI(limit_div16);
		}
	}

	radeon_set_sh_reg(cs, R_00B854_COMPUTE_RESOURCE_LIMITS,
			  compute_resource_limits);

	unsigned dispatch_initiator =
		S_00B800_COMPUTE_SHADER_EN(1) |
		S_00B800_FORCE_START_AT_000(1) |
		/* If the KMD allows it (there is a KMD hw register for it),
		 * allow launching waves out-of-order. (same as Vulkan) */
		S_00B800_ORDER_MODE(sctx->chip_class >= CIK);

	const uint *last_block = info->last_block;
	bool partial_block_en = last_block[0] || last_block[1] || last_block[2];

	radeon_set_sh_reg_seq(cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);

	if (partial_block_en) {
		unsigned partial[3];

		/* If no partial_block, these should be an entire block size, not 0. */
		partial[0] = last_block[0] ? last_block[0] : info->block[0];
		partial[1] = last_block[1] ? last_block[1] : info->block[1];
		partial[2] = last_block[2] ? last_block[2] : info->block[2];

		radeon_emit(cs, S_00B81C_NUM_THREAD_FULL(info->block[0]) |
			    S_00B81C_NUM_THREAD_PARTIAL(partial[0]));
		radeon_emit(cs, S_00B820_NUM_THREAD_FULL(info->block[1]) |
			    S_00B820_NUM_THREAD_PARTIAL(partial[1]));
		radeon_emit(cs, S_00B824_NUM_THREAD_FULL(info->block[2]) |
			    S_00B824_NUM_THREAD_PARTIAL(partial[2]));

		dispatch_initiator |= S_00B800_PARTIAL_TG_EN(1);
	} else {
		radeon_emit(cs, S_00B81C_NUM_THREAD_FULL(info->block[0]));
		radeon_emit(cs, S_00B820_NUM_THREAD_FULL(info->block[1]));
		radeon_emit(cs, S_00B824_NUM_THREAD_FULL(info->block[2]));
	}

	if (info->indirect) {
		uint64_t base_va = si_resource(info->indirect)->gpu_address;

		radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
					  si_resource(info->indirect),
					  RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);

		radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0) |
			    PKT3_SHADER_TYPE_S(1));
		radeon_emit(cs, 1);
		radeon_emit(cs, base_va);
		radeon_emit(cs, base_va >> 32);

		radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 1, render_cond_bit) |
			    PKT3_SHADER_TYPE_S(1));
		radeon_emit(cs, info->indirect_offset);
		radeon_emit(cs, dispatch_initiator);
	} else {
		radeon_emit(cs, PKT3(PKT3_DISPATCH_DIRECT, 3, render_cond_bit) |
			    PKT3_SHADER_TYPE_S(1));
		radeon_emit(cs, info->grid[0]);
		radeon_emit(cs, info->grid[1]);
		radeon_emit(cs, info->grid[2]);
		radeon_emit(cs, dispatch_initiator);
	}
}
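/* Top-level dispatch entry point: handles hazard workarounds and cache
 * flushes, switches the compute shader, uploads descriptors and user
 * SGPRs, then emits the direct or indirect dispatch packet.
 */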
static void si_launch_grid(
		struct pipe_context *ctx, const struct pipe_grid_info *info)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct si_compute *program = sctx->cs_shader_state.program;
	const amd_kernel_code_t *code_object =
		si_compute_get_code_object(program, info->pc);
	int i;
	/* HW bug workaround when CS threadgroups > 256 threads and async
	 * compute isn't used, i.e. only one compute job can run at a time.
	 * If async compute is possible, the threadgroup size must be limited
	 * to 256 threads on all queues to avoid the bug.
	 * Only SI and certain CIK chips are affected.
	 */
	bool cs_regalloc_hang =
		(sctx->chip_class == SI ||
		 sctx->family == CHIP_BONAIRE ||
		 sctx->family == CHIP_KABINI) &&
		info->block[0] * info->block[1] * info->block[2] > 256;

	if (cs_regalloc_hang)
		sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
			       SI_CONTEXT_CS_PARTIAL_FLUSH;

	if (program->ir_type != PIPE_SHADER_IR_NATIVE &&
	    program->shader.compilation_failed)
		return;

	if (sctx->has_graphics) {
		if (sctx->last_num_draw_calls != sctx->num_draw_calls) {
			si_update_fb_dirtiness_after_rendering(sctx);
			sctx->last_num_draw_calls = sctx->num_draw_calls;
		}

		si_decompress_textures(sctx, 1 << PIPE_SHADER_COMPUTE);
	}

	/* Add buffer sizes for memory checking in need_cs_space. */
	si_context_add_resource_size(sctx, &program->shader.bo->b.b);
	/* TODO: add the scratch buffer */

	if (info->indirect) {
		si_context_add_resource_size(sctx, info->indirect);

		/* Indirect buffers use TC L2 on GFX9, but not older hw. */
		if (sctx->chip_class <= VI &&
		    si_resource(info->indirect)->TC_L2_dirty) {
			sctx->flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
			si_resource(info->indirect)->TC_L2_dirty = false;
		}
	}

	si_need_gfx_cs_space(sctx);

	if (sctx->bo_list_add_all_compute_resources)
		si_compute_resources_add_all_to_bo_list(sctx);

	if (!sctx->cs_shader_state.initialized)
		si_initialize_compute(sctx);

	if (sctx->flags)
		si_emit_cache_flush(sctx);

	if (!si_switch_compute_shader(sctx, program, &program->shader,
				      code_object, info->pc))
		return;

	si_upload_compute_shader_descriptors(sctx);
	si_emit_compute_shader_pointers(sctx);

	if (sctx->has_graphics &&
	    si_is_atom_dirty(sctx, &sctx->atoms.s.render_cond)) {
		sctx->atoms.s.render_cond.emit(sctx);
		si_set_atom_dirty(sctx, &sctx->atoms.s.render_cond, false);
	}

	if ((program->input_size ||
	     program->ir_type == PIPE_SHADER_IR_NATIVE) &&
	    unlikely(!si_upload_compute_input(sctx, code_object, info))) {
		return;
	}

	/* Global buffers */
	for (i = 0; i < MAX_GLOBAL_BUFFERS; i++) {
		struct si_resource *buffer =
			si_resource(program->global_buffers[i]);
		if (!buffer) {
			continue;
		}
		radeon_add_to_buffer_list(sctx, sctx->gfx_cs, buffer,
					  RADEON_USAGE_READWRITE,
					  RADEON_PRIO_COMPUTE_GLOBAL);
	}

	if (program->ir_type != PIPE_SHADER_IR_NATIVE)
		si_setup_tgsi_user_data(sctx, info);

	si_emit_dispatch_packets(sctx, info);

	if (unlikely(sctx->current_saved_cs)) {
		si_trace_emit(sctx);
		si_log_compute_state(sctx, sctx->log);
	}

	sctx->compute_is_busy = true;
	sctx->num_compute_calls++;
	if (sctx->cs_shader_state.uses_scratch)
		sctx->num_spill_compute_calls++;

	if (cs_regalloc_hang)
		sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
}
void si_destroy_compute(struct si_compute *program)
{
	if (program->ir_type != PIPE_SHADER_IR_NATIVE) {
		util_queue_drop_job(&program->screen->shader_compiler_queue,
				    &program->ready);
		util_queue_fence_destroy(&program->ready);
	}

	si_shader_destroy(&program->shader);
	FREE(program);
}

static void si_delete_compute_state(struct pipe_context *ctx, void *state)
{
	struct si_compute *program = (struct si_compute *)state;
	struct si_context *sctx = (struct si_context*)ctx;

	if (!state)
		return;

	if (program == sctx->cs_shader_state.program)
		sctx->cs_shader_state.program = NULL;

	if (program == sctx->cs_shader_state.emitted_program)
		sctx->cs_shader_state.emitted_program = NULL;

	si_compute_reference(&program, NULL);
}
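/* Intentionally a no-op: compute resources are bound through the
 * descriptor paths above rather than through pipe_surfaces.
 */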
static void si_set_compute_resources(struct pipe_context *ctx_,
				     unsigned start, unsigned count,
				     struct pipe_surface **surfaces) { }

void si_init_compute_functions(struct si_context *sctx)
{
	sctx->b.create_compute_state = si_create_compute_state;
	sctx->b.delete_compute_state = si_delete_compute_state;
	sctx->b.bind_compute_state = si_bind_compute_state;
	sctx->b.set_compute_resources = si_set_compute_resources;
	sctx->b.set_global_binding = si_set_global_binding;
	sctx->b.launch_grid = si_launch_grid;
}