1b8e80941Smrg/* 2b8e80941Smrg * Copyright 2015 Advanced Micro Devices, Inc. 3b8e80941Smrg * All Rights Reserved. 4b8e80941Smrg * 5b8e80941Smrg * Permission is hereby granted, free of charge, to any person obtaining a 6b8e80941Smrg * copy of this software and associated documentation files (the "Software"), 7b8e80941Smrg * to deal in the Software without restriction, including without limitation 8b8e80941Smrg * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9b8e80941Smrg * and/or sell copies of the Software, and to permit persons to whom the 10b8e80941Smrg * Software is furnished to do so, subject to the following conditions: 11b8e80941Smrg * 12b8e80941Smrg * The above copyright notice and this permission notice (including the next 13b8e80941Smrg * paragraph) shall be included in all copies or substantial portions of the 14b8e80941Smrg * Software. 15b8e80941Smrg * 16b8e80941Smrg * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17b8e80941Smrg * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18b8e80941Smrg * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19b8e80941Smrg * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20b8e80941Smrg * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21b8e80941Smrg * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22b8e80941Smrg * SOFTWARE. 23b8e80941Smrg */ 24b8e80941Smrg 25b8e80941Smrg#include "si_build_pm4.h" 26b8e80941Smrg#include "si_query.h" 27b8e80941Smrg#include "util/u_memory.h" 28b8e80941Smrg 29b8e80941Smrg 30b8e80941Smrgenum si_pc_block_flags { 31b8e80941Smrg /* This block is part of the shader engine */ 32b8e80941Smrg SI_PC_BLOCK_SE = (1 << 0), 33b8e80941Smrg 34b8e80941Smrg /* Expose per-instance groups instead of summing all instances (within 35b8e80941Smrg * an SE). 
*/ 36b8e80941Smrg SI_PC_BLOCK_INSTANCE_GROUPS = (1 << 1), 37b8e80941Smrg 38b8e80941Smrg /* Expose per-SE groups instead of summing instances across SEs. */ 39b8e80941Smrg SI_PC_BLOCK_SE_GROUPS = (1 << 2), 40b8e80941Smrg 41b8e80941Smrg /* Shader block */ 42b8e80941Smrg SI_PC_BLOCK_SHADER = (1 << 3), 43b8e80941Smrg 44b8e80941Smrg /* Non-shader block with perfcounters windowed by shaders. */ 45b8e80941Smrg SI_PC_BLOCK_SHADER_WINDOWED = (1 << 4), 46b8e80941Smrg}; 47b8e80941Smrg 48b8e80941Smrgenum si_pc_reg_layout { 49b8e80941Smrg /* All secondary selector dwords follow as one block after the primary 50b8e80941Smrg * selector dwords for the counters that have secondary selectors. 51b8e80941Smrg */ 52b8e80941Smrg SI_PC_MULTI_BLOCK = 0, 53b8e80941Smrg 54b8e80941Smrg /* Each secondary selector dword follows immediately afters the 55b8e80941Smrg * corresponding primary. 56b8e80941Smrg */ 57b8e80941Smrg SI_PC_MULTI_ALTERNATE = 1, 58b8e80941Smrg 59b8e80941Smrg /* All secondary selector dwords follow as one block after all primary 60b8e80941Smrg * selector dwords. 61b8e80941Smrg */ 62b8e80941Smrg SI_PC_MULTI_TAIL = 2, 63b8e80941Smrg 64b8e80941Smrg /* Free-form arrangement of selector registers. */ 65b8e80941Smrg SI_PC_MULTI_CUSTOM = 3, 66b8e80941Smrg 67b8e80941Smrg SI_PC_MULTI_MASK = 3, 68b8e80941Smrg 69b8e80941Smrg /* Registers are laid out in decreasing rather than increasing order. 
*/ 70b8e80941Smrg SI_PC_REG_REVERSE = 4, 71b8e80941Smrg 72b8e80941Smrg SI_PC_FAKE = 8, 73b8e80941Smrg}; 74b8e80941Smrg 75b8e80941Smrgstruct si_pc_block_base { 76b8e80941Smrg const char *name; 77b8e80941Smrg unsigned num_counters; 78b8e80941Smrg unsigned flags; 79b8e80941Smrg 80b8e80941Smrg unsigned select_or; 81b8e80941Smrg unsigned select0; 82b8e80941Smrg unsigned counter0_lo; 83b8e80941Smrg unsigned *select; 84b8e80941Smrg unsigned *counters; 85b8e80941Smrg unsigned num_multi; 86b8e80941Smrg unsigned num_prelude; 87b8e80941Smrg unsigned layout; 88b8e80941Smrg}; 89b8e80941Smrg 90b8e80941Smrgstruct si_pc_block_gfxdescr { 91b8e80941Smrg struct si_pc_block_base *b; 92b8e80941Smrg unsigned selectors; 93b8e80941Smrg unsigned instances; 94b8e80941Smrg}; 95b8e80941Smrg 96b8e80941Smrgstruct si_pc_block { 97b8e80941Smrg const struct si_pc_block_gfxdescr *b; 98b8e80941Smrg unsigned num_instances; 99b8e80941Smrg 100b8e80941Smrg unsigned num_groups; 101b8e80941Smrg char *group_names; 102b8e80941Smrg unsigned group_name_stride; 103b8e80941Smrg 104b8e80941Smrg char *selector_names; 105b8e80941Smrg unsigned selector_name_stride; 106b8e80941Smrg}; 107b8e80941Smrg 108b8e80941Smrg/* The order is chosen to be compatible with GPUPerfStudio's hardcoding of 109b8e80941Smrg * performance counter group IDs. 
110b8e80941Smrg */ 111b8e80941Smrgstatic const char * const si_pc_shader_type_suffixes[] = { 112b8e80941Smrg "", "_ES", "_GS", "_VS", "_PS", "_LS", "_HS", "_CS" 113b8e80941Smrg}; 114b8e80941Smrg 115b8e80941Smrgstatic const unsigned si_pc_shader_type_bits[] = { 116b8e80941Smrg 0x7f, 117b8e80941Smrg S_036780_ES_EN(1), 118b8e80941Smrg S_036780_GS_EN(1), 119b8e80941Smrg S_036780_VS_EN(1), 120b8e80941Smrg S_036780_PS_EN(1), 121b8e80941Smrg S_036780_LS_EN(1), 122b8e80941Smrg S_036780_HS_EN(1), 123b8e80941Smrg S_036780_CS_EN(1), 124b8e80941Smrg}; 125b8e80941Smrg 126b8e80941Smrg/* Max counters per HW block */ 127b8e80941Smrg#define SI_QUERY_MAX_COUNTERS 16 128b8e80941Smrg 129b8e80941Smrg#define SI_PC_SHADERS_WINDOWING (1 << 31) 130b8e80941Smrg 131b8e80941Smrgstruct si_query_group { 132b8e80941Smrg struct si_query_group *next; 133b8e80941Smrg struct si_pc_block *block; 134b8e80941Smrg unsigned sub_gid; /* only used during init */ 135b8e80941Smrg unsigned result_base; /* only used during init */ 136b8e80941Smrg int se; 137b8e80941Smrg int instance; 138b8e80941Smrg unsigned num_counters; 139b8e80941Smrg unsigned selectors[SI_QUERY_MAX_COUNTERS]; 140b8e80941Smrg}; 141b8e80941Smrg 142b8e80941Smrgstruct si_query_counter { 143b8e80941Smrg unsigned base; 144b8e80941Smrg unsigned qwords; 145b8e80941Smrg unsigned stride; /* in uint64s */ 146b8e80941Smrg}; 147b8e80941Smrg 148b8e80941Smrgstruct si_query_pc { 149b8e80941Smrg struct si_query b; 150b8e80941Smrg struct si_query_buffer buffer; 151b8e80941Smrg 152b8e80941Smrg /* Size of the results in memory, in bytes. 
*/ 153b8e80941Smrg unsigned result_size; 154b8e80941Smrg 155b8e80941Smrg unsigned shaders; 156b8e80941Smrg unsigned num_counters; 157b8e80941Smrg struct si_query_counter *counters; 158b8e80941Smrg struct si_query_group *groups; 159b8e80941Smrg}; 160b8e80941Smrg 161b8e80941Smrg 162b8e80941Smrgstatic struct si_pc_block_base cik_CB = { 163b8e80941Smrg .name = "CB", 164b8e80941Smrg .num_counters = 4, 165b8e80941Smrg .flags = SI_PC_BLOCK_SE | SI_PC_BLOCK_INSTANCE_GROUPS, 166b8e80941Smrg 167b8e80941Smrg .select0 = R_037000_CB_PERFCOUNTER_FILTER, 168b8e80941Smrg .counter0_lo = R_035018_CB_PERFCOUNTER0_LO, 169b8e80941Smrg .num_multi = 1, 170b8e80941Smrg .num_prelude = 1, 171b8e80941Smrg .layout = SI_PC_MULTI_ALTERNATE, 172b8e80941Smrg}; 173b8e80941Smrg 174b8e80941Smrgstatic unsigned cik_CPC_select[] = { 175b8e80941Smrg R_036024_CPC_PERFCOUNTER0_SELECT, 176b8e80941Smrg R_036010_CPC_PERFCOUNTER0_SELECT1, 177b8e80941Smrg R_03600C_CPC_PERFCOUNTER1_SELECT, 178b8e80941Smrg}; 179b8e80941Smrgstatic struct si_pc_block_base cik_CPC = { 180b8e80941Smrg .name = "CPC", 181b8e80941Smrg .num_counters = 2, 182b8e80941Smrg 183b8e80941Smrg .select = cik_CPC_select, 184b8e80941Smrg .counter0_lo = R_034018_CPC_PERFCOUNTER0_LO, 185b8e80941Smrg .num_multi = 1, 186b8e80941Smrg .layout = SI_PC_MULTI_CUSTOM | SI_PC_REG_REVERSE, 187b8e80941Smrg}; 188b8e80941Smrg 189b8e80941Smrgstatic struct si_pc_block_base cik_CPF = { 190b8e80941Smrg .name = "CPF", 191b8e80941Smrg .num_counters = 2, 192b8e80941Smrg 193b8e80941Smrg .select0 = R_03601C_CPF_PERFCOUNTER0_SELECT, 194b8e80941Smrg .counter0_lo = R_034028_CPF_PERFCOUNTER0_LO, 195b8e80941Smrg .num_multi = 1, 196b8e80941Smrg .layout = SI_PC_MULTI_ALTERNATE | SI_PC_REG_REVERSE, 197b8e80941Smrg}; 198b8e80941Smrg 199b8e80941Smrgstatic struct si_pc_block_base cik_CPG = { 200b8e80941Smrg .name = "CPG", 201b8e80941Smrg .num_counters = 2, 202b8e80941Smrg 203b8e80941Smrg .select0 = R_036008_CPG_PERFCOUNTER0_SELECT, 204b8e80941Smrg .counter0_lo = 
R_034008_CPG_PERFCOUNTER0_LO, 205b8e80941Smrg .num_multi = 1, 206b8e80941Smrg .layout = SI_PC_MULTI_ALTERNATE | SI_PC_REG_REVERSE, 207b8e80941Smrg}; 208b8e80941Smrg 209b8e80941Smrgstatic struct si_pc_block_base cik_DB = { 210b8e80941Smrg .name = "DB", 211b8e80941Smrg .num_counters = 4, 212b8e80941Smrg .flags = SI_PC_BLOCK_SE | SI_PC_BLOCK_INSTANCE_GROUPS, 213b8e80941Smrg 214b8e80941Smrg .select0 = R_037100_DB_PERFCOUNTER0_SELECT, 215b8e80941Smrg .counter0_lo = R_035100_DB_PERFCOUNTER0_LO, 216b8e80941Smrg .num_multi = 3, // really only 2, but there's a gap between registers 217b8e80941Smrg .layout = SI_PC_MULTI_ALTERNATE, 218b8e80941Smrg}; 219b8e80941Smrg 220b8e80941Smrgstatic struct si_pc_block_base cik_GDS = { 221b8e80941Smrg .name = "GDS", 222b8e80941Smrg .num_counters = 4, 223b8e80941Smrg 224b8e80941Smrg .select0 = R_036A00_GDS_PERFCOUNTER0_SELECT, 225b8e80941Smrg .counter0_lo = R_034A00_GDS_PERFCOUNTER0_LO, 226b8e80941Smrg .num_multi = 1, 227b8e80941Smrg .layout = SI_PC_MULTI_TAIL, 228b8e80941Smrg}; 229b8e80941Smrg 230b8e80941Smrgstatic unsigned cik_GRBM_counters[] = { 231b8e80941Smrg R_034100_GRBM_PERFCOUNTER0_LO, 232b8e80941Smrg R_03410C_GRBM_PERFCOUNTER1_LO, 233b8e80941Smrg}; 234b8e80941Smrgstatic struct si_pc_block_base cik_GRBM = { 235b8e80941Smrg .name = "GRBM", 236b8e80941Smrg .num_counters = 2, 237b8e80941Smrg 238b8e80941Smrg .select0 = R_036100_GRBM_PERFCOUNTER0_SELECT, 239b8e80941Smrg .counters = cik_GRBM_counters, 240b8e80941Smrg}; 241b8e80941Smrg 242b8e80941Smrgstatic struct si_pc_block_base cik_GRBMSE = { 243b8e80941Smrg .name = "GRBMSE", 244b8e80941Smrg .num_counters = 4, 245b8e80941Smrg 246b8e80941Smrg .select0 = R_036108_GRBM_SE0_PERFCOUNTER_SELECT, 247b8e80941Smrg .counter0_lo = R_034114_GRBM_SE0_PERFCOUNTER_LO, 248b8e80941Smrg}; 249b8e80941Smrg 250b8e80941Smrgstatic struct si_pc_block_base cik_IA = { 251b8e80941Smrg .name = "IA", 252b8e80941Smrg .num_counters = 4, 253b8e80941Smrg 254b8e80941Smrg .select0 = R_036210_IA_PERFCOUNTER0_SELECT, 
255b8e80941Smrg .counter0_lo = R_034220_IA_PERFCOUNTER0_LO, 256b8e80941Smrg .num_multi = 1, 257b8e80941Smrg .layout = SI_PC_MULTI_TAIL, 258b8e80941Smrg}; 259b8e80941Smrg 260b8e80941Smrgstatic struct si_pc_block_base cik_PA_SC = { 261b8e80941Smrg .name = "PA_SC", 262b8e80941Smrg .num_counters = 8, 263b8e80941Smrg .flags = SI_PC_BLOCK_SE, 264b8e80941Smrg 265b8e80941Smrg .select0 = R_036500_PA_SC_PERFCOUNTER0_SELECT, 266b8e80941Smrg .counter0_lo = R_034500_PA_SC_PERFCOUNTER0_LO, 267b8e80941Smrg .num_multi = 1, 268b8e80941Smrg .layout = SI_PC_MULTI_ALTERNATE, 269b8e80941Smrg}; 270b8e80941Smrg 271b8e80941Smrg/* According to docs, PA_SU counters are only 48 bits wide. */ 272b8e80941Smrgstatic struct si_pc_block_base cik_PA_SU = { 273b8e80941Smrg .name = "PA_SU", 274b8e80941Smrg .num_counters = 4, 275b8e80941Smrg .flags = SI_PC_BLOCK_SE, 276b8e80941Smrg 277b8e80941Smrg .select0 = R_036400_PA_SU_PERFCOUNTER0_SELECT, 278b8e80941Smrg .counter0_lo = R_034400_PA_SU_PERFCOUNTER0_LO, 279b8e80941Smrg .num_multi = 2, 280b8e80941Smrg .layout = SI_PC_MULTI_ALTERNATE, 281b8e80941Smrg}; 282b8e80941Smrg 283b8e80941Smrgstatic struct si_pc_block_base cik_SPI = { 284b8e80941Smrg .name = "SPI", 285b8e80941Smrg .num_counters = 6, 286b8e80941Smrg .flags = SI_PC_BLOCK_SE, 287b8e80941Smrg 288b8e80941Smrg .select0 = R_036600_SPI_PERFCOUNTER0_SELECT, 289b8e80941Smrg .counter0_lo = R_034604_SPI_PERFCOUNTER0_LO, 290b8e80941Smrg .num_multi = 4, 291b8e80941Smrg .layout = SI_PC_MULTI_BLOCK, 292b8e80941Smrg}; 293b8e80941Smrg 294b8e80941Smrgstatic struct si_pc_block_base cik_SQ = { 295b8e80941Smrg .name = "SQ", 296b8e80941Smrg .num_counters = 16, 297b8e80941Smrg .flags = SI_PC_BLOCK_SE | SI_PC_BLOCK_SHADER, 298b8e80941Smrg 299b8e80941Smrg .select0 = R_036700_SQ_PERFCOUNTER0_SELECT, 300b8e80941Smrg .select_or = S_036700_SQC_BANK_MASK(15) | 301b8e80941Smrg S_036700_SQC_CLIENT_MASK(15) | 302b8e80941Smrg S_036700_SIMD_MASK(15), 303b8e80941Smrg .counter0_lo = R_034700_SQ_PERFCOUNTER0_LO, 304b8e80941Smrg}; 
305b8e80941Smrg 306b8e80941Smrgstatic struct si_pc_block_base cik_SX = { 307b8e80941Smrg .name = "SX", 308b8e80941Smrg .num_counters = 4, 309b8e80941Smrg .flags = SI_PC_BLOCK_SE, 310b8e80941Smrg 311b8e80941Smrg .select0 = R_036900_SX_PERFCOUNTER0_SELECT, 312b8e80941Smrg .counter0_lo = R_034900_SX_PERFCOUNTER0_LO, 313b8e80941Smrg .num_multi = 2, 314b8e80941Smrg .layout = SI_PC_MULTI_TAIL, 315b8e80941Smrg}; 316b8e80941Smrg 317b8e80941Smrgstatic struct si_pc_block_base cik_TA = { 318b8e80941Smrg .name = "TA", 319b8e80941Smrg .num_counters = 2, 320b8e80941Smrg .flags = SI_PC_BLOCK_SE | SI_PC_BLOCK_INSTANCE_GROUPS | SI_PC_BLOCK_SHADER_WINDOWED, 321b8e80941Smrg 322b8e80941Smrg .select0 = R_036B00_TA_PERFCOUNTER0_SELECT, 323b8e80941Smrg .counter0_lo = R_034B00_TA_PERFCOUNTER0_LO, 324b8e80941Smrg .num_multi = 1, 325b8e80941Smrg .layout = SI_PC_MULTI_ALTERNATE, 326b8e80941Smrg}; 327b8e80941Smrg 328b8e80941Smrgstatic struct si_pc_block_base cik_TD = { 329b8e80941Smrg .name = "TD", 330b8e80941Smrg .num_counters = 2, 331b8e80941Smrg .flags = SI_PC_BLOCK_SE | SI_PC_BLOCK_INSTANCE_GROUPS | SI_PC_BLOCK_SHADER_WINDOWED, 332b8e80941Smrg 333b8e80941Smrg .select0 = R_036C00_TD_PERFCOUNTER0_SELECT, 334b8e80941Smrg .counter0_lo = R_034C00_TD_PERFCOUNTER0_LO, 335b8e80941Smrg .num_multi = 1, 336b8e80941Smrg .layout = SI_PC_MULTI_ALTERNATE, 337b8e80941Smrg}; 338b8e80941Smrg 339b8e80941Smrgstatic struct si_pc_block_base cik_TCA = { 340b8e80941Smrg .name = "TCA", 341b8e80941Smrg .num_counters = 4, 342b8e80941Smrg .flags = SI_PC_BLOCK_INSTANCE_GROUPS, 343b8e80941Smrg 344b8e80941Smrg .select0 = R_036E40_TCA_PERFCOUNTER0_SELECT, 345b8e80941Smrg .counter0_lo = R_034E40_TCA_PERFCOUNTER0_LO, 346b8e80941Smrg .num_multi = 2, 347b8e80941Smrg .layout = SI_PC_MULTI_ALTERNATE, 348b8e80941Smrg}; 349b8e80941Smrg 350b8e80941Smrgstatic struct si_pc_block_base cik_TCC = { 351b8e80941Smrg .name = "TCC", 352b8e80941Smrg .num_counters = 4, 353b8e80941Smrg .flags = SI_PC_BLOCK_INSTANCE_GROUPS, 354b8e80941Smrg 
355b8e80941Smrg .select0 = R_036E00_TCC_PERFCOUNTER0_SELECT, 356b8e80941Smrg .counter0_lo = R_034E00_TCC_PERFCOUNTER0_LO, 357b8e80941Smrg .num_multi = 2, 358b8e80941Smrg .layout = SI_PC_MULTI_ALTERNATE, 359b8e80941Smrg}; 360b8e80941Smrg 361b8e80941Smrgstatic struct si_pc_block_base cik_TCP = { 362b8e80941Smrg .name = "TCP", 363b8e80941Smrg .num_counters = 4, 364b8e80941Smrg .flags = SI_PC_BLOCK_SE | SI_PC_BLOCK_INSTANCE_GROUPS | SI_PC_BLOCK_SHADER_WINDOWED, 365b8e80941Smrg 366b8e80941Smrg .select0 = R_036D00_TCP_PERFCOUNTER0_SELECT, 367b8e80941Smrg .counter0_lo = R_034D00_TCP_PERFCOUNTER0_LO, 368b8e80941Smrg .num_multi = 2, 369b8e80941Smrg .layout = SI_PC_MULTI_ALTERNATE, 370b8e80941Smrg}; 371b8e80941Smrg 372b8e80941Smrgstatic struct si_pc_block_base cik_VGT = { 373b8e80941Smrg .name = "VGT", 374b8e80941Smrg .num_counters = 4, 375b8e80941Smrg .flags = SI_PC_BLOCK_SE, 376b8e80941Smrg 377b8e80941Smrg .select0 = R_036230_VGT_PERFCOUNTER0_SELECT, 378b8e80941Smrg .counter0_lo = R_034240_VGT_PERFCOUNTER0_LO, 379b8e80941Smrg .num_multi = 1, 380b8e80941Smrg .layout = SI_PC_MULTI_TAIL, 381b8e80941Smrg}; 382b8e80941Smrg 383b8e80941Smrgstatic struct si_pc_block_base cik_WD = { 384b8e80941Smrg .name = "WD", 385b8e80941Smrg .num_counters = 4, 386b8e80941Smrg 387b8e80941Smrg .select0 = R_036200_WD_PERFCOUNTER0_SELECT, 388b8e80941Smrg .counter0_lo = R_034200_WD_PERFCOUNTER0_LO, 389b8e80941Smrg}; 390b8e80941Smrg 391b8e80941Smrgstatic struct si_pc_block_base cik_MC = { 392b8e80941Smrg .name = "MC", 393b8e80941Smrg .num_counters = 4, 394b8e80941Smrg 395b8e80941Smrg .layout = SI_PC_FAKE, 396b8e80941Smrg}; 397b8e80941Smrg 398b8e80941Smrgstatic struct si_pc_block_base cik_SRBM = { 399b8e80941Smrg .name = "SRBM", 400b8e80941Smrg .num_counters = 2, 401b8e80941Smrg 402b8e80941Smrg .layout = SI_PC_FAKE, 403b8e80941Smrg}; 404b8e80941Smrg 405b8e80941Smrg/* Both the number of instances and selectors varies between chips of the same 406b8e80941Smrg * class. 
We only differentiate by class here and simply expose the maximum 407b8e80941Smrg * number over all chips in a class. 408b8e80941Smrg * 409b8e80941Smrg * Unfortunately, GPUPerfStudio uses the order of performance counter groups 410b8e80941Smrg * blindly once it believes it has identified the hardware, so the order of 411b8e80941Smrg * blocks here matters. 412b8e80941Smrg */ 413b8e80941Smrgstatic struct si_pc_block_gfxdescr groups_CIK[] = { 414b8e80941Smrg { &cik_CB, 226}, 415b8e80941Smrg { &cik_CPF, 17 }, 416b8e80941Smrg { &cik_DB, 257}, 417b8e80941Smrg { &cik_GRBM, 34 }, 418b8e80941Smrg { &cik_GRBMSE, 15 }, 419b8e80941Smrg { &cik_PA_SU, 153 }, 420b8e80941Smrg { &cik_PA_SC, 395 }, 421b8e80941Smrg { &cik_SPI, 186 }, 422b8e80941Smrg { &cik_SQ, 252 }, 423b8e80941Smrg { &cik_SX, 32 }, 424b8e80941Smrg { &cik_TA, 111, 11 }, 425b8e80941Smrg { &cik_TCA, 39, 2 }, 426b8e80941Smrg { &cik_TCC, 160}, 427b8e80941Smrg { &cik_TD, 55, 11 }, 428b8e80941Smrg { &cik_TCP, 154, 11 }, 429b8e80941Smrg { &cik_GDS, 121 }, 430b8e80941Smrg { &cik_VGT, 140 }, 431b8e80941Smrg { &cik_IA, 22 }, 432b8e80941Smrg { &cik_MC, 22 }, 433b8e80941Smrg { &cik_SRBM, 19 }, 434b8e80941Smrg { &cik_WD, 22 }, 435b8e80941Smrg { &cik_CPG, 46 }, 436b8e80941Smrg { &cik_CPC, 22 }, 437b8e80941Smrg 438b8e80941Smrg}; 439b8e80941Smrg 440b8e80941Smrgstatic struct si_pc_block_gfxdescr groups_VI[] = { 441b8e80941Smrg { &cik_CB, 405}, 442b8e80941Smrg { &cik_CPF, 19 }, 443b8e80941Smrg { &cik_DB, 257}, 444b8e80941Smrg { &cik_GRBM, 34 }, 445b8e80941Smrg { &cik_GRBMSE, 15 }, 446b8e80941Smrg { &cik_PA_SU, 154 }, 447b8e80941Smrg { &cik_PA_SC, 397 }, 448b8e80941Smrg { &cik_SPI, 197 }, 449b8e80941Smrg { &cik_SQ, 273 }, 450b8e80941Smrg { &cik_SX, 34 }, 451b8e80941Smrg { &cik_TA, 119, 16 }, 452b8e80941Smrg { &cik_TCA, 35, 2 }, 453b8e80941Smrg { &cik_TCC, 192}, 454b8e80941Smrg { &cik_TD, 55, 16 }, 455b8e80941Smrg { &cik_TCP, 180, 16 }, 456b8e80941Smrg { &cik_GDS, 121 }, 457b8e80941Smrg { &cik_VGT, 147 }, 458b8e80941Smrg { &cik_IA, 24 
}, 459b8e80941Smrg { &cik_MC, 22 }, 460b8e80941Smrg { &cik_SRBM, 27 }, 461b8e80941Smrg { &cik_WD, 37 }, 462b8e80941Smrg { &cik_CPG, 48 }, 463b8e80941Smrg { &cik_CPC, 24 }, 464b8e80941Smrg 465b8e80941Smrg}; 466b8e80941Smrg 467b8e80941Smrgstatic struct si_pc_block_gfxdescr groups_gfx9[] = { 468b8e80941Smrg { &cik_CB, 438}, 469b8e80941Smrg { &cik_CPF, 32 }, 470b8e80941Smrg { &cik_DB, 328}, 471b8e80941Smrg { &cik_GRBM, 38 }, 472b8e80941Smrg { &cik_GRBMSE, 16 }, 473b8e80941Smrg { &cik_PA_SU, 292 }, 474b8e80941Smrg { &cik_PA_SC, 491 }, 475b8e80941Smrg { &cik_SPI, 196 }, 476b8e80941Smrg { &cik_SQ, 374 }, 477b8e80941Smrg { &cik_SX, 208 }, 478b8e80941Smrg { &cik_TA, 119, 16 }, 479b8e80941Smrg { &cik_TCA, 35, 2 }, 480b8e80941Smrg { &cik_TCC, 256}, 481b8e80941Smrg { &cik_TD, 57, 16 }, 482b8e80941Smrg { &cik_TCP, 85, 16 }, 483b8e80941Smrg { &cik_GDS, 121 }, 484b8e80941Smrg { &cik_VGT, 148 }, 485b8e80941Smrg { &cik_IA, 32 }, 486b8e80941Smrg { &cik_WD, 58 }, 487b8e80941Smrg { &cik_CPG, 59 }, 488b8e80941Smrg { &cik_CPC, 35 }, 489b8e80941Smrg}; 490b8e80941Smrg 491b8e80941Smrgstatic bool si_pc_block_has_per_se_groups(const struct si_perfcounters *pc, 492b8e80941Smrg const struct si_pc_block *block) 493b8e80941Smrg{ 494b8e80941Smrg return block->b->b->flags & SI_PC_BLOCK_SE_GROUPS || 495b8e80941Smrg (block->b->b->flags & SI_PC_BLOCK_SE && pc->separate_se); 496b8e80941Smrg} 497b8e80941Smrg 498b8e80941Smrgstatic bool si_pc_block_has_per_instance_groups(const struct si_perfcounters *pc, 499b8e80941Smrg const struct si_pc_block *block) 500b8e80941Smrg{ 501b8e80941Smrg return block->b->b->flags & SI_PC_BLOCK_INSTANCE_GROUPS || 502b8e80941Smrg (block->num_instances > 1 && pc->separate_instance); 503b8e80941Smrg} 504b8e80941Smrg 505b8e80941Smrgstatic struct si_pc_block * 506b8e80941Smrglookup_counter(struct si_perfcounters *pc, unsigned index, 507b8e80941Smrg unsigned *base_gid, unsigned *sub_index) 508b8e80941Smrg{ 509b8e80941Smrg struct si_pc_block *block = pc->blocks; 510b8e80941Smrg 
unsigned bid; 511b8e80941Smrg 512b8e80941Smrg *base_gid = 0; 513b8e80941Smrg for (bid = 0; bid < pc->num_blocks; ++bid, ++block) { 514b8e80941Smrg unsigned total = block->num_groups * block->b->selectors; 515b8e80941Smrg 516b8e80941Smrg if (index < total) { 517b8e80941Smrg *sub_index = index; 518b8e80941Smrg return block; 519b8e80941Smrg } 520b8e80941Smrg 521b8e80941Smrg index -= total; 522b8e80941Smrg *base_gid += block->num_groups; 523b8e80941Smrg } 524b8e80941Smrg 525b8e80941Smrg return NULL; 526b8e80941Smrg} 527b8e80941Smrg 528b8e80941Smrgstatic struct si_pc_block * 529b8e80941Smrglookup_group(struct si_perfcounters *pc, unsigned *index) 530b8e80941Smrg{ 531b8e80941Smrg unsigned bid; 532b8e80941Smrg struct si_pc_block *block = pc->blocks; 533b8e80941Smrg 534b8e80941Smrg for (bid = 0; bid < pc->num_blocks; ++bid, ++block) { 535b8e80941Smrg if (*index < block->num_groups) 536b8e80941Smrg return block; 537b8e80941Smrg *index -= block->num_groups; 538b8e80941Smrg } 539b8e80941Smrg 540b8e80941Smrg return NULL; 541b8e80941Smrg} 542b8e80941Smrg 543b8e80941Smrgstatic void si_pc_emit_instance(struct si_context *sctx, 544b8e80941Smrg int se, int instance) 545b8e80941Smrg{ 546b8e80941Smrg struct radeon_cmdbuf *cs = sctx->gfx_cs; 547b8e80941Smrg unsigned value = S_030800_SH_BROADCAST_WRITES(1); 548b8e80941Smrg 549b8e80941Smrg if (se >= 0) { 550b8e80941Smrg value |= S_030800_SE_INDEX(se); 551b8e80941Smrg } else { 552b8e80941Smrg value |= S_030800_SE_BROADCAST_WRITES(1); 553b8e80941Smrg } 554b8e80941Smrg 555b8e80941Smrg if (instance >= 0) { 556b8e80941Smrg value |= S_030800_INSTANCE_INDEX(instance); 557b8e80941Smrg } else { 558b8e80941Smrg value |= S_030800_INSTANCE_BROADCAST_WRITES(1); 559b8e80941Smrg } 560b8e80941Smrg 561b8e80941Smrg radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX, value); 562b8e80941Smrg} 563b8e80941Smrg 564b8e80941Smrgstatic void si_pc_emit_shaders(struct si_context *sctx, 565b8e80941Smrg unsigned shaders) 566b8e80941Smrg{ 567b8e80941Smrg struct 
radeon_cmdbuf *cs = sctx->gfx_cs; 568b8e80941Smrg 569b8e80941Smrg radeon_set_uconfig_reg_seq(cs, R_036780_SQ_PERFCOUNTER_CTRL, 2); 570b8e80941Smrg radeon_emit(cs, shaders & 0x7f); 571b8e80941Smrg radeon_emit(cs, 0xffffffff); 572b8e80941Smrg} 573b8e80941Smrg 574b8e80941Smrgstatic void si_pc_emit_select(struct si_context *sctx, 575b8e80941Smrg struct si_pc_block *block, 576b8e80941Smrg unsigned count, unsigned *selectors) 577b8e80941Smrg{ 578b8e80941Smrg struct si_pc_block_base *regs = block->b->b; 579b8e80941Smrg struct radeon_cmdbuf *cs = sctx->gfx_cs; 580b8e80941Smrg unsigned idx; 581b8e80941Smrg unsigned layout_multi = regs->layout & SI_PC_MULTI_MASK; 582b8e80941Smrg unsigned dw; 583b8e80941Smrg 584b8e80941Smrg assert(count <= regs->num_counters); 585b8e80941Smrg 586b8e80941Smrg if (regs->layout & SI_PC_FAKE) 587b8e80941Smrg return; 588b8e80941Smrg 589b8e80941Smrg if (layout_multi == SI_PC_MULTI_BLOCK) { 590b8e80941Smrg assert(!(regs->layout & SI_PC_REG_REVERSE)); 591b8e80941Smrg 592b8e80941Smrg dw = count + regs->num_prelude; 593b8e80941Smrg if (count >= regs->num_multi) 594b8e80941Smrg dw += regs->num_multi; 595b8e80941Smrg radeon_set_uconfig_reg_seq(cs, regs->select0, dw); 596b8e80941Smrg for (idx = 0; idx < regs->num_prelude; ++idx) 597b8e80941Smrg radeon_emit(cs, 0); 598b8e80941Smrg for (idx = 0; idx < MIN2(count, regs->num_multi); ++idx) 599b8e80941Smrg radeon_emit(cs, selectors[idx] | regs->select_or); 600b8e80941Smrg 601b8e80941Smrg if (count < regs->num_multi) { 602b8e80941Smrg unsigned select1 = 603b8e80941Smrg regs->select0 + 4 * regs->num_multi; 604b8e80941Smrg radeon_set_uconfig_reg_seq(cs, select1, count); 605b8e80941Smrg } 606b8e80941Smrg 607b8e80941Smrg for (idx = 0; idx < MIN2(count, regs->num_multi); ++idx) 608b8e80941Smrg radeon_emit(cs, 0); 609b8e80941Smrg 610b8e80941Smrg if (count > regs->num_multi) { 611b8e80941Smrg for (idx = regs->num_multi; idx < count; ++idx) 612b8e80941Smrg radeon_emit(cs, selectors[idx] | regs->select_or); 
613b8e80941Smrg } 614b8e80941Smrg } else if (layout_multi == SI_PC_MULTI_TAIL) { 615b8e80941Smrg unsigned select1, select1_count; 616b8e80941Smrg 617b8e80941Smrg assert(!(regs->layout & SI_PC_REG_REVERSE)); 618b8e80941Smrg 619b8e80941Smrg radeon_set_uconfig_reg_seq(cs, regs->select0, count + regs->num_prelude); 620b8e80941Smrg for (idx = 0; idx < regs->num_prelude; ++idx) 621b8e80941Smrg radeon_emit(cs, 0); 622b8e80941Smrg for (idx = 0; idx < count; ++idx) 623b8e80941Smrg radeon_emit(cs, selectors[idx] | regs->select_or); 624b8e80941Smrg 625b8e80941Smrg select1 = regs->select0 + 4 * regs->num_counters; 626b8e80941Smrg select1_count = MIN2(count, regs->num_multi); 627b8e80941Smrg radeon_set_uconfig_reg_seq(cs, select1, select1_count); 628b8e80941Smrg for (idx = 0; idx < select1_count; ++idx) 629b8e80941Smrg radeon_emit(cs, 0); 630b8e80941Smrg } else if (layout_multi == SI_PC_MULTI_CUSTOM) { 631b8e80941Smrg unsigned *reg = regs->select; 632b8e80941Smrg for (idx = 0; idx < count; ++idx) { 633b8e80941Smrg radeon_set_uconfig_reg(cs, *reg++, selectors[idx] | regs->select_or); 634b8e80941Smrg if (idx < regs->num_multi) 635b8e80941Smrg radeon_set_uconfig_reg(cs, *reg++, 0); 636b8e80941Smrg } 637b8e80941Smrg } else { 638b8e80941Smrg assert(layout_multi == SI_PC_MULTI_ALTERNATE); 639b8e80941Smrg 640b8e80941Smrg unsigned reg_base = regs->select0; 641b8e80941Smrg unsigned reg_count = count + MIN2(count, regs->num_multi); 642b8e80941Smrg reg_count += regs->num_prelude; 643b8e80941Smrg 644b8e80941Smrg if (!(regs->layout & SI_PC_REG_REVERSE)) { 645b8e80941Smrg radeon_set_uconfig_reg_seq(cs, reg_base, reg_count); 646b8e80941Smrg 647b8e80941Smrg for (idx = 0; idx < regs->num_prelude; ++idx) 648b8e80941Smrg radeon_emit(cs, 0); 649b8e80941Smrg for (idx = 0; idx < count; ++idx) { 650b8e80941Smrg radeon_emit(cs, selectors[idx] | regs->select_or); 651b8e80941Smrg if (idx < regs->num_multi) 652b8e80941Smrg radeon_emit(cs, 0); 653b8e80941Smrg } 654b8e80941Smrg } else { 655b8e80941Smrg 
reg_base -= (reg_count - 1) * 4; 656b8e80941Smrg radeon_set_uconfig_reg_seq(cs, reg_base, reg_count); 657b8e80941Smrg 658b8e80941Smrg for (idx = count; idx > 0; --idx) { 659b8e80941Smrg if (idx <= regs->num_multi) 660b8e80941Smrg radeon_emit(cs, 0); 661b8e80941Smrg radeon_emit(cs, selectors[idx - 1] | regs->select_or); 662b8e80941Smrg } 663b8e80941Smrg for (idx = 0; idx < regs->num_prelude; ++idx) 664b8e80941Smrg radeon_emit(cs, 0); 665b8e80941Smrg } 666b8e80941Smrg } 667b8e80941Smrg} 668b8e80941Smrg 669b8e80941Smrgstatic void si_pc_emit_start(struct si_context *sctx, 670b8e80941Smrg struct si_resource *buffer, uint64_t va) 671b8e80941Smrg{ 672b8e80941Smrg struct radeon_cmdbuf *cs = sctx->gfx_cs; 673b8e80941Smrg 674b8e80941Smrg si_cp_copy_data(sctx, 675b8e80941Smrg COPY_DATA_DST_MEM, buffer, va - buffer->gpu_address, 676b8e80941Smrg COPY_DATA_IMM, NULL, 1); 677b8e80941Smrg 678b8e80941Smrg radeon_set_uconfig_reg(cs, R_036020_CP_PERFMON_CNTL, 679b8e80941Smrg S_036020_PERFMON_STATE(V_036020_DISABLE_AND_RESET)); 680b8e80941Smrg radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0)); 681b8e80941Smrg radeon_emit(cs, EVENT_TYPE(V_028A90_PERFCOUNTER_START) | EVENT_INDEX(0)); 682b8e80941Smrg radeon_set_uconfig_reg(cs, R_036020_CP_PERFMON_CNTL, 683b8e80941Smrg S_036020_PERFMON_STATE(V_036020_START_COUNTING)); 684b8e80941Smrg} 685b8e80941Smrg 686b8e80941Smrg/* Note: The buffer was already added in si_pc_emit_start, so we don't have to 687b8e80941Smrg * do it again in here. 
*/ 688b8e80941Smrgstatic void si_pc_emit_stop(struct si_context *sctx, 689b8e80941Smrg struct si_resource *buffer, uint64_t va) 690b8e80941Smrg{ 691b8e80941Smrg struct radeon_cmdbuf *cs = sctx->gfx_cs; 692b8e80941Smrg 693b8e80941Smrg si_cp_release_mem(sctx, V_028A90_BOTTOM_OF_PIPE_TS, 0, 694b8e80941Smrg EOP_DST_SEL_MEM, EOP_INT_SEL_NONE, 695b8e80941Smrg EOP_DATA_SEL_VALUE_32BIT, 696b8e80941Smrg buffer, va, 0, SI_NOT_QUERY); 697b8e80941Smrg si_cp_wait_mem(sctx, cs, va, 0, 0xffffffff, WAIT_REG_MEM_EQUAL); 698b8e80941Smrg 699b8e80941Smrg radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0)); 700b8e80941Smrg radeon_emit(cs, EVENT_TYPE(V_028A90_PERFCOUNTER_SAMPLE) | EVENT_INDEX(0)); 701b8e80941Smrg radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0)); 702b8e80941Smrg radeon_emit(cs, EVENT_TYPE(V_028A90_PERFCOUNTER_STOP) | EVENT_INDEX(0)); 703b8e80941Smrg radeon_set_uconfig_reg(cs, R_036020_CP_PERFMON_CNTL, 704b8e80941Smrg S_036020_PERFMON_STATE(V_036020_STOP_COUNTING) | 705b8e80941Smrg S_036020_PERFMON_SAMPLE_ENABLE(1)); 706b8e80941Smrg} 707b8e80941Smrg 708b8e80941Smrgstatic void si_pc_emit_read(struct si_context *sctx, 709b8e80941Smrg struct si_pc_block *block, 710b8e80941Smrg unsigned count, uint64_t va) 711b8e80941Smrg{ 712b8e80941Smrg struct si_pc_block_base *regs = block->b->b; 713b8e80941Smrg struct radeon_cmdbuf *cs = sctx->gfx_cs; 714b8e80941Smrg unsigned idx; 715b8e80941Smrg unsigned reg = regs->counter0_lo; 716b8e80941Smrg unsigned reg_delta = 8; 717b8e80941Smrg 718b8e80941Smrg if (!(regs->layout & SI_PC_FAKE)) { 719b8e80941Smrg if (regs->layout & SI_PC_REG_REVERSE) 720b8e80941Smrg reg_delta = -reg_delta; 721b8e80941Smrg 722b8e80941Smrg for (idx = 0; idx < count; ++idx) { 723b8e80941Smrg if (regs->counters) 724b8e80941Smrg reg = regs->counters[idx]; 725b8e80941Smrg 726b8e80941Smrg radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0)); 727b8e80941Smrg radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_PERF) | 728b8e80941Smrg COPY_DATA_DST_SEL(COPY_DATA_DST_MEM) | 729b8e80941Smrg 
COPY_DATA_COUNT_SEL); /* 64 bits */ 730b8e80941Smrg radeon_emit(cs, reg >> 2); 731b8e80941Smrg radeon_emit(cs, 0); /* unused */ 732b8e80941Smrg radeon_emit(cs, va); 733b8e80941Smrg radeon_emit(cs, va >> 32); 734b8e80941Smrg va += sizeof(uint64_t); 735b8e80941Smrg reg += reg_delta; 736b8e80941Smrg } 737b8e80941Smrg } else { 738b8e80941Smrg for (idx = 0; idx < count; ++idx) { 739b8e80941Smrg radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0)); 740b8e80941Smrg radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_IMM) | 741b8e80941Smrg COPY_DATA_DST_SEL(COPY_DATA_DST_MEM) | 742b8e80941Smrg COPY_DATA_COUNT_SEL); 743b8e80941Smrg radeon_emit(cs, 0); /* immediate */ 744b8e80941Smrg radeon_emit(cs, 0); 745b8e80941Smrg radeon_emit(cs, va); 746b8e80941Smrg radeon_emit(cs, va >> 32); 747b8e80941Smrg va += sizeof(uint64_t); 748b8e80941Smrg } 749b8e80941Smrg } 750b8e80941Smrg} 751b8e80941Smrg 752b8e80941Smrgstatic void si_pc_query_destroy(struct si_screen *sscreen, 753b8e80941Smrg struct si_query *squery) 754b8e80941Smrg{ 755b8e80941Smrg struct si_query_pc *query = (struct si_query_pc *)squery; 756b8e80941Smrg 757b8e80941Smrg while (query->groups) { 758b8e80941Smrg struct si_query_group *group = query->groups; 759b8e80941Smrg query->groups = group->next; 760b8e80941Smrg FREE(group); 761b8e80941Smrg } 762b8e80941Smrg 763b8e80941Smrg FREE(query->counters); 764b8e80941Smrg 765b8e80941Smrg si_query_buffer_destroy(sscreen, &query->buffer); 766b8e80941Smrg FREE(query); 767b8e80941Smrg} 768b8e80941Smrg 769b8e80941Smrgstatic void si_pc_query_resume(struct si_context *sctx, struct si_query *squery) 770b8e80941Smrg/* 771b8e80941Smrg struct si_query_hw *hwquery, 772b8e80941Smrg struct si_resource *buffer, uint64_t va)*/ 773b8e80941Smrg{ 774b8e80941Smrg struct si_query_pc *query = (struct si_query_pc *)squery; 775b8e80941Smrg int current_se = -1; 776b8e80941Smrg int current_instance = -1; 777b8e80941Smrg 778b8e80941Smrg if (!si_query_buffer_alloc(sctx, &query->buffer, NULL, query->result_size)) 
		return;
	si_need_gfx_cs_space(sctx);

	if (query->shaders)
		si_pc_emit_shaders(sctx, query->shaders);

	/* Program the counter selects of every group, re-emitting the
	 * SE/instance window only when it actually changes. */
	for (struct si_query_group *group = query->groups; group; group = group->next) {
		struct si_pc_block *block = group->block;

		if (group->se != current_se || group->instance != current_instance) {
			current_se = group->se;
			current_instance = group->instance;
			si_pc_emit_instance(sctx, group->se, group->instance);
		}

		si_pc_emit_select(sctx, block, group->num_counters, group->selectors);
	}

	/* Restore broadcast mode (-1, -1) if any group narrowed the window. */
	if (current_se != -1 || current_instance != -1)
		si_pc_emit_instance(sctx, -1, -1);

	uint64_t va = query->buffer.buf->gpu_address + query->buffer.results_end;
	si_pc_emit_start(sctx, query->buffer.buf, va);
}

/* si_query_ops::suspend: stop the counters and read the raw values back
 * into the query buffer.
 *
 * A group with se < 0 (or instance < 0) covers all SEs (or instances); the
 * nested do/while loops visit each covered SE/instance combination once and
 * append group->num_counters 64-bit slots per combination at va. The slot
 * order here must match the result_base/stride layout computed in
 * si_create_batch_query. */
static void si_pc_query_suspend(struct si_context *sctx, struct si_query *squery)
{
	struct si_query_pc *query = (struct si_query_pc *)squery;

	/* No buffer means the resume-time allocation failed; nothing to read. */
	if (!query->buffer.buf)
		return;

	uint64_t va = query->buffer.buf->gpu_address + query->buffer.results_end;
	query->buffer.results_end += query->result_size;

	si_pc_emit_stop(sctx, query->buffer.buf, va);

	for (struct si_query_group *group = query->groups; group; group = group->next) {
		struct si_pc_block *block = group->block;
		unsigned se = group->se >= 0 ? group->se : 0;
		unsigned se_end = se + 1;

		/* A per-SE block summed over SEs must be read on every SE. */
		if ((block->b->b->flags & SI_PC_BLOCK_SE) && (group->se < 0))
			se_end = sctx->screen->info.max_se;

		do {
			unsigned instance = group->instance >= 0 ? group->instance : 0;

			do {
				si_pc_emit_instance(sctx, se, instance);
				si_pc_emit_read(sctx, block, group->num_counters, va);
				va += sizeof(uint64_t) * group->num_counters;
			} while (group->instance < 0 && ++instance < block->num_instances);
		} while (++se < se_end);
	}

	/* Back to broadcast mode. */
	si_pc_emit_instance(sctx, -1, -1);
}

/* si_query_ops::begin: register the query as active and start counting. */
static bool si_pc_query_begin(struct si_context *ctx, struct si_query *squery)
{
	struct si_query_pc *query = (struct si_query_pc *)squery;

	si_query_buffer_reset(ctx, &query->buffer);

	LIST_ADDTAIL(&query->b.active_list, &ctx->active_queries);
	ctx->num_cs_dw_queries_suspend += query->b.num_cs_dw_suspend;

	si_pc_query_resume(ctx, squery);

	return true;
}

/* si_query_ops::end: stop counting and unregister from the active list. */
static bool si_pc_query_end(struct si_context *ctx, struct si_query *squery)
{
	struct si_query_pc *query = (struct si_query_pc *)squery;

	si_pc_query_suspend(ctx, squery);

	LIST_DEL(&squery->active_list);
	ctx->num_cs_dw_queries_suspend -= squery->num_cs_dw_suspend;

	/* Failure is signalled by a missing buffer (allocation failed). */
	return query->buffer.buf != NULL;
}

/* Accumulate one snapshot's worth of raw counter values from a mapped
 * result buffer into result->batch, one entry per user-requested counter. */
static void si_pc_query_add_result(struct si_query_pc *query,
				   void *buffer,
				   union pipe_query_result
				   *result)
{
	uint64_t *results = buffer;
	unsigned i, j;

	for (i = 0; i < query->num_counters; ++i) {
		struct si_query_counter *counter = &query->counters[i];

		/* Sum this counter's qwords across all SE/instance slots;
		 * each 64-bit slot is truncated to its low 32 bits before
		 * accumulating. */
		for (j = 0; j < counter->qwords; ++j) {
			uint32_t value = results[counter->base + j * counter->stride];
			result->batch[i].u64 += value;
		}
	}
}

/* si_query_ops::get_result: map every result buffer in the chain and sum
 * all recorded snapshots into *result. Returns false if a buffer could not
 * be mapped (e.g. non-blocking map while the GPU is still busy). */
static bool si_pc_query_get_result(struct si_context *sctx, struct si_query *squery,
				   bool wait, union pipe_query_result *result)
{
	struct si_query_pc *query = (struct si_query_pc *)squery;

	memset(result, 0, sizeof(result->batch[0]) * query->num_counters);

	/* Walk the chain of result buffers, newest first. */
	for (struct si_query_buffer *qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned usage = PIPE_TRANSFER_READ |
				 (wait ? 0 : PIPE_TRANSFER_DONTBLOCK);
		unsigned results_base = 0;
		void *map;

		if (squery->b.flushed)
			map = sctx->ws->buffer_map(qbuf->buf->buf, NULL, usage);
		else
			map = si_buffer_map_sync_with_rings(sctx, qbuf->buf, usage);

		if (!map)
			return false;

		/* Each suspend appended one result_size-sized snapshot. */
		while (results_base != qbuf->results_end) {
			si_pc_query_add_result(query, map + results_base, result);
			results_base += query->result_size;
		}
	}

	return true;
}

static const struct si_query_ops batch_query_ops = {
	.destroy = si_pc_query_destroy,
	.begin = si_pc_query_begin,
	.end = si_pc_query_end,
	.get_result = si_pc_query_get_result,

	.suspend = si_pc_query_suspend,
	.resume = si_pc_query_resume,
};

/* Find or create the per-(block, sub_gid) group state of a batch query.
 * sub_gid encodes (in order) the shader type, the SE, and the instance,
 * depending on which of those the block exposes as separate groups.
 * Returns NULL on allocation failure or on an incompatible shader
 * selection. */
static struct si_query_group *get_group_state(struct si_screen *screen,
					      struct si_query_pc *query,
					      struct si_pc_block *block,
					      unsigned sub_gid)
{
	struct si_query_group *group = query->groups;

	while (group) {
		if (group->block == block && group->sub_gid == sub_gid)
			return group;
		group = group->next;
	}

	group = CALLOC_STRUCT(si_query_group);
	if (!group)
		return NULL;

	group->block = block;
	group->sub_gid = sub_gid;

	if (block->b->b->flags & SI_PC_BLOCK_SHADER) {
		/* Peel the shader type off the top of sub_gid. */
		unsigned sub_gids = block->num_instances;
		unsigned shader_id;
		unsigned shaders;
		unsigned query_shaders;

		if (si_pc_block_has_per_se_groups(screen->perfcounters, block))
			sub_gids = sub_gids * screen->info.max_se;
		shader_id = sub_gid / sub_gids;
		sub_gid = sub_gid % sub_gids;

		shaders = si_pc_shader_type_bits[shader_id];

		/* All shader blocks in one batch must use the same shader
		 * mask. */
		query_shaders = query->shaders & ~SI_PC_SHADERS_WINDOWING;
		if (query_shaders && query_shaders != shaders) {
			fprintf(stderr, "si_perfcounter: incompatible shader groups\n");
			FREE(group);
			return NULL;
		}
		query->shaders = shaders;
	}

	if (block->b->b->flags & SI_PC_BLOCK_SHADER_WINDOWED && !query->shaders) {
		// A non-zero value in query->shaders ensures that the shader
		// masking is reset unless the user explicitly requests one.
		query->shaders = SI_PC_SHADERS_WINDOWING;
	}

	/* Then peel off the SE index, ... */
	if (si_pc_block_has_per_se_groups(screen->perfcounters, block)) {
		group->se = sub_gid / block->num_instances;
		sub_gid = sub_gid % block->num_instances;
	} else {
		group->se = -1;
	}

	/* ... leaving the instance index. -1 means "sum over all". */
	if (si_pc_block_has_per_instance_groups(screen->perfcounters, block)) {
		group->instance = sub_gid;
	} else {
		group->instance = -1;
	}

	group->next = query->groups;
	query->groups = group;

	return group;
}

/* Create a single batch query that samples all the given perf counter
 * query types together. Returns NULL on bad query types, incompatible
 * shader groups, or allocation failure. */
struct pipe_query *si_create_batch_query(struct pipe_context *ctx,
					 unsigned num_queries,
					 unsigned *query_types)
{
	struct si_screen *screen =
		(struct si_screen *)ctx->screen;
	struct si_perfcounters *pc = screen->perfcounters;
	struct si_pc_block *block;
	struct si_query_group *group;
	struct si_query_pc *query;
	unsigned base_gid, sub_gid, sub_index;
	unsigned i, j;

	if (!pc)
		return NULL;

	query = CALLOC_STRUCT(si_query_pc);
	if (!query)
		return NULL;

	query->b.ops = &batch_query_ops;

	query->num_counters = num_queries;

	/* Collect selectors per group */
	for (i = 0; i < num_queries; ++i) {
		unsigned sub_gid;

		if (query_types[i] < SI_QUERY_FIRST_PERFCOUNTER)
			goto error;

		block = lookup_counter(pc, query_types[i] - SI_QUERY_FIRST_PERFCOUNTER,
				       &base_gid, &sub_index);
		if (!block)
			goto error;

		sub_gid = sub_index / block->b->selectors;
		sub_index = sub_index % block->b->selectors;

		group = get_group_state(screen, query, block, sub_gid);
		if (!group)
			goto error;

		/* Each group can only drive num_counters hardware counters. */
		if (group->num_counters >= block->b->b->num_counters) {
			fprintf(stderr,
				"perfcounter group %s: too many selected\n",
				block->b->b->name);
			goto error;
		}
		group->selectors[group->num_counters] = sub_index;
		++group->num_counters;
	}

	/* Compute result bases and CS size per group */
	query->b.num_cs_dw_suspend = pc->num_stop_cs_dwords;
	query->b.num_cs_dw_suspend += pc->num_instance_cs_dwords;

	i = 0;
	for (group = query->groups; group; group = group->next) {
		struct si_pc_block *block = group->block;
		unsigned read_dw;
		unsigned instances = 1;

		/* Groups that sum over SEs/instances store one 64-bit slot
		 * per covered SE/instance per counter; see
		 * si_pc_query_suspend for the matching read order. */
		if ((block->b->b->flags & SI_PC_BLOCK_SE) && group->se < 0)
			instances = screen->info.max_se;
		if (group->instance < 0)
			instances *= block->num_instances;

		group->result_base = i;
		query->result_size += sizeof(uint64_t) * instances * group->num_counters;
		i += instances * group->num_counters;

		read_dw = 6 * group->num_counters; /* COPY_DATA is 6 dwords */
		query->b.num_cs_dw_suspend += instances * read_dw;
		query->b.num_cs_dw_suspend += instances * pc->num_instance_cs_dwords;
	}
1064b8e80941Smrg 1065b8e80941Smrg if (query->shaders) { 1066b8e80941Smrg if (query->shaders == SI_PC_SHADERS_WINDOWING) 1067b8e80941Smrg query->shaders = 0xffffffff; 1068b8e80941Smrg } 1069b8e80941Smrg 1070b8e80941Smrg /* Map user-supplied query array to result indices */ 1071b8e80941Smrg query->counters = CALLOC(num_queries, sizeof(*query->counters)); 1072b8e80941Smrg for (i = 0; i < num_queries; ++i) { 1073b8e80941Smrg struct si_query_counter *counter = &query->counters[i]; 1074b8e80941Smrg struct si_pc_block *block; 1075b8e80941Smrg 1076b8e80941Smrg block = lookup_counter(pc, query_types[i] - SI_QUERY_FIRST_PERFCOUNTER, 1077b8e80941Smrg &base_gid, &sub_index); 1078b8e80941Smrg 1079b8e80941Smrg sub_gid = sub_index / block->b->selectors; 1080b8e80941Smrg sub_index = sub_index % block->b->selectors; 1081b8e80941Smrg 1082b8e80941Smrg group = get_group_state(screen, query, block, sub_gid); 1083b8e80941Smrg assert(group != NULL); 1084b8e80941Smrg 1085b8e80941Smrg for (j = 0; j < group->num_counters; ++j) { 1086b8e80941Smrg if (group->selectors[j] == sub_index) 1087b8e80941Smrg break; 1088b8e80941Smrg } 1089b8e80941Smrg 1090b8e80941Smrg counter->base = group->result_base + j; 1091b8e80941Smrg counter->stride = group->num_counters; 1092b8e80941Smrg 1093b8e80941Smrg counter->qwords = 1; 1094b8e80941Smrg if ((block->b->b->flags & SI_PC_BLOCK_SE) && group->se < 0) 1095b8e80941Smrg counter->qwords = screen->info.max_se; 1096b8e80941Smrg if (group->instance < 0) 1097b8e80941Smrg counter->qwords *= block->num_instances; 1098b8e80941Smrg } 1099b8e80941Smrg 1100b8e80941Smrg return (struct pipe_query *)query; 1101b8e80941Smrg 1102b8e80941Smrgerror: 1103b8e80941Smrg si_pc_query_destroy(screen, &query->b); 1104b8e80941Smrg return NULL; 1105b8e80941Smrg} 1106b8e80941Smrg 1107b8e80941Smrgstatic bool si_init_block_names(struct si_screen *screen, 1108b8e80941Smrg struct si_pc_block *block) 1109b8e80941Smrg{ 1110b8e80941Smrg bool per_instance_groups = 
		si_pc_block_has_per_instance_groups(screen->perfcounters, block);
	bool per_se_groups = si_pc_block_has_per_se_groups(screen->perfcounters, block);
	unsigned i, j, k;
	unsigned groups_shader = 1, groups_se = 1, groups_instance = 1;
	unsigned namelen;
	char *groupname;
	char *p;

	if (per_instance_groups)
		groups_instance = block->num_instances;
	if (per_se_groups)
		groups_se = screen->info.max_se;
	if (block->b->b->flags & SI_PC_BLOCK_SHADER)
		groups_shader = ARRAY_SIZE(si_pc_shader_type_bits);

	/* Group names are fixed-stride records: base name, optional shader
	 * suffix (up to 3 chars), optional single SE digit, optional '_'
	 * separator, optional up-to-two-digit instance number, plus the
	 * terminating NUL (the asserts below guard the digit counts). */
	namelen = strlen(block->b->b->name);
	block->group_name_stride = namelen + 1;
	if (block->b->b->flags & SI_PC_BLOCK_SHADER)
		block->group_name_stride += 3;
	if (per_se_groups) {
		assert(groups_se <= 10);
		block->group_name_stride += 1;

		if (per_instance_groups)
			block->group_name_stride += 1;
	}
	if (per_instance_groups) {
		assert(groups_instance <= 100);
		block->group_name_stride += 2;
	}

	block->group_names = MALLOC(block->num_groups * block->group_name_stride);
	if (!block->group_names)
		return false;

	/* Emit names in (shader, SE, instance) order — the same order the
	 * group ids are decoded in get_group_state. */
	groupname = block->group_names;
	for (i = 0; i < groups_shader; ++i) {
		const char *shader_suffix = si_pc_shader_type_suffixes[i];
		unsigned shaderlen = strlen(shader_suffix);
		for (j = 0; j < groups_se; ++j) {
			for (k = 0; k < groups_instance; ++k) {
				strcpy(groupname, block->b->b->name);
				p = groupname + namelen;

				if (block->b->b->flags & SI_PC_BLOCK_SHADER) {
					strcpy(p, shader_suffix);
					p += shaderlen;
				}

				if (per_se_groups) {
					p += sprintf(p, "%d", j);
					if (per_instance_groups)
						*p++ = '_';
				}

				if (per_instance_groups)
					p += sprintf(p, "%d", k);

				groupname += block->group_name_stride;
			}
		}
	}

	/* Selector names append "_NNN" (4 chars) to the group name. */
	assert(block->b->selectors <= 1000);
	block->selector_name_stride = block->group_name_stride + 4;
	block->selector_names = MALLOC(block->num_groups * block->b->selectors *
				       block->selector_name_stride);
	if (!block->selector_names)
		return false;

	groupname = block->group_names;
	p = block->selector_names;
	for (i = 0; i < block->num_groups; ++i) {
		for (j = 0; j < block->b->selectors; ++j) {
			sprintf(p, "%s_%03d", groupname, j);
			p += block->selector_name_stride;
		}
		groupname += block->group_name_stride;
	}

	return true;
}

/* Driver-query info entry point: with info == NULL return the total number
 * of perf counter queries, otherwise fill *info for the given index and
 * return 1 (0 on failure). */
int si_get_perfcounter_info(struct si_screen *screen,
			    unsigned index,
			    struct pipe_driver_query_info *info)
{
	struct si_perfcounters *pc = screen->perfcounters;
	struct si_pc_block *block;
	unsigned base_gid, sub;

	if (!pc)
		return 0;

	if (!info) {
		unsigned bid, num_queries = 0;
		for (bid = 0; bid < pc->num_blocks; ++bid) {
			num_queries += pc->blocks[bid].b->selectors *
				       pc->blocks[bid].num_groups;
		}

		return num_queries;
	}

	block = lookup_counter(pc, index, &base_gid, &sub);
	if (!block)
		return 0;

	/* Build the name tables lazily on first use. */
	if (!block->selector_names) {
		if (!si_init_block_names(screen, block))
			return 0;
	}
	info->name = block->selector_names + sub * block->selector_name_stride;
	info->query_type = SI_QUERY_FIRST_PERFCOUNTER + index;
	info->max_value.u64 = 0;
	info->type = PIPE_DRIVER_QUERY_TYPE_UINT64;
	info->result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_AVERAGE;
	info->group_id = base_gid + sub / block->b->selectors;
	info->flags = PIPE_DRIVER_QUERY_FLAG_BATCH;
	/* Hide everything but the first and last selector of each block from
	 * plain listings. */
	if (sub > 0 && sub + 1 < block->b->selectors * block->num_groups)
		info->flags |= PIPE_DRIVER_QUERY_FLAG_DONT_LIST;
	return 1;
}

/* Like si_get_perfcounter_info, but for counter groups: with info == NULL
 * return the number of groups, otherwise fill *info for the given group. */
int si_get_perfcounter_group_info(struct si_screen *screen,
				  unsigned index,
				  struct pipe_driver_query_group_info *info)
{
	struct si_perfcounters *pc = screen->perfcounters;
	struct si_pc_block *block;

	if (!pc)
		return 0;

	if (!info)
		return pc->num_groups;

	block = lookup_group(pc, &index);
	if (!block)
		return 0;

	/* Build the name tables lazily on first use. */
	if (!block->group_names) {
		if (!si_init_block_names(screen, block))
			return 0;
	}
	info->name = block->group_names + index * block->group_name_stride;
	info->num_queries = block->b->selectors;
	info->max_active_queries = block->b->b->num_counters;
	return 1;
}

/* Free all per-screen perf counter state, including the lazily built name
 * tables. Safe to call when perf counters were never initialized. */
void si_destroy_perfcounters(struct si_screen *screen)
{
	struct si_perfcounters *pc = screen->perfcounters;
	unsigned i;

	if (!pc)
		return;

	for (i = 0; i < pc->num_blocks; ++i) {
		FREE(pc->blocks[i].group_names);
		FREE(pc->blocks[i].selector_names);
	}
	FREE(pc->blocks);
	FREE(pc);
	screen->perfcounters = NULL;
}

/* Set up screen->perfcounters from the per-generation block descriptor
 * tables. Silently leaves perfcounters NULL on unsupported chips or
 * allocation failure. */
void si_init_perfcounters(struct si_screen *screen)
{
	struct si_perfcounters *pc;
	const struct si_pc_block_gfxdescr *blocks;
	unsigned num_blocks;
	unsigned i;

	switch (screen->info.chip_class) {
	case CIK:
		blocks = groups_CIK;
		num_blocks = ARRAY_SIZE(groups_CIK);
		break;
	case VI:
		blocks = groups_VI;
		num_blocks = ARRAY_SIZE(groups_VI);
		break;
	case GFX9:
		blocks = groups_gfx9;
		num_blocks = ARRAY_SIZE(groups_gfx9);
		break;
	case SI:
	default:
		return; /* not implemented */
	}

	if (screen->info.max_sh_per_se != 1) {
		/* This should not happen on non-SI chips.
		 */
		fprintf(stderr, "si_init_perfcounters: max_sh_per_se = %d not "
			"supported (inaccurate performance counters)\n",
			screen->info.max_sh_per_se);
	}

	screen->perfcounters = pc = CALLOC_STRUCT(si_perfcounters);
	if (!pc)
		return;

	/* Fixed command-stream sizes used for the CS-space estimates in
	 * si_create_batch_query. */
	pc->num_stop_cs_dwords = 14 + si_cp_write_fence_dwords(screen);
	pc->num_instance_cs_dwords = 3;

	/* Environment-variable debug options; presumably these expose
	 * per-SE / per-instance groups explicitly — see
	 * si_pc_block_has_per_{se,instance}_groups. */
	pc->separate_se = debug_get_bool_option("RADEON_PC_SEPARATE_SE", false);
	pc->separate_instance = debug_get_bool_option("RADEON_PC_SEPARATE_INSTANCE", false);

	pc->blocks = CALLOC(num_blocks, sizeof(struct si_pc_block));
	if (!pc->blocks)
		goto error;
	pc->num_blocks = num_blocks;

	for (i = 0; i < num_blocks; ++i) {
		struct si_pc_block *block = &pc->blocks[i];
		block->b = &blocks[i];
		block->num_instances = MAX2(1, block->b->instances);

		/* Some blocks' instance counts depend on the chip
		 * configuration rather than the static table. */
		if (!strcmp(block->b->b->name, "CB") ||
		    !strcmp(block->b->b->name, "DB"))
			block->num_instances = screen->info.max_se;
		else if (!strcmp(block->b->b->name, "TCC"))
			block->num_instances = screen->info.num_tcc_blocks;
		else if (!strcmp(block->b->b->name, "IA"))
			block->num_instances = MAX2(1, screen->info.max_se / 2);

		/* num_groups mirrors the (shader, SE, instance) group-id
		 * encoding used by get_group_state. */
		if (si_pc_block_has_per_instance_groups(pc, block)) {
			block->num_groups = block->num_instances;
		} else {
			block->num_groups = 1;
		}

		if (si_pc_block_has_per_se_groups(pc, block))
			block->num_groups *= screen->info.max_se;

		if (block->b->b->flags & SI_PC_BLOCK_SHADER)
			block->num_groups *= ARRAY_SIZE(si_pc_shader_type_bits);

		pc->num_groups += block->num_groups;
	}

	return;

error:
	si_destroy_perfcounters(screen);
}