si_perfcounter.c (revision b8e80941)
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "si_build_pm4.h"
#include "si_query.h"
#include "util/u_memory.h"


enum si_pc_block_flags {
	/* This block is part of the shader engine */
	SI_PC_BLOCK_SE = (1 << 0),

	/* Expose per-instance groups instead of summing all instances (within
	 * an SE). */
	SI_PC_BLOCK_INSTANCE_GROUPS = (1 << 1),

	/* Expose per-SE groups instead of summing instances across SEs. */
	SI_PC_BLOCK_SE_GROUPS = (1 << 2),

	/* Shader block */
	SI_PC_BLOCK_SHADER = (1 << 3),

	/* Non-shader block with perfcounters windowed by shaders. */
	SI_PC_BLOCK_SHADER_WINDOWED = (1 << 4),
};

enum si_pc_reg_layout {
	/* All secondary selector dwords follow as one block after the primary
	 * selector dwords for the counters that have secondary selectors.
	 */
	SI_PC_MULTI_BLOCK = 0,

	/* Each secondary selector dword follows immediately after the
	 * corresponding primary.
	 */
	SI_PC_MULTI_ALTERNATE = 1,

	/* All secondary selector dwords follow as one block after all primary
	 * selector dwords.
	 */
	SI_PC_MULTI_TAIL = 2,

	/* Free-form arrangement of selector registers. */
	SI_PC_MULTI_CUSTOM = 3,

	SI_PC_MULTI_MASK = 3,

	/* Registers are laid out in decreasing rather than increasing order. */
	SI_PC_REG_REVERSE = 4,

	SI_PC_FAKE = 8,
};
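
/* Illustration (a sketch, not from the original source): for a block with
 * two counters that both have secondary selectors, where Pn/Sn are the
 * primary/secondary selector dwords of counter n, the register order is:
 *
 *   SI_PC_MULTI_BLOCK:     P0 P1 S0 S1
 *   SI_PC_MULTI_ALTERNATE: P0 S0 P1 S1
 *   SI_PC_MULTI_TAIL:      P0 P1 ... S0 S1   (secondaries placed after all
 *                          num_counters primary slots)
 *
 * With SI_PC_REG_REVERSE, the same sequence is written at decreasing
 * register addresses. See si_pc_emit_select for the exact emission logic.
 */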

struct si_pc_block_base {
	const char *name;
	unsigned num_counters;
	unsigned flags;

	unsigned select_or;
	unsigned select0;
	unsigned counter0_lo;
	unsigned *select;
	unsigned *counters;
	unsigned num_multi;
	unsigned num_prelude;
	unsigned layout;
};

struct si_pc_block_gfxdescr {
	struct si_pc_block_base *b;
	unsigned selectors;
	unsigned instances;
};

struct si_pc_block {
	const struct si_pc_block_gfxdescr *b;
	unsigned num_instances;

	unsigned num_groups;
	char *group_names;
	unsigned group_name_stride;

	char *selector_names;
	unsigned selector_name_stride;
};

/* The order is chosen to be compatible with GPUPerfStudio's hardcoding of
 * performance counter group IDs.
 */
static const char * const si_pc_shader_type_suffixes[] = {
	"", "_ES", "_GS", "_VS", "_PS", "_LS", "_HS", "_CS"
};

static const unsigned si_pc_shader_type_bits[] = {
	0x7f,
	S_036780_ES_EN(1),
	S_036780_GS_EN(1),
	S_036780_VS_EN(1),
	S_036780_PS_EN(1),
	S_036780_LS_EN(1),
	S_036780_HS_EN(1),
	S_036780_CS_EN(1),
};
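
/* Entry 0 (the empty suffix) selects all stages at once: 0x7f covers the
 * seven stage-enable bits above, matching the `shaders & 0x7f` mask written
 * to SQ_PERFCOUNTER_CTRL in si_pc_emit_shaders below. */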

/* Max counters per HW block */
#define SI_QUERY_MAX_COUNTERS 16

#define SI_PC_SHADERS_WINDOWING (1 << 31)

struct si_query_group {
	struct si_query_group *next;
	struct si_pc_block *block;
	unsigned sub_gid; /* only used during init */
	unsigned result_base; /* only used during init */
	int se;
	int instance;
	unsigned num_counters;
	unsigned selectors[SI_QUERY_MAX_COUNTERS];
};

struct si_query_counter {
	unsigned base;
	unsigned qwords;
	unsigned stride; /* in uint64s */
};

struct si_query_pc {
	struct si_query b;
	struct si_query_buffer buffer;

	/* Size of the results in memory, in bytes. */
	unsigned result_size;

	unsigned shaders;
	unsigned num_counters;
	struct si_query_counter *counters;
	struct si_query_group *groups;
};
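
/* Result buffer layout (a sketch inferred from si_create_batch_query and
 * si_pc_query_suspend): the buffer is an array of uint64 slots, filled
 * group by group, instance-major within each group:
 *
 *   slot = group->result_base + instance * group->num_counters + counter
 *
 * A si_query_counter therefore sums `qwords` slots starting at `base`,
 * stepping by `stride` (= the owning group's num_counters).
 */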


static struct si_pc_block_base cik_CB = {
	.name = "CB",
	.num_counters = 4,
	.flags = SI_PC_BLOCK_SE | SI_PC_BLOCK_INSTANCE_GROUPS,

	.select0 = R_037000_CB_PERFCOUNTER_FILTER,
	.counter0_lo = R_035018_CB_PERFCOUNTER0_LO,
	.num_multi = 1,
	.num_prelude = 1,
	.layout = SI_PC_MULTI_ALTERNATE,
};

static unsigned cik_CPC_select[] = {
	R_036024_CPC_PERFCOUNTER0_SELECT,
	R_036010_CPC_PERFCOUNTER0_SELECT1,
	R_03600C_CPC_PERFCOUNTER1_SELECT,
};
static struct si_pc_block_base cik_CPC = {
	.name = "CPC",
	.num_counters = 2,

	.select = cik_CPC_select,
	.counter0_lo = R_034018_CPC_PERFCOUNTER0_LO,
	.num_multi = 1,
	.layout = SI_PC_MULTI_CUSTOM | SI_PC_REG_REVERSE,
};

static struct si_pc_block_base cik_CPF = {
	.name = "CPF",
	.num_counters = 2,

	.select0 = R_03601C_CPF_PERFCOUNTER0_SELECT,
	.counter0_lo = R_034028_CPF_PERFCOUNTER0_LO,
	.num_multi = 1,
	.layout = SI_PC_MULTI_ALTERNATE | SI_PC_REG_REVERSE,
};

static struct si_pc_block_base cik_CPG = {
	.name = "CPG",
	.num_counters = 2,

	.select0 = R_036008_CPG_PERFCOUNTER0_SELECT,
	.counter0_lo = R_034008_CPG_PERFCOUNTER0_LO,
	.num_multi = 1,
	.layout = SI_PC_MULTI_ALTERNATE | SI_PC_REG_REVERSE,
};

static struct si_pc_block_base cik_DB = {
	.name = "DB",
	.num_counters = 4,
	.flags = SI_PC_BLOCK_SE | SI_PC_BLOCK_INSTANCE_GROUPS,

	.select0 = R_037100_DB_PERFCOUNTER0_SELECT,
	.counter0_lo = R_035100_DB_PERFCOUNTER0_LO,
	.num_multi = 3, /* really only 2, but there's a gap between registers */
	.layout = SI_PC_MULTI_ALTERNATE,
};

static struct si_pc_block_base cik_GDS = {
	.name = "GDS",
	.num_counters = 4,

	.select0 = R_036A00_GDS_PERFCOUNTER0_SELECT,
	.counter0_lo = R_034A00_GDS_PERFCOUNTER0_LO,
	.num_multi = 1,
	.layout = SI_PC_MULTI_TAIL,
};

static unsigned cik_GRBM_counters[] = {
	R_034100_GRBM_PERFCOUNTER0_LO,
	R_03410C_GRBM_PERFCOUNTER1_LO,
};
static struct si_pc_block_base cik_GRBM = {
	.name = "GRBM",
	.num_counters = 2,

	.select0 = R_036100_GRBM_PERFCOUNTER0_SELECT,
	.counters = cik_GRBM_counters,
};

static struct si_pc_block_base cik_GRBMSE = {
	.name = "GRBMSE",
	.num_counters = 4,

	.select0 = R_036108_GRBM_SE0_PERFCOUNTER_SELECT,
	.counter0_lo = R_034114_GRBM_SE0_PERFCOUNTER_LO,
};

static struct si_pc_block_base cik_IA = {
	.name = "IA",
	.num_counters = 4,

	.select0 = R_036210_IA_PERFCOUNTER0_SELECT,
	.counter0_lo = R_034220_IA_PERFCOUNTER0_LO,
	.num_multi = 1,
	.layout = SI_PC_MULTI_TAIL,
};

static struct si_pc_block_base cik_PA_SC = {
	.name = "PA_SC",
	.num_counters = 8,
	.flags = SI_PC_BLOCK_SE,

	.select0 = R_036500_PA_SC_PERFCOUNTER0_SELECT,
	.counter0_lo = R_034500_PA_SC_PERFCOUNTER0_LO,
	.num_multi = 1,
	.layout = SI_PC_MULTI_ALTERNATE,
};

/* According to docs, PA_SU counters are only 48 bits wide. */
static struct si_pc_block_base cik_PA_SU = {
	.name = "PA_SU",
	.num_counters = 4,
	.flags = SI_PC_BLOCK_SE,

	.select0 = R_036400_PA_SU_PERFCOUNTER0_SELECT,
	.counter0_lo = R_034400_PA_SU_PERFCOUNTER0_LO,
	.num_multi = 2,
	.layout = SI_PC_MULTI_ALTERNATE,
};

static struct si_pc_block_base cik_SPI = {
	.name = "SPI",
	.num_counters = 6,
	.flags = SI_PC_BLOCK_SE,

	.select0 = R_036600_SPI_PERFCOUNTER0_SELECT,
	.counter0_lo = R_034604_SPI_PERFCOUNTER0_LO,
	.num_multi = 4,
	.layout = SI_PC_MULTI_BLOCK,
};

static struct si_pc_block_base cik_SQ = {
	.name = "SQ",
	.num_counters = 16,
	.flags = SI_PC_BLOCK_SE | SI_PC_BLOCK_SHADER,

	.select0 = R_036700_SQ_PERFCOUNTER0_SELECT,
	.select_or = S_036700_SQC_BANK_MASK(15) |
		     S_036700_SQC_CLIENT_MASK(15) |
		     S_036700_SIMD_MASK(15),
	.counter0_lo = R_034700_SQ_PERFCOUNTER0_LO,
};

static struct si_pc_block_base cik_SX = {
	.name = "SX",
	.num_counters = 4,
	.flags = SI_PC_BLOCK_SE,

	.select0 = R_036900_SX_PERFCOUNTER0_SELECT,
	.counter0_lo = R_034900_SX_PERFCOUNTER0_LO,
	.num_multi = 2,
	.layout = SI_PC_MULTI_TAIL,
};

static struct si_pc_block_base cik_TA = {
	.name = "TA",
	.num_counters = 2,
	.flags = SI_PC_BLOCK_SE | SI_PC_BLOCK_INSTANCE_GROUPS | SI_PC_BLOCK_SHADER_WINDOWED,

	.select0 = R_036B00_TA_PERFCOUNTER0_SELECT,
	.counter0_lo = R_034B00_TA_PERFCOUNTER0_LO,
	.num_multi = 1,
	.layout = SI_PC_MULTI_ALTERNATE,
};

static struct si_pc_block_base cik_TD = {
	.name = "TD",
	.num_counters = 2,
	.flags = SI_PC_BLOCK_SE | SI_PC_BLOCK_INSTANCE_GROUPS | SI_PC_BLOCK_SHADER_WINDOWED,

	.select0 = R_036C00_TD_PERFCOUNTER0_SELECT,
	.counter0_lo = R_034C00_TD_PERFCOUNTER0_LO,
	.num_multi = 1,
	.layout = SI_PC_MULTI_ALTERNATE,
};

static struct si_pc_block_base cik_TCA = {
	.name = "TCA",
	.num_counters = 4,
	.flags = SI_PC_BLOCK_INSTANCE_GROUPS,

	.select0 = R_036E40_TCA_PERFCOUNTER0_SELECT,
	.counter0_lo = R_034E40_TCA_PERFCOUNTER0_LO,
	.num_multi = 2,
	.layout = SI_PC_MULTI_ALTERNATE,
};

static struct si_pc_block_base cik_TCC = {
	.name = "TCC",
	.num_counters = 4,
	.flags = SI_PC_BLOCK_INSTANCE_GROUPS,

	.select0 = R_036E00_TCC_PERFCOUNTER0_SELECT,
	.counter0_lo = R_034E00_TCC_PERFCOUNTER0_LO,
	.num_multi = 2,
	.layout = SI_PC_MULTI_ALTERNATE,
};

static struct si_pc_block_base cik_TCP = {
	.name = "TCP",
	.num_counters = 4,
	.flags = SI_PC_BLOCK_SE | SI_PC_BLOCK_INSTANCE_GROUPS | SI_PC_BLOCK_SHADER_WINDOWED,

	.select0 = R_036D00_TCP_PERFCOUNTER0_SELECT,
	.counter0_lo = R_034D00_TCP_PERFCOUNTER0_LO,
	.num_multi = 2,
	.layout = SI_PC_MULTI_ALTERNATE,
};

static struct si_pc_block_base cik_VGT = {
	.name = "VGT",
	.num_counters = 4,
	.flags = SI_PC_BLOCK_SE,

	.select0 = R_036230_VGT_PERFCOUNTER0_SELECT,
	.counter0_lo = R_034240_VGT_PERFCOUNTER0_LO,
	.num_multi = 1,
	.layout = SI_PC_MULTI_TAIL,
};

static struct si_pc_block_base cik_WD = {
	.name = "WD",
	.num_counters = 4,

	.select0 = R_036200_WD_PERFCOUNTER0_SELECT,
	.counter0_lo = R_034200_WD_PERFCOUNTER0_LO,
};

static struct si_pc_block_base cik_MC = {
	.name = "MC",
	.num_counters = 4,

	.layout = SI_PC_FAKE,
};

static struct si_pc_block_base cik_SRBM = {
	.name = "SRBM",
	.num_counters = 2,

	.layout = SI_PC_FAKE,
};

/* Both the number of instances and the number of selectors vary between
 * chips of the same class. We only differentiate by class here and simply
 * expose the maximum number over all chips in a class.
 *
 * Unfortunately, GPUPerfStudio uses the order of performance counter groups
 * blindly once it believes it has identified the hardware, so the order of
 * blocks here matters.
 */
static struct si_pc_block_gfxdescr groups_CIK[] = {
	{ &cik_CB, 226 },
	{ &cik_CPF, 17 },
	{ &cik_DB, 257 },
	{ &cik_GRBM, 34 },
	{ &cik_GRBMSE, 15 },
	{ &cik_PA_SU, 153 },
	{ &cik_PA_SC, 395 },
	{ &cik_SPI, 186 },
	{ &cik_SQ, 252 },
	{ &cik_SX, 32 },
	{ &cik_TA, 111, 11 },
	{ &cik_TCA, 39, 2 },
	{ &cik_TCC, 160 },
	{ &cik_TD, 55, 11 },
	{ &cik_TCP, 154, 11 },
	{ &cik_GDS, 121 },
	{ &cik_VGT, 140 },
	{ &cik_IA, 22 },
	{ &cik_MC, 22 },
	{ &cik_SRBM, 19 },
	{ &cik_WD, 22 },
	{ &cik_CPG, 46 },
	{ &cik_CPC, 22 },
};

static struct si_pc_block_gfxdescr groups_VI[] = {
	{ &cik_CB, 405 },
	{ &cik_CPF, 19 },
	{ &cik_DB, 257 },
	{ &cik_GRBM, 34 },
	{ &cik_GRBMSE, 15 },
	{ &cik_PA_SU, 154 },
	{ &cik_PA_SC, 397 },
	{ &cik_SPI, 197 },
	{ &cik_SQ, 273 },
	{ &cik_SX, 34 },
	{ &cik_TA, 119, 16 },
	{ &cik_TCA, 35, 2 },
	{ &cik_TCC, 192 },
	{ &cik_TD, 55, 16 },
	{ &cik_TCP, 180, 16 },
	{ &cik_GDS, 121 },
	{ &cik_VGT, 147 },
	{ &cik_IA, 24 },
	{ &cik_MC, 22 },
	{ &cik_SRBM, 27 },
	{ &cik_WD, 37 },
	{ &cik_CPG, 48 },
	{ &cik_CPC, 24 },
};

static struct si_pc_block_gfxdescr groups_gfx9[] = {
	{ &cik_CB, 438 },
	{ &cik_CPF, 32 },
	{ &cik_DB, 328 },
	{ &cik_GRBM, 38 },
	{ &cik_GRBMSE, 16 },
	{ &cik_PA_SU, 292 },
	{ &cik_PA_SC, 491 },
	{ &cik_SPI, 196 },
	{ &cik_SQ, 374 },
	{ &cik_SX, 208 },
	{ &cik_TA, 119, 16 },
	{ &cik_TCA, 35, 2 },
	{ &cik_TCC, 256 },
	{ &cik_TD, 57, 16 },
	{ &cik_TCP, 85, 16 },
	{ &cik_GDS, 121 },
	{ &cik_VGT, 148 },
	{ &cik_IA, 32 },
	{ &cik_WD, 58 },
	{ &cik_CPG, 59 },
	{ &cik_CPC, 35 },
};

static bool si_pc_block_has_per_se_groups(const struct si_perfcounters *pc,
					  const struct si_pc_block *block)
{
	return block->b->b->flags & SI_PC_BLOCK_SE_GROUPS ||
	       (block->b->b->flags & SI_PC_BLOCK_SE && pc->separate_se);
}

static bool si_pc_block_has_per_instance_groups(const struct si_perfcounters *pc,
						const struct si_pc_block *block)
{
	return block->b->b->flags & SI_PC_BLOCK_INSTANCE_GROUPS ||
	       (block->num_instances > 1 && pc->separate_instance);
}

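/* A perfcounter query type denotes a flat index over all (group, selector)
 * pairs of all blocks, in block order. A sketch of the decomposition used
 * throughout this file:
 *
 *   block:     first block for which index < num_groups * selectors
 *   sub_gid   = sub_index / block->b->selectors;   (group within the block)
 *   sub_index = sub_index % block->b->selectors;   (selector within group)
 */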
static struct si_pc_block *
lookup_counter(struct si_perfcounters *pc, unsigned index,
	       unsigned *base_gid, unsigned *sub_index)
{
	struct si_pc_block *block = pc->blocks;
	unsigned bid;

	*base_gid = 0;
	for (bid = 0; bid < pc->num_blocks; ++bid, ++block) {
		unsigned total = block->num_groups * block->b->selectors;

		if (index < total) {
			*sub_index = index;
			return block;
		}

		index -= total;
		*base_gid += block->num_groups;
	}

	return NULL;
}

static struct si_pc_block *
lookup_group(struct si_perfcounters *pc, unsigned *index)
{
	unsigned bid;
	struct si_pc_block *block = pc->blocks;

	for (bid = 0; bid < pc->num_blocks; ++bid, ++block) {
		if (*index < block->num_groups)
			return block;
		*index -= block->num_groups;
	}

	return NULL;
}

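/* Select which shader engine and block instance subsequent perfcounter
 * register accesses target. A negative se or instance requests broadcast
 * to all SEs/instances via the GRBM_GFX_INDEX broadcast bits. */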
static void si_pc_emit_instance(struct si_context *sctx,
				int se, int instance)
{
	struct radeon_cmdbuf *cs = sctx->gfx_cs;
	unsigned value = S_030800_SH_BROADCAST_WRITES(1);

	if (se >= 0) {
		value |= S_030800_SE_INDEX(se);
	} else {
		value |= S_030800_SE_BROADCAST_WRITES(1);
	}

	if (instance >= 0) {
		value |= S_030800_INSTANCE_INDEX(instance);
	} else {
		value |= S_030800_INSTANCE_BROADCAST_WRITES(1);
	}

	radeon_set_uconfig_reg(cs, R_030800_GRBM_GFX_INDEX, value);
}

static void si_pc_emit_shaders(struct si_context *sctx,
			       unsigned shaders)
{
	struct radeon_cmdbuf *cs = sctx->gfx_cs;

	radeon_set_uconfig_reg_seq(cs, R_036780_SQ_PERFCOUNTER_CTRL, 2);
	radeon_emit(cs, shaders & 0x7f);
	radeon_emit(cs, 0xffffffff);
}

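/* Program one block's counter selector registers according to its
 * si_pc_reg_layout. Worked example (illustrative values): for
 * SI_PC_MULTI_ALTERNATE with count = 2, num_multi = 1, num_prelude = 1,
 * the dwords written starting at select0 are
 *
 *   0, selectors[0] | select_or, 0, selectors[1] | select_or
 *
 * i.e. the prelude, then each primary selector immediately followed by its
 * zeroed secondary dword while idx < num_multi. */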
static void si_pc_emit_select(struct si_context *sctx,
			      struct si_pc_block *block,
			      unsigned count, unsigned *selectors)
{
	struct si_pc_block_base *regs = block->b->b;
	struct radeon_cmdbuf *cs = sctx->gfx_cs;
	unsigned idx;
	unsigned layout_multi = regs->layout & SI_PC_MULTI_MASK;
	unsigned dw;

	assert(count <= regs->num_counters);

	if (regs->layout & SI_PC_FAKE)
		return;

	if (layout_multi == SI_PC_MULTI_BLOCK) {
		assert(!(regs->layout & SI_PC_REG_REVERSE));

		dw = count + regs->num_prelude;
		if (count >= regs->num_multi)
			dw += regs->num_multi;
		radeon_set_uconfig_reg_seq(cs, regs->select0, dw);
		for (idx = 0; idx < regs->num_prelude; ++idx)
			radeon_emit(cs, 0);
		for (idx = 0; idx < MIN2(count, regs->num_multi); ++idx)
			radeon_emit(cs, selectors[idx] | regs->select_or);

		if (count < regs->num_multi) {
			unsigned select1 =
				regs->select0 + 4 * regs->num_multi;
			radeon_set_uconfig_reg_seq(cs, select1, count);
		}

		for (idx = 0; idx < MIN2(count, regs->num_multi); ++idx)
			radeon_emit(cs, 0);

		if (count > regs->num_multi) {
			for (idx = regs->num_multi; idx < count; ++idx)
				radeon_emit(cs, selectors[idx] | regs->select_or);
		}
	} else if (layout_multi == SI_PC_MULTI_TAIL) {
		unsigned select1, select1_count;

		assert(!(regs->layout & SI_PC_REG_REVERSE));

		radeon_set_uconfig_reg_seq(cs, regs->select0, count + regs->num_prelude);
		for (idx = 0; idx < regs->num_prelude; ++idx)
			radeon_emit(cs, 0);
		for (idx = 0; idx < count; ++idx)
			radeon_emit(cs, selectors[idx] | regs->select_or);

		select1 = regs->select0 + 4 * regs->num_counters;
		select1_count = MIN2(count, regs->num_multi);
		radeon_set_uconfig_reg_seq(cs, select1, select1_count);
		for (idx = 0; idx < select1_count; ++idx)
			radeon_emit(cs, 0);
	} else if (layout_multi == SI_PC_MULTI_CUSTOM) {
		unsigned *reg = regs->select;
		for (idx = 0; idx < count; ++idx) {
			radeon_set_uconfig_reg(cs, *reg++, selectors[idx] | regs->select_or);
			if (idx < regs->num_multi)
				radeon_set_uconfig_reg(cs, *reg++, 0);
		}
	} else {
		assert(layout_multi == SI_PC_MULTI_ALTERNATE);

		unsigned reg_base = regs->select0;
		unsigned reg_count = count + MIN2(count, regs->num_multi);
		reg_count += regs->num_prelude;

		if (!(regs->layout & SI_PC_REG_REVERSE)) {
			radeon_set_uconfig_reg_seq(cs, reg_base, reg_count);

			for (idx = 0; idx < regs->num_prelude; ++idx)
				radeon_emit(cs, 0);
			for (idx = 0; idx < count; ++idx) {
				radeon_emit(cs, selectors[idx] | regs->select_or);
				if (idx < regs->num_multi)
					radeon_emit(cs, 0);
			}
		} else {
			reg_base -= (reg_count - 1) * 4;
			radeon_set_uconfig_reg_seq(cs, reg_base, reg_count);

			for (idx = count; idx > 0; --idx) {
				if (idx <= regs->num_multi)
					radeon_emit(cs, 0);
				radeon_emit(cs, selectors[idx - 1] | regs->select_or);
			}
			for (idx = 0; idx < regs->num_prelude; ++idx)
				radeon_emit(cs, 0);
		}
	}
}

static void si_pc_emit_start(struct si_context *sctx,
			     struct si_resource *buffer, uint64_t va)
{
	struct radeon_cmdbuf *cs = sctx->gfx_cs;

	si_cp_copy_data(sctx,
			COPY_DATA_DST_MEM, buffer, va - buffer->gpu_address,
			COPY_DATA_IMM, NULL, 1);

	radeon_set_uconfig_reg(cs, R_036020_CP_PERFMON_CNTL,
			       S_036020_PERFMON_STATE(V_036020_DISABLE_AND_RESET));
	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
	radeon_emit(cs, EVENT_TYPE(V_028A90_PERFCOUNTER_START) | EVENT_INDEX(0));
	radeon_set_uconfig_reg(cs, R_036020_CP_PERFMON_CNTL,
			       S_036020_PERFMON_STATE(V_036020_START_COUNTING));
}

/* Note: The buffer was already added in si_pc_emit_start, so we don't have to
 * do it again in here. */
static void si_pc_emit_stop(struct si_context *sctx,
			    struct si_resource *buffer, uint64_t va)
{
	struct radeon_cmdbuf *cs = sctx->gfx_cs;

	si_cp_release_mem(sctx, V_028A90_BOTTOM_OF_PIPE_TS, 0,
			  EOP_DST_SEL_MEM, EOP_INT_SEL_NONE,
			  EOP_DATA_SEL_VALUE_32BIT,
			  buffer, va, 0, SI_NOT_QUERY);
	si_cp_wait_mem(sctx, cs, va, 0, 0xffffffff, WAIT_REG_MEM_EQUAL);

	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
	radeon_emit(cs, EVENT_TYPE(V_028A90_PERFCOUNTER_SAMPLE) | EVENT_INDEX(0));
	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
	radeon_emit(cs, EVENT_TYPE(V_028A90_PERFCOUNTER_STOP) | EVENT_INDEX(0));
	radeon_set_uconfig_reg(cs, R_036020_CP_PERFMON_CNTL,
			       S_036020_PERFMON_STATE(V_036020_STOP_COUNTING) |
			       S_036020_PERFMON_SAMPLE_ENABLE(1));
}

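/* Copy the current value of each selected counter of one block to the
 * results buffer at va, one uint64 per counter, via CP COPY_DATA from the
 * perfcounter register space. SI_PC_FAKE blocks (MC, SRBM) have no readable
 * registers here and get zero placeholders instead. */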
static void si_pc_emit_read(struct si_context *sctx,
			    struct si_pc_block *block,
			    unsigned count, uint64_t va)
{
	struct si_pc_block_base *regs = block->b->b;
	struct radeon_cmdbuf *cs = sctx->gfx_cs;
	unsigned idx;
	unsigned reg = regs->counter0_lo;
	unsigned reg_delta = 8;

	if (!(regs->layout & SI_PC_FAKE)) {
		if (regs->layout & SI_PC_REG_REVERSE)
			reg_delta = -reg_delta;

		for (idx = 0; idx < count; ++idx) {
			if (regs->counters)
				reg = regs->counters[idx];

			radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
			radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_PERF) |
					COPY_DATA_DST_SEL(COPY_DATA_DST_MEM) |
					COPY_DATA_COUNT_SEL); /* 64 bits */
			radeon_emit(cs, reg >> 2);
			radeon_emit(cs, 0); /* unused */
			radeon_emit(cs, va);
			radeon_emit(cs, va >> 32);
			va += sizeof(uint64_t);
			reg += reg_delta;
		}
	} else {
		for (idx = 0; idx < count; ++idx) {
			radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
			radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_IMM) |
					COPY_DATA_DST_SEL(COPY_DATA_DST_MEM) |
					COPY_DATA_COUNT_SEL);
			radeon_emit(cs, 0); /* immediate */
			radeon_emit(cs, 0);
			radeon_emit(cs, va);
			radeon_emit(cs, va >> 32);
			va += sizeof(uint64_t);
		}
	}
}

static void si_pc_query_destroy(struct si_screen *sscreen,
				struct si_query *squery)
{
	struct si_query_pc *query = (struct si_query_pc *)squery;

	while (query->groups) {
		struct si_query_group *group = query->groups;
		query->groups = group->next;
		FREE(group);
	}

	FREE(query->counters);

	si_query_buffer_destroy(sscreen, &query->buffer);
	FREE(query);
}

static void si_pc_query_resume(struct si_context *sctx, struct si_query *squery)
{
	struct si_query_pc *query = (struct si_query_pc *)squery;
	int current_se = -1;
	int current_instance = -1;

	if (!si_query_buffer_alloc(sctx, &query->buffer, NULL, query->result_size))
		return;
	si_need_gfx_cs_space(sctx);

	if (query->shaders)
		si_pc_emit_shaders(sctx, query->shaders);

	for (struct si_query_group *group = query->groups; group; group = group->next) {
		struct si_pc_block *block = group->block;

		if (group->se != current_se || group->instance != current_instance) {
			current_se = group->se;
			current_instance = group->instance;
			si_pc_emit_instance(sctx, group->se, group->instance);
		}

		si_pc_emit_select(sctx, block, group->num_counters, group->selectors);
	}

	if (current_se != -1 || current_instance != -1)
		si_pc_emit_instance(sctx, -1, -1);

	uint64_t va = query->buffer.buf->gpu_address + query->buffer.results_end;
	si_pc_emit_start(sctx, query->buffer.buf, va);
}

static void si_pc_query_suspend(struct si_context *sctx, struct si_query *squery)
{
	struct si_query_pc *query = (struct si_query_pc *)squery;

	if (!query->buffer.buf)
		return;

	uint64_t va = query->buffer.buf->gpu_address + query->buffer.results_end;
	query->buffer.results_end += query->result_size;

	si_pc_emit_stop(sctx, query->buffer.buf, va);

	for (struct si_query_group *group = query->groups; group; group = group->next) {
		struct si_pc_block *block = group->block;
		unsigned se = group->se >= 0 ? group->se : 0;
		unsigned se_end = se + 1;

		if ((block->b->b->flags & SI_PC_BLOCK_SE) && (group->se < 0))
			se_end = sctx->screen->info.max_se;

		do {
			unsigned instance = group->instance >= 0 ? group->instance : 0;

			do {
				si_pc_emit_instance(sctx, se, instance);
				si_pc_emit_read(sctx, block, group->num_counters, va);
				va += sizeof(uint64_t) * group->num_counters;
			} while (group->instance < 0 && ++instance < block->num_instances);
		} while (++se < se_end);
	}

	si_pc_emit_instance(sctx, -1, -1);
}

static bool si_pc_query_begin(struct si_context *ctx, struct si_query *squery)
{
	struct si_query_pc *query = (struct si_query_pc *)squery;

	si_query_buffer_reset(ctx, &query->buffer);

	LIST_ADDTAIL(&query->b.active_list, &ctx->active_queries);
	ctx->num_cs_dw_queries_suspend += query->b.num_cs_dw_suspend;

	si_pc_query_resume(ctx, squery);

	return true;
}

static bool si_pc_query_end(struct si_context *ctx, struct si_query *squery)
{
	struct si_query_pc *query = (struct si_query_pc *)squery;

	si_pc_query_suspend(ctx, squery);

	LIST_DEL(&squery->active_list);
	ctx->num_cs_dw_queries_suspend -= squery->num_cs_dw_suspend;

	return query->buffer.buf != NULL;
}

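/* Accumulate one results snapshot into `result`. Each 64-bit result slot is
 * truncated to its low 32 bits before being added; the accumulation itself
 * is done in 64 bits so sums over many instances/SEs do not overflow. */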
static void si_pc_query_add_result(struct si_query_pc *query,
				   void *buffer,
				   union pipe_query_result *result)
{
	uint64_t *results = buffer;
	unsigned i, j;

	for (i = 0; i < query->num_counters; ++i) {
		struct si_query_counter *counter = &query->counters[i];

		for (j = 0; j < counter->qwords; ++j) {
			uint32_t value = results[counter->base + j * counter->stride];
			result->batch[i].u64 += value;
		}
	}
}

static bool si_pc_query_get_result(struct si_context *sctx, struct si_query *squery,
				   bool wait, union pipe_query_result *result)
{
	struct si_query_pc *query = (struct si_query_pc *)squery;

	memset(result, 0, sizeof(result->batch[0]) * query->num_counters);

	for (struct si_query_buffer *qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned usage = PIPE_TRANSFER_READ |
				 (wait ? 0 : PIPE_TRANSFER_DONTBLOCK);
		unsigned results_base = 0;
		void *map;

		if (squery->b.flushed)
			map = sctx->ws->buffer_map(qbuf->buf->buf, NULL, usage);
		else
			map = si_buffer_map_sync_with_rings(sctx, qbuf->buf, usage);

		if (!map)
			return false;

		while (results_base != qbuf->results_end) {
			si_pc_query_add_result(query, map + results_base, result);
			results_base += query->result_size;
		}
	}

	return true;
}

static const struct si_query_ops batch_query_ops = {
	.destroy = si_pc_query_destroy,
	.begin = si_pc_query_begin,
	.end = si_pc_query_end,
	.get_result = si_pc_query_get_result,

	.suspend = si_pc_query_suspend,
	.resume = si_pc_query_resume,
};

static struct si_query_group *get_group_state(struct si_screen *screen,
					      struct si_query_pc *query,
					      struct si_pc_block *block,
					      unsigned sub_gid)
{
	struct si_query_group *group = query->groups;

	while (group) {
		if (group->block == block && group->sub_gid == sub_gid)
			return group;
		group = group->next;
	}

	group = CALLOC_STRUCT(si_query_group);
	if (!group)
		return NULL;

	group->block = block;
	group->sub_gid = sub_gid;

	if (block->b->b->flags & SI_PC_BLOCK_SHADER) {
		unsigned sub_gids = block->num_instances;
		unsigned shader_id;
		unsigned shaders;
		unsigned query_shaders;

		if (si_pc_block_has_per_se_groups(screen->perfcounters, block))
			sub_gids = sub_gids * screen->info.max_se;
		shader_id = sub_gid / sub_gids;
		sub_gid = sub_gid % sub_gids;

		shaders = si_pc_shader_type_bits[shader_id];

		query_shaders = query->shaders & ~SI_PC_SHADERS_WINDOWING;
		if (query_shaders && query_shaders != shaders) {
			fprintf(stderr, "si_perfcounter: incompatible shader groups\n");
			FREE(group);
			return NULL;
		}
		query->shaders = shaders;
	}

	if (block->b->b->flags & SI_PC_BLOCK_SHADER_WINDOWED && !query->shaders) {
		/* A non-zero value in query->shaders ensures that the shader
		 * masking is reset unless the user explicitly requests one. */
		query->shaders = SI_PC_SHADERS_WINDOWING;
	}

	if (si_pc_block_has_per_se_groups(screen->perfcounters, block)) {
		group->se = sub_gid / block->num_instances;
		sub_gid = sub_gid % block->num_instances;
	} else {
		group->se = -1;
	}

	if (si_pc_block_has_per_instance_groups(screen->perfcounters, block)) {
		group->instance = sub_gid;
	} else {
		group->instance = -1;
	}

	group->next = query->groups;
	query->groups = group;

	return group;
}

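/* Usage sketch (hypothetical Gallium caller; idx0/idx1 stand in for counter
 * indices discovered via si_get_perfcounter_info):
 *
 *   unsigned types[2] = { SI_QUERY_FIRST_PERFCOUNTER + idx0,
 *                         SI_QUERY_FIRST_PERFCOUNTER + idx1 };
 *   struct pipe_query *q = si_create_batch_query(ctx, 2, types);
 *   ctx->begin_query(ctx, q);
 *   ... rendering ...
 *   ctx->end_query(ctx, q);
 *   union pipe_query_result *res = calloc(2, sizeof(*res));
 *   ctx->get_query_result(ctx, q, true, res);  // fills res->batch[0..1]
 */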
struct pipe_query *si_create_batch_query(struct pipe_context *ctx,
					 unsigned num_queries,
					 unsigned *query_types)
{
	struct si_screen *screen =
		(struct si_screen *)ctx->screen;
	struct si_perfcounters *pc = screen->perfcounters;
	struct si_pc_block *block;
	struct si_query_group *group;
	struct si_query_pc *query;
	unsigned base_gid, sub_gid, sub_index;
	unsigned i, j;

	if (!pc)
		return NULL;

	query = CALLOC_STRUCT(si_query_pc);
	if (!query)
		return NULL;

	query->b.ops = &batch_query_ops;

	query->num_counters = num_queries;

	/* Collect selectors per group */
	for (i = 0; i < num_queries; ++i) {
		unsigned sub_gid;

		if (query_types[i] < SI_QUERY_FIRST_PERFCOUNTER)
			goto error;

		block = lookup_counter(pc, query_types[i] - SI_QUERY_FIRST_PERFCOUNTER,
				       &base_gid, &sub_index);
		if (!block)
			goto error;

		sub_gid = sub_index / block->b->selectors;
		sub_index = sub_index % block->b->selectors;

		group = get_group_state(screen, query, block, sub_gid);
		if (!group)
			goto error;

		if (group->num_counters >= block->b->b->num_counters) {
			fprintf(stderr,
				"perfcounter group %s: too many selected\n",
				block->b->b->name);
			goto error;
		}
		group->selectors[group->num_counters] = sub_index;
		++group->num_counters;
	}

	/* Compute result bases and CS size per group */
	query->b.num_cs_dw_suspend = pc->num_stop_cs_dwords;
	query->b.num_cs_dw_suspend += pc->num_instance_cs_dwords;

	i = 0;
	for (group = query->groups; group; group = group->next) {
		struct si_pc_block *block = group->block;
		unsigned read_dw;
		unsigned instances = 1;

		if ((block->b->b->flags & SI_PC_BLOCK_SE) && group->se < 0)
			instances = screen->info.max_se;
		if (group->instance < 0)
			instances *= block->num_instances;

		group->result_base = i;
		query->result_size += sizeof(uint64_t) * instances * group->num_counters;
		i += instances * group->num_counters;

		read_dw = 6 * group->num_counters;
		query->b.num_cs_dw_suspend += instances * read_dw;
		query->b.num_cs_dw_suspend += instances * pc->num_instance_cs_dwords;
	}

	if (query->shaders) {
		if (query->shaders == SI_PC_SHADERS_WINDOWING)
			query->shaders = 0xffffffff;
	}

	/* Map user-supplied query array to result indices */
	query->counters = CALLOC(num_queries, sizeof(*query->counters));
	if (!query->counters)
		goto error;
	for (i = 0; i < num_queries; ++i) {
		struct si_query_counter *counter = &query->counters[i];
		struct si_pc_block *block;

		block = lookup_counter(pc, query_types[i] - SI_QUERY_FIRST_PERFCOUNTER,
				       &base_gid, &sub_index);

		sub_gid = sub_index / block->b->selectors;
		sub_index = sub_index % block->b->selectors;

		group = get_group_state(screen, query, block, sub_gid);
		assert(group != NULL);

		for (j = 0; j < group->num_counters; ++j) {
			if (group->selectors[j] == sub_index)
				break;
		}

		counter->base = group->result_base + j;
		counter->stride = group->num_counters;

		counter->qwords = 1;
		if ((block->b->b->flags & SI_PC_BLOCK_SE) && group->se < 0)
			counter->qwords = screen->info.max_se;
		if (group->instance < 0)
			counter->qwords *= block->num_instances;
	}

	return (struct pipe_query *)query;

error:
	si_pc_query_destroy(screen, &query->b);
	return NULL;
}

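/* Build the group and selector name tables for one block. The generated
 * names follow (illustrative examples; which parts appear depends on the
 * block's per-SE/per-instance grouping):
 *
 *   group:    <block>[<shader suffix>][<se>][_][<instance>]   e.g. "TA2_5"
 *   selector: <group>_<3-digit selector>                      e.g. "TA2_5_042"
 */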
static bool si_init_block_names(struct si_screen *screen,
				struct si_pc_block *block)
{
	bool per_instance_groups = si_pc_block_has_per_instance_groups(screen->perfcounters, block);
	bool per_se_groups = si_pc_block_has_per_se_groups(screen->perfcounters, block);
	unsigned i, j, k;
	unsigned groups_shader = 1, groups_se = 1, groups_instance = 1;
	unsigned namelen;
	char *groupname;
	char *p;

	if (per_instance_groups)
		groups_instance = block->num_instances;
	if (per_se_groups)
		groups_se = screen->info.max_se;
	if (block->b->b->flags & SI_PC_BLOCK_SHADER)
		groups_shader = ARRAY_SIZE(si_pc_shader_type_bits);

	namelen = strlen(block->b->b->name);
	block->group_name_stride = namelen + 1;
	if (block->b->b->flags & SI_PC_BLOCK_SHADER)
		block->group_name_stride += 3;
	if (per_se_groups) {
		assert(groups_se <= 10);
		block->group_name_stride += 1;

		if (per_instance_groups)
			block->group_name_stride += 1;
	}
	if (per_instance_groups) {
		assert(groups_instance <= 100);
		block->group_name_stride += 2;
	}

	block->group_names = MALLOC(block->num_groups * block->group_name_stride);
	if (!block->group_names)
		return false;

	groupname = block->group_names;
	for (i = 0; i < groups_shader; ++i) {
		const char *shader_suffix = si_pc_shader_type_suffixes[i];
		unsigned shaderlen = strlen(shader_suffix);
		for (j = 0; j < groups_se; ++j) {
			for (k = 0; k < groups_instance; ++k) {
				strcpy(groupname, block->b->b->name);
				p = groupname + namelen;

				if (block->b->b->flags & SI_PC_BLOCK_SHADER) {
					strcpy(p, shader_suffix);
					p += shaderlen;
				}

				if (per_se_groups) {
					p += sprintf(p, "%d", j);
					if (per_instance_groups)
						*p++ = '_';
				}

				if (per_instance_groups)
					p += sprintf(p, "%d", k);

				groupname += block->group_name_stride;
			}
		}
	}

	assert(block->b->selectors <= 1000);
	block->selector_name_stride = block->group_name_stride + 4;
	block->selector_names = MALLOC(block->num_groups * block->b->selectors *
				       block->selector_name_stride);
	if (!block->selector_names)
		return false;

	groupname = block->group_names;
	p = block->selector_names;
	for (i = 0; i < block->num_groups; ++i) {
		for (j = 0; j < block->b->selectors; ++j) {
			sprintf(p, "%s_%03d", groupname, j);
			p += block->selector_name_stride;
		}
		groupname += block->group_name_stride;
	}

	return true;
}

int si_get_perfcounter_info(struct si_screen *screen,
			    unsigned index,
			    struct pipe_driver_query_info *info)
{
	struct si_perfcounters *pc = screen->perfcounters;
	struct si_pc_block *block;
	unsigned base_gid, sub;

	if (!pc)
		return 0;

	if (!info) {
		unsigned bid, num_queries = 0;

		for (bid = 0; bid < pc->num_blocks; ++bid) {
			num_queries += pc->blocks[bid].b->selectors *
				       pc->blocks[bid].num_groups;
		}

		return num_queries;
	}

	block = lookup_counter(pc, index, &base_gid, &sub);
	if (!block)
		return 0;

	if (!block->selector_names) {
		if (!si_init_block_names(screen, block))
			return 0;
	}
	info->name = block->selector_names + sub * block->selector_name_stride;
	info->query_type = SI_QUERY_FIRST_PERFCOUNTER + index;
	info->max_value.u64 = 0;
	info->type = PIPE_DRIVER_QUERY_TYPE_UINT64;
	info->result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_AVERAGE;
	info->group_id = base_gid + sub / block->b->selectors;
	info->flags = PIPE_DRIVER_QUERY_FLAG_BATCH;
	if (sub > 0 && sub + 1 < block->b->selectors * block->num_groups)
		info->flags |= PIPE_DRIVER_QUERY_FLAG_DONT_LIST;
	return 1;
}

int si_get_perfcounter_group_info(struct si_screen *screen,
				  unsigned index,
				  struct pipe_driver_query_group_info *info)
{
	struct si_perfcounters *pc = screen->perfcounters;
	struct si_pc_block *block;

	if (!pc)
		return 0;

	if (!info)
		return pc->num_groups;

	block = lookup_group(pc, &index);
	if (!block)
		return 0;

	if (!block->group_names) {
		if (!si_init_block_names(screen, block))
			return 0;
	}
	info->name = block->group_names + index * block->group_name_stride;
	info->num_queries = block->b->selectors;
	info->max_active_queries = block->b->b->num_counters;
	return 1;
}

void si_destroy_perfcounters(struct si_screen *screen)
{
	struct si_perfcounters *pc = screen->perfcounters;
	unsigned i;

	if (!pc)
		return;

	for (i = 0; i < pc->num_blocks; ++i) {
		FREE(pc->blocks[i].group_names);
		FREE(pc->blocks[i].selector_names);
	}
	FREE(pc->blocks);
	FREE(pc);
	screen->perfcounters = NULL;
}

void si_init_perfcounters(struct si_screen *screen)
{
	struct si_perfcounters *pc;
	const struct si_pc_block_gfxdescr *blocks;
	unsigned num_blocks;
	unsigned i;

	switch (screen->info.chip_class) {
	case CIK:
		blocks = groups_CIK;
		num_blocks = ARRAY_SIZE(groups_CIK);
		break;
	case VI:
		blocks = groups_VI;
		num_blocks = ARRAY_SIZE(groups_VI);
		break;
	case GFX9:
		blocks = groups_gfx9;
		num_blocks = ARRAY_SIZE(groups_gfx9);
		break;
	case SI:
	default:
		return; /* not implemented */
	}

	if (screen->info.max_sh_per_se != 1) {
		/* This should not happen on non-SI chips. */
		fprintf(stderr, "si_init_perfcounters: max_sh_per_se = %d not "
			"supported (inaccurate performance counters)\n",
			screen->info.max_sh_per_se);
	}

	screen->perfcounters = pc = CALLOC_STRUCT(si_perfcounters);
	if (!pc)
		return;

	pc->num_stop_cs_dwords = 14 + si_cp_write_fence_dwords(screen);
	pc->num_instance_cs_dwords = 3;

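	/* Debug knobs (boolean environment options, default false):
	 * RADEON_PC_SEPARATE_SE exposes one group per shader engine for
	 * SE-scoped blocks instead of summing over SEs;
	 * RADEON_PC_SEPARATE_INSTANCE likewise exposes one group per block
	 * instance instead of summing over instances. */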
	pc->separate_se = debug_get_bool_option("RADEON_PC_SEPARATE_SE", false);
	pc->separate_instance = debug_get_bool_option("RADEON_PC_SEPARATE_INSTANCE", false);

	pc->blocks = CALLOC(num_blocks, sizeof(struct si_pc_block));
	if (!pc->blocks)
		goto error;
	pc->num_blocks = num_blocks;

	for (i = 0; i < num_blocks; ++i) {
		struct si_pc_block *block = &pc->blocks[i];
		block->b = &blocks[i];
		block->num_instances = MAX2(1, block->b->instances);

		if (!strcmp(block->b->b->name, "CB") ||
		    !strcmp(block->b->b->name, "DB"))
			block->num_instances = screen->info.max_se;
		else if (!strcmp(block->b->b->name, "TCC"))
			block->num_instances = screen->info.num_tcc_blocks;
		else if (!strcmp(block->b->b->name, "IA"))
			block->num_instances = MAX2(1, screen->info.max_se / 2);

		if (si_pc_block_has_per_instance_groups(pc, block)) {
			block->num_groups = block->num_instances;
		} else {
			block->num_groups = 1;
		}

		if (si_pc_block_has_per_se_groups(pc, block))
			block->num_groups *= screen->info.max_se;
		if (block->b->b->flags & SI_PC_BLOCK_SHADER)
			block->num_groups *= ARRAY_SIZE(si_pc_shader_type_bits);

		pc->num_groups += block->num_groups;
	}

	return;

error:
	si_destroy_perfcounters(screen);
}
1358