si_query.c revision 01e04c3f
1/*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
4 * Copyright 2018 Advanced Micro Devices, Inc.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * on the rights to use, copy, modify, merge, publish, distribute, sub
11 * license, and/or sell copies of the Software, and to permit persons to whom
12 * the Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
16 * Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 */
26
27#include "si_pipe.h"
28#include "si_query.h"
29#include "util/u_memory.h"
30#include "util/u_upload_mgr.h"
31#include "util/os_time.h"
32#include "util/u_suballoc.h"
33#include "amd/common/sid.h"
34
35#define SI_MAX_STREAMS 4
36
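/* Byte offsets of the begin/end values and the result fence within one
 * hardware query result slot, plus the stride/count of begin/end pairs.
 * Filled by si_get_hw_query_params() and consumed by the compute-shader
 * result conversion in si_query_hw_get_result_resource(). */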
37struct si_hw_query_params {
38	unsigned start_offset;
39	unsigned end_offset;
40	unsigned fence_offset;
41	unsigned pair_stride;
42	unsigned pair_count;
43};
44
45/* Queries without buffer handling or suspend/resume. */
46struct si_query_sw {
47	struct si_query b;
48
49	uint64_t begin_result;
50	uint64_t end_result;
51
52	uint64_t begin_time;
53	uint64_t end_time;
54
55	/* Fence for GPU_FINISHED. */
56	struct pipe_fence_handle *fence;
57};
58
59static void si_query_sw_destroy(struct si_screen *sscreen,
60				struct si_query *rquery)
61{
62	struct si_query_sw *query = (struct si_query_sw *)rquery;
63
64	sscreen->b.fence_reference(&sscreen->b, &query->fence, NULL);
65	FREE(query);
66}
67
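/* Map a driver-specific software query type to the matching winsys counter. */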
68static enum radeon_value_id winsys_id_from_type(unsigned type)
69{
70	switch (type) {
71	case SI_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
72	case SI_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
73	case SI_QUERY_MAPPED_VRAM: return RADEON_MAPPED_VRAM;
74	case SI_QUERY_MAPPED_GTT: return RADEON_MAPPED_GTT;
75	case SI_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
76	case SI_QUERY_NUM_MAPPED_BUFFERS: return RADEON_NUM_MAPPED_BUFFERS;
77	case SI_QUERY_NUM_GFX_IBS: return RADEON_NUM_GFX_IBS;
78	case SI_QUERY_NUM_SDMA_IBS: return RADEON_NUM_SDMA_IBS;
79	case SI_QUERY_GFX_BO_LIST_SIZE: return RADEON_GFX_BO_LIST_COUNTER;
80	case SI_QUERY_GFX_IB_SIZE: return RADEON_GFX_IB_SIZE_COUNTER;
81	case SI_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
82	case SI_QUERY_NUM_EVICTIONS: return RADEON_NUM_EVICTIONS;
83	case SI_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: return RADEON_NUM_VRAM_CPU_PAGE_FAULTS;
84	case SI_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
85	case SI_QUERY_VRAM_VIS_USAGE: return RADEON_VRAM_VIS_USAGE;
86	case SI_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
87	case SI_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
88	case SI_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
89	case SI_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
90	case SI_QUERY_CS_THREAD_BUSY: return RADEON_CS_THREAD_TIME;
91	default: unreachable("query type does not correspond to winsys id");
92	}
93}
94
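/* Drain the SDMA queue (flush and wait for the fence), then sample the CPU
 * clock. Used by SI_QUERY_TIME_ELAPSED_SDMA_SI, which measures elapsed SDMA
 * time with CPU timestamps instead of GPU timestamps. */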
95static int64_t si_finish_dma_get_cpu_time(struct si_context *sctx)
96{
97	struct pipe_fence_handle *fence = NULL;
98
99	si_flush_dma_cs(sctx, 0, &fence);
100	if (fence) {
101		sctx->ws->fence_wait(sctx->ws, fence, PIPE_TIMEOUT_INFINITE);
102		sctx->ws->fence_reference(&fence, NULL);
103	}
104
105	return os_time_get_nano();
106}
107
108static bool si_query_sw_begin(struct si_context *sctx,
109			      struct si_query *rquery)
110{
111	struct si_query_sw *query = (struct si_query_sw *)rquery;
112	enum radeon_value_id ws_id;
113
114	switch(query->b.type) {
115	case PIPE_QUERY_TIMESTAMP_DISJOINT:
116	case PIPE_QUERY_GPU_FINISHED:
117		break;
118	case SI_QUERY_TIME_ELAPSED_SDMA_SI:
119		query->begin_result = si_finish_dma_get_cpu_time(sctx);
120		break;
121	case SI_QUERY_DRAW_CALLS:
122		query->begin_result = sctx->num_draw_calls;
123		break;
124	case SI_QUERY_DECOMPRESS_CALLS:
125		query->begin_result = sctx->num_decompress_calls;
126		break;
127	case SI_QUERY_MRT_DRAW_CALLS:
128		query->begin_result = sctx->num_mrt_draw_calls;
129		break;
130	case SI_QUERY_PRIM_RESTART_CALLS:
131		query->begin_result = sctx->num_prim_restart_calls;
132		break;
133	case SI_QUERY_SPILL_DRAW_CALLS:
134		query->begin_result = sctx->num_spill_draw_calls;
135		break;
136	case SI_QUERY_COMPUTE_CALLS:
137		query->begin_result = sctx->num_compute_calls;
138		break;
139	case SI_QUERY_SPILL_COMPUTE_CALLS:
140		query->begin_result = sctx->num_spill_compute_calls;
141		break;
142	case SI_QUERY_DMA_CALLS:
143		query->begin_result = sctx->num_dma_calls;
144		break;
145	case SI_QUERY_CP_DMA_CALLS:
146		query->begin_result = sctx->num_cp_dma_calls;
147		break;
148	case SI_QUERY_NUM_VS_FLUSHES:
149		query->begin_result = sctx->num_vs_flushes;
150		break;
151	case SI_QUERY_NUM_PS_FLUSHES:
152		query->begin_result = sctx->num_ps_flushes;
153		break;
154	case SI_QUERY_NUM_CS_FLUSHES:
155		query->begin_result = sctx->num_cs_flushes;
156		break;
157	case SI_QUERY_NUM_CB_CACHE_FLUSHES:
158		query->begin_result = sctx->num_cb_cache_flushes;
159		break;
160	case SI_QUERY_NUM_DB_CACHE_FLUSHES:
161		query->begin_result = sctx->num_db_cache_flushes;
162		break;
163	case SI_QUERY_NUM_L2_INVALIDATES:
164		query->begin_result = sctx->num_L2_invalidates;
165		break;
166	case SI_QUERY_NUM_L2_WRITEBACKS:
167		query->begin_result = sctx->num_L2_writebacks;
168		break;
169	case SI_QUERY_NUM_RESIDENT_HANDLES:
170		query->begin_result = sctx->num_resident_handles;
171		break;
172	case SI_QUERY_TC_OFFLOADED_SLOTS:
173		query->begin_result = sctx->tc ? sctx->tc->num_offloaded_slots : 0;
174		break;
175	case SI_QUERY_TC_DIRECT_SLOTS:
176		query->begin_result = sctx->tc ? sctx->tc->num_direct_slots : 0;
177		break;
178	case SI_QUERY_TC_NUM_SYNCS:
179		query->begin_result = sctx->tc ? sctx->tc->num_syncs : 0;
180		break;
181	case SI_QUERY_REQUESTED_VRAM:
182	case SI_QUERY_REQUESTED_GTT:
183	case SI_QUERY_MAPPED_VRAM:
184	case SI_QUERY_MAPPED_GTT:
185	case SI_QUERY_VRAM_USAGE:
186	case SI_QUERY_VRAM_VIS_USAGE:
187	case SI_QUERY_GTT_USAGE:
188	case SI_QUERY_GPU_TEMPERATURE:
189	case SI_QUERY_CURRENT_GPU_SCLK:
190	case SI_QUERY_CURRENT_GPU_MCLK:
191	case SI_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
192	case SI_QUERY_NUM_MAPPED_BUFFERS:
193		query->begin_result = 0;
194		break;
195	case SI_QUERY_BUFFER_WAIT_TIME:
196	case SI_QUERY_GFX_IB_SIZE:
197	case SI_QUERY_NUM_GFX_IBS:
198	case SI_QUERY_NUM_SDMA_IBS:
199	case SI_QUERY_NUM_BYTES_MOVED:
200	case SI_QUERY_NUM_EVICTIONS:
201	case SI_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
202		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
203		query->begin_result = sctx->ws->query_value(sctx->ws, ws_id);
204		break;
205	}
206	case SI_QUERY_GFX_BO_LIST_SIZE:
207		ws_id = winsys_id_from_type(query->b.type);
208		query->begin_result = sctx->ws->query_value(sctx->ws, ws_id);
209		query->begin_time = sctx->ws->query_value(sctx->ws,
210							  RADEON_NUM_GFX_IBS);
211		break;
212	case SI_QUERY_CS_THREAD_BUSY:
213		ws_id = winsys_id_from_type(query->b.type);
214		query->begin_result = sctx->ws->query_value(sctx->ws, ws_id);
215		query->begin_time = os_time_get_nano();
216		break;
217	case SI_QUERY_GALLIUM_THREAD_BUSY:
218		query->begin_result =
219			sctx->tc ? util_queue_get_thread_time_nano(&sctx->tc->queue, 0) : 0;
220		query->begin_time = os_time_get_nano();
221		break;
222	case SI_QUERY_GPU_LOAD:
223	case SI_QUERY_GPU_SHADERS_BUSY:
224	case SI_QUERY_GPU_TA_BUSY:
225	case SI_QUERY_GPU_GDS_BUSY:
226	case SI_QUERY_GPU_VGT_BUSY:
227	case SI_QUERY_GPU_IA_BUSY:
228	case SI_QUERY_GPU_SX_BUSY:
229	case SI_QUERY_GPU_WD_BUSY:
230	case SI_QUERY_GPU_BCI_BUSY:
231	case SI_QUERY_GPU_SC_BUSY:
232	case SI_QUERY_GPU_PA_BUSY:
233	case SI_QUERY_GPU_DB_BUSY:
234	case SI_QUERY_GPU_CP_BUSY:
235	case SI_QUERY_GPU_CB_BUSY:
236	case SI_QUERY_GPU_SDMA_BUSY:
237	case SI_QUERY_GPU_PFP_BUSY:
238	case SI_QUERY_GPU_MEQ_BUSY:
239	case SI_QUERY_GPU_ME_BUSY:
240	case SI_QUERY_GPU_SURF_SYNC_BUSY:
241	case SI_QUERY_GPU_CP_DMA_BUSY:
242	case SI_QUERY_GPU_SCRATCH_RAM_BUSY:
243		query->begin_result = si_begin_counter(sctx->screen,
244							 query->b.type);
245		break;
246	case SI_QUERY_NUM_COMPILATIONS:
247		query->begin_result = p_atomic_read(&sctx->screen->num_compilations);
248		break;
249	case SI_QUERY_NUM_SHADERS_CREATED:
250		query->begin_result = p_atomic_read(&sctx->screen->num_shaders_created);
251		break;
252	case SI_QUERY_NUM_SHADER_CACHE_HITS:
253		query->begin_result =
254			p_atomic_read(&sctx->screen->num_shader_cache_hits);
255		break;
256	case SI_QUERY_GPIN_ASIC_ID:
257	case SI_QUERY_GPIN_NUM_SIMD:
258	case SI_QUERY_GPIN_NUM_RB:
259	case SI_QUERY_GPIN_NUM_SPI:
260	case SI_QUERY_GPIN_NUM_SE:
261		break;
262	default:
263		unreachable("si_query_sw_begin: bad query type");
264	}
265
266	return true;
267}
268
269static bool si_query_sw_end(struct si_context *sctx,
270			    struct si_query *rquery)
271{
272	struct si_query_sw *query = (struct si_query_sw *)rquery;
273	enum radeon_value_id ws_id;
274
275	switch(query->b.type) {
276	case PIPE_QUERY_TIMESTAMP_DISJOINT:
277		break;
278	case PIPE_QUERY_GPU_FINISHED:
279		sctx->b.flush(&sctx->b, &query->fence, PIPE_FLUSH_DEFERRED);
280		break;
281	case SI_QUERY_TIME_ELAPSED_SDMA_SI:
282		query->end_result = si_finish_dma_get_cpu_time(sctx);
283		break;
284	case SI_QUERY_DRAW_CALLS:
285		query->end_result = sctx->num_draw_calls;
286		break;
287	case SI_QUERY_DECOMPRESS_CALLS:
288		query->end_result = sctx->num_decompress_calls;
289		break;
290	case SI_QUERY_MRT_DRAW_CALLS:
291		query->end_result = sctx->num_mrt_draw_calls;
292		break;
293	case SI_QUERY_PRIM_RESTART_CALLS:
294		query->end_result = sctx->num_prim_restart_calls;
295		break;
296	case SI_QUERY_SPILL_DRAW_CALLS:
297		query->end_result = sctx->num_spill_draw_calls;
298		break;
299	case SI_QUERY_COMPUTE_CALLS:
300		query->end_result = sctx->num_compute_calls;
301		break;
302	case SI_QUERY_SPILL_COMPUTE_CALLS:
303		query->end_result = sctx->num_spill_compute_calls;
304		break;
305	case SI_QUERY_DMA_CALLS:
306		query->end_result = sctx->num_dma_calls;
307		break;
308	case SI_QUERY_CP_DMA_CALLS:
309		query->end_result = sctx->num_cp_dma_calls;
310		break;
311	case SI_QUERY_NUM_VS_FLUSHES:
312		query->end_result = sctx->num_vs_flushes;
313		break;
314	case SI_QUERY_NUM_PS_FLUSHES:
315		query->end_result = sctx->num_ps_flushes;
316		break;
317	case SI_QUERY_NUM_CS_FLUSHES:
318		query->end_result = sctx->num_cs_flushes;
319		break;
320	case SI_QUERY_NUM_CB_CACHE_FLUSHES:
321		query->end_result = sctx->num_cb_cache_flushes;
322		break;
323	case SI_QUERY_NUM_DB_CACHE_FLUSHES:
324		query->end_result = sctx->num_db_cache_flushes;
325		break;
326	case SI_QUERY_NUM_L2_INVALIDATES:
327		query->end_result = sctx->num_L2_invalidates;
328		break;
329	case SI_QUERY_NUM_L2_WRITEBACKS:
330		query->end_result = sctx->num_L2_writebacks;
331		break;
332	case SI_QUERY_NUM_RESIDENT_HANDLES:
333		query->end_result = sctx->num_resident_handles;
334		break;
335	case SI_QUERY_TC_OFFLOADED_SLOTS:
336		query->end_result = sctx->tc ? sctx->tc->num_offloaded_slots : 0;
337		break;
338	case SI_QUERY_TC_DIRECT_SLOTS:
339		query->end_result = sctx->tc ? sctx->tc->num_direct_slots : 0;
340		break;
341	case SI_QUERY_TC_NUM_SYNCS:
342		query->end_result = sctx->tc ? sctx->tc->num_syncs : 0;
343		break;
344	case SI_QUERY_REQUESTED_VRAM:
345	case SI_QUERY_REQUESTED_GTT:
346	case SI_QUERY_MAPPED_VRAM:
347	case SI_QUERY_MAPPED_GTT:
348	case SI_QUERY_VRAM_USAGE:
349	case SI_QUERY_VRAM_VIS_USAGE:
350	case SI_QUERY_GTT_USAGE:
351	case SI_QUERY_GPU_TEMPERATURE:
352	case SI_QUERY_CURRENT_GPU_SCLK:
353	case SI_QUERY_CURRENT_GPU_MCLK:
354	case SI_QUERY_BUFFER_WAIT_TIME:
355	case SI_QUERY_GFX_IB_SIZE:
356	case SI_QUERY_NUM_MAPPED_BUFFERS:
357	case SI_QUERY_NUM_GFX_IBS:
358	case SI_QUERY_NUM_SDMA_IBS:
359	case SI_QUERY_NUM_BYTES_MOVED:
360	case SI_QUERY_NUM_EVICTIONS:
361	case SI_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
362		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
363		query->end_result = sctx->ws->query_value(sctx->ws, ws_id);
364		break;
365	}
366	case SI_QUERY_GFX_BO_LIST_SIZE:
367		ws_id = winsys_id_from_type(query->b.type);
368		query->end_result = sctx->ws->query_value(sctx->ws, ws_id);
369		query->end_time = sctx->ws->query_value(sctx->ws,
370							RADEON_NUM_GFX_IBS);
371		break;
372	case SI_QUERY_CS_THREAD_BUSY:
373		ws_id = winsys_id_from_type(query->b.type);
374		query->end_result = sctx->ws->query_value(sctx->ws, ws_id);
375		query->end_time = os_time_get_nano();
376		break;
377	case SI_QUERY_GALLIUM_THREAD_BUSY:
378		query->end_result =
379			sctx->tc ? util_queue_get_thread_time_nano(&sctx->tc->queue, 0) : 0;
380		query->end_time = os_time_get_nano();
381		break;
382	case SI_QUERY_GPU_LOAD:
383	case SI_QUERY_GPU_SHADERS_BUSY:
384	case SI_QUERY_GPU_TA_BUSY:
385	case SI_QUERY_GPU_GDS_BUSY:
386	case SI_QUERY_GPU_VGT_BUSY:
387	case SI_QUERY_GPU_IA_BUSY:
388	case SI_QUERY_GPU_SX_BUSY:
389	case SI_QUERY_GPU_WD_BUSY:
390	case SI_QUERY_GPU_BCI_BUSY:
391	case SI_QUERY_GPU_SC_BUSY:
392	case SI_QUERY_GPU_PA_BUSY:
393	case SI_QUERY_GPU_DB_BUSY:
394	case SI_QUERY_GPU_CP_BUSY:
395	case SI_QUERY_GPU_CB_BUSY:
396	case SI_QUERY_GPU_SDMA_BUSY:
397	case SI_QUERY_GPU_PFP_BUSY:
398	case SI_QUERY_GPU_MEQ_BUSY:
399	case SI_QUERY_GPU_ME_BUSY:
400	case SI_QUERY_GPU_SURF_SYNC_BUSY:
401	case SI_QUERY_GPU_CP_DMA_BUSY:
402	case SI_QUERY_GPU_SCRATCH_RAM_BUSY:
403		query->end_result = si_end_counter(sctx->screen,
404						     query->b.type,
405						     query->begin_result);
406		query->begin_result = 0;
407		break;
408	case SI_QUERY_NUM_COMPILATIONS:
409		query->end_result = p_atomic_read(&sctx->screen->num_compilations);
410		break;
411	case SI_QUERY_NUM_SHADERS_CREATED:
412		query->end_result = p_atomic_read(&sctx->screen->num_shaders_created);
413		break;
414	case SI_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
415		query->end_result = sctx->last_tex_ps_draw_ratio;
416		break;
417	case SI_QUERY_NUM_SHADER_CACHE_HITS:
418		query->end_result =
419			p_atomic_read(&sctx->screen->num_shader_cache_hits);
420		break;
421	case SI_QUERY_GPIN_ASIC_ID:
422	case SI_QUERY_GPIN_NUM_SIMD:
423	case SI_QUERY_GPIN_NUM_RB:
424	case SI_QUERY_GPIN_NUM_SPI:
425	case SI_QUERY_GPIN_NUM_SE:
426		break;
427	default:
428		unreachable("si_query_sw_end: bad query type");
429	}
430
431	return true;
432}
433
434static bool si_query_sw_get_result(struct si_context *sctx,
435				   struct si_query *rquery,
436				   bool wait,
437				   union pipe_query_result *result)
438{
439	struct si_query_sw *query = (struct si_query_sw *)rquery;
440
441	switch (query->b.type) {
442	case PIPE_QUERY_TIMESTAMP_DISJOINT:
443		/* Convert from cycles per millisecond to cycles per second (Hz). */
444		result->timestamp_disjoint.frequency =
445			(uint64_t)sctx->screen->info.clock_crystal_freq * 1000;
446		result->timestamp_disjoint.disjoint = false;
447		return true;
448	case PIPE_QUERY_GPU_FINISHED: {
449		struct pipe_screen *screen = sctx->b.screen;
450		struct pipe_context *ctx = rquery->b.flushed ? NULL : &sctx->b;
451
452		result->b = screen->fence_finish(screen, ctx, query->fence,
453						 wait ? PIPE_TIMEOUT_INFINITE : 0);
454		return result->b;
455	}
456
457	case SI_QUERY_GFX_BO_LIST_SIZE:
458		result->u64 = (query->end_result - query->begin_result) /
459			      (query->end_time - query->begin_time);
460		return true;
461	case SI_QUERY_CS_THREAD_BUSY:
462	case SI_QUERY_GALLIUM_THREAD_BUSY:
463		result->u64 = (query->end_result - query->begin_result) * 100 /
464			      (query->end_time - query->begin_time);
465		return true;
466	case SI_QUERY_GPIN_ASIC_ID:
467		result->u32 = 0;
468		return true;
469	case SI_QUERY_GPIN_NUM_SIMD:
470		result->u32 = sctx->screen->info.num_good_compute_units;
471		return true;
472	case SI_QUERY_GPIN_NUM_RB:
473		result->u32 = sctx->screen->info.num_render_backends;
474		return true;
475	case SI_QUERY_GPIN_NUM_SPI:
476		result->u32 = 1; /* all supported chips have one SPI per SE */
477		return true;
478	case SI_QUERY_GPIN_NUM_SE:
479		result->u32 = sctx->screen->info.max_se;
480		return true;
481	}
482
483	result->u64 = query->end_result - query->begin_result;
484
485	switch (query->b.type) {
486	case SI_QUERY_BUFFER_WAIT_TIME:
487	case SI_QUERY_GPU_TEMPERATURE:
488		result->u64 /= 1000;
489		break;
490	case SI_QUERY_CURRENT_GPU_SCLK:
491	case SI_QUERY_CURRENT_GPU_MCLK:
492		result->u64 *= 1000000;
493		break;
494	}
495
496	return true;
497}
498
499
500static struct si_query_ops sw_query_ops = {
501	.destroy = si_query_sw_destroy,
502	.begin = si_query_sw_begin,
503	.end = si_query_sw_end,
504	.get_result = si_query_sw_get_result,
505	.get_result_resource = NULL
506};
507
508static struct pipe_query *si_query_sw_create(unsigned query_type)
509{
510	struct si_query_sw *query;
511
512	query = CALLOC_STRUCT(si_query_sw);
513	if (!query)
514		return NULL;
515
516	query->b.type = query_type;
517	query->b.ops = &sw_query_ops;
518
519	return (struct pipe_query *)query;
520}
521
522void si_query_hw_destroy(struct si_screen *sscreen,
523			 struct si_query *rquery)
524{
525	struct si_query_hw *query = (struct si_query_hw *)rquery;
526	struct si_query_buffer *prev = query->buffer.previous;
527
528	/* Release all query buffers. */
529	while (prev) {
530		struct si_query_buffer *qbuf = prev;
531		prev = prev->previous;
532		r600_resource_reference(&qbuf->buf, NULL);
533		FREE(qbuf);
534	}
535
536	r600_resource_reference(&query->buffer.buf, NULL);
537	r600_resource_reference(&query->workaround_buf, NULL);
538	FREE(rquery);
539}
540
541static struct r600_resource *si_new_query_buffer(struct si_screen *sscreen,
542						 struct si_query_hw *query)
543{
544	unsigned buf_size = MAX2(query->result_size,
545				 sscreen->info.min_alloc_size);
546
547	/* Queries are normally read by the CPU after
548	 * being written by the GPU, hence staging is probably a good
549	 * usage pattern.
550	 */
551	struct r600_resource *buf = r600_resource(
552		pipe_buffer_create(&sscreen->b, 0,
553				   PIPE_USAGE_STAGING, buf_size));
554	if (!buf)
555		return NULL;
556
557	if (!query->ops->prepare_buffer(sscreen, query, buf)) {
558		r600_resource_reference(&buf, NULL);
559		return NULL;
560	}
561
562	return buf;
563}
564
565static bool si_query_hw_prepare_buffer(struct si_screen *sscreen,
566				       struct si_query_hw *query,
567				       struct r600_resource *buffer)
568{
569	/* Callers ensure that the buffer is currently unused by the GPU. */
570	uint32_t *results = sscreen->ws->buffer_map(buffer->buf, NULL,
571						   PIPE_TRANSFER_WRITE |
572						   PIPE_TRANSFER_UNSYNCHRONIZED);
573	if (!results)
574		return false;
575
576	memset(results, 0, buffer->b.b.width0);
577
578	if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
579	    query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
580	    query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
581		unsigned max_rbs = sscreen->info.num_render_backends;
582		unsigned enabled_rb_mask = sscreen->info.enabled_rb_mask;
583		unsigned num_results;
584		unsigned i, j;
585
586		/* Set top bits for unused backends, so their begin/end pairs read as already written (bit 63 set) with a zero result. */
587		num_results = buffer->b.b.width0 / query->result_size;
588		for (j = 0; j < num_results; j++) {
589			for (i = 0; i < max_rbs; i++) {
590				if (!(enabled_rb_mask & (1<<i))) {
591					results[(i * 4)+1] = 0x80000000;
592					results[(i * 4)+3] = 0x80000000;
593				}
594			}
595			results += 4 * max_rbs;
596		}
597	}
598
599	return true;
600}
601
602static void si_query_hw_get_result_resource(struct si_context *sctx,
603					    struct si_query *rquery,
604					    bool wait,
605					    enum pipe_query_value_type result_type,
606					    int index,
607					    struct pipe_resource *resource,
608					    unsigned offset);
609
610static struct si_query_ops query_hw_ops = {
611	.destroy = si_query_hw_destroy,
612	.begin = si_query_hw_begin,
613	.end = si_query_hw_end,
614	.get_result = si_query_hw_get_result,
615	.get_result_resource = si_query_hw_get_result_resource,
616};
617
618static void si_query_hw_do_emit_start(struct si_context *sctx,
619				      struct si_query_hw *query,
620				      struct r600_resource *buffer,
621				      uint64_t va);
622static void si_query_hw_do_emit_stop(struct si_context *sctx,
623				     struct si_query_hw *query,
624				     struct r600_resource *buffer,
625				     uint64_t va);
626static void si_query_hw_add_result(struct si_screen *sscreen,
627				   struct si_query_hw *, void *buffer,
628				   union pipe_query_result *result);
629static void si_query_hw_clear_result(struct si_query_hw *,
630				     union pipe_query_result *);
631
632static struct si_query_hw_ops query_hw_default_hw_ops = {
633	.prepare_buffer = si_query_hw_prepare_buffer,
634	.emit_start = si_query_hw_do_emit_start,
635	.emit_stop = si_query_hw_do_emit_stop,
636	.clear_result = si_query_hw_clear_result,
637	.add_result = si_query_hw_add_result,
638};
639
640bool si_query_hw_init(struct si_screen *sscreen,
641		      struct si_query_hw *query)
642{
643	query->buffer.buf = si_new_query_buffer(sscreen, query);
644	if (!query->buffer.buf)
645		return false;
646
647	return true;
648}
649
650static struct pipe_query *si_query_hw_create(struct si_screen *sscreen,
651					     unsigned query_type,
652					     unsigned index)
653{
654	struct si_query_hw *query = CALLOC_STRUCT(si_query_hw);
655	if (!query)
656		return NULL;
657
658	query->b.type = query_type;
659	query->b.ops = &query_hw_ops;
660	query->ops = &query_hw_default_hw_ops;
661
662	switch (query_type) {
663	case PIPE_QUERY_OCCLUSION_COUNTER:
664	case PIPE_QUERY_OCCLUSION_PREDICATE:
665	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
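		/* Layout per render backend: 16 bytes = {begin zpass count, end
		 * zpass count}, 8 bytes each, followed by a 4-byte result fence
		 * (written as 0x80000000 at end of pipe) plus padding. */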
666		query->result_size = 16 * sscreen->info.num_render_backends;
667		query->result_size += 16; /* for the fence + alignment */
668		query->num_cs_dw_end = 6 + si_cp_write_fence_dwords(sscreen);
669		break;
670	case SI_QUERY_TIME_ELAPSED_SDMA:
671		/* GET_GLOBAL_TIMESTAMP only works if the offset is a multiple of 32. */
672		query->result_size = 64;
673		query->num_cs_dw_end = 0;
674		break;
675	case PIPE_QUERY_TIME_ELAPSED:
676		query->result_size = 24;
677		query->num_cs_dw_end = 8 + si_cp_write_fence_dwords(sscreen);
678		break;
679	case PIPE_QUERY_TIMESTAMP:
680		query->result_size = 16;
681		query->num_cs_dw_end = 8 + si_cp_write_fence_dwords(sscreen);
682		query->flags = SI_QUERY_HW_FLAG_NO_START;
683		break;
684	case PIPE_QUERY_PRIMITIVES_EMITTED:
685	case PIPE_QUERY_PRIMITIVES_GENERATED:
686	case PIPE_QUERY_SO_STATISTICS:
687	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
688		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
689		query->result_size = 32;
690		query->num_cs_dw_end = 6;
691		query->stream = index;
692		break;
693	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
694		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
695		query->result_size = 32 * SI_MAX_STREAMS;
696		query->num_cs_dw_end = 6 * SI_MAX_STREAMS;
697		break;
698	case PIPE_QUERY_PIPELINE_STATISTICS:
699		/* 11 values on GCN. */
700		query->result_size = 11 * 16;
701		query->result_size += 8; /* for the fence + alignment */
702		query->num_cs_dw_end = 6 + si_cp_write_fence_dwords(sscreen);
703		break;
704	default:
705		assert(0);
706		FREE(query);
707		return NULL;
708	}
709
710	if (!si_query_hw_init(sscreen, query)) {
711		FREE(query);
712		return NULL;
713	}
714
715	return (struct pipe_query *)query;
716}
717
718static void si_update_occlusion_query_state(struct si_context *sctx,
719					    unsigned type, int diff)
720{
721	if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
722	    type == PIPE_QUERY_OCCLUSION_PREDICATE ||
723	    type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
724		bool old_enable = sctx->num_occlusion_queries != 0;
725		bool old_perfect_enable =
726			sctx->num_perfect_occlusion_queries != 0;
727		bool enable, perfect_enable;
728
729		sctx->num_occlusion_queries += diff;
730		assert(sctx->num_occlusion_queries >= 0);
731
732		if (type != PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
733			sctx->num_perfect_occlusion_queries += diff;
734			assert(sctx->num_perfect_occlusion_queries >= 0);
735		}
736
737		enable = sctx->num_occlusion_queries != 0;
738		perfect_enable = sctx->num_perfect_occlusion_queries != 0;
739
740		if (enable != old_enable || perfect_enable != old_perfect_enable) {
741			si_set_occlusion_query_state(sctx, old_perfect_enable);
742		}
743	}
744}
745
746static unsigned event_type_for_stream(unsigned stream)
747{
748	switch (stream) {
749	default:
750	case 0: return V_028A90_SAMPLE_STREAMOUTSTATS;
751	case 1: return V_028A90_SAMPLE_STREAMOUTSTATS1;
752	case 2: return V_028A90_SAMPLE_STREAMOUTSTATS2;
753	case 3: return V_028A90_SAMPLE_STREAMOUTSTATS3;
754	}
755}
756
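/* Sample the streamout statistics of the given stream into memory at "va"
 * via an EVENT_WRITE packet. The event writes the pair
 * {NumPrimitivesWritten, PrimitiveStorageNeeded} as two 64-bit values. */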
757static void emit_sample_streamout(struct radeon_cmdbuf *cs, uint64_t va,
758				  unsigned stream)
759{
760	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
761	radeon_emit(cs, EVENT_TYPE(event_type_for_stream(stream)) | EVENT_INDEX(3));
762	radeon_emit(cs, va);
763	radeon_emit(cs, va >> 32);
764}
765
766static void si_query_hw_do_emit_start(struct si_context *sctx,
767					struct si_query_hw *query,
768					struct r600_resource *buffer,
769					uint64_t va)
770{
771	struct radeon_cmdbuf *cs = sctx->gfx_cs;
772
773	switch (query->b.type) {
774	case SI_QUERY_TIME_ELAPSED_SDMA:
775		si_dma_emit_timestamp(sctx, buffer, va - buffer->gpu_address);
776		return;
777	case PIPE_QUERY_OCCLUSION_COUNTER:
778	case PIPE_QUERY_OCCLUSION_PREDICATE:
779	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
780		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
781		radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
782		radeon_emit(cs, va);
783		radeon_emit(cs, va >> 32);
784		break;
785	case PIPE_QUERY_PRIMITIVES_EMITTED:
786	case PIPE_QUERY_PRIMITIVES_GENERATED:
787	case PIPE_QUERY_SO_STATISTICS:
788	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
789		emit_sample_streamout(cs, va, query->stream);
790		break;
791	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
792		for (unsigned stream = 0; stream < SI_MAX_STREAMS; ++stream)
793			emit_sample_streamout(cs, va + 32 * stream, stream);
794		break;
795	case PIPE_QUERY_TIME_ELAPSED:
796		si_cp_release_mem(sctx, V_028A90_BOTTOM_OF_PIPE_TS, 0,
797				  EOP_DST_SEL_MEM, EOP_INT_SEL_NONE,
798				  EOP_DATA_SEL_TIMESTAMP, NULL, va,
799				  0, query->b.type);
800		break;
801	case PIPE_QUERY_PIPELINE_STATISTICS:
802		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
803		radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
804		radeon_emit(cs, va);
805		radeon_emit(cs, va >> 32);
806		break;
807	default:
808		assert(0);
809	}
810	radeon_add_to_buffer_list(sctx, sctx->gfx_cs, query->buffer.buf, RADEON_USAGE_WRITE,
811				  RADEON_PRIO_QUERY);
812}
813
814static void si_query_hw_emit_start(struct si_context *sctx,
815				   struct si_query_hw *query)
816{
817	uint64_t va;
818
819	if (!query->buffer.buf)
820		return; // previous buffer allocation failure
821
822	si_update_occlusion_query_state(sctx, query->b.type, 1);
823	si_update_prims_generated_query_state(sctx, query->b.type, 1);
824
825	if (query->b.type != SI_QUERY_TIME_ELAPSED_SDMA)
826		si_need_gfx_cs_space(sctx);
827
828	/* Get a new query buffer if needed. */
829	if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
830		struct si_query_buffer *qbuf = MALLOC_STRUCT(si_query_buffer);
831		*qbuf = query->buffer;
832		query->buffer.results_end = 0;
833		query->buffer.previous = qbuf;
834		query->buffer.buf = si_new_query_buffer(sctx->screen, query);
835		if (!query->buffer.buf)
836			return;
837	}
838
839	/* emit begin query */
840	va = query->buffer.buf->gpu_address + query->buffer.results_end;
841
842	query->ops->emit_start(sctx, query, query->buffer.buf, va);
843
844	sctx->num_cs_dw_queries_suspend += query->num_cs_dw_end;
845}
846
847static void si_query_hw_do_emit_stop(struct si_context *sctx,
848				       struct si_query_hw *query,
849				       struct r600_resource *buffer,
850				       uint64_t va)
851{
852	struct radeon_cmdbuf *cs = sctx->gfx_cs;
853	uint64_t fence_va = 0;
854
855	switch (query->b.type) {
856	case SI_QUERY_TIME_ELAPSED_SDMA:
857		si_dma_emit_timestamp(sctx, buffer, va + 32 - buffer->gpu_address);
858		return;
859	case PIPE_QUERY_OCCLUSION_COUNTER:
860	case PIPE_QUERY_OCCLUSION_PREDICATE:
861	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
862		va += 8;
863		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
864		radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
865		radeon_emit(cs, va);
866		radeon_emit(cs, va >> 32);
867
868		fence_va = va + sctx->screen->info.num_render_backends * 16 - 8;
869		break;
870	case PIPE_QUERY_PRIMITIVES_EMITTED:
871	case PIPE_QUERY_PRIMITIVES_GENERATED:
872	case PIPE_QUERY_SO_STATISTICS:
873	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
874		va += 16;
875		emit_sample_streamout(cs, va, query->stream);
876		break;
877	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
878		va += 16;
879		for (unsigned stream = 0; stream < SI_MAX_STREAMS; ++stream)
880			emit_sample_streamout(cs, va + 32 * stream, stream);
881		break;
882	case PIPE_QUERY_TIME_ELAPSED:
883		va += 8;
884		/* fall through */
885	case PIPE_QUERY_TIMESTAMP:
886		si_cp_release_mem(sctx, V_028A90_BOTTOM_OF_PIPE_TS,
887				  0, EOP_DST_SEL_MEM,
888				  EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM,
889				  EOP_DATA_SEL_TIMESTAMP, NULL, va,
890				  0, query->b.type);
891		fence_va = va + 8;
892		break;
893	case PIPE_QUERY_PIPELINE_STATISTICS: {
894		unsigned sample_size = (query->result_size - 8) / 2;
895
896		va += sample_size;
897		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
898		radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
899		radeon_emit(cs, va);
900		radeon_emit(cs, va >> 32);
901
902		fence_va = va + sample_size;
903		break;
904	}
905	default:
906		assert(0);
907	}
908	radeon_add_to_buffer_list(sctx, sctx->gfx_cs, query->buffer.buf, RADEON_USAGE_WRITE,
909				  RADEON_PRIO_QUERY);
910
911	if (fence_va) {
912		si_cp_release_mem(sctx, V_028A90_BOTTOM_OF_PIPE_TS, 0,
913				  EOP_DST_SEL_MEM,
914				  EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM,
915				  EOP_DATA_SEL_VALUE_32BIT,
916				  query->buffer.buf, fence_va, 0x80000000,
917				  query->b.type);
918	}
919}
920
921static void si_query_hw_emit_stop(struct si_context *sctx,
922				  struct si_query_hw *query)
923{
924	uint64_t va;
925
926	if (!query->buffer.buf)
927		return; // previous buffer allocation failure
928
929	/* Queries that have a begin already reserved CS space when begin_query was called; only NO_START queries need to reserve it here. */
930	if (query->flags & SI_QUERY_HW_FLAG_NO_START)
931		si_need_gfx_cs_space(sctx);
932
933	/* emit end query */
934	va = query->buffer.buf->gpu_address + query->buffer.results_end;
935
936	query->ops->emit_stop(sctx, query, query->buffer.buf, va);
937
938	query->buffer.results_end += query->result_size;
939
940	if (!(query->flags & SI_QUERY_HW_FLAG_NO_START))
941		sctx->num_cs_dw_queries_suspend -= query->num_cs_dw_end;
942
943	si_update_occlusion_query_state(sctx, query->b.type, -1);
944	si_update_prims_generated_query_state(sctx, query->b.type, -1);
945}
946
947static void emit_set_predicate(struct si_context *ctx,
948			       struct r600_resource *buf, uint64_t va,
949			       uint32_t op)
950{
951	struct radeon_cmdbuf *cs = ctx->gfx_cs;
952
953	if (ctx->chip_class >= GFX9) {
954		radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 2, 0));
955		radeon_emit(cs, op);
956		radeon_emit(cs, va);
957		radeon_emit(cs, va >> 32);
958	} else {
959		radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
960		radeon_emit(cs, va);
961		radeon_emit(cs, op | ((va >> 32) & 0xFF));
962	}
963	radeon_add_to_buffer_list(ctx, ctx->gfx_cs, buf, RADEON_USAGE_READ,
964				  RADEON_PRIO_QUERY);
965}
966
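/* Emit the SET_PREDICATION packets for the current render condition.
 * One packet is emitted per result slot (and per stream for the "any stream"
 * overflow predicate); all packets after the first set the CONTINUE bit so
 * the CP combines them into a single predicate. */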
967static void si_emit_query_predication(struct si_context *ctx)
968{
969	struct si_query_hw *query = (struct si_query_hw *)ctx->render_cond;
970	struct si_query_buffer *qbuf;
971	uint32_t op;
972	bool flag_wait, invert;
973
974	if (!query)
975		return;
976
977	invert = ctx->render_cond_invert;
978	flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
979		    ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;
980
981	if (query->workaround_buf) {
982		op = PRED_OP(PREDICATION_OP_BOOL64);
983	} else {
984		switch (query->b.type) {
985		case PIPE_QUERY_OCCLUSION_COUNTER:
986		case PIPE_QUERY_OCCLUSION_PREDICATE:
987		case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
988			op = PRED_OP(PREDICATION_OP_ZPASS);
989			break;
990		case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
991		case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
992			op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
993			invert = !invert;
994			break;
995		default:
996			assert(0);
997			return;
998		}
999	}
1000
1001	/* if true then invert, see GL_ARB_conditional_render_inverted */
1002	if (invert)
1003		op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible or overflow */
1004	else
1005		op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible or no overflow */
1006
1007	/* Use the value written by the compute shader as a workaround. Note that
1008	 * the wait flag does not apply in this predication mode.
1009	 *
1010	 * The shader outputs the result value to L2. Workarounds only affect VI
1011	 * and later, where the CP reads data from L2, so we don't need an
1012	 * additional flush.
1013	 */
1014	if (query->workaround_buf) {
1015		uint64_t va = query->workaround_buf->gpu_address + query->workaround_offset;
1016		emit_set_predicate(ctx, query->workaround_buf, va, op);
1017		return;
1018	}
1019
1020	op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;
1021
1022	/* emit predicate packets for all data blocks */
1023	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
1024		unsigned results_base = 0;
1025		uint64_t va_base = qbuf->buf->gpu_address;
1026
1027		while (results_base < qbuf->results_end) {
1028			uint64_t va = va_base + results_base;
1029
1030			if (query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE) {
1031				for (unsigned stream = 0; stream < SI_MAX_STREAMS; ++stream) {
1032					emit_set_predicate(ctx, qbuf->buf, va + 32 * stream, op);
1033
1034					/* set CONTINUE bit for all packets except the first */
1035					op |= PREDICATION_CONTINUE;
1036				}
1037			} else {
1038				emit_set_predicate(ctx, qbuf->buf, va, op);
1039				op |= PREDICATION_CONTINUE;
1040			}
1041
1042			results_base += query->result_size;
1043		}
1044	}
1045}
1046
1047static struct pipe_query *si_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
1048{
1049	struct si_screen *sscreen =
1050		(struct si_screen *)ctx->screen;
1051
1052	if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
1053	    query_type == PIPE_QUERY_GPU_FINISHED ||
1054	    (query_type >= PIPE_QUERY_DRIVER_SPECIFIC &&
1055	     query_type != SI_QUERY_TIME_ELAPSED_SDMA))
1056		return si_query_sw_create(query_type);
1057
1058	return si_query_hw_create(sscreen, query_type, index);
1059}
1060
1061static void si_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
1062{
1063	struct si_context *sctx = (struct si_context *)ctx;
1064	struct si_query *rquery = (struct si_query *)query;
1065
1066	rquery->ops->destroy(sctx->screen, rquery);
1067}
1068
1069static boolean si_begin_query(struct pipe_context *ctx,
1070                                struct pipe_query *query)
1071{
1072	struct si_context *sctx = (struct si_context *)ctx;
1073	struct si_query *rquery = (struct si_query *)query;
1074
1075	return rquery->ops->begin(sctx, rquery);
1076}
1077
1078void si_query_hw_reset_buffers(struct si_context *sctx,
1079			       struct si_query_hw *query)
1080{
1081	struct si_query_buffer *prev = query->buffer.previous;
1082
1083	/* Discard the old query buffers. */
1084	while (prev) {
1085		struct si_query_buffer *qbuf = prev;
1086		prev = prev->previous;
1087		r600_resource_reference(&qbuf->buf, NULL);
1088		FREE(qbuf);
1089	}
1090
1091	query->buffer.results_end = 0;
1092	query->buffer.previous = NULL;
1093
1094	/* Obtain a new buffer if the current one can't be mapped without a stall. */
1095	if (si_rings_is_buffer_referenced(sctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
1096	    !sctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
1097		r600_resource_reference(&query->buffer.buf, NULL);
1098		query->buffer.buf = si_new_query_buffer(sctx->screen, query);
1099	} else {
1100		if (!query->ops->prepare_buffer(sctx->screen, query, query->buffer.buf))
1101			r600_resource_reference(&query->buffer.buf, NULL);
1102	}
1103}
1104
1105bool si_query_hw_begin(struct si_context *sctx,
1106		       struct si_query *rquery)
1107{
1108	struct si_query_hw *query = (struct si_query_hw *)rquery;
1109
1110	if (query->flags & SI_QUERY_HW_FLAG_NO_START) {
1111		assert(0);
1112		return false;
1113	}
1114
1115	if (!(query->flags & SI_QUERY_HW_FLAG_BEGIN_RESUMES))
1116		si_query_hw_reset_buffers(sctx, query);
1117
1118	r600_resource_reference(&query->workaround_buf, NULL);
1119
1120	si_query_hw_emit_start(sctx, query);
1121	if (!query->buffer.buf)
1122		return false;
1123
1124	LIST_ADDTAIL(&query->list, &sctx->active_queries);
1125	return true;
1126}
1127
1128static bool si_end_query(struct pipe_context *ctx, struct pipe_query *query)
1129{
1130	struct si_context *sctx = (struct si_context *)ctx;
1131	struct si_query *rquery = (struct si_query *)query;
1132
1133	return rquery->ops->end(sctx, rquery);
1134}
1135
1136bool si_query_hw_end(struct si_context *sctx,
1137		     struct si_query *rquery)
1138{
1139	struct si_query_hw *query = (struct si_query_hw *)rquery;
1140
1141	if (query->flags & SI_QUERY_HW_FLAG_NO_START)
1142		si_query_hw_reset_buffers(sctx, query);
1143
1144	si_query_hw_emit_stop(sctx, query);
1145
1146	if (!(query->flags & SI_QUERY_HW_FLAG_NO_START))
1147		LIST_DELINIT(&query->list);
1148
1149	if (!query->buffer.buf)
1150		return false;
1151
1152	return true;
1153}
1154
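/* Compute the byte offsets of the begin value, end value and fence within
 * one result slot for the given query type and result index. */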
1155static void si_get_hw_query_params(struct si_context *sctx,
1156				   struct si_query_hw *rquery, int index,
1157				   struct si_hw_query_params *params)
1158{
1159	unsigned max_rbs = sctx->screen->info.num_render_backends;
1160
1161	params->pair_stride = 0;
1162	params->pair_count = 1;
1163
1164	switch (rquery->b.type) {
1165	case PIPE_QUERY_OCCLUSION_COUNTER:
1166	case PIPE_QUERY_OCCLUSION_PREDICATE:
1167	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
1168		params->start_offset = 0;
1169		params->end_offset = 8;
1170		params->fence_offset = max_rbs * 16;
1171		params->pair_stride = 16;
1172		params->pair_count = max_rbs;
1173		break;
1174	case PIPE_QUERY_TIME_ELAPSED:
1175		params->start_offset = 0;
1176		params->end_offset = 8;
1177		params->fence_offset = 16;
1178		break;
1179	case PIPE_QUERY_TIMESTAMP:
1180		params->start_offset = 0;
1181		params->end_offset = 0;
1182		params->fence_offset = 8;
1183		break;
1184	case PIPE_QUERY_PRIMITIVES_EMITTED:
1185		params->start_offset = 8;
1186		params->end_offset = 24;
1187		params->fence_offset = params->end_offset + 4;
1188		break;
1189	case PIPE_QUERY_PRIMITIVES_GENERATED:
1190		params->start_offset = 0;
1191		params->end_offset = 16;
1192		params->fence_offset = params->end_offset + 4;
1193		break;
1194	case PIPE_QUERY_SO_STATISTICS:
1195		params->start_offset = 8 - index * 8;
1196		params->end_offset = 24 - index * 8;
1197		params->fence_offset = params->end_offset + 4;
1198		break;
1199	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
1200		params->pair_count = SI_MAX_STREAMS;
1201		params->pair_stride = 32; /* fall through */
1202	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
1203		params->start_offset = 0;
1204		params->end_offset = 16;
1205
1206		/* We can re-use the high dword of the last 64-bit value as a
1207		 * fence: it is initialized as 0, and the high bit is set by
1208		 * the write of the streamout stats event.
1209		 */
1210		params->fence_offset = rquery->result_size - 4;
1211		break;
1212	case PIPE_QUERY_PIPELINE_STATISTICS:
1213	{
1214		static const unsigned offsets[] = {56, 48, 24, 32, 40, 16, 8, 0, 64, 72, 80};
1215		params->start_offset = offsets[index];
1216		params->end_offset = 88 + offsets[index];
1217		params->fence_offset = 2 * 88;
1218		break;
1219	}
1220	default:
1221		unreachable("si_get_hw_query_params unsupported");
1222	}
1223}
1224
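/* Read one begin/end pair of 64-bit counters from a mapped result buffer
 * and return the difference. With test_status_bit, the pair only counts
 * when bit 63 of both values is set, i.e. when the GPU has written both. */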
1225static unsigned si_query_read_result(void *map, unsigned start_index, unsigned end_index,
1226				     bool test_status_bit)
1227{
1228	uint32_t *current_result = (uint32_t*)map;
1229	uint64_t start, end;
1230
1231	start = (uint64_t)current_result[start_index] |
1232		(uint64_t)current_result[start_index+1] << 32;
1233	end = (uint64_t)current_result[end_index] |
1234	      (uint64_t)current_result[end_index+1] << 32;
1235
1236	if (!test_status_bit ||
1237	    ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
1238		return end - start;
1239	}
1240	return 0;
1241}
1242
1243static void si_query_hw_add_result(struct si_screen *sscreen,
1244				     struct si_query_hw *query,
1245				     void *buffer,
1246				     union pipe_query_result *result)
1247{
1248	unsigned max_rbs = sscreen->info.num_render_backends;
1249
1250	switch (query->b.type) {
1251	case PIPE_QUERY_OCCLUSION_COUNTER: {
1252		for (unsigned i = 0; i < max_rbs; ++i) {
1253			unsigned results_base = i * 16;
1254			result->u64 +=
1255				si_query_read_result(buffer + results_base, 0, 2, true);
1256		}
1257		break;
1258	}
1259	case PIPE_QUERY_OCCLUSION_PREDICATE:
1260	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE: {
1261		for (unsigned i = 0; i < max_rbs; ++i) {
1262			unsigned results_base = i * 16;
1263			result->b = result->b ||
1264				si_query_read_result(buffer + results_base, 0, 2, true) != 0;
1265		}
1266		break;
1267	}
1268	case PIPE_QUERY_TIME_ELAPSED:
1269		result->u64 += si_query_read_result(buffer, 0, 2, false);
1270		break;
1271	case SI_QUERY_TIME_ELAPSED_SDMA:
1272		result->u64 += si_query_read_result(buffer, 0, 32/4, false);
1273		break;
1274	case PIPE_QUERY_TIMESTAMP:
1275		result->u64 = *(uint64_t*)buffer;
1276		break;
1277	case PIPE_QUERY_PRIMITIVES_EMITTED:
1278		/* SAMPLE_STREAMOUTSTATS stores this structure:
1279		 * {
1280		 *    u64 NumPrimitivesWritten;
1281		 *    u64 PrimitiveStorageNeeded;
1282		 * }
1283		 * We only need NumPrimitivesWritten here. */
1284		result->u64 += si_query_read_result(buffer, 2, 6, true);
1285		break;
1286	case PIPE_QUERY_PRIMITIVES_GENERATED:
1287		/* Here we read PrimitiveStorageNeeded. */
1288		result->u64 += si_query_read_result(buffer, 0, 4, true);
1289		break;
1290	case PIPE_QUERY_SO_STATISTICS:
1291		result->so_statistics.num_primitives_written +=
1292			si_query_read_result(buffer, 2, 6, true);
1293		result->so_statistics.primitives_storage_needed +=
1294			si_query_read_result(buffer, 0, 4, true);
1295		break;
1296	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
1297		result->b = result->b ||
1298			si_query_read_result(buffer, 2, 6, true) !=
1299			si_query_read_result(buffer, 0, 4, true);
1300		break;
1301	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
1302		for (unsigned stream = 0; stream < SI_MAX_STREAMS; ++stream) {
1303			result->b = result->b ||
1304				si_query_read_result(buffer, 2, 6, true) !=
1305				si_query_read_result(buffer, 0, 4, true);
1306			buffer = (char *)buffer + 32;
1307		}
1308		break;
1309	case PIPE_QUERY_PIPELINE_STATISTICS:
1310		result->pipeline_statistics.ps_invocations +=
1311			si_query_read_result(buffer, 0, 22, false);
1312		result->pipeline_statistics.c_primitives +=
1313			si_query_read_result(buffer, 2, 24, false);
1314		result->pipeline_statistics.c_invocations +=
1315			si_query_read_result(buffer, 4, 26, false);
1316		result->pipeline_statistics.vs_invocations +=
1317			si_query_read_result(buffer, 6, 28, false);
1318		result->pipeline_statistics.gs_invocations +=
1319			si_query_read_result(buffer, 8, 30, false);
1320		result->pipeline_statistics.gs_primitives +=
1321			si_query_read_result(buffer, 10, 32, false);
1322		result->pipeline_statistics.ia_primitives +=
1323			si_query_read_result(buffer, 12, 34, false);
1324		result->pipeline_statistics.ia_vertices +=
1325			si_query_read_result(buffer, 14, 36, false);
1326		result->pipeline_statistics.hs_invocations +=
1327			si_query_read_result(buffer, 16, 38, false);
1328		result->pipeline_statistics.ds_invocations +=
1329			si_query_read_result(buffer, 18, 40, false);
1330		result->pipeline_statistics.cs_invocations +=
1331			si_query_read_result(buffer, 20, 42, false);
1332#if 0 /* for testing */
1333		printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
1334		       "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
1335		       "Clipper prims=%llu, PS=%llu, CS=%llu\n",
1336		       result->pipeline_statistics.ia_vertices,
1337		       result->pipeline_statistics.ia_primitives,
1338		       result->pipeline_statistics.vs_invocations,
1339		       result->pipeline_statistics.hs_invocations,
1340		       result->pipeline_statistics.ds_invocations,
1341		       result->pipeline_statistics.gs_invocations,
1342		       result->pipeline_statistics.gs_primitives,
1343		       result->pipeline_statistics.c_invocations,
1344		       result->pipeline_statistics.c_primitives,
1345		       result->pipeline_statistics.ps_invocations,
1346		       result->pipeline_statistics.cs_invocations);
1347#endif
1348		break;
1349	default:
1350		assert(0);
1351	}
1352}
1353
1354static boolean si_get_query_result(struct pipe_context *ctx,
1355				   struct pipe_query *query, boolean wait,
1356				   union pipe_query_result *result)
1357{
1358	struct si_context *sctx = (struct si_context *)ctx;
1359	struct si_query *rquery = (struct si_query *)query;
1360
1361	return rquery->ops->get_result(sctx, rquery, wait, result);
1362}
1363
1364static void si_get_query_result_resource(struct pipe_context *ctx,
1365					 struct pipe_query *query,
1366					 boolean wait,
1367					 enum pipe_query_value_type result_type,
1368					 int index,
1369					 struct pipe_resource *resource,
1370					 unsigned offset)
1371{
1372	struct si_context *sctx = (struct si_context *)ctx;
1373	struct si_query *rquery = (struct si_query *)query;
1374
1375	rquery->ops->get_result_resource(sctx, rquery, wait, result_type, index,
1376	                                 resource, offset);
1377}
1378
1379static void si_query_hw_clear_result(struct si_query_hw *query,
1380				       union pipe_query_result *result)
1381{
1382	util_query_clear_result(result, query->b.type);
1383}
1384
1385bool si_query_hw_get_result(struct si_context *sctx,
1386			    struct si_query *rquery,
1387			    bool wait, union pipe_query_result *result)
1388{
1389	struct si_screen *sscreen = sctx->screen;
1390	struct si_query_hw *query = (struct si_query_hw *)rquery;
1391	struct si_query_buffer *qbuf;
1392
1393	query->ops->clear_result(query, result);
1394
1395	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
1396		unsigned usage = PIPE_TRANSFER_READ |
1397				 (wait ? 0 : PIPE_TRANSFER_DONTBLOCK);
1398		unsigned results_base = 0;
1399		void *map;
1400
1401		if (rquery->b.flushed)
1402			map = sctx->ws->buffer_map(qbuf->buf->buf, NULL, usage);
1403		else
1404			map = si_buffer_map_sync_with_rings(sctx, qbuf->buf, usage);
1405
1406		if (!map)
1407			return false;
1408
1409		while (results_base != qbuf->results_end) {
1410			query->ops->add_result(sscreen, query, map + results_base,
1411					       result);
1412			results_base += query->result_size;
1413		}
1414	}
1415
1416	/* Convert the time to expected units. */
1417	if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
1418	    rquery->type == SI_QUERY_TIME_ELAPSED_SDMA ||
1419	    rquery->type == PIPE_QUERY_TIMESTAMP) {
1420		result->u64 = (1000000 * result->u64) / sscreen->info.clock_crystal_freq;
1421	}
1422	return true;
1423}
1424
1425static void si_restore_qbo_state(struct si_context *sctx,
1426				 struct si_qbo_state *st)
1427{
1428	sctx->b.bind_compute_state(&sctx->b, st->saved_compute);
1429
1430	sctx->b.set_constant_buffer(&sctx->b, PIPE_SHADER_COMPUTE, 0, &st->saved_const0);
1431	pipe_resource_reference(&st->saved_const0.buffer, NULL);
1432
1433	sctx->b.set_shader_buffers(&sctx->b, PIPE_SHADER_COMPUTE, 0, 3, st->saved_ssbo);
1434	for (unsigned i = 0; i < 3; ++i)
1435		pipe_resource_reference(&st->saved_ssbo[i].buffer, NULL);
1436}
1437
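/* Write a query result directly into a GPU buffer by dispatching the
 * query-result compute shader over the raw result slots, optionally waiting
 * on the last result fence first, so the CPU never maps the results. */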
1438static void si_query_hw_get_result_resource(struct si_context *sctx,
1439                                              struct si_query *rquery,
1440                                              bool wait,
1441                                              enum pipe_query_value_type result_type,
1442                                              int index,
1443                                              struct pipe_resource *resource,
1444                                              unsigned offset)
1445{
1446	struct si_query_hw *query = (struct si_query_hw *)rquery;
1447	struct si_query_buffer *qbuf;
1448	struct si_query_buffer *qbuf_prev;
1449	struct pipe_resource *tmp_buffer = NULL;
1450	unsigned tmp_buffer_offset = 0;
1451	struct si_qbo_state saved_state = {};
1452	struct pipe_grid_info grid = {};
1453	struct pipe_constant_buffer constant_buffer = {};
1454	struct pipe_shader_buffer ssbo[3];
1455	struct si_hw_query_params params;
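	/* Constants consumed by the query-result compute shader; uploaded as
	 * constant buffer 0 right before each grid launch below. */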
1456	struct {
1457		uint32_t end_offset;
1458		uint32_t result_stride;
1459		uint32_t result_count;
1460		uint32_t config;
1461		uint32_t fence_offset;
1462		uint32_t pair_stride;
1463		uint32_t pair_count;
1464	} consts;
1465
1466	if (!sctx->query_result_shader) {
1467		sctx->query_result_shader = si_create_query_result_cs(sctx);
1468		if (!sctx->query_result_shader)
1469			return;
1470	}
1471
1472	if (query->buffer.previous) {
1473		u_suballocator_alloc(sctx->allocator_zeroed_memory, 16, 16,
1474				     &tmp_buffer_offset, &tmp_buffer);
1475		if (!tmp_buffer)
1476			return;
1477	}
1478
1479	si_save_qbo_state(sctx, &saved_state);
1480
1481	si_get_hw_query_params(sctx, query, index >= 0 ? index : 0, &params);
1482	consts.end_offset = params.end_offset - params.start_offset;
1483	consts.fence_offset = params.fence_offset - params.start_offset;
1484	consts.result_stride = query->result_size;
1485	consts.pair_stride = params.pair_stride;
1486	consts.pair_count = params.pair_count;
1487
1488	constant_buffer.buffer_size = sizeof(consts);
1489	constant_buffer.user_buffer = &consts;
1490
1491	ssbo[1].buffer = tmp_buffer;
1492	ssbo[1].buffer_offset = tmp_buffer_offset;
1493	ssbo[1].buffer_size = 16;
1494
1495	ssbo[2] = ssbo[1];
1496
1497	sctx->b.bind_compute_state(&sctx->b, sctx->query_result_shader);
1498
1499	grid.block[0] = 1;
1500	grid.block[1] = 1;
1501	grid.block[2] = 1;
1502	grid.grid[0] = 1;
1503	grid.grid[1] = 1;
1504	grid.grid[2] = 1;
1505
1506	consts.config = 0;
1507	if (index < 0)
1508		consts.config |= 4;
1509	if (query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
1510	    query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE)
1511		consts.config |= 8;
1512	else if (query->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE ||
1513		 query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE)
1514		consts.config |= 8 | 256;
1515	else if (query->b.type == PIPE_QUERY_TIMESTAMP ||
1516		 query->b.type == PIPE_QUERY_TIME_ELAPSED)
1517		consts.config |= 32;
1518
1519	switch (result_type) {
1520	case PIPE_QUERY_TYPE_U64:
1521	case PIPE_QUERY_TYPE_I64:
1522		consts.config |= 64;
1523		break;
1524	case PIPE_QUERY_TYPE_I32:
1525		consts.config |= 128;
1526		break;
1527	case PIPE_QUERY_TYPE_U32:
1528		break;
1529	}
1530
1531	sctx->flags |= sctx->screen->barrier_flags.cp_to_L2;
1532
1533	for (qbuf = &query->buffer; qbuf; qbuf = qbuf_prev) {
1534		if (query->b.type != PIPE_QUERY_TIMESTAMP) {
1535			qbuf_prev = qbuf->previous;
1536			consts.result_count = qbuf->results_end / query->result_size;
1537			consts.config &= ~3;
1538			if (qbuf != &query->buffer)
1539				consts.config |= 1;
1540			if (qbuf->previous)
1541				consts.config |= 2;
1542		} else {
1543			/* Only read the last timestamp. */
1544			qbuf_prev = NULL;
1545			consts.result_count = 0;
1546			consts.config |= 16;
1547			params.start_offset += qbuf->results_end - query->result_size;
1548		}
1549
1550		sctx->b.set_constant_buffer(&sctx->b, PIPE_SHADER_COMPUTE, 0, &constant_buffer);
1551
1552		ssbo[0].buffer = &qbuf->buf->b.b;
1553		ssbo[0].buffer_offset = params.start_offset;
1554		ssbo[0].buffer_size = qbuf->results_end - params.start_offset;
1555
1556		if (!qbuf->previous) {
1557			ssbo[2].buffer = resource;
1558			ssbo[2].buffer_offset = offset;
1559			ssbo[2].buffer_size = 8;
1560
1561			r600_resource(resource)->TC_L2_dirty = true;
1562		}
1563
1564		sctx->b.set_shader_buffers(&sctx->b, PIPE_SHADER_COMPUTE, 0, 3, ssbo);
1565
1566		if (wait && qbuf == &query->buffer) {
1567			uint64_t va;
1568
1569			/* Wait for result availability. Wait only for readiness
1570			 * of the last entry, since the fence writes should be
1571			 * serialized in the CP.
1572			 */
1573			va = qbuf->buf->gpu_address + qbuf->results_end - query->result_size;
1574			va += params.fence_offset;
1575
1576			si_cp_wait_mem(sctx, va, 0x80000000, 0x80000000, 0);
1577		}
1578
1579		sctx->b.launch_grid(&sctx->b, &grid);
1580		sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
1581	}
1582
1583	si_restore_qbo_state(sctx, &saved_state);
1584	pipe_resource_reference(&tmp_buffer, NULL);
1585}
1586
1587static void si_render_condition(struct pipe_context *ctx,
1588				struct pipe_query *query,
1589				boolean condition,
1590				enum pipe_render_cond_flag mode)
1591{
1592	struct si_context *sctx = (struct si_context *)ctx;
1593	struct si_query_hw *rquery = (struct si_query_hw *)query;
1594	struct si_atom *atom = &sctx->atoms.s.render_cond;
1595
1596	if (query) {
1597		bool needs_workaround = false;
1598
1599		/* There was a firmware regression in VI which causes successive
1600		 * SET_PREDICATION packets to give the wrong answer for
1601		 * non-inverted stream overflow predication.
1602		 */
1603		if (((sctx->chip_class == VI && sctx->screen->info.pfp_fw_feature < 49) ||
1604		     (sctx->chip_class == GFX9 && sctx->screen->info.pfp_fw_feature < 38)) &&
1605		    !condition &&
1606		    (rquery->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE ||
1607		     (rquery->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE &&
1608		      (rquery->buffer.previous ||
1609		       rquery->buffer.results_end > rquery->result_size)))) {
1610			needs_workaround = true;
1611		}
1612
1613		if (needs_workaround && !rquery->workaround_buf) {
1614			bool old_force_off = sctx->render_cond_force_off;
1615			sctx->render_cond_force_off = true;
1616
1617			u_suballocator_alloc(
1618				sctx->allocator_zeroed_memory, 8, 8,
1619				&rquery->workaround_offset,
1620				(struct pipe_resource **)&rquery->workaround_buf);
1621
1622			/* Reset to NULL so that launching the compute grid
1623			 * does not emit a redundant SET_PREDICATION packet.
1624			 */
1625			sctx->render_cond = NULL;
1626
1627			ctx->get_query_result_resource(
1628				ctx, query, true, PIPE_QUERY_TYPE_U64, 0,
1629				&rquery->workaround_buf->b.b, rquery->workaround_offset);
1630
1631			/* Setting this in the render cond atom is too late,
1632			 * so set it here. */
1633			sctx->flags |= sctx->screen->barrier_flags.L2_to_cp |
1634				       SI_CONTEXT_FLUSH_FOR_RENDER_COND;
1635
1636			sctx->render_cond_force_off = old_force_off;
1637		}
1638	}
1639
1640	sctx->render_cond = query;
1641	sctx->render_cond_invert = condition;
1642	sctx->render_cond_mode = mode;
1643
1644	si_set_atom_dirty(sctx, atom, query != NULL);
1645}
1646
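/* Emit stop for all active queries without removing them from the active
 * list, so that si_resume_queries() can re-emit their starts in the next
 * gfx IB (e.g. around a flush). */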
1647void si_suspend_queries(struct si_context *sctx)
1648{
1649	struct si_query_hw *query;
1650
1651	LIST_FOR_EACH_ENTRY(query, &sctx->active_queries, list) {
1652		si_query_hw_emit_stop(sctx, query);
1653	}
1654	assert(sctx->num_cs_dw_queries_suspend == 0);
1655}
1656
1657void si_resume_queries(struct si_context *sctx)
1658{
1659	struct si_query_hw *query;
1660
1661	assert(sctx->num_cs_dw_queries_suspend == 0);
1662
1663	/* Check CS space here. Resuming must not be interrupted by flushes. */
1664	si_need_gfx_cs_space(sctx);
1665
1666	LIST_FOR_EACH_ENTRY(query, &sctx->active_queries, list) {
1667		si_query_hw_emit_start(sctx, query);
1668	}
1669}
1670
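/* Helper macros for declaring entries of the driver-specific query list
 * (si_driver_query_list) below. */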
1671#define XFULL(name_, query_type_, type_, result_type_, group_id_) \
1672	{ \
1673		.name = name_, \
1674		.query_type = SI_QUERY_##query_type_, \
1675		.type = PIPE_DRIVER_QUERY_TYPE_##type_, \
1676		.result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
1677		.group_id = group_id_ \
1678	}
1679
1680#define X(name_, query_type_, type_, result_type_) \
1681	XFULL(name_, query_type_, type_, result_type_, ~(unsigned)0)
1682
1683#define XG(group_, name_, query_type_, type_, result_type_) \
1684	XFULL(name_, query_type_, type_, result_type_, SI_QUERY_GROUP_##group_)
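/* For example, X("draw-calls", DRAW_CALLS, UINT64, AVERAGE) expands to
 * { .name = "draw-calls", .query_type = SI_QUERY_DRAW_CALLS,
 *   .type = PIPE_DRIVER_QUERY_TYPE_UINT64,
 *   .result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_AVERAGE,
 *   .group_id = ~(unsigned)0 }, i.e. an entry that belongs to no group. */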
1685
1686static struct pipe_driver_query_info si_driver_query_list[] = {
1687	X("num-compilations",		NUM_COMPILATIONS,	UINT64, CUMULATIVE),
1688	X("num-shaders-created",	NUM_SHADERS_CREATED,	UINT64, CUMULATIVE),
1689	X("num-shader-cache-hits",	NUM_SHADER_CACHE_HITS,	UINT64, CUMULATIVE),
1690	X("draw-calls",			DRAW_CALLS,		UINT64, AVERAGE),
1691	X("decompress-calls",		DECOMPRESS_CALLS,	UINT64, AVERAGE),
1692	X("MRT-draw-calls",		MRT_DRAW_CALLS,		UINT64, AVERAGE),
1693	X("prim-restart-calls",		PRIM_RESTART_CALLS,	UINT64, AVERAGE),
1694	X("spill-draw-calls",		SPILL_DRAW_CALLS,	UINT64, AVERAGE),
1695	X("compute-calls",		COMPUTE_CALLS,		UINT64, AVERAGE),
1696	X("spill-compute-calls",	SPILL_COMPUTE_CALLS,	UINT64, AVERAGE),
1697	X("dma-calls",			DMA_CALLS,		UINT64, AVERAGE),
1698	X("cp-dma-calls",		CP_DMA_CALLS,		UINT64, AVERAGE),
1699	X("num-vs-flushes",		NUM_VS_FLUSHES,		UINT64, AVERAGE),
1700	X("num-ps-flushes",		NUM_PS_FLUSHES,		UINT64, AVERAGE),
1701	X("num-cs-flushes",		NUM_CS_FLUSHES,		UINT64, AVERAGE),
1702	X("num-CB-cache-flushes",	NUM_CB_CACHE_FLUSHES,	UINT64, AVERAGE),
1703	X("num-DB-cache-flushes",	NUM_DB_CACHE_FLUSHES,	UINT64, AVERAGE),
1704	X("num-L2-invalidates",		NUM_L2_INVALIDATES,	UINT64, AVERAGE),
1705	X("num-L2-writebacks",		NUM_L2_WRITEBACKS,	UINT64, AVERAGE),
1706	X("num-resident-handles",	NUM_RESIDENT_HANDLES,	UINT64, AVERAGE),
1707	X("tc-offloaded-slots",		TC_OFFLOADED_SLOTS,     UINT64, AVERAGE),
1708	X("tc-direct-slots",		TC_DIRECT_SLOTS,	UINT64, AVERAGE),
1709	X("tc-num-syncs",		TC_NUM_SYNCS,		UINT64, AVERAGE),
1710	X("CS-thread-busy",		CS_THREAD_BUSY,		UINT64, AVERAGE),
1711	X("gallium-thread-busy",	GALLIUM_THREAD_BUSY,	UINT64, AVERAGE),
1712	X("requested-VRAM",		REQUESTED_VRAM,		BYTES, AVERAGE),
1713	X("requested-GTT",		REQUESTED_GTT,		BYTES, AVERAGE),
1714	X("mapped-VRAM",		MAPPED_VRAM,		BYTES, AVERAGE),
1715	X("mapped-GTT",			MAPPED_GTT,		BYTES, AVERAGE),
1716	X("buffer-wait-time",		BUFFER_WAIT_TIME,	MICROSECONDS, CUMULATIVE),
1717	X("num-mapped-buffers",		NUM_MAPPED_BUFFERS,	UINT64, AVERAGE),
1718	X("num-GFX-IBs",		NUM_GFX_IBS,		UINT64, AVERAGE),
1719	X("num-SDMA-IBs",		NUM_SDMA_IBS,		UINT64, AVERAGE),
1720	X("GFX-BO-list-size",		GFX_BO_LIST_SIZE,	UINT64, AVERAGE),
1721	X("GFX-IB-size",		GFX_IB_SIZE,		UINT64, AVERAGE),
1722	X("num-bytes-moved",		NUM_BYTES_MOVED,	BYTES, CUMULATIVE),
1723	X("num-evictions",		NUM_EVICTIONS,		UINT64, CUMULATIVE),
1724	X("VRAM-CPU-page-faults",	NUM_VRAM_CPU_PAGE_FAULTS, UINT64, CUMULATIVE),
1725	X("VRAM-usage",			VRAM_USAGE,		BYTES, AVERAGE),
1726	X("VRAM-vis-usage",		VRAM_VIS_USAGE,		BYTES, AVERAGE),
1727	X("GTT-usage",			GTT_USAGE,		BYTES, AVERAGE),
1728	X("back-buffer-ps-draw-ratio",	BACK_BUFFER_PS_DRAW_RATIO, UINT64, AVERAGE),
1729
1730	/* GPIN queries are for the benefit of old versions of GPUPerfStudio,
1731	 * which use them as a fallback path to detect the GPU type.
1732	 *
1733	 * Note: The names of these queries are significant for GPUPerfStudio
1734	 * (and possibly their order as well). */
1735	XG(GPIN, "GPIN_000",		GPIN_ASIC_ID,		UINT, AVERAGE),
1736	XG(GPIN, "GPIN_001",		GPIN_NUM_SIMD,		UINT, AVERAGE),
1737	XG(GPIN, "GPIN_002",		GPIN_NUM_RB,		UINT, AVERAGE),
1738	XG(GPIN, "GPIN_003",		GPIN_NUM_SPI,		UINT, AVERAGE),
1739	XG(GPIN, "GPIN_004",		GPIN_NUM_SE,		UINT, AVERAGE),
1740
1741	X("temperature",		GPU_TEMPERATURE,	UINT64, AVERAGE),
1742	X("shader-clock",		CURRENT_GPU_SCLK,	HZ, AVERAGE),
1743	X("memory-clock",		CURRENT_GPU_MCLK,	HZ, AVERAGE),
1744
1745	/* The following queries must be at the end of the list because their
1746	 * availability is adjusted dynamically based on the DRM version. */
1747	X("GPU-load",			GPU_LOAD,		UINT64, AVERAGE),
1748	X("GPU-shaders-busy",		GPU_SHADERS_BUSY,	UINT64, AVERAGE),
1749	X("GPU-ta-busy",		GPU_TA_BUSY,		UINT64, AVERAGE),
1750	X("GPU-gds-busy",		GPU_GDS_BUSY,		UINT64, AVERAGE),
1751	X("GPU-vgt-busy",		GPU_VGT_BUSY,		UINT64, AVERAGE),
1752	X("GPU-ia-busy",		GPU_IA_BUSY,		UINT64, AVERAGE),
1753	X("GPU-sx-busy",		GPU_SX_BUSY,		UINT64, AVERAGE),
1754	X("GPU-wd-busy",		GPU_WD_BUSY,		UINT64, AVERAGE),
1755	X("GPU-bci-busy",		GPU_BCI_BUSY,		UINT64, AVERAGE),
1756	X("GPU-sc-busy",		GPU_SC_BUSY,		UINT64, AVERAGE),
1757	X("GPU-pa-busy",		GPU_PA_BUSY,		UINT64, AVERAGE),
1758	X("GPU-db-busy",		GPU_DB_BUSY,		UINT64, AVERAGE),
1759	X("GPU-cp-busy",		GPU_CP_BUSY,		UINT64, AVERAGE),
1760	X("GPU-cb-busy",		GPU_CB_BUSY,		UINT64, AVERAGE),
1761
1762	/* SRBM_STATUS2 */
1763	X("GPU-sdma-busy",		GPU_SDMA_BUSY,		UINT64, AVERAGE),
1764
1765	/* CP_STAT */
1766	X("GPU-pfp-busy",		GPU_PFP_BUSY,		UINT64, AVERAGE),
1767	X("GPU-meq-busy",		GPU_MEQ_BUSY,		UINT64, AVERAGE),
1768	X("GPU-me-busy",		GPU_ME_BUSY,		UINT64, AVERAGE),
1769	X("GPU-surf-sync-busy",		GPU_SURF_SYNC_BUSY,	UINT64, AVERAGE),
1770	X("GPU-cp-dma-busy",		GPU_CP_DMA_BUSY,	UINT64, AVERAGE),
1771	X("GPU-scratch-ram-busy",	GPU_SCRATCH_RAM_BUSY,	UINT64, AVERAGE),
1772};
1773
1774#undef X
1775#undef XG
1776#undef XFULL
1777
1778static unsigned si_get_num_queries(struct si_screen *sscreen)
1779{
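	/* The subtracted counts match the register-based entries at the tail of
	 * si_driver_query_list: 6 CP_STAT counters, 1 SRBM (SDMA) counter and
	 * 14 GRBM counters (21 in total). Kernels without register reads lose all
	 * 21; otherwise only the CP_STAT (6) or CP_STAT + SDMA (7) entries are
	 * dropped. */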
1780	/* amdgpu */
1781	if (sscreen->info.drm_major == 3) {
1782		if (sscreen->info.chip_class >= VI)
1783			return ARRAY_SIZE(si_driver_query_list);
1784		else
1785			return ARRAY_SIZE(si_driver_query_list) - 7;
1786	}
1787
1788	/* radeon */
1789	if (sscreen->info.has_read_registers_query) {
1790		if (sscreen->info.chip_class == CIK)
1791			return ARRAY_SIZE(si_driver_query_list) - 6;
1792		else
1793			return ARRAY_SIZE(si_driver_query_list) - 7;
1794	}
1795
1796	return ARRAY_SIZE(si_driver_query_list) - 21;
1797}
1798
1799static int si_get_driver_query_info(struct pipe_screen *screen,
1800				    unsigned index,
1801				    struct pipe_driver_query_info *info)
1802{
1803	struct si_screen *sscreen = (struct si_screen*)screen;
1804	unsigned num_queries = si_get_num_queries(sscreen);
1805
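	/* A NULL info pointer is the Gallium convention for asking only for the
	 * total number of driver queries (software queries plus perf counters). */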
1806	if (!info) {
1807		unsigned num_perfcounters =
1808			si_get_perfcounter_info(sscreen, 0, NULL);
1809
1810		return num_queries + num_perfcounters;
1811	}
1812
1813	if (index >= num_queries)
1814		return si_get_perfcounter_info(sscreen, index - num_queries, info);
1815
1816	*info = si_driver_query_list[index];
1817
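	/* Give the HUD a sensible upper bound for graphing the memory and
	 * temperature queries. */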
1818	switch (info->query_type) {
1819	case SI_QUERY_REQUESTED_VRAM:
1820	case SI_QUERY_VRAM_USAGE:
1821	case SI_QUERY_MAPPED_VRAM:
1822		info->max_value.u64 = sscreen->info.vram_size;
1823		break;
1824	case SI_QUERY_REQUESTED_GTT:
1825	case SI_QUERY_GTT_USAGE:
1826	case SI_QUERY_MAPPED_GTT:
1827		info->max_value.u64 = sscreen->info.gart_size;
1828		break;
1829	case SI_QUERY_GPU_TEMPERATURE:
1830		info->max_value.u64 = 125;
1831		break;
1832	case SI_QUERY_VRAM_VIS_USAGE:
1833		info->max_value.u64 = sscreen->info.vram_vis_size;
1834		break;
1835	}
1836
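	/* Perf counter groups are enumerated first, so shift the software query
	 * group ids past them. */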
1837	if (info->group_id != ~(unsigned)0 && sscreen->perfcounters)
1838		info->group_id += sscreen->perfcounters->num_groups;
1839
1840	return 1;
1841}
1842
1843/* Note: Unfortunately, GPUPerfStudio hardcodes the order of hardware
1844 * performance counter groups, so be careful when changing this and related
1845 * functions.
1846 */
1847static int si_get_driver_query_group_info(struct pipe_screen *screen,
1848					  unsigned index,
1849					  struct pipe_driver_query_group_info *info)
1850{
1851	struct si_screen *sscreen = (struct si_screen *)screen;
1852	unsigned num_pc_groups = 0;
1853
1854	if (sscreen->perfcounters)
1855		num_pc_groups = sscreen->perfcounters->num_groups;
1856
1857	if (!info)
1858		return num_pc_groups + SI_NUM_SW_QUERY_GROUPS;
1859
1860	if (index < num_pc_groups)
1861		return si_get_perfcounter_group_info(sscreen, index, info);
1862
1863	index -= num_pc_groups;
1864	if (index >= SI_NUM_SW_QUERY_GROUPS)
1865		return 0;
1866
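	/* The only software query group is GPIN; its five queries are the
	 * GPIN_000..GPIN_004 entries declared in si_driver_query_list above. */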
1867	info->name = "GPIN";
1868	info->max_active_queries = 5;
1869	info->num_queries = 5;
1870	return 1;
1871}
1872
1873void si_init_query_functions(struct si_context *sctx)
1874{
1875	sctx->b.create_query = si_create_query;
1876	sctx->b.create_batch_query = si_create_batch_query;
1877	sctx->b.destroy_query = si_destroy_query;
1878	sctx->b.begin_query = si_begin_query;
1879	sctx->b.end_query = si_end_query;
1880	sctx->b.get_query_result = si_get_query_result;
1881	sctx->b.get_query_result_resource = si_get_query_result_resource;
1882	sctx->atoms.s.render_cond.emit = si_emit_query_predication;
1883
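	/* Predication uses query results written by the render backends, so only
	 * expose render_condition when at least one RB is present. */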
1884	if (((struct si_screen*)sctx->b.screen)->info.num_render_backends > 0)
1885		sctx->b.render_condition = si_render_condition;
1886
1887	LIST_INITHEAD(&sctx->active_queries);
1888}
1889
1890void si_init_screen_query_functions(struct si_screen *sscreen)
1891{
1892	sscreen->b.get_driver_query_info = si_get_driver_query_info;
1893	sscreen->b.get_driver_query_group_info = si_get_driver_query_group_info;
1894}
1895