1b8e80941Smrg/*
2b8e80941Smrg * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3b8e80941Smrg * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
4b8e80941Smrg * Copyright 2018 Advanced Micro Devices, Inc.
5b8e80941Smrg * All Rights Reserved.
6b8e80941Smrg *
7b8e80941Smrg * Permission is hereby granted, free of charge, to any person obtaining a
8b8e80941Smrg * copy of this software and associated documentation files (the "Software"),
9b8e80941Smrg * to deal in the Software without restriction, including without limitation
10b8e80941Smrg * on the rights to use, copy, modify, merge, publish, distribute, sub
11b8e80941Smrg * license, and/or sell copies of the Software, and to permit persons to whom
12b8e80941Smrg * the Software is furnished to do so, subject to the following conditions:
13b8e80941Smrg *
14b8e80941Smrg * The above copyright notice and this permission notice (including the next
15b8e80941Smrg * paragraph) shall be included in all copies or substantial portions of the
16b8e80941Smrg * Software.
17b8e80941Smrg *
18b8e80941Smrg * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19b8e80941Smrg * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20b8e80941Smrg * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21b8e80941Smrg * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
22b8e80941Smrg * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23b8e80941Smrg * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24b8e80941Smrg * USE OR OTHER DEALINGS IN THE SOFTWARE.
25b8e80941Smrg */
26b8e80941Smrg
27b8e80941Smrg#include "si_pipe.h"
28b8e80941Smrg#include "si_query.h"
29b8e80941Smrg#include "util/u_memory.h"
30b8e80941Smrg#include "util/u_upload_mgr.h"
31b8e80941Smrg#include "util/os_time.h"
32b8e80941Smrg#include "util/u_suballoc.h"
33b8e80941Smrg#include "amd/common/sid.h"
34b8e80941Smrg
35b8e80941Smrg#define SI_MAX_STREAMS 4
36b8e80941Smrg
37b8e80941Smrgstatic const struct si_query_ops query_hw_ops;
38b8e80941Smrg
/* Layout of one hardware query's data inside the query buffer:
 * byte offsets of the begin/end snapshots and the result-ready fence,
 * plus the stride/count when a query consists of multiple begin/end
 * pairs (e.g. one pair per render backend or per stream). */
struct si_hw_query_params {
	unsigned start_offset;  /* byte offset of the "begin" snapshot */
	unsigned end_offset;    /* byte offset of the "end" snapshot */
	unsigned fence_offset;  /* byte offset of the results-ready fence */
	unsigned pair_stride;   /* byte stride between consecutive pairs */
	unsigned pair_count;    /* number of begin/end pairs */
};
46b8e80941Smrg
/* Queries without buffer handling or suspend/resume.
 * These are evaluated entirely on the CPU from driver/winsys counters. */
struct si_query_sw {
	struct si_query b; /* base; must be the first member for casting */

	/* Counter snapshots taken at query begin/end; the reported result
	 * is usually end_result - begin_result. */
	uint64_t begin_result;
	uint64_t end_result;

	/* Reference values (wall-clock ns or IB counts) recorded by
	 * rate-style queries such as *_THREAD_BUSY and GFX_BO_LIST_SIZE. */
	uint64_t begin_time;
	uint64_t end_time;

	/* Fence for GPU_FINISHED. */
	struct pipe_fence_handle *fence;
};
60b8e80941Smrg
61b8e80941Smrgstatic void si_query_sw_destroy(struct si_screen *sscreen,
62b8e80941Smrg				struct si_query *squery)
63b8e80941Smrg{
64b8e80941Smrg	struct si_query_sw *query = (struct si_query_sw *)squery;
65b8e80941Smrg
66b8e80941Smrg	sscreen->b.fence_reference(&sscreen->b, &query->fence, NULL);
67b8e80941Smrg	FREE(query);
68b8e80941Smrg}
69b8e80941Smrg
/* Map a driver SI_QUERY_* type to the winsys RADEON_* value id that is
 * passed to radeon_winsys::query_value.  Only the query types listed
 * here have a winsys-backed counter; anything else is a caller bug. */
static enum radeon_value_id winsys_id_from_type(unsigned type)
{
	switch (type) {
	case SI_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
	case SI_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
	case SI_QUERY_MAPPED_VRAM: return RADEON_MAPPED_VRAM;
	case SI_QUERY_MAPPED_GTT: return RADEON_MAPPED_GTT;
	case SI_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
	case SI_QUERY_NUM_MAPPED_BUFFERS: return RADEON_NUM_MAPPED_BUFFERS;
	case SI_QUERY_NUM_GFX_IBS: return RADEON_NUM_GFX_IBS;
	case SI_QUERY_NUM_SDMA_IBS: return RADEON_NUM_SDMA_IBS;
	case SI_QUERY_GFX_BO_LIST_SIZE: return RADEON_GFX_BO_LIST_COUNTER;
	case SI_QUERY_GFX_IB_SIZE: return RADEON_GFX_IB_SIZE_COUNTER;
	case SI_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
	case SI_QUERY_NUM_EVICTIONS: return RADEON_NUM_EVICTIONS;
	case SI_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: return RADEON_NUM_VRAM_CPU_PAGE_FAULTS;
	case SI_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
	case SI_QUERY_VRAM_VIS_USAGE: return RADEON_VRAM_VIS_USAGE;
	case SI_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
	case SI_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
	case SI_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
	case SI_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
	case SI_QUERY_CS_THREAD_BUSY: return RADEON_CS_THREAD_TIME;
	default: unreachable("query type does not correspond to winsys id");
	}
}
96b8e80941Smrg
97b8e80941Smrgstatic int64_t si_finish_dma_get_cpu_time(struct si_context *sctx)
98b8e80941Smrg{
99b8e80941Smrg	struct pipe_fence_handle *fence = NULL;
100b8e80941Smrg
101b8e80941Smrg	si_flush_dma_cs(sctx, 0, &fence);
102b8e80941Smrg	if (fence) {
103b8e80941Smrg		sctx->ws->fence_wait(sctx->ws, fence, PIPE_TIMEOUT_INFINITE);
104b8e80941Smrg		sctx->ws->fence_reference(&fence, NULL);
105b8e80941Smrg	}
106b8e80941Smrg
107b8e80941Smrg	return os_time_get_nano();
108b8e80941Smrg}
109b8e80941Smrg
/* Record the "begin" snapshot for a software query.
 *
 * Most query types simply copy the current value of a per-context or
 * winsys counter into begin_result; the reported result is later
 * computed as end_result - begin_result.  Rate-style queries also
 * record a reference value in begin_time. */
static bool si_query_sw_begin(struct si_context *sctx,
			      struct si_query *squery)
{
	struct si_query_sw *query = (struct si_query_sw *)squery;
	enum radeon_value_id ws_id;

	switch(query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
	case PIPE_QUERY_GPU_FINISHED:
		/* Nothing to record; these are handled at end/get_result. */
		break;
	case SI_QUERY_TIME_ELAPSED_SDMA_SI:
		/* Idle SDMA first so the CPU timestamp brackets only the
		 * work submitted between begin and end. */
		query->begin_result = si_finish_dma_get_cpu_time(sctx);
		break;
	case SI_QUERY_DRAW_CALLS:
		query->begin_result = sctx->num_draw_calls;
		break;
	case SI_QUERY_DECOMPRESS_CALLS:
		query->begin_result = sctx->num_decompress_calls;
		break;
	case SI_QUERY_MRT_DRAW_CALLS:
		query->begin_result = sctx->num_mrt_draw_calls;
		break;
	case SI_QUERY_PRIM_RESTART_CALLS:
		query->begin_result = sctx->num_prim_restart_calls;
		break;
	case SI_QUERY_SPILL_DRAW_CALLS:
		query->begin_result = sctx->num_spill_draw_calls;
		break;
	case SI_QUERY_COMPUTE_CALLS:
		query->begin_result = sctx->num_compute_calls;
		break;
	case SI_QUERY_SPILL_COMPUTE_CALLS:
		query->begin_result = sctx->num_spill_compute_calls;
		break;
	case SI_QUERY_DMA_CALLS:
		query->begin_result = sctx->num_dma_calls;
		break;
	case SI_QUERY_CP_DMA_CALLS:
		query->begin_result = sctx->num_cp_dma_calls;
		break;
	case SI_QUERY_NUM_VS_FLUSHES:
		query->begin_result = sctx->num_vs_flushes;
		break;
	case SI_QUERY_NUM_PS_FLUSHES:
		query->begin_result = sctx->num_ps_flushes;
		break;
	case SI_QUERY_NUM_CS_FLUSHES:
		query->begin_result = sctx->num_cs_flushes;
		break;
	case SI_QUERY_NUM_CB_CACHE_FLUSHES:
		query->begin_result = sctx->num_cb_cache_flushes;
		break;
	case SI_QUERY_NUM_DB_CACHE_FLUSHES:
		query->begin_result = sctx->num_db_cache_flushes;
		break;
	case SI_QUERY_NUM_L2_INVALIDATES:
		query->begin_result = sctx->num_L2_invalidates;
		break;
	case SI_QUERY_NUM_L2_WRITEBACKS:
		query->begin_result = sctx->num_L2_writebacks;
		break;
	case SI_QUERY_NUM_RESIDENT_HANDLES:
		query->begin_result = sctx->num_resident_handles;
		break;
	/* Threaded-context stats are only available when TC is enabled. */
	case SI_QUERY_TC_OFFLOADED_SLOTS:
		query->begin_result = sctx->tc ? sctx->tc->num_offloaded_slots : 0;
		break;
	case SI_QUERY_TC_DIRECT_SLOTS:
		query->begin_result = sctx->tc ? sctx->tc->num_direct_slots : 0;
		break;
	case SI_QUERY_TC_NUM_SYNCS:
		query->begin_result = sctx->tc ? sctx->tc->num_syncs : 0;
		break;
	case SI_QUERY_REQUESTED_VRAM:
	case SI_QUERY_REQUESTED_GTT:
	case SI_QUERY_MAPPED_VRAM:
	case SI_QUERY_MAPPED_GTT:
	case SI_QUERY_VRAM_USAGE:
	case SI_QUERY_VRAM_VIS_USAGE:
	case SI_QUERY_GTT_USAGE:
	case SI_QUERY_GPU_TEMPERATURE:
	case SI_QUERY_CURRENT_GPU_SCLK:
	case SI_QUERY_CURRENT_GPU_MCLK:
	case SI_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
	case SI_QUERY_NUM_MAPPED_BUFFERS:
		/* Absolute values sampled only at query end; begin is zeroed
		 * so end_result - begin_result yields the sampled value. */
		query->begin_result = 0;
		break;
	case SI_QUERY_BUFFER_WAIT_TIME:
	case SI_QUERY_GFX_IB_SIZE:
	case SI_QUERY_NUM_GFX_IBS:
	case SI_QUERY_NUM_SDMA_IBS:
	case SI_QUERY_NUM_BYTES_MOVED:
	case SI_QUERY_NUM_EVICTIONS:
	case SI_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
		/* Monotonic winsys counters: snapshot at begin and end. */
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = sctx->ws->query_value(sctx->ws, ws_id);
		break;
	}
	case SI_QUERY_GFX_BO_LIST_SIZE:
		ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = sctx->ws->query_value(sctx->ws, ws_id);
		/* Also snapshot the gfx IB count: get_result reports the
		 * average BO list size per IB. */
		query->begin_time = sctx->ws->query_value(sctx->ws,
							  RADEON_NUM_GFX_IBS);
		break;
	case SI_QUERY_CS_THREAD_BUSY:
		ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = sctx->ws->query_value(sctx->ws, ws_id);
		/* Wall-clock reference used to compute a busy percentage. */
		query->begin_time = os_time_get_nano();
		break;
	case SI_QUERY_GALLIUM_THREAD_BUSY:
		query->begin_result =
			sctx->tc ? util_queue_get_thread_time_nano(&sctx->tc->queue, 0) : 0;
		query->begin_time = os_time_get_nano();
		break;
	case SI_QUERY_GPU_LOAD:
	case SI_QUERY_GPU_SHADERS_BUSY:
	case SI_QUERY_GPU_TA_BUSY:
	case SI_QUERY_GPU_GDS_BUSY:
	case SI_QUERY_GPU_VGT_BUSY:
	case SI_QUERY_GPU_IA_BUSY:
	case SI_QUERY_GPU_SX_BUSY:
	case SI_QUERY_GPU_WD_BUSY:
	case SI_QUERY_GPU_BCI_BUSY:
	case SI_QUERY_GPU_SC_BUSY:
	case SI_QUERY_GPU_PA_BUSY:
	case SI_QUERY_GPU_DB_BUSY:
	case SI_QUERY_GPU_CP_BUSY:
	case SI_QUERY_GPU_CB_BUSY:
	case SI_QUERY_GPU_SDMA_BUSY:
	case SI_QUERY_GPU_PFP_BUSY:
	case SI_QUERY_GPU_MEQ_BUSY:
	case SI_QUERY_GPU_ME_BUSY:
	case SI_QUERY_GPU_SURF_SYNC_BUSY:
	case SI_QUERY_GPU_CP_DMA_BUSY:
	case SI_QUERY_GPU_SCRATCH_RAM_BUSY:
		/* GPU load sampling: si_begin_counter returns the starting
		 * accumulator, consumed by si_end_counter at query end. */
		query->begin_result = si_begin_counter(sctx->screen,
							 query->b.type);
		break;
	case SI_QUERY_NUM_COMPILATIONS:
		query->begin_result = p_atomic_read(&sctx->screen->num_compilations);
		break;
	case SI_QUERY_NUM_SHADERS_CREATED:
		query->begin_result = p_atomic_read(&sctx->screen->num_shaders_created);
		break;
	case SI_QUERY_NUM_SHADER_CACHE_HITS:
		query->begin_result =
			p_atomic_read(&sctx->screen->num_shader_cache_hits);
		break;
	case SI_QUERY_GPIN_ASIC_ID:
	case SI_QUERY_GPIN_NUM_SIMD:
	case SI_QUERY_GPIN_NUM_RB:
	case SI_QUERY_GPIN_NUM_SPI:
	case SI_QUERY_GPIN_NUM_SE:
		/* GPIN queries report static chip info; nothing to snapshot. */
		break;
	default:
		unreachable("si_query_sw_begin: bad query type");
	}

	return true;
}
270b8e80941Smrg
/* Record the "end" snapshot for a software query.
 * Mirrors si_query_sw_begin: each case stores the counterpart value in
 * end_result (and end_time for rate-style queries). */
static bool si_query_sw_end(struct si_context *sctx,
			    struct si_query *squery)
{
	struct si_query_sw *query = (struct si_query_sw *)squery;
	enum radeon_value_id ws_id;

	switch(query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		break;
	case PIPE_QUERY_GPU_FINISHED:
		/* Deferred flush: obtain a fence that get_result will wait on. */
		sctx->b.flush(&sctx->b, &query->fence, PIPE_FLUSH_DEFERRED);
		break;
	case SI_QUERY_TIME_ELAPSED_SDMA_SI:
		/* Idle SDMA, then take the closing CPU timestamp. */
		query->end_result = si_finish_dma_get_cpu_time(sctx);
		break;
	case SI_QUERY_DRAW_CALLS:
		query->end_result = sctx->num_draw_calls;
		break;
	case SI_QUERY_DECOMPRESS_CALLS:
		query->end_result = sctx->num_decompress_calls;
		break;
	case SI_QUERY_MRT_DRAW_CALLS:
		query->end_result = sctx->num_mrt_draw_calls;
		break;
	case SI_QUERY_PRIM_RESTART_CALLS:
		query->end_result = sctx->num_prim_restart_calls;
		break;
	case SI_QUERY_SPILL_DRAW_CALLS:
		query->end_result = sctx->num_spill_draw_calls;
		break;
	case SI_QUERY_COMPUTE_CALLS:
		query->end_result = sctx->num_compute_calls;
		break;
	case SI_QUERY_SPILL_COMPUTE_CALLS:
		query->end_result = sctx->num_spill_compute_calls;
		break;
	case SI_QUERY_DMA_CALLS:
		query->end_result = sctx->num_dma_calls;
		break;
	case SI_QUERY_CP_DMA_CALLS:
		query->end_result = sctx->num_cp_dma_calls;
		break;
	case SI_QUERY_NUM_VS_FLUSHES:
		query->end_result = sctx->num_vs_flushes;
		break;
	case SI_QUERY_NUM_PS_FLUSHES:
		query->end_result = sctx->num_ps_flushes;
		break;
	case SI_QUERY_NUM_CS_FLUSHES:
		query->end_result = sctx->num_cs_flushes;
		break;
	case SI_QUERY_NUM_CB_CACHE_FLUSHES:
		query->end_result = sctx->num_cb_cache_flushes;
		break;
	case SI_QUERY_NUM_DB_CACHE_FLUSHES:
		query->end_result = sctx->num_db_cache_flushes;
		break;
	case SI_QUERY_NUM_L2_INVALIDATES:
		query->end_result = sctx->num_L2_invalidates;
		break;
	case SI_QUERY_NUM_L2_WRITEBACKS:
		query->end_result = sctx->num_L2_writebacks;
		break;
	case SI_QUERY_NUM_RESIDENT_HANDLES:
		query->end_result = sctx->num_resident_handles;
		break;
	/* Threaded-context stats are only available when TC is enabled. */
	case SI_QUERY_TC_OFFLOADED_SLOTS:
		query->end_result = sctx->tc ? sctx->tc->num_offloaded_slots : 0;
		break;
	case SI_QUERY_TC_DIRECT_SLOTS:
		query->end_result = sctx->tc ? sctx->tc->num_direct_slots : 0;
		break;
	case SI_QUERY_TC_NUM_SYNCS:
		query->end_result = sctx->tc ? sctx->tc->num_syncs : 0;
		break;
	case SI_QUERY_REQUESTED_VRAM:
	case SI_QUERY_REQUESTED_GTT:
	case SI_QUERY_MAPPED_VRAM:
	case SI_QUERY_MAPPED_GTT:
	case SI_QUERY_VRAM_USAGE:
	case SI_QUERY_VRAM_VIS_USAGE:
	case SI_QUERY_GTT_USAGE:
	case SI_QUERY_GPU_TEMPERATURE:
	case SI_QUERY_CURRENT_GPU_SCLK:
	case SI_QUERY_CURRENT_GPU_MCLK:
	case SI_QUERY_BUFFER_WAIT_TIME:
	case SI_QUERY_GFX_IB_SIZE:
	case SI_QUERY_NUM_MAPPED_BUFFERS:
	case SI_QUERY_NUM_GFX_IBS:
	case SI_QUERY_NUM_SDMA_IBS:
	case SI_QUERY_NUM_BYTES_MOVED:
	case SI_QUERY_NUM_EVICTIONS:
	case SI_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
		/* Sample the winsys counter (begin_result is 0 for the
		 * absolute-value queries in this group). */
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->end_result = sctx->ws->query_value(sctx->ws, ws_id);
		break;
	}
	case SI_QUERY_GFX_BO_LIST_SIZE:
		ws_id = winsys_id_from_type(query->b.type);
		query->end_result = sctx->ws->query_value(sctx->ws, ws_id);
		/* Closing IB count for the per-IB average in get_result. */
		query->end_time = sctx->ws->query_value(sctx->ws,
							RADEON_NUM_GFX_IBS);
		break;
	case SI_QUERY_CS_THREAD_BUSY:
		ws_id = winsys_id_from_type(query->b.type);
		query->end_result = sctx->ws->query_value(sctx->ws, ws_id);
		query->end_time = os_time_get_nano();
		break;
	case SI_QUERY_GALLIUM_THREAD_BUSY:
		query->end_result =
			sctx->tc ? util_queue_get_thread_time_nano(&sctx->tc->queue, 0) : 0;
		query->end_time = os_time_get_nano();
		break;
	case SI_QUERY_GPU_LOAD:
	case SI_QUERY_GPU_SHADERS_BUSY:
	case SI_QUERY_GPU_TA_BUSY:
	case SI_QUERY_GPU_GDS_BUSY:
	case SI_QUERY_GPU_VGT_BUSY:
	case SI_QUERY_GPU_IA_BUSY:
	case SI_QUERY_GPU_SX_BUSY:
	case SI_QUERY_GPU_WD_BUSY:
	case SI_QUERY_GPU_BCI_BUSY:
	case SI_QUERY_GPU_SC_BUSY:
	case SI_QUERY_GPU_PA_BUSY:
	case SI_QUERY_GPU_DB_BUSY:
	case SI_QUERY_GPU_CP_BUSY:
	case SI_QUERY_GPU_CB_BUSY:
	case SI_QUERY_GPU_SDMA_BUSY:
	case SI_QUERY_GPU_PFP_BUSY:
	case SI_QUERY_GPU_MEQ_BUSY:
	case SI_QUERY_GPU_ME_BUSY:
	case SI_QUERY_GPU_SURF_SYNC_BUSY:
	case SI_QUERY_GPU_CP_DMA_BUSY:
	case SI_QUERY_GPU_SCRATCH_RAM_BUSY:
		/* si_end_counter consumes the begin sample; clear it so the
		 * generic end - begin computation in get_result reports the
		 * value produced here unchanged. */
		query->end_result = si_end_counter(sctx->screen,
						     query->b.type,
						     query->begin_result);
		query->begin_result = 0;
		break;
	case SI_QUERY_NUM_COMPILATIONS:
		query->end_result = p_atomic_read(&sctx->screen->num_compilations);
		break;
	case SI_QUERY_NUM_SHADERS_CREATED:
		query->end_result = p_atomic_read(&sctx->screen->num_shaders_created);
		break;
	case SI_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
		query->end_result = sctx->last_tex_ps_draw_ratio;
		break;
	case SI_QUERY_NUM_SHADER_CACHE_HITS:
		query->end_result =
			p_atomic_read(&sctx->screen->num_shader_cache_hits);
		break;
	case SI_QUERY_GPIN_ASIC_ID:
	case SI_QUERY_GPIN_NUM_SIMD:
	case SI_QUERY_GPIN_NUM_RB:
	case SI_QUERY_GPIN_NUM_SPI:
	case SI_QUERY_GPIN_NUM_SE:
		/* Static chip info; nothing to snapshot. */
		break;
	default:
		unreachable("si_query_sw_end: bad query type");
	}

	return true;
}
435b8e80941Smrg
/* Compute the final result of a software query on the CPU.
 *
 * Special-cased types fill the appropriate pipe_query_result member
 * directly; everything else reports end_result - begin_result, with a
 * final unit adjustment for a few winsys counters.  Returns false only
 * for an unfinished non-blocking GPU_FINISHED query. */
static bool si_query_sw_get_result(struct si_context *sctx,
				   struct si_query *squery,
				   bool wait,
				   union pipe_query_result *result)
{
	struct si_query_sw *query = (struct si_query_sw *)squery;

	switch (query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		/* Convert from cycles per millisecond to cycles per second (Hz). */
		result->timestamp_disjoint.frequency =
			(uint64_t)sctx->screen->info.clock_crystal_freq * 1000;
		result->timestamp_disjoint.disjoint = false;
		return true;
	case PIPE_QUERY_GPU_FINISHED: {
		struct pipe_screen *screen = sctx->b.screen;
		/* Pass a NULL context if the deferred flush already happened,
		 * so fence_finish doesn't trigger another flush. */
		struct pipe_context *ctx = squery->b.flushed ? NULL : &sctx->b;

		result->b = screen->fence_finish(screen, ctx, query->fence,
						 wait ? PIPE_TIMEOUT_INFINITE : 0);
		return result->b;
	}

	case SI_QUERY_GFX_BO_LIST_SIZE:
		/* Average BO list size per gfx IB; the *_time fields hold
		 * IB counts here (see begin/end). */
		result->u64 = (query->end_result - query->begin_result) /
			      (query->end_time - query->begin_time);
		return true;
	case SI_QUERY_CS_THREAD_BUSY:
	case SI_QUERY_GALLIUM_THREAD_BUSY:
		/* Thread time over wall-clock time, as a percentage. */
		result->u64 = (query->end_result - query->begin_result) * 100 /
			      (query->end_time - query->begin_time);
		return true;
	case SI_QUERY_GPIN_ASIC_ID:
		result->u32 = 0;
		return true;
	case SI_QUERY_GPIN_NUM_SIMD:
		result->u32 = sctx->screen->info.num_good_compute_units;
		return true;
	case SI_QUERY_GPIN_NUM_RB:
		result->u32 = sctx->screen->info.num_render_backends;
		return true;
	case SI_QUERY_GPIN_NUM_SPI:
		result->u32 = 1; /* all supported chips have one SPI per SE */
		return true;
	case SI_QUERY_GPIN_NUM_SE:
		result->u32 = sctx->screen->info.max_se;
		return true;
	}

	/* Generic case: difference of the two snapshots. */
	result->u64 = query->end_result - query->begin_result;

	switch (query->b.type) {
	case SI_QUERY_BUFFER_WAIT_TIME:
	case SI_QUERY_GPU_TEMPERATURE:
		/* NOTE(review): scaling inferred from winsys value names —
		 * wait time is reported in ns, presumably converted to us
		 * here; temperature presumably millidegrees to degrees. */
		result->u64 /= 1000;
		break;
	case SI_QUERY_CURRENT_GPU_SCLK:
	case SI_QUERY_CURRENT_GPU_MCLK:
		/* Presumably MHz -> Hz — confirm against the winsys. */
		result->u64 *= 1000000;
		break;
	}

	return true;
}
500b8e80941Smrg
501b8e80941Smrg
/* Virtual function table for software (CPU-evaluated) queries.
 * get_result_resource is NULL: these queries have no GPU-side result
 * writeback path in this table. */
static const struct si_query_ops sw_query_ops = {
	.destroy = si_query_sw_destroy,
	.begin = si_query_sw_begin,
	.end = si_query_sw_end,
	.get_result = si_query_sw_get_result,
	.get_result_resource = NULL
};
509b8e80941Smrg
510b8e80941Smrgstatic struct pipe_query *si_query_sw_create(unsigned query_type)
511b8e80941Smrg{
512b8e80941Smrg	struct si_query_sw *query;
513b8e80941Smrg
514b8e80941Smrg	query = CALLOC_STRUCT(si_query_sw);
515b8e80941Smrg	if (!query)
516b8e80941Smrg		return NULL;
517b8e80941Smrg
518b8e80941Smrg	query->b.type = query_type;
519b8e80941Smrg	query->b.ops = &sw_query_ops;
520b8e80941Smrg
521b8e80941Smrg	return (struct pipe_query *)query;
522b8e80941Smrg}
523b8e80941Smrg
524b8e80941Smrgvoid si_query_buffer_destroy(struct si_screen *sscreen, struct si_query_buffer *buffer)
525b8e80941Smrg{
526b8e80941Smrg	struct si_query_buffer *prev = buffer->previous;
527b8e80941Smrg
528b8e80941Smrg	/* Release all query buffers. */
529b8e80941Smrg	while (prev) {
530b8e80941Smrg		struct si_query_buffer *qbuf = prev;
531b8e80941Smrg		prev = prev->previous;
532b8e80941Smrg		si_resource_reference(&qbuf->buf, NULL);
533b8e80941Smrg		FREE(qbuf);
534b8e80941Smrg	}
535b8e80941Smrg
536b8e80941Smrg	si_resource_reference(&buffer->buf, NULL);
537b8e80941Smrg}
538b8e80941Smrg
/* Reset a query buffer chain for reuse: keep at most one (the oldest)
 * allocation, rewind the write offset, and flag the kept buffer for
 * re-preparation.  If even the oldest buffer is still busy on the GPU,
 * drop it too rather than stall. */
void si_query_buffer_reset(struct si_context *sctx, struct si_query_buffer *buffer)
{
	/* Discard all query buffers except for the oldest. */
	while (buffer->previous) {
		struct si_query_buffer *qbuf = buffer->previous;
		buffer->previous = qbuf->previous;

		/* Release the head's storage, then adopt the older node's
		 * buffer; after the loop, head holds the oldest allocation. */
		si_resource_reference(&buffer->buf, NULL);
		buffer->buf = qbuf->buf; /* move ownership */
		FREE(qbuf);
	}
	buffer->results_end = 0;

	if (!buffer->buf)
		return;

	/* Discard even the oldest buffer if it can't be mapped without a stall. */
	if (si_rings_is_buffer_referenced(sctx, buffer->buf->buf, RADEON_USAGE_READWRITE) ||
	    !sctx->ws->buffer_wait(buffer->buf->buf, 0, RADEON_USAGE_READWRITE)) {
		si_resource_reference(&buffer->buf, NULL);
	} else {
		/* Kept buffer has stale contents; si_query_buffer_alloc will
		 * run prepare_buffer again before it is used. */
		buffer->unprepared = true;
	}
}
563b8e80941Smrg
564b8e80941Smrgbool si_query_buffer_alloc(struct si_context *sctx, struct si_query_buffer *buffer,
565b8e80941Smrg			   bool (*prepare_buffer)(struct si_context *, struct si_query_buffer*),
566b8e80941Smrg			   unsigned size)
567b8e80941Smrg{
568b8e80941Smrg	bool unprepared = buffer->unprepared;
569b8e80941Smrg	buffer->unprepared = false;
570b8e80941Smrg
571b8e80941Smrg	if (!buffer->buf || buffer->results_end + size > buffer->buf->b.b.width0) {
572b8e80941Smrg		if (buffer->buf) {
573b8e80941Smrg			struct si_query_buffer *qbuf = MALLOC_STRUCT(si_query_buffer);
574b8e80941Smrg			memcpy(qbuf, buffer, sizeof(*qbuf));
575b8e80941Smrg			buffer->previous = qbuf;
576b8e80941Smrg		}
577b8e80941Smrg		buffer->results_end = 0;
578b8e80941Smrg
579b8e80941Smrg		/* Queries are normally read by the CPU after
580b8e80941Smrg		 * being written by the gpu, hence staging is probably a good
581b8e80941Smrg		 * usage pattern.
582b8e80941Smrg		 */
583b8e80941Smrg		struct si_screen *screen = sctx->screen;
584b8e80941Smrg		unsigned buf_size = MAX2(size, screen->info.min_alloc_size);
585b8e80941Smrg		buffer->buf = si_resource(
586b8e80941Smrg			pipe_buffer_create(&screen->b, 0, PIPE_USAGE_STAGING, buf_size));
587b8e80941Smrg		if (unlikely(!buffer->buf))
588b8e80941Smrg			return false;
589b8e80941Smrg		unprepared = true;
590b8e80941Smrg	}
591b8e80941Smrg
592b8e80941Smrg	if (unprepared && prepare_buffer) {
593b8e80941Smrg		if (unlikely(!prepare_buffer(sctx, buffer))) {
594b8e80941Smrg			si_resource_reference(&buffer->buf, NULL);
595b8e80941Smrg			return false;
596b8e80941Smrg		}
597b8e80941Smrg	}
598b8e80941Smrg
599b8e80941Smrg	return true;
600b8e80941Smrg}
601b8e80941Smrg
602b8e80941Smrg
603b8e80941Smrgvoid si_query_hw_destroy(struct si_screen *sscreen,
604b8e80941Smrg			 struct si_query *squery)
605b8e80941Smrg{
606b8e80941Smrg	struct si_query_hw *query = (struct si_query_hw *)squery;
607b8e80941Smrg
608b8e80941Smrg	si_query_buffer_destroy(sscreen, &query->buffer);
609b8e80941Smrg	si_resource_reference(&query->workaround_buf, NULL);
610b8e80941Smrg	FREE(squery);
611b8e80941Smrg}
612b8e80941Smrg
/* Initialize a freshly allocated (or recycled) query buffer before the
 * GPU writes results into it.  Zeroes the whole buffer and, for
 * occlusion queries, pre-marks the slots of disabled render backends. */
static bool si_query_hw_prepare_buffer(struct si_context *sctx,
				       struct si_query_buffer *qbuf)
{
	/* Recover the si_query_hw that embeds this si_query_buffer. */
	static const struct si_query_hw si_query_hw_s;
	struct si_query_hw *query = container_of(qbuf, &si_query_hw_s, buffer);
	struct si_screen *screen = sctx->screen;

	/* The caller ensures that the buffer is currently unused by the GPU. */
	uint32_t *results = screen->ws->buffer_map(qbuf->buf->buf, NULL,
						   PIPE_TRANSFER_WRITE |
						   PIPE_TRANSFER_UNSYNCHRONIZED);
	if (!results)
		return false;

	memset(results, 0, qbuf->buf->b.b.width0);

	if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
	    query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
		unsigned max_rbs = screen->info.num_render_backends;
		unsigned enabled_rb_mask = screen->info.enabled_rb_mask;
		unsigned num_results;
		unsigned i, j;

		/* Set top bits for unused backends.
		 * NOTE(review): disabled RBs never write their slots, so the
		 * high dwords of their begin/end counters are pre-set here —
		 * presumably so the readback logic sees them as complete;
		 * confirm against the result-wait code. */
		num_results = qbuf->buf->b.b.width0 / query->result_size;
		for (j = 0; j < num_results; j++) {
			for (i = 0; i < max_rbs; i++) {
				if (!(enabled_rb_mask & (1<<i))) {
					results[(i * 4)+1] = 0x80000000;
					results[(i * 4)+3] = 0x80000000;
				}
			}
			/* Advance to the next result slot (4 dwords per RB). */
			results += 4 * max_rbs;
		}
	}

	return true;
}
652b8e80941Smrg
653b8e80941Smrgstatic void si_query_hw_get_result_resource(struct si_context *sctx,
654b8e80941Smrg					    struct si_query *squery,
655b8e80941Smrg					    bool wait,
656b8e80941Smrg					    enum pipe_query_value_type result_type,
657b8e80941Smrg					    int index,
658b8e80941Smrg					    struct pipe_resource *resource,
659b8e80941Smrg					    unsigned offset);
660b8e80941Smrg
661b8e80941Smrgstatic void si_query_hw_do_emit_start(struct si_context *sctx,
662b8e80941Smrg				      struct si_query_hw *query,
663b8e80941Smrg				      struct si_resource *buffer,
664b8e80941Smrg				      uint64_t va);
665b8e80941Smrgstatic void si_query_hw_do_emit_stop(struct si_context *sctx,
666b8e80941Smrg				     struct si_query_hw *query,
667b8e80941Smrg				     struct si_resource *buffer,
668b8e80941Smrg				     uint64_t va);
669b8e80941Smrgstatic void si_query_hw_add_result(struct si_screen *sscreen,
670b8e80941Smrg				   struct si_query_hw *, void *buffer,
671b8e80941Smrg				   union pipe_query_result *result);
672b8e80941Smrgstatic void si_query_hw_clear_result(struct si_query_hw *,
673b8e80941Smrg				     union pipe_query_result *);
674b8e80941Smrg
/* Default hardware-query callback table, installed by si_query_hw_create. */
static struct si_query_hw_ops query_hw_default_hw_ops = {
	.prepare_buffer = si_query_hw_prepare_buffer,
	.emit_start = si_query_hw_do_emit_start,
	.emit_stop = si_query_hw_do_emit_stop,
	.clear_result = si_query_hw_clear_result,
	.add_result = si_query_hw_add_result,
};
682b8e80941Smrg
683b8e80941Smrgstatic struct pipe_query *si_query_hw_create(struct si_screen *sscreen,
684b8e80941Smrg					     unsigned query_type,
685b8e80941Smrg					     unsigned index)
686b8e80941Smrg{
687b8e80941Smrg	struct si_query_hw *query = CALLOC_STRUCT(si_query_hw);
688b8e80941Smrg	if (!query)
689b8e80941Smrg		return NULL;
690b8e80941Smrg
691b8e80941Smrg	query->b.type = query_type;
692b8e80941Smrg	query->b.ops = &query_hw_ops;
693b8e80941Smrg	query->ops = &query_hw_default_hw_ops;
694b8e80941Smrg
695b8e80941Smrg	switch (query_type) {
696b8e80941Smrg	case PIPE_QUERY_OCCLUSION_COUNTER:
697b8e80941Smrg	case PIPE_QUERY_OCCLUSION_PREDICATE:
698b8e80941Smrg	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
699b8e80941Smrg		query->result_size = 16 * sscreen->info.num_render_backends;
700b8e80941Smrg		query->result_size += 16; /* for the fence + alignment */
701b8e80941Smrg		query->b.num_cs_dw_suspend = 6 + si_cp_write_fence_dwords(sscreen);
702b8e80941Smrg		break;
703b8e80941Smrg	case SI_QUERY_TIME_ELAPSED_SDMA:
704b8e80941Smrg		/* GET_GLOBAL_TIMESTAMP only works if the offset is a multiple of 32. */
705b8e80941Smrg		query->result_size = 64;
706b8e80941Smrg		break;
707b8e80941Smrg	case PIPE_QUERY_TIME_ELAPSED:
708b8e80941Smrg		query->result_size = 24;
709b8e80941Smrg		query->b.num_cs_dw_suspend = 8 + si_cp_write_fence_dwords(sscreen);
710b8e80941Smrg		break;
711b8e80941Smrg	case PIPE_QUERY_TIMESTAMP:
712b8e80941Smrg		query->result_size = 16;
713b8e80941Smrg		query->b.num_cs_dw_suspend = 8 + si_cp_write_fence_dwords(sscreen);
714b8e80941Smrg		query->flags = SI_QUERY_HW_FLAG_NO_START;
715b8e80941Smrg		break;
716b8e80941Smrg	case PIPE_QUERY_PRIMITIVES_EMITTED:
717b8e80941Smrg	case PIPE_QUERY_PRIMITIVES_GENERATED:
718b8e80941Smrg	case PIPE_QUERY_SO_STATISTICS:
719b8e80941Smrg	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
720b8e80941Smrg		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
721b8e80941Smrg		query->result_size = 32;
722b8e80941Smrg		query->b.num_cs_dw_suspend = 6;
723b8e80941Smrg		query->stream = index;
724b8e80941Smrg		break;
725b8e80941Smrg	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
726b8e80941Smrg		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
727b8e80941Smrg		query->result_size = 32 * SI_MAX_STREAMS;
728b8e80941Smrg		query->b.num_cs_dw_suspend = 6 * SI_MAX_STREAMS;
729b8e80941Smrg		break;
730b8e80941Smrg	case PIPE_QUERY_PIPELINE_STATISTICS:
731b8e80941Smrg		/* 11 values on GCN. */
732b8e80941Smrg		query->result_size = 11 * 16;
733b8e80941Smrg		query->result_size += 8; /* for the fence + alignment */
734b8e80941Smrg		query->b.num_cs_dw_suspend = 6 + si_cp_write_fence_dwords(sscreen);
735b8e80941Smrg		break;
736b8e80941Smrg	default:
737b8e80941Smrg		assert(0);
738b8e80941Smrg		FREE(query);
739b8e80941Smrg		return NULL;
740b8e80941Smrg	}
741b8e80941Smrg
742b8e80941Smrg	return (struct pipe_query *)query;
743b8e80941Smrg}
744b8e80941Smrg
745b8e80941Smrgstatic void si_update_occlusion_query_state(struct si_context *sctx,
746b8e80941Smrg					    unsigned type, int diff)
747b8e80941Smrg{
748b8e80941Smrg	if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
749b8e80941Smrg	    type == PIPE_QUERY_OCCLUSION_PREDICATE ||
750b8e80941Smrg	    type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
751b8e80941Smrg		bool old_enable = sctx->num_occlusion_queries != 0;
752b8e80941Smrg		bool old_perfect_enable =
753b8e80941Smrg			sctx->num_perfect_occlusion_queries != 0;
754b8e80941Smrg		bool enable, perfect_enable;
755b8e80941Smrg
756b8e80941Smrg		sctx->num_occlusion_queries += diff;
757b8e80941Smrg		assert(sctx->num_occlusion_queries >= 0);
758b8e80941Smrg
759b8e80941Smrg		if (type != PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
760b8e80941Smrg			sctx->num_perfect_occlusion_queries += diff;
761b8e80941Smrg			assert(sctx->num_perfect_occlusion_queries >= 0);
762b8e80941Smrg		}
763b8e80941Smrg
764b8e80941Smrg		enable = sctx->num_occlusion_queries != 0;
765b8e80941Smrg		perfect_enable = sctx->num_perfect_occlusion_queries != 0;
766b8e80941Smrg
767b8e80941Smrg		if (enable != old_enable || perfect_enable != old_perfect_enable) {
768b8e80941Smrg			si_set_occlusion_query_state(sctx, old_perfect_enable);
769b8e80941Smrg		}
770b8e80941Smrg	}
771b8e80941Smrg}
772b8e80941Smrg
773b8e80941Smrgstatic unsigned event_type_for_stream(unsigned stream)
774b8e80941Smrg{
775b8e80941Smrg	switch (stream) {
776b8e80941Smrg	default:
777b8e80941Smrg	case 0: return V_028A90_SAMPLE_STREAMOUTSTATS;
778b8e80941Smrg	case 1: return V_028A90_SAMPLE_STREAMOUTSTATS1;
779b8e80941Smrg	case 2: return V_028A90_SAMPLE_STREAMOUTSTATS2;
780b8e80941Smrg	case 3: return V_028A90_SAMPLE_STREAMOUTSTATS3;
781b8e80941Smrg	}
782b8e80941Smrg}
783b8e80941Smrg
784b8e80941Smrgstatic void emit_sample_streamout(struct radeon_cmdbuf *cs, uint64_t va,
785b8e80941Smrg				  unsigned stream)
786b8e80941Smrg{
787b8e80941Smrg	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
788b8e80941Smrg	radeon_emit(cs, EVENT_TYPE(event_type_for_stream(stream)) | EVENT_INDEX(3));
789b8e80941Smrg	radeon_emit(cs, va);
790b8e80941Smrg	radeon_emit(cs, va >> 32);
791b8e80941Smrg}
792b8e80941Smrg
793b8e80941Smrgstatic void si_query_hw_do_emit_start(struct si_context *sctx,
794b8e80941Smrg					struct si_query_hw *query,
795b8e80941Smrg					struct si_resource *buffer,
796b8e80941Smrg					uint64_t va)
797b8e80941Smrg{
798b8e80941Smrg	struct radeon_cmdbuf *cs = sctx->gfx_cs;
799b8e80941Smrg
800b8e80941Smrg	switch (query->b.type) {
801b8e80941Smrg	case SI_QUERY_TIME_ELAPSED_SDMA:
802b8e80941Smrg		si_dma_emit_timestamp(sctx, buffer, va - buffer->gpu_address);
803b8e80941Smrg		return;
804b8e80941Smrg	case PIPE_QUERY_OCCLUSION_COUNTER:
805b8e80941Smrg	case PIPE_QUERY_OCCLUSION_PREDICATE:
806b8e80941Smrg	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
807b8e80941Smrg		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
808b8e80941Smrg		radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
809b8e80941Smrg		radeon_emit(cs, va);
810b8e80941Smrg		radeon_emit(cs, va >> 32);
811b8e80941Smrg		break;
812b8e80941Smrg	case PIPE_QUERY_PRIMITIVES_EMITTED:
813b8e80941Smrg	case PIPE_QUERY_PRIMITIVES_GENERATED:
814b8e80941Smrg	case PIPE_QUERY_SO_STATISTICS:
815b8e80941Smrg	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
816b8e80941Smrg		emit_sample_streamout(cs, va, query->stream);
817b8e80941Smrg		break;
818b8e80941Smrg	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
819b8e80941Smrg		for (unsigned stream = 0; stream < SI_MAX_STREAMS; ++stream)
820b8e80941Smrg			emit_sample_streamout(cs, va + 32 * stream, stream);
821b8e80941Smrg		break;
822b8e80941Smrg	case PIPE_QUERY_TIME_ELAPSED:
823b8e80941Smrg		si_cp_release_mem(sctx, V_028A90_BOTTOM_OF_PIPE_TS, 0,
824b8e80941Smrg				  EOP_DST_SEL_MEM, EOP_INT_SEL_NONE,
825b8e80941Smrg				  EOP_DATA_SEL_TIMESTAMP, NULL, va,
826b8e80941Smrg				  0, query->b.type);
827b8e80941Smrg		break;
828b8e80941Smrg	case PIPE_QUERY_PIPELINE_STATISTICS:
829b8e80941Smrg		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
830b8e80941Smrg		radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
831b8e80941Smrg		radeon_emit(cs, va);
832b8e80941Smrg		radeon_emit(cs, va >> 32);
833b8e80941Smrg		break;
834b8e80941Smrg	default:
835b8e80941Smrg		assert(0);
836b8e80941Smrg	}
837b8e80941Smrg	radeon_add_to_buffer_list(sctx, sctx->gfx_cs, query->buffer.buf, RADEON_USAGE_WRITE,
838b8e80941Smrg				  RADEON_PRIO_QUERY);
839b8e80941Smrg}
840b8e80941Smrg
841b8e80941Smrgstatic void si_query_hw_emit_start(struct si_context *sctx,
842b8e80941Smrg				   struct si_query_hw *query)
843b8e80941Smrg{
844b8e80941Smrg	uint64_t va;
845b8e80941Smrg
846b8e80941Smrg	if (!si_query_buffer_alloc(sctx, &query->buffer, query->ops->prepare_buffer,
847b8e80941Smrg				   query->result_size))
848b8e80941Smrg		return;
849b8e80941Smrg
850b8e80941Smrg	si_update_occlusion_query_state(sctx, query->b.type, 1);
851b8e80941Smrg	si_update_prims_generated_query_state(sctx, query->b.type, 1);
852b8e80941Smrg
853b8e80941Smrg	if (query->b.type != SI_QUERY_TIME_ELAPSED_SDMA)
854b8e80941Smrg		si_need_gfx_cs_space(sctx);
855b8e80941Smrg
856b8e80941Smrg	va = query->buffer.buf->gpu_address + query->buffer.results_end;
857b8e80941Smrg	query->ops->emit_start(sctx, query, query->buffer.buf, va);
858b8e80941Smrg}
859b8e80941Smrg
860b8e80941Smrgstatic void si_query_hw_do_emit_stop(struct si_context *sctx,
861b8e80941Smrg				       struct si_query_hw *query,
862b8e80941Smrg				       struct si_resource *buffer,
863b8e80941Smrg				       uint64_t va)
864b8e80941Smrg{
865b8e80941Smrg	struct radeon_cmdbuf *cs = sctx->gfx_cs;
866b8e80941Smrg	uint64_t fence_va = 0;
867b8e80941Smrg
868b8e80941Smrg	switch (query->b.type) {
869b8e80941Smrg	case SI_QUERY_TIME_ELAPSED_SDMA:
870b8e80941Smrg		si_dma_emit_timestamp(sctx, buffer, va + 32 - buffer->gpu_address);
871b8e80941Smrg		return;
872b8e80941Smrg	case PIPE_QUERY_OCCLUSION_COUNTER:
873b8e80941Smrg	case PIPE_QUERY_OCCLUSION_PREDICATE:
874b8e80941Smrg	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
875b8e80941Smrg		va += 8;
876b8e80941Smrg		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
877b8e80941Smrg		radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
878b8e80941Smrg		radeon_emit(cs, va);
879b8e80941Smrg		radeon_emit(cs, va >> 32);
880b8e80941Smrg
881b8e80941Smrg		fence_va = va + sctx->screen->info.num_render_backends * 16 - 8;
882b8e80941Smrg		break;
883b8e80941Smrg	case PIPE_QUERY_PRIMITIVES_EMITTED:
884b8e80941Smrg	case PIPE_QUERY_PRIMITIVES_GENERATED:
885b8e80941Smrg	case PIPE_QUERY_SO_STATISTICS:
886b8e80941Smrg	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
887b8e80941Smrg		va += 16;
888b8e80941Smrg		emit_sample_streamout(cs, va, query->stream);
889b8e80941Smrg		break;
890b8e80941Smrg	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
891b8e80941Smrg		va += 16;
892b8e80941Smrg		for (unsigned stream = 0; stream < SI_MAX_STREAMS; ++stream)
893b8e80941Smrg			emit_sample_streamout(cs, va + 32 * stream, stream);
894b8e80941Smrg		break;
895b8e80941Smrg	case PIPE_QUERY_TIME_ELAPSED:
896b8e80941Smrg		va += 8;
897b8e80941Smrg		/* fall through */
898b8e80941Smrg	case PIPE_QUERY_TIMESTAMP:
899b8e80941Smrg		si_cp_release_mem(sctx, V_028A90_BOTTOM_OF_PIPE_TS, 0,
900b8e80941Smrg				  EOP_DST_SEL_MEM, EOP_INT_SEL_NONE,
901b8e80941Smrg				  EOP_DATA_SEL_TIMESTAMP, NULL, va,
902b8e80941Smrg				  0, query->b.type);
903b8e80941Smrg		fence_va = va + 8;
904b8e80941Smrg		break;
905b8e80941Smrg	case PIPE_QUERY_PIPELINE_STATISTICS: {
906b8e80941Smrg		unsigned sample_size = (query->result_size - 8) / 2;
907b8e80941Smrg
908b8e80941Smrg		va += sample_size;
909b8e80941Smrg		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
910b8e80941Smrg		radeon_emit(cs, EVENT_TYPE(V_028A90_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
911b8e80941Smrg		radeon_emit(cs, va);
912b8e80941Smrg		radeon_emit(cs, va >> 32);
913b8e80941Smrg
914b8e80941Smrg		fence_va = va + sample_size;
915b8e80941Smrg		break;
916b8e80941Smrg	}
917b8e80941Smrg	default:
918b8e80941Smrg		assert(0);
919b8e80941Smrg	}
920b8e80941Smrg	radeon_add_to_buffer_list(sctx, sctx->gfx_cs, query->buffer.buf, RADEON_USAGE_WRITE,
921b8e80941Smrg				  RADEON_PRIO_QUERY);
922b8e80941Smrg
923b8e80941Smrg	if (fence_va) {
924b8e80941Smrg		si_cp_release_mem(sctx, V_028A90_BOTTOM_OF_PIPE_TS, 0,
925b8e80941Smrg				  EOP_DST_SEL_MEM, EOP_INT_SEL_NONE,
926b8e80941Smrg				  EOP_DATA_SEL_VALUE_32BIT,
927b8e80941Smrg				  query->buffer.buf, fence_va, 0x80000000,
928b8e80941Smrg				  query->b.type);
929b8e80941Smrg	}
930b8e80941Smrg}
931b8e80941Smrg
932b8e80941Smrgstatic void si_query_hw_emit_stop(struct si_context *sctx,
933b8e80941Smrg				  struct si_query_hw *query)
934b8e80941Smrg{
935b8e80941Smrg	uint64_t va;
936b8e80941Smrg
937b8e80941Smrg	/* The queries which need begin already called this in begin_query. */
938b8e80941Smrg	if (query->flags & SI_QUERY_HW_FLAG_NO_START) {
939b8e80941Smrg		si_need_gfx_cs_space(sctx);
940b8e80941Smrg		if (!si_query_buffer_alloc(sctx, &query->buffer, query->ops->prepare_buffer,
941b8e80941Smrg					   query->result_size))
942b8e80941Smrg			return;
943b8e80941Smrg	}
944b8e80941Smrg
945b8e80941Smrg	if (!query->buffer.buf)
946b8e80941Smrg		return; // previous buffer allocation failure
947b8e80941Smrg
948b8e80941Smrg	/* emit end query */
949b8e80941Smrg	va = query->buffer.buf->gpu_address + query->buffer.results_end;
950b8e80941Smrg
951b8e80941Smrg	query->ops->emit_stop(sctx, query, query->buffer.buf, va);
952b8e80941Smrg
953b8e80941Smrg	query->buffer.results_end += query->result_size;
954b8e80941Smrg
955b8e80941Smrg	si_update_occlusion_query_state(sctx, query->b.type, -1);
956b8e80941Smrg	si_update_prims_generated_query_state(sctx, query->b.type, -1);
957b8e80941Smrg}
958b8e80941Smrg
959b8e80941Smrgstatic void emit_set_predicate(struct si_context *ctx,
960b8e80941Smrg			       struct si_resource *buf, uint64_t va,
961b8e80941Smrg			       uint32_t op)
962b8e80941Smrg{
963b8e80941Smrg	struct radeon_cmdbuf *cs = ctx->gfx_cs;
964b8e80941Smrg
965b8e80941Smrg	if (ctx->chip_class >= GFX9) {
966b8e80941Smrg		radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 2, 0));
967b8e80941Smrg		radeon_emit(cs, op);
968b8e80941Smrg		radeon_emit(cs, va);
969b8e80941Smrg		radeon_emit(cs, va >> 32);
970b8e80941Smrg	} else {
971b8e80941Smrg		radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
972b8e80941Smrg		radeon_emit(cs, va);
973b8e80941Smrg		radeon_emit(cs, op | ((va >> 32) & 0xFF));
974b8e80941Smrg	}
975b8e80941Smrg	radeon_add_to_buffer_list(ctx, ctx->gfx_cs, buf, RADEON_USAGE_READ,
976b8e80941Smrg				  RADEON_PRIO_QUERY);
977b8e80941Smrg}
978b8e80941Smrg
979b8e80941Smrgstatic void si_emit_query_predication(struct si_context *ctx)
980b8e80941Smrg{
981b8e80941Smrg	struct si_query_hw *query = (struct si_query_hw *)ctx->render_cond;
982b8e80941Smrg	struct si_query_buffer *qbuf;
983b8e80941Smrg	uint32_t op;
984b8e80941Smrg	bool flag_wait, invert;
985b8e80941Smrg
986b8e80941Smrg	if (!query)
987b8e80941Smrg		return;
988b8e80941Smrg
989b8e80941Smrg	invert = ctx->render_cond_invert;
990b8e80941Smrg	flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
991b8e80941Smrg		    ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;
992b8e80941Smrg
993b8e80941Smrg	if (query->workaround_buf) {
994b8e80941Smrg		op = PRED_OP(PREDICATION_OP_BOOL64);
995b8e80941Smrg	} else {
996b8e80941Smrg		switch (query->b.type) {
997b8e80941Smrg		case PIPE_QUERY_OCCLUSION_COUNTER:
998b8e80941Smrg		case PIPE_QUERY_OCCLUSION_PREDICATE:
999b8e80941Smrg		case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
1000b8e80941Smrg			op = PRED_OP(PREDICATION_OP_ZPASS);
1001b8e80941Smrg			break;
1002b8e80941Smrg		case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
1003b8e80941Smrg		case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
1004b8e80941Smrg			op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
1005b8e80941Smrg			invert = !invert;
1006b8e80941Smrg			break;
1007b8e80941Smrg		default:
1008b8e80941Smrg			assert(0);
1009b8e80941Smrg			return;
1010b8e80941Smrg		}
1011b8e80941Smrg	}
1012b8e80941Smrg
1013b8e80941Smrg	/* if true then invert, see GL_ARB_conditional_render_inverted */
1014b8e80941Smrg	if (invert)
1015b8e80941Smrg		op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible or overflow */
1016b8e80941Smrg	else
1017b8e80941Smrg		op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible or no overflow */
1018b8e80941Smrg
1019b8e80941Smrg	/* Use the value written by compute shader as a workaround. Note that
1020b8e80941Smrg	 * the wait flag does not apply in this predication mode.
1021b8e80941Smrg	 *
1022b8e80941Smrg	 * The shader outputs the result value to L2. Workarounds only affect VI
1023b8e80941Smrg	 * and later, where the CP reads data from L2, so we don't need an
1024b8e80941Smrg	 * additional flush.
1025b8e80941Smrg	 */
1026b8e80941Smrg	if (query->workaround_buf) {
1027b8e80941Smrg		uint64_t va = query->workaround_buf->gpu_address + query->workaround_offset;
1028b8e80941Smrg		emit_set_predicate(ctx, query->workaround_buf, va, op);
1029b8e80941Smrg		return;
1030b8e80941Smrg	}
1031b8e80941Smrg
1032b8e80941Smrg	op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;
1033b8e80941Smrg
1034b8e80941Smrg	/* emit predicate packets for all data blocks */
1035b8e80941Smrg	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
1036b8e80941Smrg		unsigned results_base = 0;
1037b8e80941Smrg		uint64_t va_base = qbuf->buf->gpu_address;
1038b8e80941Smrg
1039b8e80941Smrg		while (results_base < qbuf->results_end) {
1040b8e80941Smrg			uint64_t va = va_base + results_base;
1041b8e80941Smrg
1042b8e80941Smrg			if (query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE) {
1043b8e80941Smrg				for (unsigned stream = 0; stream < SI_MAX_STREAMS; ++stream) {
1044b8e80941Smrg					emit_set_predicate(ctx, qbuf->buf, va + 32 * stream, op);
1045b8e80941Smrg
1046b8e80941Smrg					/* set CONTINUE bit for all packets except the first */
1047b8e80941Smrg					op |= PREDICATION_CONTINUE;
1048b8e80941Smrg				}
1049b8e80941Smrg			} else {
1050b8e80941Smrg				emit_set_predicate(ctx, qbuf->buf, va, op);
1051b8e80941Smrg				op |= PREDICATION_CONTINUE;
1052b8e80941Smrg			}
1053b8e80941Smrg
1054b8e80941Smrg			results_base += query->result_size;
1055b8e80941Smrg		}
1056b8e80941Smrg	}
1057b8e80941Smrg}
1058b8e80941Smrg
1059b8e80941Smrgstatic struct pipe_query *si_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
1060b8e80941Smrg{
1061b8e80941Smrg	struct si_screen *sscreen =
1062b8e80941Smrg		(struct si_screen *)ctx->screen;
1063b8e80941Smrg
1064b8e80941Smrg	if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
1065b8e80941Smrg	    query_type == PIPE_QUERY_GPU_FINISHED ||
1066b8e80941Smrg	    (query_type >= PIPE_QUERY_DRIVER_SPECIFIC &&
1067b8e80941Smrg	     query_type != SI_QUERY_TIME_ELAPSED_SDMA))
1068b8e80941Smrg		return si_query_sw_create(query_type);
1069b8e80941Smrg
1070b8e80941Smrg	return si_query_hw_create(sscreen, query_type, index);
1071b8e80941Smrg}
1072b8e80941Smrg
1073b8e80941Smrgstatic void si_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
1074b8e80941Smrg{
1075b8e80941Smrg	struct si_context *sctx = (struct si_context *)ctx;
1076b8e80941Smrg	struct si_query *squery = (struct si_query *)query;
1077b8e80941Smrg
1078b8e80941Smrg	squery->ops->destroy(sctx->screen, squery);
1079b8e80941Smrg}
1080b8e80941Smrg
1081b8e80941Smrgstatic boolean si_begin_query(struct pipe_context *ctx,
1082b8e80941Smrg                                struct pipe_query *query)
1083b8e80941Smrg{
1084b8e80941Smrg	struct si_context *sctx = (struct si_context *)ctx;
1085b8e80941Smrg	struct si_query *squery = (struct si_query *)query;
1086b8e80941Smrg
1087b8e80941Smrg	return squery->ops->begin(sctx, squery);
1088b8e80941Smrg}
1089b8e80941Smrg
1090b8e80941Smrgbool si_query_hw_begin(struct si_context *sctx,
1091b8e80941Smrg		       struct si_query *squery)
1092b8e80941Smrg{
1093b8e80941Smrg	struct si_query_hw *query = (struct si_query_hw *)squery;
1094b8e80941Smrg
1095b8e80941Smrg	if (query->flags & SI_QUERY_HW_FLAG_NO_START) {
1096b8e80941Smrg		assert(0);
1097b8e80941Smrg		return false;
1098b8e80941Smrg	}
1099b8e80941Smrg
1100b8e80941Smrg	if (!(query->flags & SI_QUERY_HW_FLAG_BEGIN_RESUMES))
1101b8e80941Smrg		si_query_buffer_reset(sctx, &query->buffer);
1102b8e80941Smrg
1103b8e80941Smrg	si_resource_reference(&query->workaround_buf, NULL);
1104b8e80941Smrg
1105b8e80941Smrg	si_query_hw_emit_start(sctx, query);
1106b8e80941Smrg	if (!query->buffer.buf)
1107b8e80941Smrg		return false;
1108b8e80941Smrg
1109b8e80941Smrg	LIST_ADDTAIL(&query->b.active_list, &sctx->active_queries);
1110b8e80941Smrg	sctx->num_cs_dw_queries_suspend += query->b.num_cs_dw_suspend;
1111b8e80941Smrg	return true;
1112b8e80941Smrg}
1113b8e80941Smrg
1114b8e80941Smrgstatic bool si_end_query(struct pipe_context *ctx, struct pipe_query *query)
1115b8e80941Smrg{
1116b8e80941Smrg	struct si_context *sctx = (struct si_context *)ctx;
1117b8e80941Smrg	struct si_query *squery = (struct si_query *)query;
1118b8e80941Smrg
1119b8e80941Smrg	return squery->ops->end(sctx, squery);
1120b8e80941Smrg}
1121b8e80941Smrg
1122b8e80941Smrgbool si_query_hw_end(struct si_context *sctx,
1123b8e80941Smrg		     struct si_query *squery)
1124b8e80941Smrg{
1125b8e80941Smrg	struct si_query_hw *query = (struct si_query_hw *)squery;
1126b8e80941Smrg
1127b8e80941Smrg	if (query->flags & SI_QUERY_HW_FLAG_NO_START)
1128b8e80941Smrg		si_query_buffer_reset(sctx, &query->buffer);
1129b8e80941Smrg
1130b8e80941Smrg	si_query_hw_emit_stop(sctx, query);
1131b8e80941Smrg
1132b8e80941Smrg	if (!(query->flags & SI_QUERY_HW_FLAG_NO_START)) {
1133b8e80941Smrg		LIST_DELINIT(&query->b.active_list);
1134b8e80941Smrg		sctx->num_cs_dw_queries_suspend -= query->b.num_cs_dw_suspend;
1135b8e80941Smrg	}
1136b8e80941Smrg
1137b8e80941Smrg	if (!query->buffer.buf)
1138b8e80941Smrg		return false;
1139b8e80941Smrg
1140b8e80941Smrg	return true;
1141b8e80941Smrg}
1142b8e80941Smrg
1143b8e80941Smrgstatic void si_get_hw_query_params(struct si_context *sctx,
1144b8e80941Smrg				   struct si_query_hw *squery, int index,
1145b8e80941Smrg				   struct si_hw_query_params *params)
1146b8e80941Smrg{
1147b8e80941Smrg	unsigned max_rbs = sctx->screen->info.num_render_backends;
1148b8e80941Smrg
1149b8e80941Smrg	params->pair_stride = 0;
1150b8e80941Smrg	params->pair_count = 1;
1151b8e80941Smrg
1152b8e80941Smrg	switch (squery->b.type) {
1153b8e80941Smrg	case PIPE_QUERY_OCCLUSION_COUNTER:
1154b8e80941Smrg	case PIPE_QUERY_OCCLUSION_PREDICATE:
1155b8e80941Smrg	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
1156b8e80941Smrg		params->start_offset = 0;
1157b8e80941Smrg		params->end_offset = 8;
1158b8e80941Smrg		params->fence_offset = max_rbs * 16;
1159b8e80941Smrg		params->pair_stride = 16;
1160b8e80941Smrg		params->pair_count = max_rbs;
1161b8e80941Smrg		break;
1162b8e80941Smrg	case PIPE_QUERY_TIME_ELAPSED:
1163b8e80941Smrg		params->start_offset = 0;
1164b8e80941Smrg		params->end_offset = 8;
1165b8e80941Smrg		params->fence_offset = 16;
1166b8e80941Smrg		break;
1167b8e80941Smrg	case PIPE_QUERY_TIMESTAMP:
1168b8e80941Smrg		params->start_offset = 0;
1169b8e80941Smrg		params->end_offset = 0;
1170b8e80941Smrg		params->fence_offset = 8;
1171b8e80941Smrg		break;
1172b8e80941Smrg	case PIPE_QUERY_PRIMITIVES_EMITTED:
1173b8e80941Smrg		params->start_offset = 8;
1174b8e80941Smrg		params->end_offset = 24;
1175b8e80941Smrg		params->fence_offset = params->end_offset + 4;
1176b8e80941Smrg		break;
1177b8e80941Smrg	case PIPE_QUERY_PRIMITIVES_GENERATED:
1178b8e80941Smrg		params->start_offset = 0;
1179b8e80941Smrg		params->end_offset = 16;
1180b8e80941Smrg		params->fence_offset = params->end_offset + 4;
1181b8e80941Smrg		break;
1182b8e80941Smrg	case PIPE_QUERY_SO_STATISTICS:
1183b8e80941Smrg		params->start_offset = 8 - index * 8;
1184b8e80941Smrg		params->end_offset = 24 - index * 8;
1185b8e80941Smrg		params->fence_offset = params->end_offset + 4;
1186b8e80941Smrg		break;
1187b8e80941Smrg	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
1188b8e80941Smrg		params->pair_count = SI_MAX_STREAMS;
1189b8e80941Smrg		params->pair_stride = 32;
1190b8e80941Smrg	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
1191b8e80941Smrg		params->start_offset = 0;
1192b8e80941Smrg		params->end_offset = 16;
1193b8e80941Smrg
1194b8e80941Smrg		/* We can re-use the high dword of the last 64-bit value as a
1195b8e80941Smrg		 * fence: it is initialized as 0, and the high bit is set by
1196b8e80941Smrg		 * the write of the streamout stats event.
1197b8e80941Smrg		 */
1198b8e80941Smrg		params->fence_offset = squery->result_size - 4;
1199b8e80941Smrg		break;
1200b8e80941Smrg	case PIPE_QUERY_PIPELINE_STATISTICS:
1201b8e80941Smrg	{
1202b8e80941Smrg		static const unsigned offsets[] = {56, 48, 24, 32, 40, 16, 8, 0, 64, 72, 80};
1203b8e80941Smrg		params->start_offset = offsets[index];
1204b8e80941Smrg		params->end_offset = 88 + offsets[index];
1205b8e80941Smrg		params->fence_offset = 2 * 88;
1206b8e80941Smrg		break;
1207b8e80941Smrg	}
1208b8e80941Smrg	default:
1209b8e80941Smrg		unreachable("si_get_hw_query_params unsupported");
1210b8e80941Smrg	}
1211b8e80941Smrg}
1212b8e80941Smrg
1213b8e80941Smrgstatic unsigned si_query_read_result(void *map, unsigned start_index, unsigned end_index,
1214b8e80941Smrg				     bool test_status_bit)
1215b8e80941Smrg{
1216b8e80941Smrg	uint32_t *current_result = (uint32_t*)map;
1217b8e80941Smrg	uint64_t start, end;
1218b8e80941Smrg
1219b8e80941Smrg	start = (uint64_t)current_result[start_index] |
1220b8e80941Smrg		(uint64_t)current_result[start_index+1] << 32;
1221b8e80941Smrg	end = (uint64_t)current_result[end_index] |
1222b8e80941Smrg	      (uint64_t)current_result[end_index+1] << 32;
1223b8e80941Smrg
1224b8e80941Smrg	if (!test_status_bit ||
1225b8e80941Smrg	    ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
1226b8e80941Smrg		return end - start;
1227b8e80941Smrg	}
1228b8e80941Smrg	return 0;
1229b8e80941Smrg}
1230b8e80941Smrg
1231b8e80941Smrgstatic void si_query_hw_add_result(struct si_screen *sscreen,
1232b8e80941Smrg				     struct si_query_hw *query,
1233b8e80941Smrg				     void *buffer,
1234b8e80941Smrg				     union pipe_query_result *result)
1235b8e80941Smrg{
1236b8e80941Smrg	unsigned max_rbs = sscreen->info.num_render_backends;
1237b8e80941Smrg
1238b8e80941Smrg	switch (query->b.type) {
1239b8e80941Smrg	case PIPE_QUERY_OCCLUSION_COUNTER: {
1240b8e80941Smrg		for (unsigned i = 0; i < max_rbs; ++i) {
1241b8e80941Smrg			unsigned results_base = i * 16;
1242b8e80941Smrg			result->u64 +=
1243b8e80941Smrg				si_query_read_result(buffer + results_base, 0, 2, true);
1244b8e80941Smrg		}
1245b8e80941Smrg		break;
1246b8e80941Smrg	}
1247b8e80941Smrg	case PIPE_QUERY_OCCLUSION_PREDICATE:
1248b8e80941Smrg	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE: {
1249b8e80941Smrg		for (unsigned i = 0; i < max_rbs; ++i) {
1250b8e80941Smrg			unsigned results_base = i * 16;
1251b8e80941Smrg			result->b = result->b ||
1252b8e80941Smrg				si_query_read_result(buffer + results_base, 0, 2, true) != 0;
1253b8e80941Smrg		}
1254b8e80941Smrg		break;
1255b8e80941Smrg	}
1256b8e80941Smrg	case PIPE_QUERY_TIME_ELAPSED:
1257b8e80941Smrg		result->u64 += si_query_read_result(buffer, 0, 2, false);
1258b8e80941Smrg		break;
1259b8e80941Smrg	case SI_QUERY_TIME_ELAPSED_SDMA:
1260b8e80941Smrg		result->u64 += si_query_read_result(buffer, 0, 32/4, false);
1261b8e80941Smrg		break;
1262b8e80941Smrg	case PIPE_QUERY_TIMESTAMP:
1263b8e80941Smrg		result->u64 = *(uint64_t*)buffer;
1264b8e80941Smrg		break;
1265b8e80941Smrg	case PIPE_QUERY_PRIMITIVES_EMITTED:
1266b8e80941Smrg		/* SAMPLE_STREAMOUTSTATS stores this structure:
1267b8e80941Smrg		 * {
1268b8e80941Smrg		 *    u64 NumPrimitivesWritten;
1269b8e80941Smrg		 *    u64 PrimitiveStorageNeeded;
1270b8e80941Smrg		 * }
1271b8e80941Smrg		 * We only need NumPrimitivesWritten here. */
1272b8e80941Smrg		result->u64 += si_query_read_result(buffer, 2, 6, true);
1273b8e80941Smrg		break;
1274b8e80941Smrg	case PIPE_QUERY_PRIMITIVES_GENERATED:
1275b8e80941Smrg		/* Here we read PrimitiveStorageNeeded. */
1276b8e80941Smrg		result->u64 += si_query_read_result(buffer, 0, 4, true);
1277b8e80941Smrg		break;
1278b8e80941Smrg	case PIPE_QUERY_SO_STATISTICS:
1279b8e80941Smrg		result->so_statistics.num_primitives_written +=
1280b8e80941Smrg			si_query_read_result(buffer, 2, 6, true);
1281b8e80941Smrg		result->so_statistics.primitives_storage_needed +=
1282b8e80941Smrg			si_query_read_result(buffer, 0, 4, true);
1283b8e80941Smrg		break;
1284b8e80941Smrg	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
1285b8e80941Smrg		result->b = result->b ||
1286b8e80941Smrg			si_query_read_result(buffer, 2, 6, true) !=
1287b8e80941Smrg			si_query_read_result(buffer, 0, 4, true);
1288b8e80941Smrg		break;
1289b8e80941Smrg	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
1290b8e80941Smrg		for (unsigned stream = 0; stream < SI_MAX_STREAMS; ++stream) {
1291b8e80941Smrg			result->b = result->b ||
1292b8e80941Smrg				si_query_read_result(buffer, 2, 6, true) !=
1293b8e80941Smrg				si_query_read_result(buffer, 0, 4, true);
1294b8e80941Smrg			buffer = (char *)buffer + 32;
1295b8e80941Smrg		}
1296b8e80941Smrg		break;
1297b8e80941Smrg	case PIPE_QUERY_PIPELINE_STATISTICS:
1298b8e80941Smrg		result->pipeline_statistics.ps_invocations +=
1299b8e80941Smrg			si_query_read_result(buffer, 0, 22, false);
1300b8e80941Smrg		result->pipeline_statistics.c_primitives +=
1301b8e80941Smrg			si_query_read_result(buffer, 2, 24, false);
1302b8e80941Smrg		result->pipeline_statistics.c_invocations +=
1303b8e80941Smrg			si_query_read_result(buffer, 4, 26, false);
1304b8e80941Smrg		result->pipeline_statistics.vs_invocations +=
1305b8e80941Smrg			si_query_read_result(buffer, 6, 28, false);
1306b8e80941Smrg		result->pipeline_statistics.gs_invocations +=
1307b8e80941Smrg			si_query_read_result(buffer, 8, 30, false);
1308b8e80941Smrg		result->pipeline_statistics.gs_primitives +=
1309b8e80941Smrg			si_query_read_result(buffer, 10, 32, false);
1310b8e80941Smrg		result->pipeline_statistics.ia_primitives +=
1311b8e80941Smrg			si_query_read_result(buffer, 12, 34, false);
1312b8e80941Smrg		result->pipeline_statistics.ia_vertices +=
1313b8e80941Smrg			si_query_read_result(buffer, 14, 36, false);
1314b8e80941Smrg		result->pipeline_statistics.hs_invocations +=
1315b8e80941Smrg			si_query_read_result(buffer, 16, 38, false);
1316b8e80941Smrg		result->pipeline_statistics.ds_invocations +=
1317b8e80941Smrg			si_query_read_result(buffer, 18, 40, false);
1318b8e80941Smrg		result->pipeline_statistics.cs_invocations +=
1319b8e80941Smrg			si_query_read_result(buffer, 20, 42, false);
1320b8e80941Smrg#if 0 /* for testing */
1321b8e80941Smrg		printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
1322b8e80941Smrg		       "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
1323b8e80941Smrg		       "Clipper prims=%llu, PS=%llu, CS=%llu\n",
1324b8e80941Smrg		       result->pipeline_statistics.ia_vertices,
1325b8e80941Smrg		       result->pipeline_statistics.ia_primitives,
1326b8e80941Smrg		       result->pipeline_statistics.vs_invocations,
1327b8e80941Smrg		       result->pipeline_statistics.hs_invocations,
1328b8e80941Smrg		       result->pipeline_statistics.ds_invocations,
1329b8e80941Smrg		       result->pipeline_statistics.gs_invocations,
1330b8e80941Smrg		       result->pipeline_statistics.gs_primitives,
1331b8e80941Smrg		       result->pipeline_statistics.c_invocations,
1332b8e80941Smrg		       result->pipeline_statistics.c_primitives,
1333b8e80941Smrg		       result->pipeline_statistics.ps_invocations,
1334b8e80941Smrg		       result->pipeline_statistics.cs_invocations);
1335b8e80941Smrg#endif
1336b8e80941Smrg		break;
1337b8e80941Smrg	default:
1338b8e80941Smrg		assert(0);
1339b8e80941Smrg	}
1340b8e80941Smrg}
1341b8e80941Smrg
1342b8e80941Smrgvoid si_query_hw_suspend(struct si_context *sctx, struct si_query *query)
1343b8e80941Smrg{
1344b8e80941Smrg	si_query_hw_emit_stop(sctx, (struct si_query_hw *)query);
1345b8e80941Smrg}
1346b8e80941Smrg
1347b8e80941Smrgvoid si_query_hw_resume(struct si_context *sctx, struct si_query *query)
1348b8e80941Smrg{
1349b8e80941Smrg	si_query_hw_emit_start(sctx, (struct si_query_hw *)query);
1350b8e80941Smrg}
1351b8e80941Smrg
1352b8e80941Smrgstatic const struct si_query_ops query_hw_ops = {
1353b8e80941Smrg	.destroy = si_query_hw_destroy,
1354b8e80941Smrg	.begin = si_query_hw_begin,
1355b8e80941Smrg	.end = si_query_hw_end,
1356b8e80941Smrg	.get_result = si_query_hw_get_result,
1357b8e80941Smrg	.get_result_resource = si_query_hw_get_result_resource,
1358b8e80941Smrg
1359b8e80941Smrg	.suspend = si_query_hw_suspend,
1360b8e80941Smrg	.resume = si_query_hw_resume,
1361b8e80941Smrg};
1362b8e80941Smrg
1363b8e80941Smrgstatic boolean si_get_query_result(struct pipe_context *ctx,
1364b8e80941Smrg				   struct pipe_query *query, boolean wait,
1365b8e80941Smrg				   union pipe_query_result *result)
1366b8e80941Smrg{
1367b8e80941Smrg	struct si_context *sctx = (struct si_context *)ctx;
1368b8e80941Smrg	struct si_query *squery = (struct si_query *)query;
1369b8e80941Smrg
1370b8e80941Smrg	return squery->ops->get_result(sctx, squery, wait, result);
1371b8e80941Smrg}
1372b8e80941Smrg
1373b8e80941Smrgstatic void si_get_query_result_resource(struct pipe_context *ctx,
1374b8e80941Smrg					 struct pipe_query *query,
1375b8e80941Smrg					 boolean wait,
1376b8e80941Smrg					 enum pipe_query_value_type result_type,
1377b8e80941Smrg					 int index,
1378b8e80941Smrg					 struct pipe_resource *resource,
1379b8e80941Smrg					 unsigned offset)
1380b8e80941Smrg{
1381b8e80941Smrg	struct si_context *sctx = (struct si_context *)ctx;
1382b8e80941Smrg	struct si_query *squery = (struct si_query *)query;
1383b8e80941Smrg
1384b8e80941Smrg	squery->ops->get_result_resource(sctx, squery, wait, result_type, index,
1385b8e80941Smrg	                                 resource, offset);
1386b8e80941Smrg}
1387b8e80941Smrg
1388b8e80941Smrgstatic void si_query_hw_clear_result(struct si_query_hw *query,
1389b8e80941Smrg				       union pipe_query_result *result)
1390b8e80941Smrg{
1391b8e80941Smrg	util_query_clear_result(result, query->b.type);
1392b8e80941Smrg}
1393b8e80941Smrg
1394b8e80941Smrgbool si_query_hw_get_result(struct si_context *sctx,
1395b8e80941Smrg			    struct si_query *squery,
1396b8e80941Smrg			    bool wait, union pipe_query_result *result)
1397b8e80941Smrg{
1398b8e80941Smrg	struct si_screen *sscreen = sctx->screen;
1399b8e80941Smrg	struct si_query_hw *query = (struct si_query_hw *)squery;
1400b8e80941Smrg	struct si_query_buffer *qbuf;
1401b8e80941Smrg
1402b8e80941Smrg	query->ops->clear_result(query, result);
1403b8e80941Smrg
1404b8e80941Smrg	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
1405b8e80941Smrg		unsigned usage = PIPE_TRANSFER_READ |
1406b8e80941Smrg				 (wait ? 0 : PIPE_TRANSFER_DONTBLOCK);
1407b8e80941Smrg		unsigned results_base = 0;
1408b8e80941Smrg		void *map;
1409b8e80941Smrg
1410b8e80941Smrg		if (squery->b.flushed)
1411b8e80941Smrg			map = sctx->ws->buffer_map(qbuf->buf->buf, NULL, usage);
1412b8e80941Smrg		else
1413b8e80941Smrg			map = si_buffer_map_sync_with_rings(sctx, qbuf->buf, usage);
1414b8e80941Smrg
1415b8e80941Smrg		if (!map)
1416b8e80941Smrg			return false;
1417b8e80941Smrg
1418b8e80941Smrg		while (results_base != qbuf->results_end) {
1419b8e80941Smrg			query->ops->add_result(sscreen, query, map + results_base,
1420b8e80941Smrg					       result);
1421b8e80941Smrg			results_base += query->result_size;
1422b8e80941Smrg		}
1423b8e80941Smrg	}
1424b8e80941Smrg
1425b8e80941Smrg	/* Convert the time to expected units. */
1426b8e80941Smrg	if (squery->type == PIPE_QUERY_TIME_ELAPSED ||
1427b8e80941Smrg	    squery->type == SI_QUERY_TIME_ELAPSED_SDMA ||
1428b8e80941Smrg	    squery->type == PIPE_QUERY_TIMESTAMP) {
1429b8e80941Smrg		result->u64 = (1000000 * result->u64) / sscreen->info.clock_crystal_freq;
1430b8e80941Smrg	}
1431b8e80941Smrg	return true;
1432b8e80941Smrg}
1433b8e80941Smrg
1434b8e80941Smrgstatic void si_restore_qbo_state(struct si_context *sctx,
1435b8e80941Smrg				 struct si_qbo_state *st)
1436b8e80941Smrg{
1437b8e80941Smrg	sctx->b.bind_compute_state(&sctx->b, st->saved_compute);
1438b8e80941Smrg
1439b8e80941Smrg	sctx->b.set_constant_buffer(&sctx->b, PIPE_SHADER_COMPUTE, 0, &st->saved_const0);
1440b8e80941Smrg	pipe_resource_reference(&st->saved_const0.buffer, NULL);
1441b8e80941Smrg
1442b8e80941Smrg	sctx->b.set_shader_buffers(&sctx->b, PIPE_SHADER_COMPUTE, 0, 3, st->saved_ssbo,
1443b8e80941Smrg				   st->saved_ssbo_writable_mask);
1444b8e80941Smrg	for (unsigned i = 0; i < 3; ++i)
1445b8e80941Smrg		pipe_resource_reference(&st->saved_ssbo[i].buffer, NULL);
1446b8e80941Smrg}
1447b8e80941Smrg
/* GPU-side result readback: instead of mapping the result buffers on the
 * CPU, dispatch a compute shader (sctx->query_result_shader) that reduces
 * the query's result buffer chain and writes the final value into
 * `resource` at byte `offset`.
 *
 * Shader bindings set up below:
 *  - constant buffer 0: the `consts` struct,
 *  - SSBO 0: the query result buffer currently being reduced,
 *  - SSBO 1: a 16-byte scratch buffer carrying partial results between
 *    chained buffers (only allocated when a chain exists),
 *  - SSBO 2: the destination — scratch for intermediate passes, the
 *    caller's `resource` for the final (oldest-has-no-previous) pass.
 */
static void si_query_hw_get_result_resource(struct si_context *sctx,
                                              struct si_query *squery,
                                              bool wait,
                                              enum pipe_query_value_type result_type,
                                              int index,
                                              struct pipe_resource *resource,
                                              unsigned offset)
{
	struct si_query_hw *query = (struct si_query_hw *)squery;
	struct si_query_buffer *qbuf;
	struct si_query_buffer *qbuf_prev;
	struct pipe_resource *tmp_buffer = NULL;
	unsigned tmp_buffer_offset = 0;
	struct si_qbo_state saved_state = {};
	struct pipe_grid_info grid = {};
	struct pipe_constant_buffer constant_buffer = {};
	struct pipe_shader_buffer ssbo[3];
	struct si_hw_query_params params;
	/* Layout must match the constant buffer consumed by
	 * si_create_query_result_cs — do not reorder. */
	struct {
		uint32_t end_offset;
		uint32_t result_stride;
		uint32_t result_count;
		uint32_t config;
		uint32_t fence_offset;
		uint32_t pair_stride;
		uint32_t pair_count;
	} consts;

	/* Lazily create the reduction shader. */
	if (!sctx->query_result_shader) {
		sctx->query_result_shader = si_create_query_result_cs(sctx);
		if (!sctx->query_result_shader)
			return;
	}

	/* A chained query needs scratch space to carry partial results
	 * between per-buffer dispatches. */
	if (query->buffer.previous) {
		u_suballocator_alloc(sctx->allocator_zeroed_memory, 16, 16,
				     &tmp_buffer_offset, &tmp_buffer);
		if (!tmp_buffer)
			return;
	}

	si_save_qbo_state(sctx, &saved_state);

	si_get_hw_query_params(sctx, query, index >= 0 ? index : 0, &params);
	/* Offsets are rebased so SSBO 0 starts at params.start_offset. */
	consts.end_offset = params.end_offset - params.start_offset;
	consts.fence_offset = params.fence_offset - params.start_offset;
	consts.result_stride = query->result_size;
	consts.pair_stride = params.pair_stride;
	consts.pair_count = params.pair_count;

	/* user_buffer points at consts, so later writes to consts are
	 * picked up by each set_constant_buffer call in the loop below. */
	constant_buffer.buffer_size = sizeof(consts);
	constant_buffer.user_buffer = &consts;

	ssbo[1].buffer = tmp_buffer;
	ssbo[1].buffer_offset = tmp_buffer_offset;
	ssbo[1].buffer_size = 16;

	/* Default destination is the scratch buffer; overridden with the
	 * caller's resource for the last buffer in the chain. */
	ssbo[2] = ssbo[1];

	sctx->b.bind_compute_state(&sctx->b, sctx->query_result_shader);

	/* One thread group; the shader itself walks the result records. */
	grid.block[0] = 1;
	grid.block[1] = 1;
	grid.block[2] = 1;
	grid.grid[0] = 1;
	grid.grid[1] = 1;
	grid.grid[2] = 1;

	/* consts.config flag bits (interpreted by the result CS):
	 *  bit 0 (1):   set when this is not the head buffer of the chain,
	 *  bit 1 (2):   set when an older buffer still follows,
	 *  bit 2 (4):   set for index < 0 (whole-query request),
	 *  bit 3 (8):   occlusion / SO-overflow predicate queries,
	 *  bit 4 (16):  timestamp-only pass (read just the last record),
	 *  bit 5 (32):  time-based query (timestamp / time-elapsed),
	 *  bit 6 (64):  64-bit destination, bit 7 (128): signed 32-bit,
	 *  bit 8 (256): SO-overflow flavor of the predicate. */
	consts.config = 0;
	if (index < 0)
		consts.config |= 4;
	if (query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
	    query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE)
		consts.config |= 8;
	else if (query->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE ||
		 query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE)
		consts.config |= 8 | 256;
	else if (query->b.type == PIPE_QUERY_TIMESTAMP ||
		 query->b.type == PIPE_QUERY_TIME_ELAPSED)
		consts.config |= 32;

	switch (result_type) {
	case PIPE_QUERY_TYPE_U64:
	case PIPE_QUERY_TYPE_I64:
		consts.config |= 64;
		break;
	case PIPE_QUERY_TYPE_I32:
		consts.config |= 128;
		break;
	case PIPE_QUERY_TYPE_U32:
		break;
	}

	/* Make CP writes (query results/fences) visible to the CS reads. */
	sctx->flags |= sctx->screen->barrier_flags.cp_to_L2;

	/* Walk the buffer chain from newest to oldest, one dispatch each. */
	for (qbuf = &query->buffer; qbuf; qbuf = qbuf_prev) {
		if (query->b.type != PIPE_QUERY_TIMESTAMP) {
			qbuf_prev = qbuf->previous;
			consts.result_count = qbuf->results_end / query->result_size;
			consts.config &= ~3;
			if (qbuf != &query->buffer)
				consts.config |= 1;
			if (qbuf->previous)
				consts.config |= 2;
		} else {
			/* Only read the last timestamp. */
			qbuf_prev = NULL;
			consts.result_count = 0;
			consts.config |= 16;
			params.start_offset += qbuf->results_end - query->result_size;
		}

		sctx->b.set_constant_buffer(&sctx->b, PIPE_SHADER_COMPUTE, 0, &constant_buffer);

		ssbo[0].buffer = &qbuf->buf->b.b;
		ssbo[0].buffer_offset = params.start_offset;
		ssbo[0].buffer_size = qbuf->results_end - params.start_offset;

		/* Oldest buffer: write the final result to the caller's
		 * destination instead of the scratch buffer. */
		if (!qbuf->previous) {
			ssbo[2].buffer = resource;
			ssbo[2].buffer_offset = offset;
			ssbo[2].buffer_size = 8;

			si_resource(resource)->TC_L2_dirty = true;
		}

		sctx->b.set_shader_buffers(&sctx->b, PIPE_SHADER_COMPUTE, 0, 3, ssbo,
					   1 << 2);

		if (wait && qbuf == &query->buffer) {
			uint64_t va;

			/* Wait for result availability. Wait only for readiness
			 * of the last entry, since the fence writes should be
			 * serialized in the CP.
			 */
			va = qbuf->buf->gpu_address + qbuf->results_end - query->result_size;
			va += params.fence_offset;

			si_cp_wait_mem(sctx, sctx->gfx_cs, va, 0x80000000,
				       0x80000000, WAIT_REG_MEM_EQUAL);
		}

		sctx->b.launch_grid(&sctx->b, &grid);
		/* Serialize dispatches: each pass reads the previous pass's
		 * partial result from the scratch buffer. */
		sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
	}

	si_restore_qbo_state(sctx, &saved_state);
	pipe_resource_reference(&tmp_buffer, NULL);
}
1598b8e80941Smrg
/* pipe_context::render_condition hook: latch the conditional-rendering
 * query/mode on the context and mark the render_cond atom dirty so the
 * SET_PREDICATION packet is (re)emitted.  Passing query == NULL disables
 * conditional rendering. */
static void si_render_condition(struct pipe_context *ctx,
				struct pipe_query *query,
				boolean condition,
				enum pipe_render_cond_flag mode)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_query_hw *squery = (struct si_query_hw *)query;
	struct si_atom *atom = &sctx->atoms.s.render_cond;

	if (query) {
		bool needs_workaround = false;

		/* There was a firmware regression in VI which causes successive
		 * SET_PREDICATION packets to give the wrong answer for
		 * non-inverted stream overflow predication.
		 */
		if (((sctx->chip_class == VI && sctx->screen->info.pfp_fw_feature < 49) ||
		     (sctx->chip_class == GFX9 && sctx->screen->info.pfp_fw_feature < 38)) &&
		    !condition &&
		    (squery->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE ||
		     (squery->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE &&
		      (squery->buffer.previous ||
		       squery->buffer.results_end > squery->result_size)))) {
			needs_workaround = true;
		}

		/* Workaround: pre-reduce the query on the GPU into a small
		 * buffer and predicate on that single value instead of
		 * letting the CP walk multiple result entries. */
		if (needs_workaround && !squery->workaround_buf) {
			bool old_force_off = sctx->render_cond_force_off;
			/* The reduction below must not itself be predicated. */
			sctx->render_cond_force_off = true;

			u_suballocator_alloc(
				sctx->allocator_zeroed_memory, 8, 8,
				&squery->workaround_offset,
				(struct pipe_resource **)&squery->workaround_buf);

			/* Reset to NULL to avoid a redundant SET_PREDICATION
			 * from launching the compute grid.
			 */
			sctx->render_cond = NULL;

			ctx->get_query_result_resource(
				ctx, query, true, PIPE_QUERY_TYPE_U64, 0,
				&squery->workaround_buf->b.b, squery->workaround_offset);

			/* Settings this in the render cond atom is too late,
			 * so set it here. */
			sctx->flags |= sctx->screen->barrier_flags.L2_to_cp |
				       SI_CONTEXT_FLUSH_FOR_RENDER_COND;

			sctx->render_cond_force_off = old_force_off;
		}
	}

	sctx->render_cond = query;
	sctx->render_cond_invert = condition;
	sctx->render_cond_mode = mode;

	/* Emit (or clear) predication state on the next flush of atoms. */
	si_set_atom_dirty(sctx, atom, query != NULL);
}
1658b8e80941Smrg
1659b8e80941Smrgvoid si_suspend_queries(struct si_context *sctx)
1660b8e80941Smrg{
1661b8e80941Smrg	struct si_query *query;
1662b8e80941Smrg
1663b8e80941Smrg	LIST_FOR_EACH_ENTRY(query, &sctx->active_queries, active_list)
1664b8e80941Smrg		query->ops->suspend(sctx, query);
1665b8e80941Smrg}
1666b8e80941Smrg
1667b8e80941Smrgvoid si_resume_queries(struct si_context *sctx)
1668b8e80941Smrg{
1669b8e80941Smrg	struct si_query *query;
1670b8e80941Smrg
1671b8e80941Smrg	/* Check CS space here. Resuming must not be interrupted by flushes. */
1672b8e80941Smrg	si_need_gfx_cs_space(sctx);
1673b8e80941Smrg
1674b8e80941Smrg	LIST_FOR_EACH_ENTRY(query, &sctx->active_queries, active_list)
1675b8e80941Smrg		query->ops->resume(sctx, query);
1676b8e80941Smrg}
1677b8e80941Smrg
/* Build one pipe_driver_query_info entry; the SI_QUERY_* and
 * PIPE_DRIVER_QUERY_* prefixes are token-pasted onto the short names
 * used in si_driver_query_list below. */
#define XFULL(name_, query_type_, type_, result_type_, group_id_) \
	{ \
		.name = name_, \
		.query_type = SI_QUERY_##query_type_, \
		.type = PIPE_DRIVER_QUERY_TYPE_##type_, \
		.result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
		.group_id = group_id_ \
	}

/* Ungrouped entry: ~0 marks "no group" (see si_get_driver_query_info). */
#define X(name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, ~(unsigned)0)

/* Entry belonging to software query group SI_QUERY_GROUP_<group_>. */
#define XG(group_, name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, SI_QUERY_GROUP_##group_)
1692b8e80941Smrg
/* Driver-specific (non-pipe) query descriptions exposed through
 * pipe_screen::get_driver_query_info (used by e.g. HUD and GPUPerfStudio).
 *
 * NOTE: the trailing register-backed sections (GRBM, SRBM_STATUS2,
 * CP_STAT) are sliced off by si_get_num_queries() depending on kernel
 * capabilities — keep their order and counts in sync with that function. */
static struct pipe_driver_query_info si_driver_query_list[] = {
	X("num-compilations",		NUM_COMPILATIONS,	UINT64, CUMULATIVE),
	X("num-shaders-created",	NUM_SHADERS_CREATED,	UINT64, CUMULATIVE),
	X("num-shader-cache-hits",	NUM_SHADER_CACHE_HITS,	UINT64, CUMULATIVE),
	X("draw-calls",			DRAW_CALLS,		UINT64, AVERAGE),
	X("decompress-calls",		DECOMPRESS_CALLS,	UINT64, AVERAGE),
	X("MRT-draw-calls",		MRT_DRAW_CALLS,		UINT64, AVERAGE),
	X("prim-restart-calls",		PRIM_RESTART_CALLS,	UINT64, AVERAGE),
	X("spill-draw-calls",		SPILL_DRAW_CALLS,	UINT64, AVERAGE),
	X("compute-calls",		COMPUTE_CALLS,		UINT64, AVERAGE),
	X("spill-compute-calls",	SPILL_COMPUTE_CALLS,	UINT64, AVERAGE),
	X("dma-calls",			DMA_CALLS,		UINT64, AVERAGE),
	X("cp-dma-calls",		CP_DMA_CALLS,		UINT64, AVERAGE),
	X("num-vs-flushes",		NUM_VS_FLUSHES,		UINT64, AVERAGE),
	X("num-ps-flushes",		NUM_PS_FLUSHES,		UINT64, AVERAGE),
	X("num-cs-flushes",		NUM_CS_FLUSHES,		UINT64, AVERAGE),
	X("num-CB-cache-flushes",	NUM_CB_CACHE_FLUSHES,	UINT64, AVERAGE),
	X("num-DB-cache-flushes",	NUM_DB_CACHE_FLUSHES,	UINT64, AVERAGE),
	X("num-L2-invalidates",		NUM_L2_INVALIDATES,	UINT64, AVERAGE),
	X("num-L2-writebacks",		NUM_L2_WRITEBACKS,	UINT64, AVERAGE),
	X("num-resident-handles",	NUM_RESIDENT_HANDLES,	UINT64, AVERAGE),
	X("tc-offloaded-slots",		TC_OFFLOADED_SLOTS,     UINT64, AVERAGE),
	X("tc-direct-slots",		TC_DIRECT_SLOTS,	UINT64, AVERAGE),
	X("tc-num-syncs",		TC_NUM_SYNCS,		UINT64, AVERAGE),
	X("CS-thread-busy",		CS_THREAD_BUSY,		UINT64, AVERAGE),
	X("gallium-thread-busy",	GALLIUM_THREAD_BUSY,	UINT64, AVERAGE),
	X("requested-VRAM",		REQUESTED_VRAM,		BYTES, AVERAGE),
	X("requested-GTT",		REQUESTED_GTT,		BYTES, AVERAGE),
	X("mapped-VRAM",		MAPPED_VRAM,		BYTES, AVERAGE),
	X("mapped-GTT",			MAPPED_GTT,		BYTES, AVERAGE),
	X("buffer-wait-time",		BUFFER_WAIT_TIME,	MICROSECONDS, CUMULATIVE),
	X("num-mapped-buffers",		NUM_MAPPED_BUFFERS,	UINT64, AVERAGE),
	X("num-GFX-IBs",		NUM_GFX_IBS,		UINT64, AVERAGE),
	X("num-SDMA-IBs",		NUM_SDMA_IBS,		UINT64, AVERAGE),
	X("GFX-BO-list-size",		GFX_BO_LIST_SIZE,	UINT64, AVERAGE),
	X("GFX-IB-size",		GFX_IB_SIZE,		UINT64, AVERAGE),
	X("num-bytes-moved",		NUM_BYTES_MOVED,	BYTES, CUMULATIVE),
	X("num-evictions",		NUM_EVICTIONS,		UINT64, CUMULATIVE),
	X("VRAM-CPU-page-faults",	NUM_VRAM_CPU_PAGE_FAULTS, UINT64, CUMULATIVE),
	X("VRAM-usage",			VRAM_USAGE,		BYTES, AVERAGE),
	X("VRAM-vis-usage",		VRAM_VIS_USAGE,		BYTES, AVERAGE),
	X("GTT-usage",			GTT_USAGE,		BYTES, AVERAGE),
	X("back-buffer-ps-draw-ratio",	BACK_BUFFER_PS_DRAW_RATIO, UINT64, AVERAGE),

	/* GPIN queries are for the benefit of old versions of GPUPerfStudio,
	 * which use it as a fallback path to detect the GPU type.
	 *
	 * Note: The names of these queries are significant for GPUPerfStudio
	 * (and possibly their order as well). */
	XG(GPIN, "GPIN_000",		GPIN_ASIC_ID,		UINT, AVERAGE),
	XG(GPIN, "GPIN_001",		GPIN_NUM_SIMD,		UINT, AVERAGE),
	XG(GPIN, "GPIN_002",		GPIN_NUM_RB,		UINT, AVERAGE),
	XG(GPIN, "GPIN_003",		GPIN_NUM_SPI,		UINT, AVERAGE),
	XG(GPIN, "GPIN_004",		GPIN_NUM_SE,		UINT, AVERAGE),

	X("temperature",		GPU_TEMPERATURE,	UINT64, AVERAGE),
	X("shader-clock",		CURRENT_GPU_SCLK,	HZ, AVERAGE),
	X("memory-clock",		CURRENT_GPU_MCLK,	HZ, AVERAGE),

	/* The following queries must be at the end of the list because their
	 * availability is adjusted dynamically based on the DRM version. */
	/* 14 GRBM queries (sliced off when registers can't be read). */
	X("GPU-load",			GPU_LOAD,		UINT64, AVERAGE),
	X("GPU-shaders-busy",		GPU_SHADERS_BUSY,	UINT64, AVERAGE),
	X("GPU-ta-busy",		GPU_TA_BUSY,		UINT64, AVERAGE),
	X("GPU-gds-busy",		GPU_GDS_BUSY,		UINT64, AVERAGE),
	X("GPU-vgt-busy",		GPU_VGT_BUSY,		UINT64, AVERAGE),
	X("GPU-ia-busy",		GPU_IA_BUSY,		UINT64, AVERAGE),
	X("GPU-sx-busy",		GPU_SX_BUSY,		UINT64, AVERAGE),
	X("GPU-wd-busy",		GPU_WD_BUSY,		UINT64, AVERAGE),
	X("GPU-bci-busy",		GPU_BCI_BUSY,		UINT64, AVERAGE),
	X("GPU-sc-busy",		GPU_SC_BUSY,		UINT64, AVERAGE),
	X("GPU-pa-busy",		GPU_PA_BUSY,		UINT64, AVERAGE),
	X("GPU-db-busy",		GPU_DB_BUSY,		UINT64, AVERAGE),
	X("GPU-cp-busy",		GPU_CP_BUSY,		UINT64, AVERAGE),
	X("GPU-cb-busy",		GPU_CB_BUSY,		UINT64, AVERAGE),

	/* SRBM_STATUS2 */
	X("GPU-sdma-busy",		GPU_SDMA_BUSY,		UINT64, AVERAGE),

	/* CP_STAT */
	X("GPU-pfp-busy",		GPU_PFP_BUSY,		UINT64, AVERAGE),
	X("GPU-meq-busy",		GPU_MEQ_BUSY,		UINT64, AVERAGE),
	X("GPU-me-busy",		GPU_ME_BUSY,		UINT64, AVERAGE),
	X("GPU-surf-sync-busy",		GPU_SURF_SYNC_BUSY,	UINT64, AVERAGE),
	X("GPU-cp-dma-busy",		GPU_CP_DMA_BUSY,	UINT64, AVERAGE),
	X("GPU-scratch-ram-busy",	GPU_SCRATCH_RAM_BUSY,	UINT64, AVERAGE),
};

#undef X
#undef XG
#undef XFULL
1784b8e80941Smrg
1785b8e80941Smrgstatic unsigned si_get_num_queries(struct si_screen *sscreen)
1786b8e80941Smrg{
1787b8e80941Smrg	/* amdgpu */
1788b8e80941Smrg	if (sscreen->info.drm_major == 3) {
1789b8e80941Smrg		if (sscreen->info.chip_class >= VI)
1790b8e80941Smrg			return ARRAY_SIZE(si_driver_query_list);
1791b8e80941Smrg		else
1792b8e80941Smrg			return ARRAY_SIZE(si_driver_query_list) - 7;
1793b8e80941Smrg	}
1794b8e80941Smrg
1795b8e80941Smrg	/* radeon */
1796b8e80941Smrg	if (sscreen->info.has_read_registers_query) {
1797b8e80941Smrg		if (sscreen->info.chip_class == CIK)
1798b8e80941Smrg			return ARRAY_SIZE(si_driver_query_list) - 6;
1799b8e80941Smrg		else
1800b8e80941Smrg			return ARRAY_SIZE(si_driver_query_list) - 7;
1801b8e80941Smrg	}
1802b8e80941Smrg
1803b8e80941Smrg	return ARRAY_SIZE(si_driver_query_list) - 21;
1804b8e80941Smrg}
1805b8e80941Smrg
1806b8e80941Smrgstatic int si_get_driver_query_info(struct pipe_screen *screen,
1807b8e80941Smrg				    unsigned index,
1808b8e80941Smrg				    struct pipe_driver_query_info *info)
1809b8e80941Smrg{
1810b8e80941Smrg	struct si_screen *sscreen = (struct si_screen*)screen;
1811b8e80941Smrg	unsigned num_queries = si_get_num_queries(sscreen);
1812b8e80941Smrg
1813b8e80941Smrg	if (!info) {
1814b8e80941Smrg		unsigned num_perfcounters =
1815b8e80941Smrg			si_get_perfcounter_info(sscreen, 0, NULL);
1816b8e80941Smrg
1817b8e80941Smrg		return num_queries + num_perfcounters;
1818b8e80941Smrg	}
1819b8e80941Smrg
1820b8e80941Smrg	if (index >= num_queries)
1821b8e80941Smrg		return si_get_perfcounter_info(sscreen, index - num_queries, info);
1822b8e80941Smrg
1823b8e80941Smrg	*info = si_driver_query_list[index];
1824b8e80941Smrg
1825b8e80941Smrg	switch (info->query_type) {
1826b8e80941Smrg	case SI_QUERY_REQUESTED_VRAM:
1827b8e80941Smrg	case SI_QUERY_VRAM_USAGE:
1828b8e80941Smrg	case SI_QUERY_MAPPED_VRAM:
1829b8e80941Smrg		info->max_value.u64 = sscreen->info.vram_size;
1830b8e80941Smrg		break;
1831b8e80941Smrg	case SI_QUERY_REQUESTED_GTT:
1832b8e80941Smrg	case SI_QUERY_GTT_USAGE:
1833b8e80941Smrg	case SI_QUERY_MAPPED_GTT:
1834b8e80941Smrg		info->max_value.u64 = sscreen->info.gart_size;
1835b8e80941Smrg		break;
1836b8e80941Smrg	case SI_QUERY_GPU_TEMPERATURE:
1837b8e80941Smrg		info->max_value.u64 = 125;
1838b8e80941Smrg		break;
1839b8e80941Smrg	case SI_QUERY_VRAM_VIS_USAGE:
1840b8e80941Smrg		info->max_value.u64 = sscreen->info.vram_vis_size;
1841b8e80941Smrg		break;
1842b8e80941Smrg	}
1843b8e80941Smrg
1844b8e80941Smrg	if (info->group_id != ~(unsigned)0 && sscreen->perfcounters)
1845b8e80941Smrg		info->group_id += sscreen->perfcounters->num_groups;
1846b8e80941Smrg
1847b8e80941Smrg	return 1;
1848b8e80941Smrg}
1849b8e80941Smrg
1850b8e80941Smrg/* Note: Unfortunately, GPUPerfStudio hardcodes the order of hardware
1851b8e80941Smrg * performance counter groups, so be careful when changing this and related
1852b8e80941Smrg * functions.
1853b8e80941Smrg */
1854b8e80941Smrgstatic int si_get_driver_query_group_info(struct pipe_screen *screen,
1855b8e80941Smrg					  unsigned index,
1856b8e80941Smrg					  struct pipe_driver_query_group_info *info)
1857b8e80941Smrg{
1858b8e80941Smrg	struct si_screen *sscreen = (struct si_screen *)screen;
1859b8e80941Smrg	unsigned num_pc_groups = 0;
1860b8e80941Smrg
1861b8e80941Smrg	if (sscreen->perfcounters)
1862b8e80941Smrg		num_pc_groups = sscreen->perfcounters->num_groups;
1863b8e80941Smrg
1864b8e80941Smrg	if (!info)
1865b8e80941Smrg		return num_pc_groups + SI_NUM_SW_QUERY_GROUPS;
1866b8e80941Smrg
1867b8e80941Smrg	if (index < num_pc_groups)
1868b8e80941Smrg		return si_get_perfcounter_group_info(sscreen, index, info);
1869b8e80941Smrg
1870b8e80941Smrg	index -= num_pc_groups;
1871b8e80941Smrg	if (index >= SI_NUM_SW_QUERY_GROUPS)
1872b8e80941Smrg		return 0;
1873b8e80941Smrg
1874b8e80941Smrg	info->name = "GPIN";
1875b8e80941Smrg	info->max_active_queries = 5;
1876b8e80941Smrg	info->num_queries = 5;
1877b8e80941Smrg	return 1;
1878b8e80941Smrg}
1879b8e80941Smrg
1880b8e80941Smrgvoid si_init_query_functions(struct si_context *sctx)
1881b8e80941Smrg{
1882b8e80941Smrg	sctx->b.create_query = si_create_query;
1883b8e80941Smrg	sctx->b.create_batch_query = si_create_batch_query;
1884b8e80941Smrg	sctx->b.destroy_query = si_destroy_query;
1885b8e80941Smrg	sctx->b.begin_query = si_begin_query;
1886b8e80941Smrg	sctx->b.end_query = si_end_query;
1887b8e80941Smrg	sctx->b.get_query_result = si_get_query_result;
1888b8e80941Smrg	sctx->b.get_query_result_resource = si_get_query_result_resource;
1889b8e80941Smrg	sctx->atoms.s.render_cond.emit = si_emit_query_predication;
1890b8e80941Smrg
1891b8e80941Smrg	if (((struct si_screen*)sctx->b.screen)->info.num_render_backends > 0)
1892b8e80941Smrg	    sctx->b.render_condition = si_render_condition;
1893b8e80941Smrg
1894b8e80941Smrg	LIST_INITHEAD(&sctx->active_queries);
1895b8e80941Smrg}
1896b8e80941Smrg
1897b8e80941Smrgvoid si_init_screen_query_functions(struct si_screen *sscreen)
1898b8e80941Smrg{
1899b8e80941Smrg	sscreen->b.get_driver_query_info = si_get_driver_query_info;
1900b8e80941Smrg	sscreen->b.get_driver_query_group_info = si_get_driver_query_group_info;
1901b8e80941Smrg}
1902