r600_query.c revision b8e80941
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2014 Marek Olšák <marek.olsak@amd.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "r600_query.h"
#include "r600_pipe.h"
#include "r600_cs.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "util/os_time.h"
#include "tgsi/tgsi_text.h"

#define R600_MAX_STREAMS 4

struct r600_hw_query_params {
	unsigned start_offset;
	unsigned end_offset;
	unsigned fence_offset;
	unsigned pair_stride;
	unsigned pair_count;
};

/* Queries without buffer handling or suspend/resume. */
struct r600_query_sw {
	struct r600_query b;

	uint64_t begin_result;
	uint64_t end_result;

	uint64_t begin_time;
	uint64_t end_time;

	/* Fence for GPU_FINISHED. */
	struct pipe_fence_handle *fence;
};
static void r600_query_sw_destroy(struct r600_common_screen *rscreen,
				  struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	rscreen->b.fence_reference(&rscreen->b, &query->fence, NULL);
	FREE(query);
}

static enum radeon_value_id winsys_id_from_type(unsigned type)
{
	switch (type) {
	case R600_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
	case R600_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
	case R600_QUERY_MAPPED_VRAM: return RADEON_MAPPED_VRAM;
	case R600_QUERY_MAPPED_GTT: return RADEON_MAPPED_GTT;
	case R600_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
	case R600_QUERY_NUM_MAPPED_BUFFERS: return RADEON_NUM_MAPPED_BUFFERS;
	case R600_QUERY_NUM_GFX_IBS: return RADEON_NUM_GFX_IBS;
	case R600_QUERY_NUM_SDMA_IBS: return RADEON_NUM_SDMA_IBS;
	case R600_QUERY_GFX_BO_LIST_SIZE: return RADEON_GFX_BO_LIST_COUNTER;
	case R600_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
	case R600_QUERY_NUM_EVICTIONS: return RADEON_NUM_EVICTIONS;
	case R600_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: return RADEON_NUM_VRAM_CPU_PAGE_FAULTS;
	case R600_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
	case R600_QUERY_VRAM_VIS_USAGE: return RADEON_VRAM_VIS_USAGE;
	case R600_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
	case R600_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
	case R600_QUERY_CURRENT_GPU_SCLK: return RADEON_CURRENT_SCLK;
	case R600_QUERY_CURRENT_GPU_MCLK: return RADEON_CURRENT_MCLK;
	case R600_QUERY_CS_THREAD_BUSY: return RADEON_CS_THREAD_TIME;
	default: unreachable("query type does not correspond to winsys id");
	}
}
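
/* Software queries snapshot a CPU-side counter in begin() and again in
 * end(); get_result() then reports end_result - begin_result.  Rate-style
 * queries (*_THREAD_BUSY, GFX_BO_LIST_SIZE) also snapshot a timestamp or
 * IB count into begin_time/end_time so the delta can be normalized.
 */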
static bool r600_query_sw_begin(struct r600_common_context *rctx,
				struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;
	enum radeon_value_id ws_id;

	switch(query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
	case PIPE_QUERY_GPU_FINISHED:
		break;
	case R600_QUERY_DRAW_CALLS:
		query->begin_result = rctx->num_draw_calls;
		break;
	case R600_QUERY_DECOMPRESS_CALLS:
		query->begin_result = rctx->num_decompress_calls;
		break;
	case R600_QUERY_MRT_DRAW_CALLS:
		query->begin_result = rctx->num_mrt_draw_calls;
		break;
	case R600_QUERY_PRIM_RESTART_CALLS:
		query->begin_result = rctx->num_prim_restart_calls;
		break;
	case R600_QUERY_SPILL_DRAW_CALLS:
		query->begin_result = rctx->num_spill_draw_calls;
		break;
	case R600_QUERY_COMPUTE_CALLS:
		query->begin_result = rctx->num_compute_calls;
		break;
	case R600_QUERY_SPILL_COMPUTE_CALLS:
		query->begin_result = rctx->num_spill_compute_calls;
		break;
	case R600_QUERY_DMA_CALLS:
		query->begin_result = rctx->num_dma_calls;
		break;
	case R600_QUERY_CP_DMA_CALLS:
		query->begin_result = rctx->num_cp_dma_calls;
		break;
	case R600_QUERY_NUM_VS_FLUSHES:
		query->begin_result = rctx->num_vs_flushes;
		break;
	case R600_QUERY_NUM_PS_FLUSHES:
		query->begin_result = rctx->num_ps_flushes;
		break;
	case R600_QUERY_NUM_CS_FLUSHES:
		query->begin_result = rctx->num_cs_flushes;
		break;
	case R600_QUERY_NUM_CB_CACHE_FLUSHES:
		query->begin_result = rctx->num_cb_cache_flushes;
		break;
	case R600_QUERY_NUM_DB_CACHE_FLUSHES:
		query->begin_result = rctx->num_db_cache_flushes;
		break;
	case R600_QUERY_NUM_RESIDENT_HANDLES:
		query->begin_result = rctx->num_resident_handles;
		break;
	case R600_QUERY_TC_OFFLOADED_SLOTS:
		query->begin_result = rctx->tc ? rctx->tc->num_offloaded_slots : 0;
		break;
	case R600_QUERY_TC_DIRECT_SLOTS:
		query->begin_result = rctx->tc ? rctx->tc->num_direct_slots : 0;
		break;
	case R600_QUERY_TC_NUM_SYNCS:
		query->begin_result = rctx->tc ? rctx->tc->num_syncs : 0;
		break;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_MAPPED_VRAM:
	case R600_QUERY_MAPPED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_VRAM_VIS_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_NUM_MAPPED_BUFFERS:
		query->begin_result = 0;
		break;
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_GFX_IBS:
	case R600_QUERY_NUM_SDMA_IBS:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_NUM_EVICTIONS:
	case R600_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
		break;
	}
	case R600_QUERY_GFX_BO_LIST_SIZE:
		ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
		query->begin_time = rctx->ws->query_value(rctx->ws,
							  RADEON_NUM_GFX_IBS);
		break;
	case R600_QUERY_CS_THREAD_BUSY:
		ws_id = winsys_id_from_type(query->b.type);
		query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
		query->begin_time = os_time_get_nano();
		break;
	case R600_QUERY_GALLIUM_THREAD_BUSY:
		query->begin_result =
			rctx->tc ? util_queue_get_thread_time_nano(&rctx->tc->queue, 0) : 0;
		query->begin_time = os_time_get_nano();
		break;
	case R600_QUERY_GPU_LOAD:
	case R600_QUERY_GPU_SHADERS_BUSY:
	case R600_QUERY_GPU_TA_BUSY:
	case R600_QUERY_GPU_GDS_BUSY:
	case R600_QUERY_GPU_VGT_BUSY:
	case R600_QUERY_GPU_IA_BUSY:
	case R600_QUERY_GPU_SX_BUSY:
	case R600_QUERY_GPU_WD_BUSY:
	case R600_QUERY_GPU_BCI_BUSY:
	case R600_QUERY_GPU_SC_BUSY:
	case R600_QUERY_GPU_PA_BUSY:
	case R600_QUERY_GPU_DB_BUSY:
	case R600_QUERY_GPU_CP_BUSY:
	case R600_QUERY_GPU_CB_BUSY:
	case R600_QUERY_GPU_SDMA_BUSY:
	case R600_QUERY_GPU_PFP_BUSY:
	case R600_QUERY_GPU_MEQ_BUSY:
	case R600_QUERY_GPU_ME_BUSY:
	case R600_QUERY_GPU_SURF_SYNC_BUSY:
	case R600_QUERY_GPU_CP_DMA_BUSY:
	case R600_QUERY_GPU_SCRATCH_RAM_BUSY:
		query->begin_result = r600_begin_counter(rctx->screen,
							 query->b.type);
		break;
	case R600_QUERY_NUM_COMPILATIONS:
		query->begin_result = p_atomic_read(&rctx->screen->num_compilations);
		break;
	case R600_QUERY_NUM_SHADERS_CREATED:
		query->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
		break;
	case R600_QUERY_NUM_SHADER_CACHE_HITS:
		query->begin_result =
			p_atomic_read(&rctx->screen->num_shader_cache_hits);
		break;
	case R600_QUERY_GPIN_ASIC_ID:
	case R600_QUERY_GPIN_NUM_SIMD:
	case R600_QUERY_GPIN_NUM_RB:
	case R600_QUERY_GPIN_NUM_SPI:
	case R600_QUERY_GPIN_NUM_SE:
		break;
	default:
		unreachable("r600_query_sw_begin: bad query type");
	}

	return true;
}
static bool r600_query_sw_end(struct r600_common_context *rctx,
			      struct r600_query *rquery)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;
	enum radeon_value_id ws_id;

	switch(query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		break;
	case PIPE_QUERY_GPU_FINISHED:
		rctx->b.flush(&rctx->b, &query->fence, PIPE_FLUSH_DEFERRED);
		break;
	case R600_QUERY_DRAW_CALLS:
		query->end_result = rctx->num_draw_calls;
		break;
	case R600_QUERY_DECOMPRESS_CALLS:
		query->end_result = rctx->num_decompress_calls;
		break;
	case R600_QUERY_MRT_DRAW_CALLS:
		query->end_result = rctx->num_mrt_draw_calls;
		break;
	case R600_QUERY_PRIM_RESTART_CALLS:
		query->end_result = rctx->num_prim_restart_calls;
		break;
	case R600_QUERY_SPILL_DRAW_CALLS:
		query->end_result = rctx->num_spill_draw_calls;
		break;
	case R600_QUERY_COMPUTE_CALLS:
		query->end_result = rctx->num_compute_calls;
		break;
	case R600_QUERY_SPILL_COMPUTE_CALLS:
		query->end_result = rctx->num_spill_compute_calls;
		break;
	case R600_QUERY_DMA_CALLS:
		query->end_result = rctx->num_dma_calls;
		break;
	case R600_QUERY_CP_DMA_CALLS:
		query->end_result = rctx->num_cp_dma_calls;
		break;
	case R600_QUERY_NUM_VS_FLUSHES:
		query->end_result = rctx->num_vs_flushes;
		break;
	case R600_QUERY_NUM_PS_FLUSHES:
		query->end_result = rctx->num_ps_flushes;
		break;
	case R600_QUERY_NUM_CS_FLUSHES:
		query->end_result = rctx->num_cs_flushes;
		break;
	case R600_QUERY_NUM_CB_CACHE_FLUSHES:
		query->end_result = rctx->num_cb_cache_flushes;
		break;
	case R600_QUERY_NUM_DB_CACHE_FLUSHES:
		query->end_result = rctx->num_db_cache_flushes;
		break;
	case R600_QUERY_NUM_RESIDENT_HANDLES:
		query->end_result = rctx->num_resident_handles;
		break;
	case R600_QUERY_TC_OFFLOADED_SLOTS:
		query->end_result = rctx->tc ? rctx->tc->num_offloaded_slots : 0;
		break;
	case R600_QUERY_TC_DIRECT_SLOTS:
		query->end_result = rctx->tc ? rctx->tc->num_direct_slots : 0;
		break;
	case R600_QUERY_TC_NUM_SYNCS:
		query->end_result = rctx->tc ? rctx->tc->num_syncs : 0;
		break;
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_MAPPED_VRAM:
	case R600_QUERY_MAPPED_GTT:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_VRAM_VIS_USAGE:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_GPU_TEMPERATURE:
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_NUM_MAPPED_BUFFERS:
	case R600_QUERY_NUM_GFX_IBS:
	case R600_QUERY_NUM_SDMA_IBS:
	case R600_QUERY_NUM_BYTES_MOVED:
	case R600_QUERY_NUM_EVICTIONS:
	case R600_QUERY_NUM_VRAM_CPU_PAGE_FAULTS: {
		enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
		query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
		break;
	}
	case R600_QUERY_GFX_BO_LIST_SIZE:
		ws_id = winsys_id_from_type(query->b.type);
		query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
		query->end_time = rctx->ws->query_value(rctx->ws,
							RADEON_NUM_GFX_IBS);
		break;
	case R600_QUERY_CS_THREAD_BUSY:
		ws_id = winsys_id_from_type(query->b.type);
		query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
		query->end_time = os_time_get_nano();
		break;
	case R600_QUERY_GALLIUM_THREAD_BUSY:
		query->end_result =
			rctx->tc ? util_queue_get_thread_time_nano(&rctx->tc->queue, 0) : 0;
		query->end_time = os_time_get_nano();
		break;
	case R600_QUERY_GPU_LOAD:
	case R600_QUERY_GPU_SHADERS_BUSY:
	case R600_QUERY_GPU_TA_BUSY:
	case R600_QUERY_GPU_GDS_BUSY:
	case R600_QUERY_GPU_VGT_BUSY:
	case R600_QUERY_GPU_IA_BUSY:
	case R600_QUERY_GPU_SX_BUSY:
	case R600_QUERY_GPU_WD_BUSY:
	case R600_QUERY_GPU_BCI_BUSY:
	case R600_QUERY_GPU_SC_BUSY:
	case R600_QUERY_GPU_PA_BUSY:
	case R600_QUERY_GPU_DB_BUSY:
	case R600_QUERY_GPU_CP_BUSY:
	case R600_QUERY_GPU_CB_BUSY:
	case R600_QUERY_GPU_SDMA_BUSY:
	case R600_QUERY_GPU_PFP_BUSY:
	case R600_QUERY_GPU_MEQ_BUSY:
	case R600_QUERY_GPU_ME_BUSY:
	case R600_QUERY_GPU_SURF_SYNC_BUSY:
	case R600_QUERY_GPU_CP_DMA_BUSY:
	case R600_QUERY_GPU_SCRATCH_RAM_BUSY:
		query->end_result = r600_end_counter(rctx->screen,
						     query->b.type,
						     query->begin_result);
		query->begin_result = 0;
		break;
	case R600_QUERY_NUM_COMPILATIONS:
		query->end_result = p_atomic_read(&rctx->screen->num_compilations);
		break;
	case R600_QUERY_NUM_SHADERS_CREATED:
		query->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
		break;
	case R600_QUERY_NUM_SHADER_CACHE_HITS:
		query->end_result =
			p_atomic_read(&rctx->screen->num_shader_cache_hits);
		break;
	case R600_QUERY_GPIN_ASIC_ID:
	case R600_QUERY_GPIN_NUM_SIMD:
	case R600_QUERY_GPIN_NUM_RB:
	case R600_QUERY_GPIN_NUM_SPI:
	case R600_QUERY_GPIN_NUM_SE:
		break;
	default:
		unreachable("r600_query_sw_end: bad query type");
	}

	return true;
}
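
/* Results below are computed on the CPU from the begin/end snapshots.
 * Unit handling (winsys units assumed): *_BUSY results are percentages
 * (delta * 100 / elapsed ns), GFX_BO_LIST_SIZE is an average list size
 * per gfx IB, BUFFER_WAIT_TIME is scaled ns -> us, GPU_TEMPERATURE
 * millidegrees -> degrees C, and SCLK/MCLK are scaled MHz -> Hz.
 */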
static bool r600_query_sw_get_result(struct r600_common_context *rctx,
				     struct r600_query *rquery,
				     bool wait,
				     union pipe_query_result *result)
{
	struct r600_query_sw *query = (struct r600_query_sw *)rquery;

	switch (query->b.type) {
	case PIPE_QUERY_TIMESTAMP_DISJOINT:
		/* Convert from cycles per millisecond to cycles per second (Hz). */
		result->timestamp_disjoint.frequency =
			(uint64_t)rctx->screen->info.clock_crystal_freq * 1000;
		result->timestamp_disjoint.disjoint = false;
		return true;
	case PIPE_QUERY_GPU_FINISHED: {
		struct pipe_screen *screen = rctx->b.screen;
		struct pipe_context *ctx = rquery->b.flushed ? NULL : &rctx->b;

		result->b = screen->fence_finish(screen, ctx, query->fence,
						 wait ? PIPE_TIMEOUT_INFINITE : 0);
		return result->b;
	}

	case R600_QUERY_GFX_BO_LIST_SIZE:
		result->u64 = (query->end_result - query->begin_result) /
			      (query->end_time - query->begin_time);
		return true;
	case R600_QUERY_CS_THREAD_BUSY:
	case R600_QUERY_GALLIUM_THREAD_BUSY:
		result->u64 = (query->end_result - query->begin_result) * 100 /
			      (query->end_time - query->begin_time);
		return true;
	case R600_QUERY_GPIN_ASIC_ID:
		result->u32 = 0;
		return true;
	case R600_QUERY_GPIN_NUM_SIMD:
		result->u32 = rctx->screen->info.num_good_compute_units;
		return true;
	case R600_QUERY_GPIN_NUM_RB:
		result->u32 = rctx->screen->info.num_render_backends;
		return true;
	case R600_QUERY_GPIN_NUM_SPI:
		result->u32 = 1; /* all supported chips have one SPI per SE */
		return true;
	case R600_QUERY_GPIN_NUM_SE:
		result->u32 = rctx->screen->info.max_se;
		return true;
	}

	result->u64 = query->end_result - query->begin_result;

	switch (query->b.type) {
	case R600_QUERY_BUFFER_WAIT_TIME:
	case R600_QUERY_GPU_TEMPERATURE:
		result->u64 /= 1000;
		break;
	case R600_QUERY_CURRENT_GPU_SCLK:
	case R600_QUERY_CURRENT_GPU_MCLK:
		result->u64 *= 1000000;
		break;
	}

	return true;
}

static struct r600_query_ops sw_query_ops = {
	.destroy = r600_query_sw_destroy,
	.begin = r600_query_sw_begin,
	.end = r600_query_sw_end,
	.get_result = r600_query_sw_get_result,
	.get_result_resource = NULL
};

static struct pipe_query *r600_query_sw_create(unsigned query_type)
{
	struct r600_query_sw *query;

	query = CALLOC_STRUCT(r600_query_sw);
	if (!query)
		return NULL;

	query->b.type = query_type;
	query->b.ops = &sw_query_ops;

	return (struct pipe_query *)query;
}

void r600_query_hw_destroy(struct r600_common_screen *rscreen,
			   struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *prev = query->buffer.previous;

	/* Release all query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		r600_resource_reference(&qbuf->buf, NULL);
		FREE(qbuf);
	}

	r600_resource_reference(&query->buffer.buf, NULL);
	FREE(rquery);
}

static struct r600_resource *r600_new_query_buffer(struct r600_common_screen *rscreen,
						   struct r600_query_hw *query)
{
	unsigned buf_size = MAX2(query->result_size,
				 rscreen->info.min_alloc_size);

	/* Queries are normally read by the CPU after
	 * being written by the gpu, hence staging is probably a good
	 * usage pattern.
	 */
	struct r600_resource *buf = (struct r600_resource*)
		pipe_buffer_create(&rscreen->b, 0,
				   PIPE_USAGE_STAGING, buf_size);
	if (!buf)
		return NULL;

	if (!query->ops->prepare_buffer(rscreen, query, buf)) {
		r600_resource_reference(&buf, NULL);
		return NULL;
	}

	return buf;
}
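
/* Occlusion result slot layout, per render backend (RB): 16 bytes holding
 * { begin_lo, begin_hi, end_lo, end_hi }, i.e. one 64-bit ZPASS count pair.
 * The hardware sets the top bit of each 64-bit value when it writes it,
 * which is what r600_query_read_result() tests to detect completion.  RBs
 * that are disabled in enabled_rb_mask are never written, so prepare_buffer
 * pre-sets their top bits below to make those slots always look complete.
 */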
static bool r600_query_hw_prepare_buffer(struct r600_common_screen *rscreen,
					 struct r600_query_hw *query,
					 struct r600_resource *buffer)
{
	/* Callers ensure that the buffer is currently unused by the GPU. */
	uint32_t *results = rscreen->ws->buffer_map(buffer->buf, NULL,
						    PIPE_TRANSFER_WRITE |
						    PIPE_TRANSFER_UNSYNCHRONIZED);
	if (!results)
		return false;

	memset(results, 0, buffer->b.b.width0);

	if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
	    query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
		unsigned max_rbs = rscreen->info.num_render_backends;
		unsigned enabled_rb_mask = rscreen->info.enabled_rb_mask;
		unsigned num_results;
		unsigned i, j;

		/* Set top bits for unused backends. */
		num_results = buffer->b.b.width0 / query->result_size;
		for (j = 0; j < num_results; j++) {
			for (i = 0; i < max_rbs; i++) {
				if (!(enabled_rb_mask & (1<<i))) {
					results[(i * 4)+1] = 0x80000000;
					results[(i * 4)+3] = 0x80000000;
				}
			}
			results += 4 * max_rbs;
		}
	}

	return true;
}

static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
					      struct r600_query *rquery,
					      bool wait,
					      enum pipe_query_value_type result_type,
					      int index,
					      struct pipe_resource *resource,
					      unsigned offset);

static struct r600_query_ops query_hw_ops = {
	.destroy = r600_query_hw_destroy,
	.begin = r600_query_hw_begin,
	.end = r600_query_hw_end,
	.get_result = r600_query_hw_get_result,
	.get_result_resource = r600_query_hw_get_result_resource,
};

static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
					struct r600_query_hw *query,
					struct r600_resource *buffer,
					uint64_t va);
static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
				       struct r600_query_hw *query,
				       struct r600_resource *buffer,
				       uint64_t va);
static void r600_query_hw_add_result(struct r600_common_screen *rscreen,
				     struct r600_query_hw *, void *buffer,
				     union pipe_query_result *result);
static void r600_query_hw_clear_result(struct r600_query_hw *,
				       union pipe_query_result *);

static struct r600_query_hw_ops query_hw_default_hw_ops = {
	.prepare_buffer = r600_query_hw_prepare_buffer,
	.emit_start = r600_query_hw_do_emit_start,
	.emit_stop = r600_query_hw_do_emit_stop,
	.clear_result = r600_query_hw_clear_result,
	.add_result = r600_query_hw_add_result,
};

bool r600_query_hw_init(struct r600_common_screen *rscreen,
			struct r600_query_hw *query)
{
	query->buffer.buf = r600_new_query_buffer(rscreen, query);
	if (!query->buffer.buf)
		return false;

	return true;
}
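
/* result_size below is the size of one begin/end result pair in the query
 * buffer (plus room for the completion fence and alignment where noted);
 * num_cs_dw_begin/end are the command-stream dwords that emit_start/stop
 * need, used to reserve CS space up front so a query is never split
 * across a flush.
 */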
static struct pipe_query *r600_query_hw_create(struct r600_common_screen *rscreen,
					       unsigned query_type,
					       unsigned index)
{
	struct r600_query_hw *query = CALLOC_STRUCT(r600_query_hw);
	if (!query)
		return NULL;

	query->b.type = query_type;
	query->b.ops = &query_hw_ops;
	query->ops = &query_hw_default_hw_ops;

	switch (query_type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
		query->result_size = 16 * rscreen->info.num_render_backends;
		query->result_size += 16; /* for the fence + alignment */
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rscreen);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		query->result_size = 24;
		query->num_cs_dw_begin = 8;
		query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rscreen);
		break;
	case PIPE_QUERY_TIMESTAMP:
		query->result_size = 16;
		query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rscreen);
		query->flags = R600_QUERY_HW_FLAG_NO_START;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
		query->result_size = 32;
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6;
		query->stream = index;
		break;
	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
		/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
		query->result_size = 32 * R600_MAX_STREAMS;
		query->num_cs_dw_begin = 6 * R600_MAX_STREAMS;
		query->num_cs_dw_end = 6 * R600_MAX_STREAMS;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		/* 11 values on EG, 8 on R600. */
		query->result_size = (rscreen->chip_class >= EVERGREEN ? 11 : 8) * 16;
		query->result_size += 8; /* for the fence + alignment */
		query->num_cs_dw_begin = 6;
		query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rscreen);
		break;
	default:
		assert(0);
		FREE(query);
		return NULL;
	}

	if (!r600_query_hw_init(rscreen, query)) {
		FREE(query);
		return NULL;
	}

	return (struct pipe_query *)query;
}

static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
					      unsigned type, int diff)
{
	if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
	    type == PIPE_QUERY_OCCLUSION_PREDICATE ||
	    type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
		bool old_enable = rctx->num_occlusion_queries != 0;
		bool old_perfect_enable =
			rctx->num_perfect_occlusion_queries != 0;
		bool enable, perfect_enable;

		rctx->num_occlusion_queries += diff;
		assert(rctx->num_occlusion_queries >= 0);

		if (type != PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
			rctx->num_perfect_occlusion_queries += diff;
			assert(rctx->num_perfect_occlusion_queries >= 0);
		}

		enable = rctx->num_occlusion_queries != 0;
		perfect_enable = rctx->num_perfect_occlusion_queries != 0;

		if (enable != old_enable || perfect_enable != old_perfect_enable) {
			struct r600_context *ctx = (struct r600_context*)rctx;
			r600_mark_atom_dirty(ctx, &ctx->db_misc_state.atom);
		}
	}
}

static unsigned event_type_for_stream(unsigned stream)
{
	switch (stream) {
	default:
	case 0: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS;
	case 1: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS1;
	case 2: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS2;
	case 3: return EVENT_TYPE_SAMPLE_STREAMOUTSTATS3;
	}
}

static void emit_sample_streamout(struct radeon_cmdbuf *cs, uint64_t va,
				  unsigned stream)
{
	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
	radeon_emit(cs, EVENT_TYPE(event_type_for_stream(stream)) | EVENT_INDEX(3));
	radeon_emit(cs, va);
	radeon_emit(cs, va >> 32);
}
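
/* Emit the "begin" sample of a query slot.  The matching "end" sample is
 * written by r600_query_hw_do_emit_stop() at a type-specific offset inside
 * the same slot (+8 for occlusion and time-elapsed, +16 for streamout
 * pairs), followed for most types by an EOP event that writes 0x80000000
 * into the fence dword so the CPU and the result shader can detect
 * completion.
 */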
static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
					struct r600_query_hw *query,
					struct r600_resource *buffer,
					uint64_t va)
{
	struct radeon_cmdbuf *cs = ctx->gfx.cs;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		emit_sample_streamout(cs, va, query->stream);
		break;
	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
		for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream)
			emit_sample_streamout(cs, va + 32 * stream, stream);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		/* Write the timestamp after the last draw is done.
		 * (bottom-of-pipe)
		 */
		r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS,
					 0, EOP_DATA_SEL_TIMESTAMP,
					 NULL, va, 0, query->b.type);
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);
}
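
/* Query buffers form a singly linked list: query->buffer is the current
 * (newest) buffer and buffer.previous points at older, full buffers.
 * buffer.results_end is the write cursor within the current buffer; when
 * the next result pair would not fit, a fresh buffer is chained in below
 * and the cursor resets to 0.
 */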
static void r600_query_hw_emit_start(struct r600_common_context *ctx,
				     struct r600_query_hw *query)
{
	uint64_t va;

	if (!query->buffer.buf)
		return; // previous buffer allocation failure

	r600_update_occlusion_query_state(ctx, query->b.type, 1);
	r600_update_prims_generated_query_state(ctx, query->b.type, 1);

	ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_begin + query->num_cs_dw_end,
			       true);

	/* Get a new query buffer if needed. */
	if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
		struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
		*qbuf = query->buffer;
		query->buffer.results_end = 0;
		query->buffer.previous = qbuf;
		query->buffer.buf = r600_new_query_buffer(ctx->screen, query);
		if (!query->buffer.buf)
			return;
	}

	/* emit begin query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	query->ops->emit_start(ctx, query, query->buffer.buf, va);

	ctx->num_cs_dw_queries_suspend += query->num_cs_dw_end;
}

static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
				       struct r600_query_hw *query,
				       struct r600_resource *buffer,
				       uint64_t va)
{
	struct radeon_cmdbuf *cs = ctx->gfx.cs;
	uint64_t fence_va = 0;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
		va += 8;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);

		fence_va = va + ctx->screen->info.num_render_backends * 16 - 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
	case PIPE_QUERY_PRIMITIVES_GENERATED:
	case PIPE_QUERY_SO_STATISTICS:
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		va += 16;
		emit_sample_streamout(cs, va, query->stream);
		break;
	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
		va += 16;
		for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream)
			emit_sample_streamout(cs, va + 32 * stream, stream);
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		va += 8;
		/* fall through */
	case PIPE_QUERY_TIMESTAMP:
		r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS,
					 0, EOP_DATA_SEL_TIMESTAMP, NULL, va,
					 0, query->b.type);
		fence_va = va + 8;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS: {
		unsigned sample_size = (query->result_size - 8) / 2;

		va += sample_size;
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);

		fence_va = va + sample_size;
		break;
	}
	default:
		assert(0);
	}
	r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
			RADEON_PRIO_QUERY);

	if (fence_va)
		r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS, 0,
					 EOP_DATA_SEL_VALUE_32BIT,
					 query->buffer.buf, fence_va, 0x80000000,
					 query->b.type);
}
static void r600_query_hw_emit_stop(struct r600_common_context *ctx,
				    struct r600_query_hw *query)
{
	uint64_t va;

	if (!query->buffer.buf)
		return; // previous buffer allocation failure

	/* The queries which need begin already called this in begin_query. */
	if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
		ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_end, false);
	}

	/* emit end query */
	va = query->buffer.buf->gpu_address + query->buffer.results_end;

	query->ops->emit_stop(ctx, query, query->buffer.buf, va);

	query->buffer.results_end += query->result_size;

	if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
		ctx->num_cs_dw_queries_suspend -= query->num_cs_dw_end;

	r600_update_occlusion_query_state(ctx, query->b.type, -1);
	r600_update_prims_generated_query_state(ctx, query->b.type, -1);
}

static void emit_set_predicate(struct r600_common_context *ctx,
			       struct r600_resource *buf, uint64_t va,
			       uint32_t op)
{
	struct radeon_cmdbuf *cs = ctx->gfx.cs;

	radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
	radeon_emit(cs, va);
	radeon_emit(cs, op | ((va >> 32) & 0xFF));
	r600_emit_reloc(ctx, &ctx->gfx, buf, RADEON_USAGE_READ,
			RADEON_PRIO_QUERY);
}

static void r600_emit_query_predication(struct r600_common_context *ctx,
					struct r600_atom *atom)
{
	struct r600_query_hw *query = (struct r600_query_hw *)ctx->render_cond;
	struct r600_query_buffer *qbuf;
	uint32_t op;
	bool flag_wait, invert;

	if (!query)
		return;

	invert = ctx->render_cond_invert;
	flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
		    ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
		op = PRED_OP(PREDICATION_OP_ZPASS);
		break;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
		op = PRED_OP(PREDICATION_OP_PRIMCOUNT);
		invert = !invert;
		break;
	default:
		assert(0);
		return;
	}

	/* if true then invert, see GL_ARB_conditional_render_inverted */
	if (invert)
		op |= PREDICATION_DRAW_NOT_VISIBLE; /* Draw if not visible or overflow */
	else
		op |= PREDICATION_DRAW_VISIBLE; /* Draw if visible or no overflow */

	op |= flag_wait ? PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW;

	/* emit predicate packets for all data blocks */
	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned results_base = 0;
		uint64_t va_base = qbuf->buf->gpu_address;

		while (results_base < qbuf->results_end) {
			uint64_t va = va_base + results_base;

			if (query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE) {
				for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream) {
					emit_set_predicate(ctx, qbuf->buf, va + 32 * stream, op);

					/* set CONTINUE bit for all packets except the first */
					op |= PREDICATION_CONTINUE;
				}
			} else {
				emit_set_predicate(ctx, qbuf->buf, va, op);
				op |= PREDICATION_CONTINUE;
			}

			results_base += query->result_size;
		}
	}
}
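
/* The pipe_context entry points below dispatch through r600_query_ops.
 * A state tracker drives them roughly like this (illustrative sketch,
 * not code from this file):
 *
 *   struct pipe_query *q =
 *      ctx->create_query(ctx, PIPE_QUERY_OCCLUSION_COUNTER, 0);
 *   ctx->begin_query(ctx, q);
 *   ... draw calls ...
 *   ctx->end_query(ctx, q);
 *   union pipe_query_result res;
 *   ctx->get_query_result(ctx, q, true, &res);  // wait=true blocks
 *   ctx->destroy_query(ctx, q);
 */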
static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
{
	struct r600_common_screen *rscreen =
		(struct r600_common_screen *)ctx->screen;

	if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
	    query_type == PIPE_QUERY_GPU_FINISHED ||
	    query_type >= PIPE_QUERY_DRIVER_SPECIFIC)
		return r600_query_sw_create(query_type);

	return r600_query_hw_create(rscreen, query_type, index);
}

static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	rquery->ops->destroy(rctx->screen, rquery);
}

static boolean r600_begin_query(struct pipe_context *ctx,
				struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->begin(rctx, rquery);
}

void r600_query_hw_reset_buffers(struct r600_common_context *rctx,
				 struct r600_query_hw *query)
{
	struct r600_query_buffer *prev = query->buffer.previous;

	/* Discard the old query buffers. */
	while (prev) {
		struct r600_query_buffer *qbuf = prev;
		prev = prev->previous;
		r600_resource_reference(&qbuf->buf, NULL);
		FREE(qbuf);
	}

	query->buffer.results_end = 0;
	query->buffer.previous = NULL;

	/* Obtain a new buffer if the current one can't be mapped without a stall. */
	if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
	    !rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
		r600_resource_reference(&query->buffer.buf, NULL);
		query->buffer.buf = r600_new_query_buffer(rctx->screen, query);
	} else {
		if (!query->ops->prepare_buffer(rctx->screen, query, query->buffer.buf))
			r600_resource_reference(&query->buffer.buf, NULL);
	}
}

bool r600_query_hw_begin(struct r600_common_context *rctx,
			 struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;

	if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
		assert(0);
		return false;
	}

	if (!(query->flags & R600_QUERY_HW_FLAG_BEGIN_RESUMES))
		r600_query_hw_reset_buffers(rctx, query);

	r600_query_hw_emit_start(rctx, query);
	if (!query->buffer.buf)
		return false;

	LIST_ADDTAIL(&query->list, &rctx->active_queries);
	return true;
}

static bool r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->end(rctx, rquery);
}

bool r600_query_hw_end(struct r600_common_context *rctx,
		       struct r600_query *rquery)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;

	if (query->flags & R600_QUERY_HW_FLAG_NO_START)
		r600_query_hw_reset_buffers(rctx, query);

	r600_query_hw_emit_stop(rctx, query);

	if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
		LIST_DELINIT(&query->list);

	if (!query->buffer.buf)
		return false;

	return true;
}
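
/* Describes where the data for one query type lives within a result slot:
 * start_offset/end_offset locate the begin/end values (relative to the
 * slot), fence_offset locates the completion dword, and pair_stride/
 * pair_count describe repeated begin/end pairs (one per render backend
 * for occlusion queries).  The result shader consumes these via the
 * consts struct in r600_query_hw_get_result_resource().
 */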
static void r600_get_hw_query_params(struct r600_common_context *rctx,
				     struct r600_query_hw *rquery, int index,
				     struct r600_hw_query_params *params)
{
	unsigned max_rbs = rctx->screen->info.num_render_backends;

	params->pair_stride = 0;
	params->pair_count = 1;

	switch (rquery->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER:
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
		params->start_offset = 0;
		params->end_offset = 8;
		params->fence_offset = max_rbs * 16;
		params->pair_stride = 16;
		params->pair_count = max_rbs;
		break;
	case PIPE_QUERY_TIME_ELAPSED:
		params->start_offset = 0;
		params->end_offset = 8;
		params->fence_offset = 16;
		break;
	case PIPE_QUERY_TIMESTAMP:
		params->start_offset = 0;
		params->end_offset = 0;
		params->fence_offset = 8;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		params->start_offset = 8;
		params->end_offset = 24;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		params->start_offset = 0;
		params->end_offset = 16;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_SO_STATISTICS:
		params->start_offset = 8 - index * 8;
		params->end_offset = 24 - index * 8;
		params->fence_offset = params->end_offset + 4;
		break;
	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
		params->pair_count = R600_MAX_STREAMS;
		params->pair_stride = 32;
		/* fall through */
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		params->start_offset = 0;
		params->end_offset = 16;

		/* We can re-use the high dword of the last 64-bit value as a
		 * fence: it is initialized as 0, and the high bit is set by
		 * the write of the streamout stats event.
		 */
		params->fence_offset = rquery->result_size - 4;
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
	{
		/* Offsets apply to EG+ */
		static const unsigned offsets[] = {56, 48, 24, 32, 40, 16, 8, 0, 64, 72, 80};
		params->start_offset = offsets[index];
		params->end_offset = 88 + offsets[index];
		params->fence_offset = 2 * 88;
		break;
	}
	default:
		unreachable("r600_get_hw_query_params unsupported");
	}
}

static unsigned r600_query_read_result(void *map, unsigned start_index, unsigned end_index,
				       bool test_status_bit)
{
	uint32_t *current_result = (uint32_t*)map;
	uint64_t start, end;

	start = (uint64_t)current_result[start_index] |
		(uint64_t)current_result[start_index+1] << 32;
	end = (uint64_t)current_result[end_index] |
	      (uint64_t)current_result[end_index+1] << 32;

	if (!test_status_bit ||
	    ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) {
		return end - start;
	}
	return 0;
}
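
/* The index arguments passed to r600_query_read_result() above are dword
 * offsets into one result slot.  For example, occlusion queries read each
 * RB's 16-byte block with start dwords {0,1} and end dwords {2,3}, and
 * streamout stats read NumPrimitivesWritten as the 64-bit values at dwords
 * {2,3} (begin) and {6,7} (end).  add_result is called once per result
 * slot and accumulates into *result across all chained buffers.
 */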
static void r600_query_hw_add_result(struct r600_common_screen *rscreen,
				     struct r600_query_hw *query,
				     void *buffer,
				     union pipe_query_result *result)
{
	unsigned max_rbs = rscreen->info.num_render_backends;

	switch (query->b.type) {
	case PIPE_QUERY_OCCLUSION_COUNTER: {
		for (unsigned i = 0; i < max_rbs; ++i) {
			unsigned results_base = i * 16;
			result->u64 +=
				r600_query_read_result(buffer + results_base, 0, 2, true);
		}
		break;
	}
	case PIPE_QUERY_OCCLUSION_PREDICATE:
	case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE: {
		for (unsigned i = 0; i < max_rbs; ++i) {
			unsigned results_base = i * 16;
			result->b = result->b ||
				r600_query_read_result(buffer + results_base, 0, 2, true) != 0;
		}
		break;
	}
	case PIPE_QUERY_TIME_ELAPSED:
		result->u64 += r600_query_read_result(buffer, 0, 2, false);
		break;
	case PIPE_QUERY_TIMESTAMP:
		result->u64 = *(uint64_t*)buffer;
		break;
	case PIPE_QUERY_PRIMITIVES_EMITTED:
		/* SAMPLE_STREAMOUTSTATS stores this structure:
		 * {
		 *    u64 NumPrimitivesWritten;
		 *    u64 PrimitiveStorageNeeded;
		 * }
		 * We only need NumPrimitivesWritten here. */
		result->u64 += r600_query_read_result(buffer, 2, 6, true);
		break;
	case PIPE_QUERY_PRIMITIVES_GENERATED:
		/* Here we read PrimitiveStorageNeeded. */
		result->u64 += r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_STATISTICS:
		result->so_statistics.num_primitives_written +=
			r600_query_read_result(buffer, 2, 6, true);
		result->so_statistics.primitives_storage_needed +=
			r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
		result->b = result->b ||
			r600_query_read_result(buffer, 2, 6, true) !=
			r600_query_read_result(buffer, 0, 4, true);
		break;
	case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
		for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream) {
			result->b = result->b ||
				r600_query_read_result(buffer, 2, 6, true) !=
				r600_query_read_result(buffer, 0, 4, true);
			buffer = (char *)buffer + 32;
		}
		break;
	case PIPE_QUERY_PIPELINE_STATISTICS:
		if (rscreen->chip_class >= EVERGREEN) {
			result->pipeline_statistics.ps_invocations +=
				r600_query_read_result(buffer, 0, 22, false);
			result->pipeline_statistics.c_primitives +=
				r600_query_read_result(buffer, 2, 24, false);
			result->pipeline_statistics.c_invocations +=
				r600_query_read_result(buffer, 4, 26, false);
			result->pipeline_statistics.vs_invocations +=
				r600_query_read_result(buffer, 6, 28, false);
			result->pipeline_statistics.gs_invocations +=
				r600_query_read_result(buffer, 8, 30, false);
			result->pipeline_statistics.gs_primitives +=
				r600_query_read_result(buffer, 10, 32, false);
			result->pipeline_statistics.ia_primitives +=
				r600_query_read_result(buffer, 12, 34, false);
			result->pipeline_statistics.ia_vertices +=
				r600_query_read_result(buffer, 14, 36, false);
			result->pipeline_statistics.hs_invocations +=
				r600_query_read_result(buffer, 16, 38, false);
			result->pipeline_statistics.ds_invocations +=
				r600_query_read_result(buffer, 18, 40, false);
			result->pipeline_statistics.cs_invocations +=
				r600_query_read_result(buffer, 20, 42, false);
		} else {
			result->pipeline_statistics.ps_invocations +=
				r600_query_read_result(buffer, 0, 16, false);
			result->pipeline_statistics.c_primitives +=
				r600_query_read_result(buffer, 2, 18, false);
			result->pipeline_statistics.c_invocations +=
				r600_query_read_result(buffer, 4, 20, false);
			result->pipeline_statistics.vs_invocations +=
				r600_query_read_result(buffer, 6, 22, false);
			result->pipeline_statistics.gs_invocations +=
				r600_query_read_result(buffer, 8, 24, false);
			result->pipeline_statistics.gs_primitives +=
				r600_query_read_result(buffer, 10, 26, false);
			result->pipeline_statistics.ia_primitives +=
				r600_query_read_result(buffer, 12, 28, false);
			result->pipeline_statistics.ia_vertices +=
				r600_query_read_result(buffer, 14, 30, false);
		}
#if 0 /* for testing */
		printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
		       "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
		       "Clipper prims=%llu, PS=%llu, CS=%llu\n",
		       result->pipeline_statistics.ia_vertices,
		       result->pipeline_statistics.ia_primitives,
		       result->pipeline_statistics.vs_invocations,
		       result->pipeline_statistics.hs_invocations,
		       result->pipeline_statistics.ds_invocations,
		       result->pipeline_statistics.gs_invocations,
		       result->pipeline_statistics.gs_primitives,
		       result->pipeline_statistics.c_invocations,
		       result->pipeline_statistics.c_primitives,
		       result->pipeline_statistics.ps_invocations,
		       result->pipeline_statistics.cs_invocations);
#endif
		break;
	default:
		assert(0);
	}
}
static boolean r600_get_query_result(struct pipe_context *ctx,
				     struct pipe_query *query, boolean wait,
				     union pipe_query_result *result)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	return rquery->ops->get_result(rctx, rquery, wait, result);
}

static void r600_get_query_result_resource(struct pipe_context *ctx,
					   struct pipe_query *query,
					   boolean wait,
					   enum pipe_query_value_type result_type,
					   int index,
					   struct pipe_resource *resource,
					   unsigned offset)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query *rquery = (struct r600_query *)query;

	rquery->ops->get_result_resource(rctx, rquery, wait, result_type, index,
					 resource, offset);
}

static void r600_query_hw_clear_result(struct r600_query_hw *query,
				       union pipe_query_result *result)
{
	util_query_clear_result(result, query->b.type);
}

bool r600_query_hw_get_result(struct r600_common_context *rctx,
			      struct r600_query *rquery,
			      bool wait, union pipe_query_result *result)
{
	struct r600_common_screen *rscreen = rctx->screen;
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *qbuf;

	query->ops->clear_result(query, result);

	for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
		unsigned usage = PIPE_TRANSFER_READ |
				 (wait ? 0 : PIPE_TRANSFER_DONTBLOCK);
		unsigned results_base = 0;
		void *map;

		if (rquery->b.flushed)
			map = rctx->ws->buffer_map(qbuf->buf->buf, NULL, usage);
		else
			map = r600_buffer_map_sync_with_rings(rctx, qbuf->buf, usage);

		if (!map)
			return false;

		while (results_base != qbuf->results_end) {
			query->ops->add_result(rscreen, query, map + results_base,
					       result);
			results_base += query->result_size;
		}
	}

	/* Convert the time to expected units. */
	if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
	    rquery->type == PIPE_QUERY_TIMESTAMP) {
		result->u64 = (1000000 * result->u64) / rscreen->info.clock_crystal_freq;
	}
	return true;
}
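
/* Note on the conversion above: clock_crystal_freq is in kHz (ticks per
 * millisecond), so ticks * 1000000 / clock_crystal_freq yields
 * nanoseconds, the unit expected for TIME_ELAPSED and TIMESTAMP results.
 */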

/* Create the compute shader that is used to collect the results.
 *
 * One compute grid with a single thread is launched for every query result
 * buffer. The thread (optionally) reads a previous summary buffer, then
 * accumulates data from the query result buffer, and writes the result either
 * to a summary buffer to be consumed by the next grid invocation or to the
 * user-supplied buffer.
 *
 * Data layout:
 *
 * CONST
 *  0.x = end_offset
 *  0.y = result_stride
 *  0.z = result_count
 *  0.w = bit field:
 *          1: read previously accumulated values
 *          2: write accumulated values for chaining
 *          4: write result available
 *          8: convert result to boolean (0/1)
 *         16: only read one dword and use that as result
 *         32: apply timestamp conversion
 *         64: store full 64 bits result
 *        128: store signed 32 bits result
 *        256: SO_OVERFLOW mode: take the difference of two successive half-pairs
 *  1.x = fence_offset
 *  1.y = pair_stride
 *  1.z = pair_count
 *  1.w = result_offset
 *  2.x = buffer0 offset
 *
 * BUFFER[0] = query result buffer
 * BUFFER[1] = previous summary buffer
 * BUFFER[2] = next summary buffer or user-supplied buffer
 */
static void r600_create_query_result_shader(struct r600_common_context *rctx)
{
	/* TEMP[0].xy = accumulated result so far
	 * TEMP[0].z = result not available
	 *
	 * TEMP[1].x = current result index
	 * TEMP[1].y = current pair index
	 */
	static const char text_tmpl[] =
		"COMP\n"
		"PROPERTY CS_FIXED_BLOCK_WIDTH 1\n"
		"PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
		"PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
		"DCL BUFFER[0]\n"
		"DCL BUFFER[1]\n"
		"DCL BUFFER[2]\n"
		"DCL CONST[0][0..2]\n"
		"DCL TEMP[0..5]\n"
		"IMM[0] UINT32 {0, 31, 2147483647, 4294967295}\n"
		"IMM[1] UINT32 {1, 2, 4, 8}\n"
		"IMM[2] UINT32 {16, 32, 64, 128}\n"
		"IMM[3] UINT32 {1000000, 0, %u, 0}\n" /* for timestamp conversion */
		"IMM[4] UINT32 {256, 0, 0, 0}\n"

		"AND TEMP[5], CONST[0][0].wwww, IMM[2].xxxx\n"
		"UIF TEMP[5]\n"
			/* Check result availability. */
			"UADD TEMP[1].x, CONST[0][1].xxxx, CONST[0][2].xxxx\n"
			"LOAD TEMP[1].x, BUFFER[0], TEMP[1].xxxx\n"
			"ISHR TEMP[0].z, TEMP[1].xxxx, IMM[0].yyyy\n"
			"MOV TEMP[1], TEMP[0].zzzz\n"
			"NOT TEMP[0].z, TEMP[0].zzzz\n"

			/* Load result if available. */
			"UIF TEMP[1]\n"
				"UADD TEMP[0].x, IMM[0].xxxx, CONST[0][2].xxxx\n"
				"LOAD TEMP[0].xy, BUFFER[0], TEMP[0].xxxx\n"
			"ENDIF\n"
		"ELSE\n"
			/* Load previously accumulated result if requested. */
			"MOV TEMP[0], IMM[0].xxxx\n"
			"AND TEMP[4], CONST[0][0].wwww, IMM[1].xxxx\n"
			"UIF TEMP[4]\n"
				"LOAD TEMP[0].xyz, BUFFER[1], IMM[0].xxxx\n"
			"ENDIF\n"

			"MOV TEMP[1].x, IMM[0].xxxx\n"
			"BGNLOOP\n"
				/* Break if accumulated result so far is not available. */
				"UIF TEMP[0].zzzz\n"
					"BRK\n"
				"ENDIF\n"

				/* Break if result_index >= result_count. */
				"USGE TEMP[5], TEMP[1].xxxx, CONST[0][0].zzzz\n"
				"UIF TEMP[5]\n"
					"BRK\n"
				"ENDIF\n"

				/* Load fence and check result availability */
				"UMAD TEMP[5].x, TEMP[1].xxxx, CONST[0][0].yyyy, CONST[0][1].xxxx\n"
				"UADD TEMP[5].x, TEMP[5].xxxx, CONST[0][2].xxxx\n"
				"LOAD TEMP[5].x, BUFFER[0], TEMP[5].xxxx\n"
				"ISHR TEMP[0].z, TEMP[5].xxxx, IMM[0].yyyy\n"
				"NOT TEMP[0].z, TEMP[0].zzzz\n"
				"UIF TEMP[0].zzzz\n"
					"BRK\n"
				"ENDIF\n"

				"MOV TEMP[1].y, IMM[0].xxxx\n"
				"BGNLOOP\n"
					/* Load start and end. */
*/ 1473 "UMUL TEMP[5].x, TEMP[1].xxxx, CONST[0][0].yyyy\n" 1474 "UMAD TEMP[5].x, TEMP[1].yyyy, CONST[0][1].yyyy, TEMP[5].xxxx\n" 1475 "UADD TEMP[5].x, TEMP[5].xxxx, CONST[0][2].xxxx\n" 1476 "LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n" 1477 1478 "UADD TEMP[5].y, TEMP[5].xxxx, CONST[0][0].xxxx\n" 1479 "LOAD TEMP[3].xy, BUFFER[0], TEMP[5].yyyy\n" 1480 1481 "U64ADD TEMP[4].xy, TEMP[3], -TEMP[2]\n" 1482 1483 "AND TEMP[5].z, CONST[0][0].wwww, IMM[4].xxxx\n" 1484 "UIF TEMP[5].zzzz\n" 1485 /* Load second start/end half-pair and 1486 * take the difference 1487 */ 1488 "UADD TEMP[5].xy, TEMP[5], IMM[1].wwww\n" 1489 "LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n" 1490 "LOAD TEMP[3].xy, BUFFER[0], TEMP[5].yyyy\n" 1491 1492 "U64ADD TEMP[3].xy, TEMP[3], -TEMP[2]\n" 1493 "U64ADD TEMP[4].xy, TEMP[4], -TEMP[3]\n" 1494 "ENDIF\n" 1495 1496 "U64ADD TEMP[0].xy, TEMP[0], TEMP[4]\n" 1497 1498 /* Increment pair index */ 1499 "UADD TEMP[1].y, TEMP[1].yyyy, IMM[1].xxxx\n" 1500 "USGE TEMP[5], TEMP[1].yyyy, CONST[0][1].zzzz\n" 1501 "UIF TEMP[5]\n" 1502 "BRK\n" 1503 "ENDIF\n" 1504 "ENDLOOP\n" 1505 1506 /* Increment result index */ 1507 "UADD TEMP[1].x, TEMP[1].xxxx, IMM[1].xxxx\n" 1508 "ENDLOOP\n" 1509 "ENDIF\n" 1510 1511 "AND TEMP[4], CONST[0][0].wwww, IMM[1].yyyy\n" 1512 "UIF TEMP[4]\n" 1513 /* Store accumulated data for chaining. */ 1514 "STORE BUFFER[2].xyz, CONST[0][1].wwww, TEMP[0]\n" 1515 "ELSE\n" 1516 "AND TEMP[4], CONST[0][0].wwww, IMM[1].zzzz\n" 1517 "UIF TEMP[4]\n" 1518 /* Store result availability. */ 1519 "NOT TEMP[0].z, TEMP[0]\n" 1520 "AND TEMP[0].z, TEMP[0].zzzz, IMM[1].xxxx\n" 1521 "STORE BUFFER[2].x, CONST[0][1].wwww, TEMP[0].zzzz\n" 1522 1523 "AND TEMP[4], CONST[0][0].wwww, IMM[2].zzzz\n" 1524 "UIF TEMP[4]\n" 1525 "STORE BUFFER[2].y, CONST[0][1].wwww, IMM[0].xxxx\n" 1526 "ENDIF\n" 1527 "ELSE\n" 1528 /* Store result if it is available. */ 1529 "NOT TEMP[4], TEMP[0].zzzz\n" 1530 "UIF TEMP[4]\n" 1531 /* Apply timestamp conversion */ 1532 "AND TEMP[4], CONST[0][0].wwww, IMM[2].yyyy\n" 1533 "UIF TEMP[4]\n" 1534 "U64MUL TEMP[0].xy, TEMP[0], IMM[3].xyxy\n" 1535 "U64DIV TEMP[0].xy, TEMP[0], IMM[3].zwzw\n" 1536 "ENDIF\n" 1537 1538 /* Convert to boolean */ 1539 "AND TEMP[4], CONST[0][0].wwww, IMM[1].wwww\n" 1540 "UIF TEMP[4]\n" 1541 "U64SNE TEMP[0].x, TEMP[0].xyxy, IMM[4].zwzw\n" 1542 "AND TEMP[0].x, TEMP[0].xxxx, IMM[1].xxxx\n" 1543 "MOV TEMP[0].y, IMM[0].xxxx\n" 1544 "ENDIF\n" 1545 1546 "AND TEMP[4], CONST[0][0].wwww, IMM[2].zzzz\n" 1547 "UIF TEMP[4]\n" 1548 "STORE BUFFER[2].xy, CONST[0][1].wwww, TEMP[0].xyxy\n" 1549 "ELSE\n" 1550 /* Clamping */ 1551 "UIF TEMP[0].yyyy\n" 1552 "MOV TEMP[0].x, IMM[0].wwww\n" 1553 "ENDIF\n" 1554 1555 "AND TEMP[4], CONST[0][0].wwww, IMM[2].wwww\n" 1556 "UIF TEMP[4]\n" 1557 "UMIN TEMP[0].x, TEMP[0].xxxx, IMM[0].zzzz\n" 1558 "ENDIF\n" 1559 1560 "STORE BUFFER[2].x, CONST[0][1].wwww, TEMP[0].xxxx\n" 1561 "ENDIF\n" 1562 "ENDIF\n" 1563 "ENDIF\n" 1564 "ENDIF\n" 1565 1566 "END\n"; 1567 1568 char text[sizeof(text_tmpl) + 32]; 1569 struct tgsi_token tokens[1024]; 1570 struct pipe_compute_state state = {}; 1571 1572 /* Hard code the frequency into the shader so that the backend can 1573 * use the full range of optimizations for divide-by-constant. 
	snprintf(text, sizeof(text), text_tmpl,
		 rctx->screen->info.clock_crystal_freq);

	if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
		assert(false);
		return;
	}

	state.ir_type = PIPE_SHADER_IR_TGSI;
	state.prog = tokens;

	rctx->query_result_shader = rctx->b.create_compute_state(&rctx->b, &state);
}

static void r600_restore_qbo_state(struct r600_common_context *rctx,
				   struct r600_qbo_state *st)
{
	rctx->b.bind_compute_state(&rctx->b, st->saved_compute);

	rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &st->saved_const0);
	pipe_resource_reference(&st->saved_const0.buffer, NULL);

	rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, st->saved_ssbo, ~0);
	for (unsigned i = 0; i < 3; ++i)
		pipe_resource_reference(&st->saved_ssbo[i].buffer, NULL);
}
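
/* GPU-side result readback: walks the chain of result buffers and launches
 * one single-thread compute grid per buffer.  Intermediate grids accumulate
 * into a small scratch "summary" buffer (ssbo[1] in, ssbo[2] out); the grid
 * for the newest buffer writes the final value to the user's resource at
 * the requested offset, in the requested result_type.
 */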
static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
					      struct r600_query *rquery,
					      bool wait,
					      enum pipe_query_value_type result_type,
					      int index,
					      struct pipe_resource *resource,
					      unsigned offset)
{
	struct r600_query_hw *query = (struct r600_query_hw *)rquery;
	struct r600_query_buffer *qbuf;
	struct r600_query_buffer *qbuf_prev;
	struct pipe_resource *tmp_buffer = NULL;
	unsigned tmp_buffer_offset = 0;
	struct r600_qbo_state saved_state = {};
	struct pipe_grid_info grid = {};
	struct pipe_constant_buffer constant_buffer = {};
	struct pipe_shader_buffer ssbo[3];
	struct r600_hw_query_params params;
	struct {
		uint32_t end_offset;
		uint32_t result_stride;
		uint32_t result_count;
		uint32_t config;
		uint32_t fence_offset;
		uint32_t pair_stride;
		uint32_t pair_count;
		uint32_t buffer_offset;
		uint32_t buffer0_offset;
	} consts;

	if (!rctx->query_result_shader) {
		r600_create_query_result_shader(rctx);
		if (!rctx->query_result_shader)
			return;
	}

	if (query->buffer.previous) {
		u_suballocator_alloc(rctx->allocator_zeroed_memory, 16, 256,
				     &tmp_buffer_offset, &tmp_buffer);
		if (!tmp_buffer)
			return;
	}

	rctx->save_qbo_state(&rctx->b, &saved_state);

	r600_get_hw_query_params(rctx, query, index >= 0 ? index : 0, &params);
	consts.end_offset = params.end_offset - params.start_offset;
	consts.fence_offset = params.fence_offset - params.start_offset;
	consts.result_stride = query->result_size;
	consts.pair_stride = params.pair_stride;
	consts.pair_count = params.pair_count;

	constant_buffer.buffer_size = sizeof(consts);
	constant_buffer.user_buffer = &consts;

	ssbo[1].buffer = tmp_buffer;
	ssbo[1].buffer_offset = tmp_buffer_offset;
	ssbo[1].buffer_size = 16;

	ssbo[2] = ssbo[1];

	rctx->b.bind_compute_state(&rctx->b, rctx->query_result_shader);

	grid.block[0] = 1;
	grid.block[1] = 1;
	grid.block[2] = 1;
	grid.grid[0] = 1;
	grid.grid[1] = 1;
	grid.grid[2] = 1;

	consts.config = 0;
	if (index < 0)
		consts.config |= 4;
	if (query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
	    query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE)
		consts.config |= 8;
	else if (query->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE ||
		 query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE)
		consts.config |= 8 | 256;
	else if (query->b.type == PIPE_QUERY_TIMESTAMP ||
		 query->b.type == PIPE_QUERY_TIME_ELAPSED)
		consts.config |= 32;

	switch (result_type) {
	case PIPE_QUERY_TYPE_U64:
	case PIPE_QUERY_TYPE_I64:
		consts.config |= 64;
		break;
	case PIPE_QUERY_TYPE_I32:
		consts.config |= 128;
		break;
	case PIPE_QUERY_TYPE_U32:
		break;
	}

	rctx->flags |= rctx->screen->barrier_flags.cp_to_L2;

	for (qbuf = &query->buffer; qbuf; qbuf = qbuf_prev) {
		if (query->b.type != PIPE_QUERY_TIMESTAMP) {
			qbuf_prev = qbuf->previous;
			consts.result_count = qbuf->results_end / query->result_size;
			consts.config &= ~3;
			if (qbuf != &query->buffer)
				consts.config |= 1;
			if (qbuf->previous)
				consts.config |= 2;
		} else {
			/* Only read the last timestamp. */
			qbuf_prev = NULL;
			consts.result_count = 0;
			consts.config |= 16;
			params.start_offset += qbuf->results_end - query->result_size;
		}

		ssbo[0].buffer = &qbuf->buf->b.b;
		ssbo[0].buffer_offset = params.start_offset & ~0xff;
		ssbo[0].buffer_size = qbuf->results_end - ssbo[0].buffer_offset;
		consts.buffer0_offset = (params.start_offset & 0xff);
		if (!qbuf->previous) {
			ssbo[2].buffer = resource;
			ssbo[2].buffer_offset = offset & ~0xff;
			ssbo[2].buffer_size = offset + 8;
			consts.buffer_offset = (offset & 0xff);
		} else
			consts.buffer_offset = 0;

		rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &constant_buffer);

		rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, ssbo, ~0);

		if (wait && qbuf == &query->buffer) {
			uint64_t va;

			/* Wait for result availability. Wait only for readiness
			 * of the last entry, since the fence writes should be
			 * serialized in the CP.
			 */
		ssbo[0].buffer = &qbuf->buf->b.b;
		ssbo[0].buffer_offset = params.start_offset & ~0xff;
		ssbo[0].buffer_size = qbuf->results_end - ssbo[0].buffer_offset;
		consts.buffer0_offset = (params.start_offset & 0xff);
		if (!qbuf->previous) {
			ssbo[2].buffer = resource;
			ssbo[2].buffer_offset = offset & ~0xff;
			ssbo[2].buffer_size = offset + 8;
			consts.buffer_offset = (offset & 0xff);
		} else
			consts.buffer_offset = 0;

		rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &constant_buffer);

		rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, ssbo, ~0);

		if (wait && qbuf == &query->buffer) {
			uint64_t va;

			/* Wait for result availability. Wait only for readiness
			 * of the last entry, since the fence writes should be
			 * serialized in the CP.
			 */
			va = qbuf->buf->gpu_address + qbuf->results_end - query->result_size;
			va += params.fence_offset;

			r600_gfx_wait_fence(rctx, qbuf->buf, va, 0x80000000, 0x80000000);
		}

		rctx->b.launch_grid(&rctx->b, &grid);
		rctx->flags |= rctx->screen->barrier_flags.compute_to_L2;
	}

	r600_restore_qbo_state(rctx, &saved_state);
	pipe_resource_reference(&tmp_buffer, NULL);
}

static void r600_render_condition(struct pipe_context *ctx,
				  struct pipe_query *query,
				  boolean condition,
				  enum pipe_render_cond_flag mode)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_query_hw *rquery = (struct r600_query_hw *)query;
	struct r600_query_buffer *qbuf;
	struct r600_atom *atom = &rctx->render_cond_atom;

	/* Compute the size of SET_PREDICATION packets. */
	atom->num_dw = 0;
	if (query) {
		for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous)
			atom->num_dw += (qbuf->results_end / rquery->result_size) * 5;

		if (rquery->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE)
			atom->num_dw *= R600_MAX_STREAMS;
	}

	rctx->render_cond = query;
	rctx->render_cond_invert = condition;
	rctx->render_cond_mode = mode;

	rctx->set_atom_dirty(rctx, atom, query != NULL);
}

void r600_suspend_queries(struct r600_common_context *ctx)
{
	struct r600_query_hw *query;

	LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
		r600_query_hw_emit_stop(ctx, query);
	}
	assert(ctx->num_cs_dw_queries_suspend == 0);
}
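/* Conservative estimate of how many CS dwords resuming every query on the
 * given query_list will emit; r600_resume_queries uses it to reserve CS
 * space before the resume loop. */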
static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx,
						    struct list_head *query_list)
{
	struct r600_query_hw *query;
	unsigned num_dw = 0;

	LIST_FOR_EACH_ENTRY(query, query_list, list) {
		/* begin + end */
		num_dw += query->num_cs_dw_begin + query->num_cs_dw_end;

		/* Workaround for the fact that
		 * num_cs_dw_nontimer_queries_suspend is incremented for every
		 * resumed query, which raises the bar in need_cs_space for
		 * queries about to be resumed.
		 */
		num_dw += query->num_cs_dw_end;
	}
	/* primitives generated query */
	num_dw += ctx->streamout.enable_atom.num_dw;
	/* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
	num_dw += 13;

	return num_dw;
}

void r600_resume_queries(struct r600_common_context *ctx)
{
	struct r600_query_hw *query;
	unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, &ctx->active_queries);

	assert(ctx->num_cs_dw_queries_suspend == 0);

	/* Check CS space here. Resuming must not be interrupted by flushes. */
	ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, true);

	LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
		r600_query_hw_emit_start(ctx, query);
	}
}

/* Fix radeon_info::enabled_rb_mask for R600, R700, EVERGREEN, NI. */
void r600_query_fix_enabled_rb_mask(struct r600_common_screen *rscreen)
{
	struct r600_common_context *ctx =
		(struct r600_common_context*)rscreen->aux_context;
	struct radeon_cmdbuf *cs = ctx->gfx.cs;
	struct r600_resource *buffer;
	uint32_t *results;
	unsigned i, mask = 0;
	unsigned max_rbs;

	if (ctx->family == CHIP_JUNIPER) {
		/*
		 * Fix for predication lockups - the chip can only ever have
		 * 4 RBs, however it looks like the predication logic assumes
		 * there's 8, trying to read results from query buffers never
		 * written to. By increasing this number we'll write the
		 * status bit for these as per the normal disabled rb logic.
		 */
		ctx->screen->info.num_render_backends = 8;
	}
	max_rbs = ctx->screen->info.num_render_backends;

	assert(rscreen->chip_class <= CAYMAN);

	/*
	 * Check whether the backend_map query is supported by the kernel.
	 * Note the kernel drm driver for a long time never filled in the
	 * associated data on eg/cm, only on r600/r700, hence ignore the valid
	 * bit there if the map is zero.
	 * (Albeit some chips with just one active rb can have a valid 0 map.)
	 */
	if (rscreen->info.r600_gb_backend_map_valid &&
	    (ctx->chip_class < EVERGREEN || rscreen->info.r600_gb_backend_map != 0)) {
		unsigned num_tile_pipes = rscreen->info.num_tile_pipes;
		unsigned backend_map = rscreen->info.r600_gb_backend_map;
		unsigned item_width, item_mask;

		if (ctx->chip_class >= EVERGREEN) {
			item_width = 4;
			item_mask = 0x7;
		} else {
			item_width = 2;
			item_mask = 0x3;
		}

		while (num_tile_pipes--) {
			i = backend_map & item_mask;
			mask |= (1<<i);
			backend_map >>= item_width;
		}
		if (mask != 0) {
			rscreen->info.enabled_rb_mask = mask;
			return;
		}
	}

	/* otherwise backup path for older kernels */

	/* create buffer for event data */
	buffer = (struct r600_resource*)
		pipe_buffer_create(ctx->b.screen, 0,
				   PIPE_USAGE_STAGING, max_rbs * 16);
	if (!buffer)
		return;

	/* initialize buffer with zeroes */
	results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
	if (results) {
		memset(results, 0, max_rbs * 4 * 4);

		/* emit EVENT_WRITE for ZPASS_DONE */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, buffer->gpu_address);
		radeon_emit(cs, buffer->gpu_address >> 32);

		r600_emit_reloc(ctx, &ctx->gfx, buffer,
				RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);

		/* analyze results */
		results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
		if (results) {
			for (i = 0; i < max_rbs; i++) {
				/* at least highest bit will be set if backend is used */
				if (results[i*4 + 1])
					mask |= (1<<i);
			}
		}
	}

	r600_resource_reference(&buffer, NULL);

	if (mask) {
		if (rscreen->debug_flags & DBG_INFO &&
		    mask != rscreen->info.enabled_rb_mask) {
			printf("enabled_rb_mask (fixed) = 0x%x\n", mask);
		}
		rscreen->info.enabled_rb_mask = mask;
	}
}
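/* Shorthand initializers for pipe_driver_query_info: X() declares an
 * ungrouped query, XG() one belonging to a R600_QUERY_GROUP_*, and XFULL()
 * takes the group id verbatim. The name strings are what users see, e.g.
 * through GALLIUM_HUD=draw-calls. */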
#define XFULL(name_, query_type_, type_, result_type_, group_id_) \
	{ \
		.name = name_, \
		.query_type = R600_QUERY_##query_type_, \
		.type = PIPE_DRIVER_QUERY_TYPE_##type_, \
		.result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
		.group_id = group_id_ \
	}

#define X(name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, ~(unsigned)0)

#define XG(group_, name_, query_type_, type_, result_type_) \
	XFULL(name_, query_type_, type_, result_type_, R600_QUERY_GROUP_##group_)

static const struct pipe_driver_query_info r600_driver_query_list[] = {
	X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
	X("num-shaders-created", NUM_SHADERS_CREATED, UINT64, CUMULATIVE),
	X("num-shader-cache-hits", NUM_SHADER_CACHE_HITS, UINT64, CUMULATIVE),
	X("draw-calls", DRAW_CALLS, UINT64, AVERAGE),
	X("decompress-calls", DECOMPRESS_CALLS, UINT64, AVERAGE),
	X("MRT-draw-calls", MRT_DRAW_CALLS, UINT64, AVERAGE),
	X("prim-restart-calls", PRIM_RESTART_CALLS, UINT64, AVERAGE),
	X("spill-draw-calls", SPILL_DRAW_CALLS, UINT64, AVERAGE),
	X("compute-calls", COMPUTE_CALLS, UINT64, AVERAGE),
	X("spill-compute-calls", SPILL_COMPUTE_CALLS, UINT64, AVERAGE),
	X("dma-calls", DMA_CALLS, UINT64, AVERAGE),
	X("cp-dma-calls", CP_DMA_CALLS, UINT64, AVERAGE),
	X("num-vs-flushes", NUM_VS_FLUSHES, UINT64, AVERAGE),
	X("num-ps-flushes", NUM_PS_FLUSHES, UINT64, AVERAGE),
	X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, AVERAGE),
	X("num-CB-cache-flushes", NUM_CB_CACHE_FLUSHES, UINT64, AVERAGE),
	X("num-DB-cache-flushes", NUM_DB_CACHE_FLUSHES, UINT64, AVERAGE),
	X("num-resident-handles", NUM_RESIDENT_HANDLES, UINT64, AVERAGE),
	X("tc-offloaded-slots", TC_OFFLOADED_SLOTS, UINT64, AVERAGE),
	X("tc-direct-slots", TC_DIRECT_SLOTS, UINT64, AVERAGE),
	X("tc-num-syncs", TC_NUM_SYNCS, UINT64, AVERAGE),
	X("CS-thread-busy", CS_THREAD_BUSY, UINT64, AVERAGE),
	X("gallium-thread-busy", GALLIUM_THREAD_BUSY, UINT64, AVERAGE),
	X("requested-VRAM", REQUESTED_VRAM, BYTES, AVERAGE),
	X("requested-GTT", REQUESTED_GTT, BYTES, AVERAGE),
	X("mapped-VRAM", MAPPED_VRAM, BYTES, AVERAGE),
	X("mapped-GTT", MAPPED_GTT, BYTES, AVERAGE),
	X("buffer-wait-time", BUFFER_WAIT_TIME, MICROSECONDS, CUMULATIVE),
	X("num-mapped-buffers", NUM_MAPPED_BUFFERS, UINT64, AVERAGE),
	X("num-GFX-IBs", NUM_GFX_IBS, UINT64, AVERAGE),
	X("num-SDMA-IBs", NUM_SDMA_IBS, UINT64, AVERAGE),
	X("GFX-BO-list-size", GFX_BO_LIST_SIZE, UINT64, AVERAGE),
	X("num-bytes-moved", NUM_BYTES_MOVED, BYTES, CUMULATIVE),
	X("num-evictions", NUM_EVICTIONS, UINT64, CUMULATIVE),
	X("VRAM-CPU-page-faults", NUM_VRAM_CPU_PAGE_FAULTS, UINT64, CUMULATIVE),
	X("VRAM-usage", VRAM_USAGE, BYTES, AVERAGE),
	X("VRAM-vis-usage", VRAM_VIS_USAGE, BYTES, AVERAGE),
	X("GTT-usage", GTT_USAGE, BYTES, AVERAGE),

	/* GPIN queries are for the benefit of old versions of GPUPerfStudio,
	 * which use them as a fallback path to detect the GPU type.
	 *
	 * Note: The names of these queries are significant for GPUPerfStudio
	 * (and possibly their order as well).
	 */
	XG(GPIN, "GPIN_000", GPIN_ASIC_ID, UINT, AVERAGE),
	XG(GPIN, "GPIN_001", GPIN_NUM_SIMD, UINT, AVERAGE),
	XG(GPIN, "GPIN_002", GPIN_NUM_RB, UINT, AVERAGE),
	XG(GPIN, "GPIN_003", GPIN_NUM_SPI, UINT, AVERAGE),
	XG(GPIN, "GPIN_004", GPIN_NUM_SE, UINT, AVERAGE),

	X("temperature", GPU_TEMPERATURE, UINT64, AVERAGE),
	X("shader-clock", CURRENT_GPU_SCLK, HZ, AVERAGE),
	X("memory-clock", CURRENT_GPU_MCLK, HZ, AVERAGE),

	/* The following queries must be at the end of the list because their
	 * availability is adjusted dynamically based on the DRM version. */
	X("GPU-load", GPU_LOAD, UINT64, AVERAGE),
	X("GPU-shaders-busy", GPU_SHADERS_BUSY, UINT64, AVERAGE),
	X("GPU-ta-busy", GPU_TA_BUSY, UINT64, AVERAGE),
	X("GPU-gds-busy", GPU_GDS_BUSY, UINT64, AVERAGE),
	X("GPU-vgt-busy", GPU_VGT_BUSY, UINT64, AVERAGE),
	X("GPU-ia-busy", GPU_IA_BUSY, UINT64, AVERAGE),
	X("GPU-sx-busy", GPU_SX_BUSY, UINT64, AVERAGE),
	X("GPU-wd-busy", GPU_WD_BUSY, UINT64, AVERAGE),
	X("GPU-bci-busy", GPU_BCI_BUSY, UINT64, AVERAGE),
	X("GPU-sc-busy", GPU_SC_BUSY, UINT64, AVERAGE),
	X("GPU-pa-busy", GPU_PA_BUSY, UINT64, AVERAGE),
	X("GPU-db-busy", GPU_DB_BUSY, UINT64, AVERAGE),
	X("GPU-cp-busy", GPU_CP_BUSY, UINT64, AVERAGE),
	X("GPU-cb-busy", GPU_CB_BUSY, UINT64, AVERAGE),
	X("GPU-sdma-busy", GPU_SDMA_BUSY, UINT64, AVERAGE),
	X("GPU-pfp-busy", GPU_PFP_BUSY, UINT64, AVERAGE),
	X("GPU-meq-busy", GPU_MEQ_BUSY, UINT64, AVERAGE),
	X("GPU-me-busy", GPU_ME_BUSY, UINT64, AVERAGE),
	X("GPU-surf-sync-busy", GPU_SURF_SYNC_BUSY, UINT64, AVERAGE),
	X("GPU-cp-dma-busy", GPU_CP_DMA_BUSY, UINT64, AVERAGE),
	X("GPU-scratch-ram-busy", GPU_SCRATCH_RAM_BUSY, UINT64, AVERAGE),
};

#undef X
#undef XG
#undef XFULL
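/* Hide the trailing queries (see the note at the end of
 * r600_driver_query_list) on kernels older than DRM 2.42. */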
static unsigned r600_get_num_queries(struct r600_common_screen *rscreen)
{
	if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 42)
		return ARRAY_SIZE(r600_driver_query_list);
	else
		return ARRAY_SIZE(r600_driver_query_list) - 25;
}

static int r600_get_driver_query_info(struct pipe_screen *screen,
				      unsigned index,
				      struct pipe_driver_query_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	unsigned num_queries = r600_get_num_queries(rscreen);

	if (!info) {
		unsigned num_perfcounters =
			r600_get_perfcounter_info(rscreen, 0, NULL);

		return num_queries + num_perfcounters;
	}

	if (index >= num_queries)
		return r600_get_perfcounter_info(rscreen, index - num_queries, info);

	*info = r600_driver_query_list[index];

	switch (info->query_type) {
	case R600_QUERY_REQUESTED_VRAM:
	case R600_QUERY_VRAM_USAGE:
	case R600_QUERY_MAPPED_VRAM:
		info->max_value.u64 = rscreen->info.vram_size;
		break;
	case R600_QUERY_REQUESTED_GTT:
	case R600_QUERY_GTT_USAGE:
	case R600_QUERY_MAPPED_GTT:
		info->max_value.u64 = rscreen->info.gart_size;
		break;
	case R600_QUERY_GPU_TEMPERATURE:
		info->max_value.u64 = 125;
		break;
	case R600_QUERY_VRAM_VIS_USAGE:
		info->max_value.u64 = rscreen->info.vram_vis_size;
		break;
	}
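	/* Perfcounter groups are enumerated before the software query groups
	 * (see r600_get_driver_query_group_info below), so shift this group
	 * id past them. */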
	if (info->group_id != ~(unsigned)0 && rscreen->perfcounters)
		info->group_id += rscreen->perfcounters->num_groups;

	return 1;
}

/* Note: Unfortunately, GPUPerfStudio hardcodes the order of hardware
 * performance counter groups, so be careful when changing this and related
 * functions.
 */
static int r600_get_driver_query_group_info(struct pipe_screen *screen,
					    unsigned index,
					    struct pipe_driver_query_group_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
	unsigned num_pc_groups = 0;

	if (rscreen->perfcounters)
		num_pc_groups = rscreen->perfcounters->num_groups;

	if (!info)
		return num_pc_groups + R600_NUM_SW_QUERY_GROUPS;

	if (index < num_pc_groups)
		return r600_get_perfcounter_group_info(rscreen, index, info);

	index -= num_pc_groups;
	if (index >= R600_NUM_SW_QUERY_GROUPS)
		return 0;

	info->name = "GPIN";
	info->max_active_queries = 5;
	info->num_queries = 5;
	return 1;
}

void r600_query_init(struct r600_common_context *rctx)
{
	rctx->b.create_query = r600_create_query;
	rctx->b.create_batch_query = r600_create_batch_query;
	rctx->b.destroy_query = r600_destroy_query;
	rctx->b.begin_query = r600_begin_query;
	rctx->b.end_query = r600_end_query;
	rctx->b.get_query_result = r600_get_query_result;
	rctx->b.get_query_result_resource = r600_get_query_result_resource;
	rctx->render_cond_atom.emit = r600_emit_query_predication;

	if (((struct r600_common_screen*)rctx->b.screen)->info.num_render_backends > 0)
		rctx->b.render_condition = r600_render_condition;

	LIST_INITHEAD(&rctx->active_queries);
}

void r600_init_screen_query_functions(struct r600_common_screen *rscreen)
{
	rscreen->b.get_driver_query_info = r600_get_driver_query_info;
	rscreen->b.get_driver_query_group_info = r600_get_driver_query_group_info;
}