1361fc4cbSmaya/*
 * Copyright 2016 Red Hat Inc.
3361fc4cbSmaya * Based on anv:
4361fc4cbSmaya * Copyright © 2015 Intel Corporation
5361fc4cbSmaya *
6361fc4cbSmaya * Permission is hereby granted, free of charge, to any person obtaining a
7361fc4cbSmaya * copy of this software and associated documentation files (the "Software"),
8361fc4cbSmaya * to deal in the Software without restriction, including without limitation
9361fc4cbSmaya * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10361fc4cbSmaya * and/or sell copies of the Software, and to permit persons to whom the
11361fc4cbSmaya * Software is furnished to do so, subject to the following conditions:
12361fc4cbSmaya *
13361fc4cbSmaya * The above copyright notice and this permission notice (including the next
14361fc4cbSmaya * paragraph) shall be included in all copies or substantial portions of the
15361fc4cbSmaya * Software.
16361fc4cbSmaya *
17361fc4cbSmaya * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18361fc4cbSmaya * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19361fc4cbSmaya * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
20361fc4cbSmaya * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21361fc4cbSmaya * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22361fc4cbSmaya * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23361fc4cbSmaya * DEALINGS IN THE SOFTWARE.
24361fc4cbSmaya */
25361fc4cbSmaya
26361fc4cbSmaya#include "tu_private.h"
27361fc4cbSmaya
28361fc4cbSmaya#include <assert.h>
29361fc4cbSmaya#include <fcntl.h>
30361fc4cbSmaya#include <stdbool.h>
31361fc4cbSmaya#include <string.h>
32361fc4cbSmaya#include <unistd.h>
33361fc4cbSmaya
347ec681f3Smrg#include "adreno_pm4.xml.h"
357ec681f3Smrg#include "adreno_common.xml.h"
367ec681f3Smrg#include "a6xx.xml.h"
377ec681f3Smrg
38361fc4cbSmaya#include "nir/nir_builder.h"
397ec681f3Smrg#include "util/os_time.h"
407ec681f3Smrg
417ec681f3Smrg#include "tu_cs.h"
427ec681f3Smrg#include "vk_util.h"
437ec681f3Smrg
#define NSEC_PER_SEC 1000000000ull
/* Seconds to busy-poll for an unavailable query before returning VK_TIMEOUT. */
#define WAIT_TIMEOUT 5
/* Number of pipeline-statistics counters: one per RBBM_PRIMCTR LO/HI register
 * pair between PRIMCTR_0 and PRIMCTR_10 inclusive. */
#define STAT_COUNT ((REG_A6XX_RBBM_PRIMCTR_10_LO - REG_A6XX_RBBM_PRIMCTR_0_LO) / 2 + 1)
477ec681f3Smrg
/* Common header embedded at the start of every query slot type.  A non-zero
 * 'available' word marks the slot's results as ready (see
 * query_is_available / query_available_iova below). */
struct PACKED query_slot {
   uint64_t available;
};
517ec681f3Smrg
/* One raw occlusion counter snapshot as laid out in memory by the GPU. */
struct PACKED occlusion_slot_value {
   /* Seems sample counters are placed to be 16-byte aligned
    * even though this query needs an 8-byte slot. */
   uint64_t value;
   uint64_t _padding;
};
587ec681f3Smrg
/* Slot layout for VK_QUERY_TYPE_OCCLUSION.  'result' is read back via
 * query_result_iova/addr; begin/end hold raw counter snapshots
 * (presumably written by the begin/end command paths, which are not
 * visible in this part of the file). */
struct PACKED occlusion_query_slot {
   struct query_slot common;
   uint64_t result;

   struct occlusion_slot_value begin;
   struct occlusion_slot_value end;
};
667ec681f3Smrg
/* Slot layout for VK_QUERY_TYPE_TIMESTAMP: a single 64-bit result. */
struct PACKED timestamp_query_slot {
   struct query_slot common;
   uint64_t result;
};
717ec681f3Smrg
/* One transform-feedback counter snapshot: a pair of 64-bit values
 * (addressed individually via primitive_query_iova(..., values[i])). */
struct PACKED primitive_slot_value {
   uint64_t values[2];
};
757ec681f3Smrg
/* Slot layout for VK_QUERY_TYPE_PIPELINE_STATISTICS: one 64-bit result per
 * RBBM_PRIMCTR counter, plus begin/end snapshots used to compute them. */
struct PACKED pipeline_stat_query_slot {
   struct query_slot common;
   uint64_t results[STAT_COUNT];

   uint64_t begin[STAT_COUNT];
   uint64_t end[STAT_COUNT];
};
837ec681f3Smrg
/* Slot layout for VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT. */
struct PACKED primitive_query_slot {
   struct query_slot common;
   /* The result of transform feedback queries is two integer values:
    *   results[0] is the count of primitives written,
    *   results[1] is the count of primitives generated.
    * Also a result for each stream is stored at 4 slots respectively.
    */
   uint64_t results[2];

   /* Primitive counters also need to be 16-byte aligned. */
   uint64_t _padding;

   /* Per-stream begin/end snapshots (4 streams). */
   struct primitive_slot_value begin[4];
   struct primitive_slot_value end[4];
};
997ec681f3Smrg
/* Per-counter storage for a performance query.  A perf query slot contains
 * one of these per requested counter (see the slot_size computation in
 * tu_CreateQueryPool).  Note there is no 'available' header here; it lives
 * once per slot in perf_query_slot.common. */
struct PACKED perfcntr_query_slot {
   uint64_t result;
   uint64_t begin;
   uint64_t end;
};
1057ec681f3Smrg
/* Slot layout for VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR.  The single embedded
 * perfcntr is extended at allocation time with (counterIndexCount - 1)
 * additional perfcntr_query_slot entries placed directly after it. */
struct PACKED perf_query_slot {
   struct query_slot common;
   struct perfcntr_query_slot perfcntr;
};
1107ec681f3Smrg
/* Returns the IOVA (GPU address) of a given uint64_t field in a given slot
 * of a query pool.  All of these helpers compute
 * pool BO base + slot offset + field offset. */
#define query_iova(type, pool, query, field)                         \
   pool->bo.iova + pool->stride * (query) + offsetof(type, field)

#define occlusion_query_iova(pool, query, field)                     \
   query_iova(struct occlusion_query_slot, pool, query, field)

#define pipeline_stat_query_iova(pool, query, field)                 \
   pool->bo.iova + pool->stride * (query) +                            \
   offsetof(struct pipeline_stat_query_slot, field)

/* IOVA of values[i] inside the primitive_slot_value named by 'field'. */
#define primitive_query_iova(pool, query, field, i)                  \
   query_iova(struct primitive_query_slot, pool, query, field) +     \
   offsetof(struct primitive_slot_value, values[i])

/* IOVA of 'field' in the i-th perfcntr_query_slot of a perf query slot
 * (the per-counter array starts right after the query_slot header). */
#define perf_query_iova(pool, query, field, i)                          \
   pool->bo.iova + pool->stride * (query) +                             \
   sizeof(struct query_slot) +                                   \
   sizeof(struct perfcntr_query_slot) * (i) +                          \
   offsetof(struct perfcntr_query_slot, field)

#define query_available_iova(pool, query)                            \
   query_iova(struct query_slot, pool, query, available)

/* IOVA of the i-th result entry; relies on every slot type placing its
 * result(s) immediately after the query_slot header. */
#define query_result_iova(pool, query, type, i)                            \
   pool->bo.iova + pool->stride * (query) +                          \
   sizeof(struct query_slot) + sizeof(type) * (i)

/* CPU-mapped counterpart of query_result_iova. */
#define query_result_addr(pool, query, type, i)                            \
   pool->bo.map + pool->stride * (query) +                             \
   sizeof(struct query_slot) + sizeof(type) * (i)

#define query_is_available(slot) slot->available
1457ec681f3Smrg
/* Maps freedreno perf-counter types to the Vulkan unit reported via
 * VK_KHR_performance_query.  Types without a matching VK unit fall back
 * to GENERIC. */
static const VkPerformanceCounterUnitKHR
fd_perfcntr_type_to_vk_unit[] = {
   [FD_PERFCNTR_TYPE_UINT]         = VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR,
   [FD_PERFCNTR_TYPE_UINT64]       = VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR,
   [FD_PERFCNTR_TYPE_FLOAT]        = VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR,
   [FD_PERFCNTR_TYPE_PERCENTAGE]   = VK_PERFORMANCE_COUNTER_UNIT_PERCENTAGE_KHR,
   [FD_PERFCNTR_TYPE_BYTES]        = VK_PERFORMANCE_COUNTER_UNIT_BYTES_KHR,
   /* TODO. can be UNIT_NANOSECONDS_KHR with a logic to compute */
   [FD_PERFCNTR_TYPE_MICROSECONDS] = VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR,
   [FD_PERFCNTR_TYPE_HZ]           = VK_PERFORMANCE_COUNTER_UNIT_HERTZ_KHR,
   [FD_PERFCNTR_TYPE_DBM]          = VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR,
   [FD_PERFCNTR_TYPE_TEMPERATURE]  = VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR,
   [FD_PERFCNTR_TYPE_VOLTS]        = VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR,
   [FD_PERFCNTR_TYPE_AMPS]         = VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR,
   [FD_PERFCNTR_TYPE_WATTS]        = VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR,
};
1627ec681f3Smrg
/* TODO. Basically this comes from the freedreno implementation where
 * only UINT64 is used. We'd better confirm this by the blob vulkan driver
 * when it starts supporting perf query.
 */
/* Maps freedreno perf-counter types to the Vulkan storage type reported
 * via VK_KHR_performance_query. */
static const VkPerformanceCounterStorageKHR
fd_perfcntr_type_to_vk_storage[] = {
   [FD_PERFCNTR_TYPE_UINT]         = VK_PERFORMANCE_COUNTER_STORAGE_UINT32_KHR,
   [FD_PERFCNTR_TYPE_UINT64]       = VK_PERFORMANCE_COUNTER_STORAGE_UINT64_KHR,
   [FD_PERFCNTR_TYPE_FLOAT]        = VK_PERFORMANCE_COUNTER_STORAGE_FLOAT32_KHR,
   [FD_PERFCNTR_TYPE_PERCENTAGE]   = VK_PERFORMANCE_COUNTER_STORAGE_FLOAT32_KHR,
   [FD_PERFCNTR_TYPE_BYTES]        = VK_PERFORMANCE_COUNTER_STORAGE_UINT64_KHR,
   [FD_PERFCNTR_TYPE_MICROSECONDS] = VK_PERFORMANCE_COUNTER_STORAGE_UINT64_KHR,
   [FD_PERFCNTR_TYPE_HZ]           = VK_PERFORMANCE_COUNTER_STORAGE_UINT64_KHR,
   [FD_PERFCNTR_TYPE_DBM]          = VK_PERFORMANCE_COUNTER_STORAGE_FLOAT32_KHR,
   [FD_PERFCNTR_TYPE_TEMPERATURE]  = VK_PERFORMANCE_COUNTER_STORAGE_FLOAT32_KHR,
   [FD_PERFCNTR_TYPE_VOLTS]        = VK_PERFORMANCE_COUNTER_STORAGE_FLOAT32_KHR,
   [FD_PERFCNTR_TYPE_AMPS]         = VK_PERFORMANCE_COUNTER_STORAGE_FLOAT32_KHR,
   [FD_PERFCNTR_TYPE_WATTS]        = VK_PERFORMANCE_COUNTER_STORAGE_FLOAT32_KHR,
};
1827ec681f3Smrg
1837ec681f3Smrg/*
1847ec681f3Smrg * Returns a pointer to a given slot in a query pool.
1857ec681f3Smrg */
1867ec681f3Smrgstatic void* slot_address(struct tu_query_pool *pool, uint32_t query)
1877ec681f3Smrg{
1887ec681f3Smrg   return (char*)pool->bo.map + query * pool->stride;
1897ec681f3Smrg}
1907ec681f3Smrg
1917ec681f3Smrgstatic void
1927ec681f3Smrgperfcntr_index(const struct fd_perfcntr_group *group, uint32_t group_count,
1937ec681f3Smrg               uint32_t index, uint32_t *gid, uint32_t *cid)
194361fc4cbSmaya
1957ec681f3Smrg{
1967ec681f3Smrg   uint32_t i;
1977ec681f3Smrg
1987ec681f3Smrg   for (i = 0; i < group_count; i++) {
1997ec681f3Smrg      if (group[i].num_countables > index) {
2007ec681f3Smrg         *gid = i;
2017ec681f3Smrg         *cid = index;
2027ec681f3Smrg         break;
2037ec681f3Smrg      }
2047ec681f3Smrg      index -= group[i].num_countables;
2057ec681f3Smrg   }
2067ec681f3Smrg
2077ec681f3Smrg   assert(i < group_count);
2087ec681f3Smrg}
2097ec681f3Smrg
2107ec681f3Smrgstatic int
2117ec681f3Smrgcompare_perfcntr_pass(const void *a, const void *b)
2127ec681f3Smrg{
2137ec681f3Smrg   return ((struct tu_perf_query_data *)a)->pass -
2147ec681f3Smrg          ((struct tu_perf_query_data *)b)->pass;
2157ec681f3Smrg}
2167ec681f3Smrg
/* Implements vkCreateQueryPool.  Allocates the pool object (for perf
 * queries, extended with a trailing tu_perf_query_data array) plus one BO
 * holding a fixed-size slot per query, and zeroes the BO so every query
 * starts unavailable. */
VKAPI_ATTR VkResult VKAPI_CALL
tu_CreateQueryPool(VkDevice _device,
                   const VkQueryPoolCreateInfo *pCreateInfo,
                   const VkAllocationCallbacks *pAllocator,
                   VkQueryPool *pQueryPool)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO);
   assert(pCreateInfo->queryCount > 0);

   uint32_t pool_size, slot_size;
   const VkQueryPoolPerformanceCreateInfoKHR *perf_query_info = NULL;

   pool_size = sizeof(struct tu_query_pool);

   /* Pick the per-query slot size for the requested query type. */
   switch (pCreateInfo->queryType) {
   case VK_QUERY_TYPE_OCCLUSION:
      slot_size = sizeof(struct occlusion_query_slot);
      break;
   case VK_QUERY_TYPE_TIMESTAMP:
      slot_size = sizeof(struct timestamp_query_slot);
      break;
   case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
      slot_size = sizeof(struct primitive_query_slot);
      break;
   case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR: {
      perf_query_info =
            vk_find_struct_const(pCreateInfo->pNext,
                                 QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR);
      assert(perf_query_info);

      /* perf_query_slot already embeds one perfcntr_query_slot, hence the
       * "- 1" when appending storage for the remaining counters. */
      slot_size = sizeof(struct perf_query_slot) +
                  sizeof(struct perfcntr_query_slot) *
                  (perf_query_info->counterIndexCount - 1);

      /* Size of the array pool->tu_perf_query_data */
      pool_size += sizeof(struct tu_perf_query_data) *
                   perf_query_info->counterIndexCount;
      break;
   }
   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
      slot_size = sizeof(struct pipeline_stat_query_slot);
      break;
   default:
      unreachable("Invalid query type");
   }

   struct tu_query_pool *pool =
         vk_object_alloc(&device->vk, pAllocator, pool_size,
                         VK_OBJECT_TYPE_QUERY_POOL);
   if (!pool)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   if (pCreateInfo->queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
      pool->perf_group = fd_perfcntrs(&device->physical_device->dev_id,
                                      &pool->perf_group_count);

      pool->counter_index_count = perf_query_info->counterIndexCount;

      /* Build all perf counters data that is requested, so we could get
       * correct group id, countable id, counter register and pass index with
       * only a counter index provided by applications at each command submit.
       *
       * Also, since this built data will be sorted by pass index later, we
       * should keep the original indices and store perfcntrs results according
       * to them so apps can get correct results with their own indices.
       */
      /* Per-group running counts: next free counter register and the pass
       * that register allocation is currently filling. */
      uint32_t regs[pool->perf_group_count], pass[pool->perf_group_count];
      memset(regs, 0x00, pool->perf_group_count * sizeof(regs[0]));
      memset(pass, 0x00, pool->perf_group_count * sizeof(pass[0]));

      for (uint32_t i = 0; i < pool->counter_index_count; i++) {
         uint32_t gid = 0, cid = 0;

         perfcntr_index(pool->perf_group, pool->perf_group_count,
                        perf_query_info->pCounterIndices[i], &gid, &cid);

         pool->perf_query_data[i].gid = gid;
         pool->perf_query_data[i].cid = cid;
         pool->perf_query_data[i].app_idx = i;

         /* When a counter register is over the capacity(num_counters),
          * reset it for next pass.
          */
         if (regs[gid] < pool->perf_group[gid].num_counters) {
            pool->perf_query_data[i].cntr_reg = regs[gid]++;
            pool->perf_query_data[i].pass = pass[gid];
         } else {
            pool->perf_query_data[i].pass = ++pass[gid];
            pool->perf_query_data[i].cntr_reg = regs[gid] = 0;
            regs[gid]++;
         }
      }

      /* Sort by pass index so we could easily prepare a command stream
       * with the ascending order of pass index.
       */
      qsort(pool->perf_query_data, pool->counter_index_count,
            sizeof(pool->perf_query_data[0]),
            compare_perfcntr_pass);
   }

   VkResult result = tu_bo_init_new(device, &pool->bo,
         pCreateInfo->queryCount * slot_size, TU_BO_ALLOC_NO_FLAGS);
   if (result != VK_SUCCESS) {
      vk_object_free(&device->vk, pAllocator, pool);
      return result;
   }

   result = tu_bo_map(device, &pool->bo);
   if (result != VK_SUCCESS) {
      tu_bo_finish(device, &pool->bo);
      vk_object_free(&device->vk, pAllocator, pool);
      return result;
   }

   /* Initialize all query statuses to unavailable */
   memset(pool->bo.map, 0, pool->bo.size);

   pool->type = pCreateInfo->queryType;
   pool->stride = slot_size;
   pool->size = pCreateInfo->queryCount;
   pool->pipeline_statistics = pCreateInfo->pipelineStatistics;
   *pQueryPool = tu_query_pool_to_handle(pool);

   return VK_SUCCESS;
}
344361fc4cbSmaya
3457ec681f3SmrgVKAPI_ATTR void VKAPI_CALL
346361fc4cbSmayatu_DestroyQueryPool(VkDevice _device,
347361fc4cbSmaya                    VkQueryPool _pool,
348361fc4cbSmaya                    const VkAllocationCallbacks *pAllocator)
349361fc4cbSmaya{
350361fc4cbSmaya   TU_FROM_HANDLE(tu_device, device, _device);
351361fc4cbSmaya   TU_FROM_HANDLE(tu_query_pool, pool, _pool);
352361fc4cbSmaya
353361fc4cbSmaya   if (!pool)
354361fc4cbSmaya      return;
355361fc4cbSmaya
3567ec681f3Smrg   tu_bo_finish(device, &pool->bo);
3577ec681f3Smrg   vk_object_free(&device->vk, pAllocator, pool);
3587ec681f3Smrg}
3597ec681f3Smrg
3607ec681f3Smrgstatic uint32_t
3617ec681f3Smrgget_result_count(struct tu_query_pool *pool)
3627ec681f3Smrg{
3637ec681f3Smrg   switch (pool->type) {
3647ec681f3Smrg   /* Occulusion and timestamp queries write one integer value */
3657ec681f3Smrg   case VK_QUERY_TYPE_OCCLUSION:
3667ec681f3Smrg   case VK_QUERY_TYPE_TIMESTAMP:
3677ec681f3Smrg      return 1;
3687ec681f3Smrg   /* Transform feedback queries write two integer values */
3697ec681f3Smrg   case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
3707ec681f3Smrg      return 2;
3717ec681f3Smrg   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
3727ec681f3Smrg      return util_bitcount(pool->pipeline_statistics);
3737ec681f3Smrg   case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR:
3747ec681f3Smrg      return pool->counter_index_count;
3757ec681f3Smrg   default:
3767ec681f3Smrg      assert(!"Invalid query type");
3777ec681f3Smrg      return 0;
3787ec681f3Smrg   }
3797ec681f3Smrg}
3807ec681f3Smrg
/* Consumes the lowest set bit from *statistics and returns the index of the
 * corresponding entry in pipeline_stat_query_slot::results.  Note the
 * mapping is hardware-defined: two statistic bits can share one index,
 * and index 3 is skipped (presumably a counter not exposed through
 * Vulkan pipeline statistics — see the RBBM_PRIMCTR registers). */
static uint32_t
statistics_index(uint32_t *statistics)
{
   uint32_t stat;
   stat = u_bit_scan(statistics);

   switch (1 << stat) {
   case VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT:
   case VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT:
      return 0;
   case VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_PRIMITIVES_BIT:
      return 1;
   case VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_CONTROL_SHADER_PATCHES_BIT:
      return 2;
   case VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_EVALUATION_SHADER_INVOCATIONS_BIT:
      return 4;
   case VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_INVOCATIONS_BIT:
      return 5;
   case VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT:
      return 6;
   case VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT:
      return 7;
   case VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT:
      return 8;
   case VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT:
      return 9;
   case VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT:
      return 10;
   default:
      return 0;
   }
}
413361fc4cbSmaya
4147ec681f3Smrg/* Wait on the the availability status of a query up until a timeout. */
4157ec681f3Smrgstatic VkResult
4167ec681f3Smrgwait_for_available(struct tu_device *device, struct tu_query_pool *pool,
4177ec681f3Smrg                   uint32_t query)
4187ec681f3Smrg{
4197ec681f3Smrg   /* TODO: Use the MSM_IOVA_WAIT ioctl to wait on the available bit in a
4207ec681f3Smrg    * scheduler friendly way instead of busy polling once the patch has landed
4217ec681f3Smrg    * upstream. */
4227ec681f3Smrg   struct query_slot *slot = slot_address(pool, query);
4237ec681f3Smrg   uint64_t abs_timeout = os_time_get_absolute_timeout(
4247ec681f3Smrg         WAIT_TIMEOUT * NSEC_PER_SEC);
4257ec681f3Smrg   while(os_time_get_nano() < abs_timeout) {
4267ec681f3Smrg      if (query_is_available(slot))
4277ec681f3Smrg         return VK_SUCCESS;
4287ec681f3Smrg   }
4297ec681f3Smrg   return vk_error(device, VK_TIMEOUT);
4307ec681f3Smrg}
4317ec681f3Smrg
4327ec681f3Smrg/* Writes a query value to a buffer from the CPU. */
4337ec681f3Smrgstatic void
4347ec681f3Smrgwrite_query_value_cpu(char* base,
4357ec681f3Smrg                      uint32_t offset,
4367ec681f3Smrg                      uint64_t value,
4377ec681f3Smrg                      VkQueryResultFlags flags)
4387ec681f3Smrg{
4397ec681f3Smrg   if (flags & VK_QUERY_RESULT_64_BIT) {
4407ec681f3Smrg      *(uint64_t*)(base + (offset * sizeof(uint64_t))) = value;
4417ec681f3Smrg   } else {
4427ec681f3Smrg      *(uint32_t*)(base + (offset * sizeof(uint32_t))) = value;
4437ec681f3Smrg   }
4447ec681f3Smrg}
4457ec681f3Smrg
/* CPU read-back path shared by all query types: copies up to queryCount
 * results (starting at firstQuery) into pData, honoring the WAIT, PARTIAL,
 * 64_BIT and WITH_AVAILABILITY result flags.  Returns VK_NOT_READY if any
 * non-waited query was unavailable. */
static VkResult
get_query_pool_results(struct tu_device *device,
                       struct tu_query_pool *pool,
                       uint32_t firstQuery,
                       uint32_t queryCount,
                       size_t dataSize,
                       void *pData,
                       VkDeviceSize stride,
                       VkQueryResultFlags flags)
{
   assert(dataSize >= stride * queryCount);

   char *result_base = pData;
   VkResult result = VK_SUCCESS;
   for (uint32_t i = 0; i < queryCount; i++) {
      uint32_t query = firstQuery + i;
      struct query_slot *slot = slot_address(pool, query);
      bool available = query_is_available(slot);
      uint32_t result_count = get_result_count(pool);
      /* Local copy: statistics_index() consumes bits from it. */
      uint32_t statistics = pool->pipeline_statistics;

      if ((flags & VK_QUERY_RESULT_WAIT_BIT) && !available) {
         VkResult wait_result = wait_for_available(device, pool, query);
         if (wait_result != VK_SUCCESS)
            return wait_result;
         available = true;
      } else if (!(flags & VK_QUERY_RESULT_PARTIAL_BIT) && !available) {
         /* From the Vulkan 1.1.130 spec:
          *
          *    If VK_QUERY_RESULT_WAIT_BIT and VK_QUERY_RESULT_PARTIAL_BIT are
          *    both not set then no result values are written to pData for
          *    queries that are in the unavailable state at the time of the
          *    call, and vkGetQueryPoolResults returns VK_NOT_READY. However,
          *    availability state is still written to pData for those queries
          *    if VK_QUERY_RESULT_WITH_AVAILABILITY_BIT is set.
          */
         result = VK_NOT_READY;
         if (!(flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)) {
            result_base += stride;
            continue;
         }
      }

      for (uint32_t k = 0; k < result_count; k++) {
         if (available) {
            uint64_t *result;

            /* Locate the k-th result inside the slot; the offset depends on
             * the slot layout of the pool's query type. */
            if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
               uint32_t stat_idx = statistics_index(&statistics);
               result = query_result_addr(pool, query, uint64_t, stat_idx);
            } else if (pool->type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
               result = query_result_addr(pool, query, struct perfcntr_query_slot, k);
            } else {
               result = query_result_addr(pool, query, uint64_t, k);
            }

            write_query_value_cpu(result_base, k, *result, flags);
         } else if (flags & VK_QUERY_RESULT_PARTIAL_BIT)
             /* From the Vulkan 1.1.130 spec:
              *
              *   If VK_QUERY_RESULT_PARTIAL_BIT is set, VK_QUERY_RESULT_WAIT_BIT
              *   is not set, and the query’s status is unavailable, an
              *   intermediate result value between zero and the final result
              *   value is written to pData for that query.
              *
              * Just return 0 here for simplicity since it's a valid result.
              */
            write_query_value_cpu(result_base, k, 0, flags);
      }

      if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)
         /* From the Vulkan 1.1.130 spec:
          *
          *    If VK_QUERY_RESULT_WITH_AVAILABILITY_BIT is set, the final
          *    integer value written for each query is non-zero if the query’s
          *    status was available or zero if the status was unavailable.
          */
         write_query_value_cpu(result_base, result_count, available, flags);

      result_base += stride;
   }
   return result;
}
5297ec681f3Smrg
5307ec681f3SmrgVKAPI_ATTR VkResult VKAPI_CALL
531361fc4cbSmayatu_GetQueryPoolResults(VkDevice _device,
532361fc4cbSmaya                       VkQueryPool queryPool,
533361fc4cbSmaya                       uint32_t firstQuery,
534361fc4cbSmaya                       uint32_t queryCount,
535361fc4cbSmaya                       size_t dataSize,
536361fc4cbSmaya                       void *pData,
537361fc4cbSmaya                       VkDeviceSize stride,
538361fc4cbSmaya                       VkQueryResultFlags flags)
539361fc4cbSmaya{
5407ec681f3Smrg   TU_FROM_HANDLE(tu_device, device, _device);
5417ec681f3Smrg   TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
5427ec681f3Smrg   assert(firstQuery + queryCount <= pool->size);
5437ec681f3Smrg
5447ec681f3Smrg   if (tu_device_is_lost(device))
5457ec681f3Smrg      return VK_ERROR_DEVICE_LOST;
5467ec681f3Smrg
5477ec681f3Smrg   switch (pool->type) {
5487ec681f3Smrg   case VK_QUERY_TYPE_OCCLUSION:
5497ec681f3Smrg   case VK_QUERY_TYPE_TIMESTAMP:
5507ec681f3Smrg   case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
5517ec681f3Smrg   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
5527ec681f3Smrg   case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR:
5537ec681f3Smrg      return get_query_pool_results(device, pool, firstQuery, queryCount,
5547ec681f3Smrg                                    dataSize, pData, stride, flags);
5557ec681f3Smrg   default:
5567ec681f3Smrg      assert(!"Invalid query type");
5577ec681f3Smrg   }
558361fc4cbSmaya   return VK_SUCCESS;
559361fc4cbSmaya}
560361fc4cbSmaya
5617ec681f3Smrg/* Copies a query value from one buffer to another from the GPU. */
5627ec681f3Smrgstatic void
5637ec681f3Smrgcopy_query_value_gpu(struct tu_cmd_buffer *cmdbuf,
5647ec681f3Smrg                     struct tu_cs *cs,
5657ec681f3Smrg                     uint64_t src_iova,
5667ec681f3Smrg                     uint64_t base_write_iova,
5677ec681f3Smrg                     uint32_t offset,
5687ec681f3Smrg                     VkQueryResultFlags flags) {
5697ec681f3Smrg   uint32_t element_size = flags & VK_QUERY_RESULT_64_BIT ?
5707ec681f3Smrg         sizeof(uint64_t) : sizeof(uint32_t);
5717ec681f3Smrg   uint64_t write_iova = base_write_iova + (offset * element_size);
5727ec681f3Smrg
5737ec681f3Smrg   tu_cs_emit_pkt7(cs, CP_MEM_TO_MEM, 5);
5747ec681f3Smrg   uint32_t mem_to_mem_flags = flags & VK_QUERY_RESULT_64_BIT ?
5757ec681f3Smrg         CP_MEM_TO_MEM_0_DOUBLE : 0;
5767ec681f3Smrg   tu_cs_emit(cs, mem_to_mem_flags);
5777ec681f3Smrg   tu_cs_emit_qw(cs, write_iova);
5787ec681f3Smrg   tu_cs_emit_qw(cs, src_iova);
5797ec681f3Smrg}
5807ec681f3Smrg
5817ec681f3Smrgstatic void
5827ec681f3Smrgemit_copy_query_pool_results(struct tu_cmd_buffer *cmdbuf,
5837ec681f3Smrg                             struct tu_cs *cs,
5847ec681f3Smrg                             struct tu_query_pool *pool,
5857ec681f3Smrg                             uint32_t firstQuery,
5867ec681f3Smrg                             uint32_t queryCount,
5877ec681f3Smrg                             struct tu_buffer *buffer,
5887ec681f3Smrg                             VkDeviceSize dstOffset,
5897ec681f3Smrg                             VkDeviceSize stride,
5907ec681f3Smrg                             VkQueryResultFlags flags)
5917ec681f3Smrg{
5927ec681f3Smrg   /* From the Vulkan 1.1.130 spec:
5937ec681f3Smrg    *
5947ec681f3Smrg    *    vkCmdCopyQueryPoolResults is guaranteed to see the effect of previous
5957ec681f3Smrg    *    uses of vkCmdResetQueryPool in the same queue, without any additional
5967ec681f3Smrg    *    synchronization.
5977ec681f3Smrg    *
5987ec681f3Smrg    * To ensure that previous writes to the available bit are coherent, first
5997ec681f3Smrg    * wait for all writes to complete.
6007ec681f3Smrg    */
6017ec681f3Smrg   tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);
6027ec681f3Smrg
6037ec681f3Smrg   for (uint32_t i = 0; i < queryCount; i++) {
6047ec681f3Smrg      uint32_t query = firstQuery + i;
6057ec681f3Smrg      uint64_t available_iova = query_available_iova(pool, query);
6067ec681f3Smrg      uint64_t buffer_iova = tu_buffer_iova(buffer) + dstOffset + i * stride;
6077ec681f3Smrg      uint32_t result_count = get_result_count(pool);
6087ec681f3Smrg      uint32_t statistics = pool->pipeline_statistics;
6097ec681f3Smrg
6107ec681f3Smrg      /* Wait for the available bit to be set if executed with the
6117ec681f3Smrg       * VK_QUERY_RESULT_WAIT_BIT flag. */
6127ec681f3Smrg      if (flags & VK_QUERY_RESULT_WAIT_BIT) {
6137ec681f3Smrg         tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
6147ec681f3Smrg         tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
6157ec681f3Smrg                        CP_WAIT_REG_MEM_0_POLL_MEMORY);
6167ec681f3Smrg         tu_cs_emit_qw(cs, available_iova);
6177ec681f3Smrg         tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(0x1));
6187ec681f3Smrg         tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0));
6197ec681f3Smrg         tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(16));
6207ec681f3Smrg      }
6217ec681f3Smrg
6227ec681f3Smrg      for (uint32_t k = 0; k < result_count; k++) {
6237ec681f3Smrg         uint64_t result_iova;
6247ec681f3Smrg
6257ec681f3Smrg         if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
6267ec681f3Smrg            uint32_t stat_idx = statistics_index(&statistics);
6277ec681f3Smrg            result_iova = query_result_iova(pool, query, uint64_t, stat_idx);
6287ec681f3Smrg         } else if (pool->type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
6297ec681f3Smrg            result_iova = query_result_iova(pool, query,
6307ec681f3Smrg                                            struct perfcntr_query_slot, k);
6317ec681f3Smrg         } else {
6327ec681f3Smrg            result_iova = query_result_iova(pool, query, uint64_t, k);
6337ec681f3Smrg         }
6347ec681f3Smrg
6357ec681f3Smrg         if (flags & VK_QUERY_RESULT_PARTIAL_BIT) {
6367ec681f3Smrg            /* Unconditionally copying the bo->result into the buffer here is
6377ec681f3Smrg             * valid because we only set bo->result on vkCmdEndQuery. Thus, even
6387ec681f3Smrg             * if the query is unavailable, this will copy the correct partial
6397ec681f3Smrg             * value of 0.
6407ec681f3Smrg             */
6417ec681f3Smrg            copy_query_value_gpu(cmdbuf, cs, result_iova, buffer_iova,
6427ec681f3Smrg                                 k /* offset */, flags);
6437ec681f3Smrg         } else {
6447ec681f3Smrg            /* Conditionally copy bo->result into the buffer based on whether the
6457ec681f3Smrg             * query is available.
6467ec681f3Smrg             *
6477ec681f3Smrg             * NOTE: For the conditional packets to be executed, CP_COND_EXEC
6487ec681f3Smrg             * tests that ADDR0 != 0 and ADDR1 < REF. The packet here simply tests
6497ec681f3Smrg             * that 0 < available < 2, aka available == 1.
6507ec681f3Smrg             */
6517ec681f3Smrg            tu_cs_reserve(cs, 7 + 6);
6527ec681f3Smrg            tu_cs_emit_pkt7(cs, CP_COND_EXEC, 6);
6537ec681f3Smrg            tu_cs_emit_qw(cs, available_iova);
6547ec681f3Smrg            tu_cs_emit_qw(cs, available_iova);
6557ec681f3Smrg            tu_cs_emit(cs, CP_COND_EXEC_4_REF(0x2));
6567ec681f3Smrg            tu_cs_emit(cs, 6); /* Cond execute the next 6 DWORDS */
6577ec681f3Smrg
6587ec681f3Smrg            /* Start of conditional execution */
6597ec681f3Smrg            copy_query_value_gpu(cmdbuf, cs, result_iova, buffer_iova,
6607ec681f3Smrg                              k /* offset */, flags);
6617ec681f3Smrg            /* End of conditional execution */
6627ec681f3Smrg         }
6637ec681f3Smrg      }
6647ec681f3Smrg
6657ec681f3Smrg      if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
6667ec681f3Smrg         copy_query_value_gpu(cmdbuf, cs, available_iova, buffer_iova,
6677ec681f3Smrg                              result_count /* offset */, flags);
6687ec681f3Smrg      }
6697ec681f3Smrg   }
6707ec681f3Smrg}
6717ec681f3Smrg
6727ec681f3SmrgVKAPI_ATTR void VKAPI_CALL
673361fc4cbSmayatu_CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer,
674361fc4cbSmaya                           VkQueryPool queryPool,
675361fc4cbSmaya                           uint32_t firstQuery,
676361fc4cbSmaya                           uint32_t queryCount,
677361fc4cbSmaya                           VkBuffer dstBuffer,
678361fc4cbSmaya                           VkDeviceSize dstOffset,
679361fc4cbSmaya                           VkDeviceSize stride,
680361fc4cbSmaya                           VkQueryResultFlags flags)
681361fc4cbSmaya{
6827ec681f3Smrg   TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer);
6837ec681f3Smrg   TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
6847ec681f3Smrg   TU_FROM_HANDLE(tu_buffer, buffer, dstBuffer);
6857ec681f3Smrg   struct tu_cs *cs = &cmdbuf->cs;
6867ec681f3Smrg   assert(firstQuery + queryCount <= pool->size);
6877ec681f3Smrg
6887ec681f3Smrg   switch (pool->type) {
6897ec681f3Smrg   case VK_QUERY_TYPE_OCCLUSION:
6907ec681f3Smrg   case VK_QUERY_TYPE_TIMESTAMP:
6917ec681f3Smrg   case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
6927ec681f3Smrg   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
6937ec681f3Smrg      return emit_copy_query_pool_results(cmdbuf, cs, pool, firstQuery,
6947ec681f3Smrg               queryCount, buffer, dstOffset, stride, flags);
6957ec681f3Smrg   case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR:
6967ec681f3Smrg      unreachable("allowCommandBufferQueryCopies is false");
6977ec681f3Smrg   default:
6987ec681f3Smrg      assert(!"Invalid query type");
6997ec681f3Smrg   }
700361fc4cbSmaya}
701361fc4cbSmaya
7027ec681f3Smrgstatic void
7037ec681f3Smrgemit_reset_query_pool(struct tu_cmd_buffer *cmdbuf,
7047ec681f3Smrg                      struct tu_query_pool *pool,
7057ec681f3Smrg                      uint32_t firstQuery,
7067ec681f3Smrg                      uint32_t queryCount)
7077ec681f3Smrg{
7087ec681f3Smrg   struct tu_cs *cs = &cmdbuf->cs;
7097ec681f3Smrg
7107ec681f3Smrg   for (uint32_t i = 0; i < queryCount; i++) {
7117ec681f3Smrg      uint32_t query = firstQuery + i;
7127ec681f3Smrg      uint32_t statistics = pool->pipeline_statistics;
7137ec681f3Smrg
7147ec681f3Smrg      tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 4);
7157ec681f3Smrg      tu_cs_emit_qw(cs, query_available_iova(pool, query));
7167ec681f3Smrg      tu_cs_emit_qw(cs, 0x0);
7177ec681f3Smrg
7187ec681f3Smrg      for (uint32_t k = 0; k < get_result_count(pool); k++) {
7197ec681f3Smrg         uint64_t result_iova;
7207ec681f3Smrg
7217ec681f3Smrg         if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
7227ec681f3Smrg            uint32_t stat_idx = statistics_index(&statistics);
7237ec681f3Smrg            result_iova = query_result_iova(pool, query, uint64_t, stat_idx);
7247ec681f3Smrg         } else if (pool->type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
7257ec681f3Smrg            result_iova = query_result_iova(pool, query,
7267ec681f3Smrg                                            struct perfcntr_query_slot, k);
7277ec681f3Smrg         } else {
7287ec681f3Smrg            result_iova = query_result_iova(pool, query, uint64_t, k);
7297ec681f3Smrg         }
7307ec681f3Smrg
7317ec681f3Smrg         tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 4);
7327ec681f3Smrg         tu_cs_emit_qw(cs, result_iova);
7337ec681f3Smrg         tu_cs_emit_qw(cs, 0x0);
7347ec681f3Smrg      }
7357ec681f3Smrg   }
7367ec681f3Smrg
7377ec681f3Smrg}
7387ec681f3Smrg
7397ec681f3SmrgVKAPI_ATTR void VKAPI_CALL
740361fc4cbSmayatu_CmdResetQueryPool(VkCommandBuffer commandBuffer,
741361fc4cbSmaya                     VkQueryPool queryPool,
742361fc4cbSmaya                     uint32_t firstQuery,
743361fc4cbSmaya                     uint32_t queryCount)
744361fc4cbSmaya{
7457ec681f3Smrg   TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer);
7467ec681f3Smrg   TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
7477ec681f3Smrg
7487ec681f3Smrg   switch (pool->type) {
7497ec681f3Smrg   case VK_QUERY_TYPE_TIMESTAMP:
7507ec681f3Smrg   case VK_QUERY_TYPE_OCCLUSION:
7517ec681f3Smrg   case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
7527ec681f3Smrg   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
7537ec681f3Smrg   case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR:
7547ec681f3Smrg      emit_reset_query_pool(cmdbuf, pool, firstQuery, queryCount);
7557ec681f3Smrg      break;
7567ec681f3Smrg   default:
7577ec681f3Smrg      assert(!"Invalid query type");
7587ec681f3Smrg   }
7597ec681f3Smrg}
7607ec681f3Smrg
7617ec681f3SmrgVKAPI_ATTR void VKAPI_CALL
7627ec681f3Smrgtu_ResetQueryPool(VkDevice device,
7637ec681f3Smrg                  VkQueryPool queryPool,
7647ec681f3Smrg                  uint32_t firstQuery,
7657ec681f3Smrg                  uint32_t queryCount)
7667ec681f3Smrg{
7677ec681f3Smrg   TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
7687ec681f3Smrg
7697ec681f3Smrg   for (uint32_t i = 0; i < queryCount; i++) {
7707ec681f3Smrg      struct query_slot *slot = slot_address(pool, i + firstQuery);
7717ec681f3Smrg      slot->available = 0;
7727ec681f3Smrg
7737ec681f3Smrg      for (uint32_t k = 0; k < get_result_count(pool); k++) {
7747ec681f3Smrg         uint64_t *res;
7757ec681f3Smrg
7767ec681f3Smrg         if (pool->type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
7777ec681f3Smrg            res = query_result_addr(pool, i + firstQuery,
7787ec681f3Smrg                                    struct perfcntr_query_slot, k);
7797ec681f3Smrg         } else {
7807ec681f3Smrg            res = query_result_addr(pool, i + firstQuery, uint64_t, k);
7817ec681f3Smrg         }
7827ec681f3Smrg
7837ec681f3Smrg         *res = 0;
7847ec681f3Smrg      }
7857ec681f3Smrg   }
7867ec681f3Smrg}
7877ec681f3Smrg
7887ec681f3Smrgstatic void
7897ec681f3Smrgemit_begin_occlusion_query(struct tu_cmd_buffer *cmdbuf,
7907ec681f3Smrg                           struct tu_query_pool *pool,
7917ec681f3Smrg                           uint32_t query)
7927ec681f3Smrg{
7937ec681f3Smrg   /* From the Vulkan 1.1.130 spec:
7947ec681f3Smrg    *
7957ec681f3Smrg    *    A query must begin and end inside the same subpass of a render pass
7967ec681f3Smrg    *    instance, or must both begin and end outside of a render pass
7977ec681f3Smrg    *    instance.
7987ec681f3Smrg    *
7997ec681f3Smrg    * Unlike on an immediate-mode renderer, Turnip renders all tiles on
8007ec681f3Smrg    * vkCmdEndRenderPass, not individually on each vkCmdDraw*. As such, if a
8017ec681f3Smrg    * query begins/ends inside the same subpass of a render pass, we need to
8027ec681f3Smrg    * record the packets on the secondary draw command stream. cmdbuf->draw_cs
8037ec681f3Smrg    * is then run on every tile during render, so we just need to accumulate
8047ec681f3Smrg    * sample counts in slot->result to compute the query result.
8057ec681f3Smrg    */
8067ec681f3Smrg   struct tu_cs *cs = cmdbuf->state.pass ? &cmdbuf->draw_cs : &cmdbuf->cs;
8077ec681f3Smrg
8087ec681f3Smrg   uint64_t begin_iova = occlusion_query_iova(pool, query, begin);
8097ec681f3Smrg
8107ec681f3Smrg   tu_cs_emit_regs(cs,
8117ec681f3Smrg                   A6XX_RB_SAMPLE_COUNT_CONTROL(.copy = true));
8127ec681f3Smrg
8137ec681f3Smrg   tu_cs_emit_regs(cs,
8147ec681f3Smrg                   A6XX_RB_SAMPLE_COUNT_ADDR(.qword = begin_iova));
8157ec681f3Smrg
8167ec681f3Smrg   tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
8177ec681f3Smrg   tu_cs_emit(cs, ZPASS_DONE);
8187ec681f3Smrg}
8197ec681f3Smrg
8207ec681f3Smrgstatic void
8217ec681f3Smrgemit_begin_stat_query(struct tu_cmd_buffer *cmdbuf,
8227ec681f3Smrg                      struct tu_query_pool *pool,
8237ec681f3Smrg                      uint32_t query)
8247ec681f3Smrg{
8257ec681f3Smrg   struct tu_cs *cs = cmdbuf->state.pass ? &cmdbuf->draw_cs : &cmdbuf->cs;
8267ec681f3Smrg   uint64_t begin_iova = pipeline_stat_query_iova(pool, query, begin);
8277ec681f3Smrg
8287ec681f3Smrg   tu6_emit_event_write(cmdbuf, cs, START_PRIMITIVE_CTRS);
8297ec681f3Smrg   tu6_emit_event_write(cmdbuf, cs, RST_PIX_CNT);
8307ec681f3Smrg   tu6_emit_event_write(cmdbuf, cs, TILE_FLUSH);
8317ec681f3Smrg
8327ec681f3Smrg   tu_cs_emit_wfi(cs);
8337ec681f3Smrg
8347ec681f3Smrg   tu_cs_emit_pkt7(cs, CP_REG_TO_MEM, 3);
8357ec681f3Smrg   tu_cs_emit(cs, CP_REG_TO_MEM_0_REG(REG_A6XX_RBBM_PRIMCTR_0_LO) |
8367ec681f3Smrg                  CP_REG_TO_MEM_0_CNT(STAT_COUNT * 2) |
8377ec681f3Smrg                  CP_REG_TO_MEM_0_64B);
8387ec681f3Smrg   tu_cs_emit_qw(cs, begin_iova);
8397ec681f3Smrg}
8407ec681f3Smrg
8417ec681f3Smrgstatic void
8427ec681f3Smrgemit_perfcntrs_pass_start(struct tu_cs *cs, uint32_t pass)
8437ec681f3Smrg{
8447ec681f3Smrg   tu_cs_emit_pkt7(cs, CP_REG_TEST, 1);
8457ec681f3Smrg   tu_cs_emit(cs, A6XX_CP_REG_TEST_0_REG(
8467ec681f3Smrg                        REG_A6XX_CP_SCRATCH_REG(PERF_CNTRS_REG)) |
8477ec681f3Smrg                  A6XX_CP_REG_TEST_0_BIT(pass) |
8487ec681f3Smrg                  A6XX_CP_REG_TEST_0_WAIT_FOR_ME);
8497ec681f3Smrg   tu_cond_exec_start(cs, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
8507ec681f3Smrg}
8517ec681f3Smrg
8527ec681f3Smrgstatic void
8537ec681f3Smrgemit_begin_perf_query(struct tu_cmd_buffer *cmdbuf,
8547ec681f3Smrg                           struct tu_query_pool *pool,
8557ec681f3Smrg                           uint32_t query)
8567ec681f3Smrg{
8577ec681f3Smrg   struct tu_cs *cs = cmdbuf->state.pass ? &cmdbuf->draw_cs : &cmdbuf->cs;
8587ec681f3Smrg   uint32_t last_pass = ~0;
8597ec681f3Smrg
8607ec681f3Smrg   /* Querying perf counters happens in these steps:
8617ec681f3Smrg    *
8627ec681f3Smrg    *  0) There's a scratch reg to set a pass index for perf counters query.
8637ec681f3Smrg    *     Prepare cmd streams to set each pass index to the reg at device
8647ec681f3Smrg    *     creation time. See tu_CreateDevice in tu_device.c
8657ec681f3Smrg    *  1) Emit command streams to read all requested perf counters at all
8667ec681f3Smrg    *     passes in begin/end query with CP_REG_TEST/CP_COND_REG_EXEC, which
8677ec681f3Smrg    *     reads the scratch reg where pass index is set.
8687ec681f3Smrg    *     See emit_perfcntrs_pass_start.
8697ec681f3Smrg    *  2) Pick the right cs setting proper pass index to the reg and prepend
8707ec681f3Smrg    *     it to the command buffer at each submit time.
8717ec681f3Smrg    *     See tu_QueueSubmit in tu_drm.c
8727ec681f3Smrg    *  3) If the pass index in the reg is true, then executes the command
8737ec681f3Smrg    *     stream below CP_COND_REG_EXEC.
8747ec681f3Smrg    */
8757ec681f3Smrg
8767ec681f3Smrg   tu_cs_emit_wfi(cs);
8777ec681f3Smrg
8787ec681f3Smrg   for (uint32_t i = 0; i < pool->counter_index_count; i++) {
8797ec681f3Smrg      struct tu_perf_query_data *data = &pool->perf_query_data[i];
8807ec681f3Smrg
8817ec681f3Smrg      if (last_pass != data->pass) {
8827ec681f3Smrg         last_pass = data->pass;
8837ec681f3Smrg
8847ec681f3Smrg         if (data->pass != 0)
8857ec681f3Smrg            tu_cond_exec_end(cs);
8867ec681f3Smrg         emit_perfcntrs_pass_start(cs, data->pass);
8877ec681f3Smrg      }
8887ec681f3Smrg
8897ec681f3Smrg      const struct fd_perfcntr_counter *counter =
8907ec681f3Smrg            &pool->perf_group[data->gid].counters[data->cntr_reg];
8917ec681f3Smrg      const struct fd_perfcntr_countable *countable =
8927ec681f3Smrg            &pool->perf_group[data->gid].countables[data->cid];
8937ec681f3Smrg
8947ec681f3Smrg      tu_cs_emit_pkt4(cs, counter->select_reg, 1);
8957ec681f3Smrg      tu_cs_emit(cs, countable->selector);
8967ec681f3Smrg   }
8977ec681f3Smrg   tu_cond_exec_end(cs);
8987ec681f3Smrg
8997ec681f3Smrg   last_pass = ~0;
9007ec681f3Smrg   tu_cs_emit_wfi(cs);
9017ec681f3Smrg
9027ec681f3Smrg   for (uint32_t i = 0; i < pool->counter_index_count; i++) {
9037ec681f3Smrg      struct tu_perf_query_data *data = &pool->perf_query_data[i];
9047ec681f3Smrg
9057ec681f3Smrg      if (last_pass != data->pass) {
9067ec681f3Smrg         last_pass = data->pass;
9077ec681f3Smrg
9087ec681f3Smrg         if (data->pass != 0)
9097ec681f3Smrg            tu_cond_exec_end(cs);
9107ec681f3Smrg         emit_perfcntrs_pass_start(cs, data->pass);
9117ec681f3Smrg      }
9127ec681f3Smrg
9137ec681f3Smrg      const struct fd_perfcntr_counter *counter =
9147ec681f3Smrg            &pool->perf_group[data->gid].counters[data->cntr_reg];
9157ec681f3Smrg
9167ec681f3Smrg      uint64_t begin_iova = perf_query_iova(pool, 0, begin, data->app_idx);
9177ec681f3Smrg
9187ec681f3Smrg      tu_cs_emit_pkt7(cs, CP_REG_TO_MEM, 3);
9197ec681f3Smrg      tu_cs_emit(cs, CP_REG_TO_MEM_0_REG(counter->counter_reg_lo) |
9207ec681f3Smrg                     CP_REG_TO_MEM_0_64B);
9217ec681f3Smrg      tu_cs_emit_qw(cs, begin_iova);
9227ec681f3Smrg   }
9237ec681f3Smrg   tu_cond_exec_end(cs);
9247ec681f3Smrg}
9257ec681f3Smrg
9267ec681f3Smrgstatic void
9277ec681f3Smrgemit_begin_xfb_query(struct tu_cmd_buffer *cmdbuf,
9287ec681f3Smrg                     struct tu_query_pool *pool,
9297ec681f3Smrg                     uint32_t query,
9307ec681f3Smrg                     uint32_t stream_id)
9317ec681f3Smrg{
9327ec681f3Smrg   struct tu_cs *cs = cmdbuf->state.pass ? &cmdbuf->draw_cs : &cmdbuf->cs;
9337ec681f3Smrg   uint64_t begin_iova = primitive_query_iova(pool, query, begin[0], 0);
9347ec681f3Smrg
9357ec681f3Smrg   tu_cs_emit_regs(cs, A6XX_VPC_SO_STREAM_COUNTS(.qword = begin_iova));
9367ec681f3Smrg   tu6_emit_event_write(cmdbuf, cs, WRITE_PRIMITIVE_COUNTS);
937361fc4cbSmaya}
938361fc4cbSmaya
9397ec681f3SmrgVKAPI_ATTR void VKAPI_CALL
940361fc4cbSmayatu_CmdBeginQuery(VkCommandBuffer commandBuffer,
941361fc4cbSmaya                 VkQueryPool queryPool,
942361fc4cbSmaya                 uint32_t query,
943361fc4cbSmaya                 VkQueryControlFlags flags)
944361fc4cbSmaya{
9457ec681f3Smrg   TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer);
9467ec681f3Smrg   TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
9477ec681f3Smrg   assert(query < pool->size);
9487ec681f3Smrg
9497ec681f3Smrg   switch (pool->type) {
9507ec681f3Smrg   case VK_QUERY_TYPE_OCCLUSION:
9517ec681f3Smrg      /* In freedreno, there is no implementation difference between
9527ec681f3Smrg       * GL_SAMPLES_PASSED and GL_ANY_SAMPLES_PASSED, so we can similarly
9537ec681f3Smrg       * ignore the VK_QUERY_CONTROL_PRECISE_BIT flag here.
9547ec681f3Smrg       */
9557ec681f3Smrg      emit_begin_occlusion_query(cmdbuf, pool, query);
9567ec681f3Smrg      break;
9577ec681f3Smrg   case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
9587ec681f3Smrg      emit_begin_xfb_query(cmdbuf, pool, query, 0);
9597ec681f3Smrg      break;
9607ec681f3Smrg   case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR:
9617ec681f3Smrg      emit_begin_perf_query(cmdbuf, pool, query);
9627ec681f3Smrg      break;
9637ec681f3Smrg   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
9647ec681f3Smrg      emit_begin_stat_query(cmdbuf, pool, query);
9657ec681f3Smrg      break;
9667ec681f3Smrg   case VK_QUERY_TYPE_TIMESTAMP:
9677ec681f3Smrg      unreachable("Unimplemented query type");
9687ec681f3Smrg   default:
9697ec681f3Smrg      assert(!"Invalid query type");
9707ec681f3Smrg   }
9717ec681f3Smrg}
9727ec681f3Smrg
9737ec681f3SmrgVKAPI_ATTR void VKAPI_CALL
9747ec681f3Smrgtu_CmdBeginQueryIndexedEXT(VkCommandBuffer commandBuffer,
9757ec681f3Smrg                           VkQueryPool queryPool,
9767ec681f3Smrg                           uint32_t query,
9777ec681f3Smrg                           VkQueryControlFlags flags,
9787ec681f3Smrg                           uint32_t index)
9797ec681f3Smrg{
9807ec681f3Smrg   TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer);
9817ec681f3Smrg   TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
9827ec681f3Smrg   assert(query < pool->size);
9837ec681f3Smrg
9847ec681f3Smrg   switch (pool->type) {
9857ec681f3Smrg   case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
9867ec681f3Smrg      emit_begin_xfb_query(cmdbuf, pool, query, index);
9877ec681f3Smrg      break;
9887ec681f3Smrg   default:
9897ec681f3Smrg      assert(!"Invalid query type");
9907ec681f3Smrg   }
9917ec681f3Smrg}
9927ec681f3Smrg
9937ec681f3Smrgstatic void
9947ec681f3Smrgemit_end_occlusion_query(struct tu_cmd_buffer *cmdbuf,
9957ec681f3Smrg                         struct tu_query_pool *pool,
9967ec681f3Smrg                         uint32_t query)
9977ec681f3Smrg{
9987ec681f3Smrg   /* Ending an occlusion query happens in a few steps:
9997ec681f3Smrg    *    1) Set the slot->end to UINT64_MAX.
10007ec681f3Smrg    *    2) Set up the SAMPLE_COUNT registers and trigger a CP_EVENT_WRITE to
10017ec681f3Smrg    *       write the current sample count value into slot->end.
10027ec681f3Smrg    *    3) Since (2) is asynchronous, wait until slot->end is not equal to
10037ec681f3Smrg    *       UINT64_MAX before continuing via CP_WAIT_REG_MEM.
10047ec681f3Smrg    *    4) Accumulate the results of the query (slot->end - slot->begin) into
10057ec681f3Smrg    *       slot->result.
10067ec681f3Smrg    *    5) If vkCmdEndQuery is *not* called from within the scope of a render
10077ec681f3Smrg    *       pass, set the slot's available bit since the query is now done.
10087ec681f3Smrg    *    6) If vkCmdEndQuery *is* called from within the scope of a render
10097ec681f3Smrg    *       pass, we cannot mark as available yet since the commands in
10107ec681f3Smrg    *       draw_cs are not run until vkCmdEndRenderPass.
10117ec681f3Smrg    */
10127ec681f3Smrg   const struct tu_render_pass *pass = cmdbuf->state.pass;
10137ec681f3Smrg   struct tu_cs *cs = pass ? &cmdbuf->draw_cs : &cmdbuf->cs;
10147ec681f3Smrg
10157ec681f3Smrg   uint64_t available_iova = query_available_iova(pool, query);
10167ec681f3Smrg   uint64_t begin_iova = occlusion_query_iova(pool, query, begin);
10177ec681f3Smrg   uint64_t end_iova = occlusion_query_iova(pool, query, end);
10187ec681f3Smrg   uint64_t result_iova = query_result_iova(pool, query, uint64_t, 0);
10197ec681f3Smrg   tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 4);
10207ec681f3Smrg   tu_cs_emit_qw(cs, end_iova);
10217ec681f3Smrg   tu_cs_emit_qw(cs, 0xffffffffffffffffull);
10227ec681f3Smrg
10237ec681f3Smrg   tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);
10247ec681f3Smrg
10257ec681f3Smrg   tu_cs_emit_regs(cs,
10267ec681f3Smrg                   A6XX_RB_SAMPLE_COUNT_CONTROL(.copy = true));
10277ec681f3Smrg
10287ec681f3Smrg   tu_cs_emit_regs(cs,
10297ec681f3Smrg                   A6XX_RB_SAMPLE_COUNT_ADDR(.qword = end_iova));
10307ec681f3Smrg
10317ec681f3Smrg   tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
10327ec681f3Smrg   tu_cs_emit(cs, ZPASS_DONE);
10337ec681f3Smrg
10347ec681f3Smrg   tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
10357ec681f3Smrg   tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_NE) |
10367ec681f3Smrg                  CP_WAIT_REG_MEM_0_POLL_MEMORY);
10377ec681f3Smrg   tu_cs_emit_qw(cs, end_iova);
10387ec681f3Smrg   tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(0xffffffff));
10397ec681f3Smrg   tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0));
10407ec681f3Smrg   tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(16));
10417ec681f3Smrg
10427ec681f3Smrg   /* result (dst) = result (srcA) + end (srcB) - begin (srcC) */
10437ec681f3Smrg   tu_cs_emit_pkt7(cs, CP_MEM_TO_MEM, 9);
10447ec681f3Smrg   tu_cs_emit(cs, CP_MEM_TO_MEM_0_DOUBLE | CP_MEM_TO_MEM_0_NEG_C);
10457ec681f3Smrg   tu_cs_emit_qw(cs, result_iova);
10467ec681f3Smrg   tu_cs_emit_qw(cs, result_iova);
10477ec681f3Smrg   tu_cs_emit_qw(cs, end_iova);
10487ec681f3Smrg   tu_cs_emit_qw(cs, begin_iova);
10497ec681f3Smrg
10507ec681f3Smrg   tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);
10517ec681f3Smrg
10527ec681f3Smrg   if (pass)
10537ec681f3Smrg      /* Technically, queries should be tracked per-subpass, but here we track
10547ec681f3Smrg       * at the render pass level to simply the code a bit. This is safe
10557ec681f3Smrg       * because the only commands that use the available bit are
10567ec681f3Smrg       * vkCmdCopyQueryPoolResults and vkCmdResetQueryPool, both of which
10577ec681f3Smrg       * cannot be invoked from inside a render pass scope.
10587ec681f3Smrg       */
10597ec681f3Smrg      cs = &cmdbuf->draw_epilogue_cs;
10607ec681f3Smrg
10617ec681f3Smrg   tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 4);
10627ec681f3Smrg   tu_cs_emit_qw(cs, available_iova);
10637ec681f3Smrg   tu_cs_emit_qw(cs, 0x1);
10647ec681f3Smrg}
10657ec681f3Smrg
10667ec681f3Smrgstatic void
10677ec681f3Smrgemit_end_stat_query(struct tu_cmd_buffer *cmdbuf,
10687ec681f3Smrg                    struct tu_query_pool *pool,
10697ec681f3Smrg                    uint32_t query)
10707ec681f3Smrg{
10717ec681f3Smrg   struct tu_cs *cs = cmdbuf->state.pass ? &cmdbuf->draw_cs : &cmdbuf->cs;
10727ec681f3Smrg   uint64_t end_iova = pipeline_stat_query_iova(pool, query, end);
10737ec681f3Smrg   uint64_t available_iova = query_available_iova(pool, query);
10747ec681f3Smrg   uint64_t result_iova;
10757ec681f3Smrg   uint64_t stat_start_iova;
10767ec681f3Smrg   uint64_t stat_stop_iova;
10777ec681f3Smrg
10787ec681f3Smrg   tu6_emit_event_write(cmdbuf, cs, STOP_PRIMITIVE_CTRS);
10797ec681f3Smrg   tu6_emit_event_write(cmdbuf, cs, RST_VTX_CNT);
10807ec681f3Smrg   tu6_emit_event_write(cmdbuf, cs, STAT_EVENT);
10817ec681f3Smrg
10827ec681f3Smrg   tu_cs_emit_wfi(cs);
10837ec681f3Smrg
10847ec681f3Smrg   tu_cs_emit_pkt7(cs, CP_REG_TO_MEM, 3);
10857ec681f3Smrg   tu_cs_emit(cs, CP_REG_TO_MEM_0_REG(REG_A6XX_RBBM_PRIMCTR_0_LO) |
10867ec681f3Smrg                  CP_REG_TO_MEM_0_CNT(STAT_COUNT * 2) |
10877ec681f3Smrg                  CP_REG_TO_MEM_0_64B);
10887ec681f3Smrg   tu_cs_emit_qw(cs, end_iova);
10897ec681f3Smrg
10907ec681f3Smrg   for (int i = 0; i < STAT_COUNT; i++) {
10917ec681f3Smrg      result_iova = query_result_iova(pool, query, uint64_t, i);
10927ec681f3Smrg      stat_start_iova = pipeline_stat_query_iova(pool, query, begin[i]);
10937ec681f3Smrg      stat_stop_iova = pipeline_stat_query_iova(pool, query, end[i]);
10947ec681f3Smrg
10957ec681f3Smrg      tu_cs_emit_pkt7(cs, CP_MEM_TO_MEM, 9);
10967ec681f3Smrg      tu_cs_emit(cs, CP_MEM_TO_MEM_0_WAIT_FOR_MEM_WRITES |
10977ec681f3Smrg                     CP_MEM_TO_MEM_0_DOUBLE |
10987ec681f3Smrg                     CP_MEM_TO_MEM_0_NEG_C);
10997ec681f3Smrg
11007ec681f3Smrg      tu_cs_emit_qw(cs, result_iova);
11017ec681f3Smrg      tu_cs_emit_qw(cs, result_iova);
11027ec681f3Smrg      tu_cs_emit_qw(cs, stat_stop_iova);
11037ec681f3Smrg      tu_cs_emit_qw(cs, stat_start_iova);
11047ec681f3Smrg   }
11057ec681f3Smrg
11067ec681f3Smrg   tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);
11077ec681f3Smrg
11087ec681f3Smrg   if (cmdbuf->state.pass)
11097ec681f3Smrg      cs = &cmdbuf->draw_epilogue_cs;
11107ec681f3Smrg
11117ec681f3Smrg   /* Set the availability to 1 */
11127ec681f3Smrg   tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 4);
11137ec681f3Smrg   tu_cs_emit_qw(cs, available_iova);
11147ec681f3Smrg   tu_cs_emit_qw(cs, 0x1);
1115361fc4cbSmaya}
1116361fc4cbSmaya
11177ec681f3Smrgstatic void
11187ec681f3Smrgemit_end_perf_query(struct tu_cmd_buffer *cmdbuf,
11197ec681f3Smrg                         struct tu_query_pool *pool,
11207ec681f3Smrg                         uint32_t query)
11217ec681f3Smrg{
11227ec681f3Smrg   struct tu_cs *cs = cmdbuf->state.pass ? &cmdbuf->draw_cs : &cmdbuf->cs;
11237ec681f3Smrg   uint64_t available_iova = query_available_iova(pool, query);
11247ec681f3Smrg   uint64_t end_iova;
11257ec681f3Smrg   uint64_t begin_iova;
11267ec681f3Smrg   uint64_t result_iova;
11277ec681f3Smrg   uint32_t last_pass = ~0;
11287ec681f3Smrg
11297ec681f3Smrg   for (uint32_t i = 0; i < pool->counter_index_count; i++) {
11307ec681f3Smrg      struct tu_perf_query_data *data = &pool->perf_query_data[i];
11317ec681f3Smrg
11327ec681f3Smrg      if (last_pass != data->pass) {
11337ec681f3Smrg         last_pass = data->pass;
11347ec681f3Smrg
11357ec681f3Smrg         if (data->pass != 0)
11367ec681f3Smrg            tu_cond_exec_end(cs);
11377ec681f3Smrg         emit_perfcntrs_pass_start(cs, data->pass);
11387ec681f3Smrg      }
11397ec681f3Smrg
11407ec681f3Smrg      const struct fd_perfcntr_counter *counter =
11417ec681f3Smrg            &pool->perf_group[data->gid].counters[data->cntr_reg];
11427ec681f3Smrg
11437ec681f3Smrg      end_iova = perf_query_iova(pool, 0, end, data->app_idx);
11447ec681f3Smrg
11457ec681f3Smrg      tu_cs_emit_pkt7(cs, CP_REG_TO_MEM, 3);
11467ec681f3Smrg      tu_cs_emit(cs, CP_REG_TO_MEM_0_REG(counter->counter_reg_lo) |
11477ec681f3Smrg                     CP_REG_TO_MEM_0_64B);
11487ec681f3Smrg      tu_cs_emit_qw(cs, end_iova);
11497ec681f3Smrg   }
11507ec681f3Smrg   tu_cond_exec_end(cs);
11517ec681f3Smrg
11527ec681f3Smrg   last_pass = ~0;
11537ec681f3Smrg   tu_cs_emit_wfi(cs);
11547ec681f3Smrg
11557ec681f3Smrg   for (uint32_t i = 0; i < pool->counter_index_count; i++) {
11567ec681f3Smrg      struct tu_perf_query_data *data = &pool->perf_query_data[i];
11577ec681f3Smrg
11587ec681f3Smrg      if (last_pass != data->pass) {
11597ec681f3Smrg         last_pass = data->pass;
11607ec681f3Smrg
11617ec681f3Smrg
11627ec681f3Smrg         if (data->pass != 0)
11637ec681f3Smrg            tu_cond_exec_end(cs);
11647ec681f3Smrg         emit_perfcntrs_pass_start(cs, data->pass);
11657ec681f3Smrg      }
11667ec681f3Smrg
11677ec681f3Smrg      result_iova = query_result_iova(pool, 0, struct perfcntr_query_slot,
11687ec681f3Smrg             data->app_idx);
11697ec681f3Smrg      begin_iova = perf_query_iova(pool, 0, begin, data->app_idx);
11707ec681f3Smrg      end_iova = perf_query_iova(pool, 0, end, data->app_idx);
11717ec681f3Smrg
11727ec681f3Smrg      /* result += end - begin */
11737ec681f3Smrg      tu_cs_emit_pkt7(cs, CP_MEM_TO_MEM, 9);
11747ec681f3Smrg      tu_cs_emit(cs, CP_MEM_TO_MEM_0_WAIT_FOR_MEM_WRITES |
11757ec681f3Smrg                     CP_MEM_TO_MEM_0_DOUBLE |
11767ec681f3Smrg                     CP_MEM_TO_MEM_0_NEG_C);
11777ec681f3Smrg
11787ec681f3Smrg      tu_cs_emit_qw(cs, result_iova);
11797ec681f3Smrg      tu_cs_emit_qw(cs, result_iova);
11807ec681f3Smrg      tu_cs_emit_qw(cs, end_iova);
11817ec681f3Smrg      tu_cs_emit_qw(cs, begin_iova);
11827ec681f3Smrg   }
11837ec681f3Smrg   tu_cond_exec_end(cs);
11847ec681f3Smrg
11857ec681f3Smrg   tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);
11867ec681f3Smrg
11877ec681f3Smrg   if (cmdbuf->state.pass)
11887ec681f3Smrg      cs = &cmdbuf->draw_epilogue_cs;
11897ec681f3Smrg
11907ec681f3Smrg   /* Set the availability to 1 */
11917ec681f3Smrg   tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 4);
11927ec681f3Smrg   tu_cs_emit_qw(cs, available_iova);
11937ec681f3Smrg   tu_cs_emit_qw(cs, 0x1);
11947ec681f3Smrg}
11957ec681f3Smrg
11967ec681f3Smrgstatic void
11977ec681f3Smrgemit_end_xfb_query(struct tu_cmd_buffer *cmdbuf,
11987ec681f3Smrg                   struct tu_query_pool *pool,
11997ec681f3Smrg                   uint32_t query,
12007ec681f3Smrg                   uint32_t stream_id)
12017ec681f3Smrg{
12027ec681f3Smrg   struct tu_cs *cs = cmdbuf->state.pass ? &cmdbuf->draw_cs : &cmdbuf->cs;
12037ec681f3Smrg
12047ec681f3Smrg   uint64_t end_iova = primitive_query_iova(pool, query, end[0], 0);
12057ec681f3Smrg   uint64_t result_written_iova = query_result_iova(pool, query, uint64_t, 0);
12067ec681f3Smrg   uint64_t result_generated_iova = query_result_iova(pool, query, uint64_t, 1);
12077ec681f3Smrg   uint64_t begin_written_iova = primitive_query_iova(pool, query, begin[stream_id], 0);
12087ec681f3Smrg   uint64_t begin_generated_iova = primitive_query_iova(pool, query, begin[stream_id], 1);
12097ec681f3Smrg   uint64_t end_written_iova = primitive_query_iova(pool, query, end[stream_id], 0);
12107ec681f3Smrg   uint64_t end_generated_iova = primitive_query_iova(pool, query, end[stream_id], 1);
12117ec681f3Smrg   uint64_t available_iova = query_available_iova(pool, query);
12127ec681f3Smrg
12137ec681f3Smrg   tu_cs_emit_regs(cs, A6XX_VPC_SO_STREAM_COUNTS(.qword = end_iova));
12147ec681f3Smrg   tu6_emit_event_write(cmdbuf, cs, WRITE_PRIMITIVE_COUNTS);
12157ec681f3Smrg
12167ec681f3Smrg   tu_cs_emit_wfi(cs);
12177ec681f3Smrg   tu6_emit_event_write(cmdbuf, cs, CACHE_FLUSH_TS);
12187ec681f3Smrg
12197ec681f3Smrg   /* Set the count of written primitives */
12207ec681f3Smrg   tu_cs_emit_pkt7(cs, CP_MEM_TO_MEM, 9);
12217ec681f3Smrg   tu_cs_emit(cs, CP_MEM_TO_MEM_0_DOUBLE | CP_MEM_TO_MEM_0_NEG_C |
12227ec681f3Smrg                  CP_MEM_TO_MEM_0_WAIT_FOR_MEM_WRITES | 0x80000000);
12237ec681f3Smrg   tu_cs_emit_qw(cs, result_written_iova);
12247ec681f3Smrg   tu_cs_emit_qw(cs, result_written_iova);
12257ec681f3Smrg   tu_cs_emit_qw(cs, end_written_iova);
12267ec681f3Smrg   tu_cs_emit_qw(cs, begin_written_iova);
12277ec681f3Smrg
12287ec681f3Smrg   tu6_emit_event_write(cmdbuf, cs, CACHE_FLUSH_TS);
12297ec681f3Smrg
12307ec681f3Smrg   /* Set the count of generated primitives */
12317ec681f3Smrg   tu_cs_emit_pkt7(cs, CP_MEM_TO_MEM, 9);
12327ec681f3Smrg   tu_cs_emit(cs, CP_MEM_TO_MEM_0_DOUBLE | CP_MEM_TO_MEM_0_NEG_C |
12337ec681f3Smrg                  CP_MEM_TO_MEM_0_WAIT_FOR_MEM_WRITES | 0x80000000);
12347ec681f3Smrg   tu_cs_emit_qw(cs, result_generated_iova);
12357ec681f3Smrg   tu_cs_emit_qw(cs, result_generated_iova);
12367ec681f3Smrg   tu_cs_emit_qw(cs, end_generated_iova);
12377ec681f3Smrg   tu_cs_emit_qw(cs, begin_generated_iova);
12387ec681f3Smrg
12397ec681f3Smrg   /* Set the availability to 1 */
12407ec681f3Smrg   tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 4);
12417ec681f3Smrg   tu_cs_emit_qw(cs, available_iova);
12427ec681f3Smrg   tu_cs_emit_qw(cs, 0x1);
12437ec681f3Smrg}
12447ec681f3Smrg
12457ec681f3Smrg/* Implement this bit of spec text from section 17.2 "Query Operation":
12467ec681f3Smrg *
12477ec681f3Smrg *     If queries are used while executing a render pass instance that has
12487ec681f3Smrg *     multiview enabled, the query uses N consecutive query indices in the
12497ec681f3Smrg *     query pool (starting at query) where N is the number of bits set in the
12507ec681f3Smrg *     view mask in the subpass the query is used in. How the numerical
12517ec681f3Smrg *     results of the query are distributed among the queries is
12527ec681f3Smrg *     implementation-dependent. For example, some implementations may write
12537ec681f3Smrg *     each view’s results to a distinct query, while other implementations
12547ec681f3Smrg *     may write the total result to the first query and write zero to the
12557ec681f3Smrg *     other queries. However, the sum of the results in all the queries must
12567ec681f3Smrg *     accurately reflect the total result of the query summed over all views.
12577ec681f3Smrg *     Applications can sum the results from all the queries to compute the
12587ec681f3Smrg *     total result.
12597ec681f3Smrg *
12607ec681f3Smrg * Since we execute all views at once, we write zero to the other queries.
12617ec681f3Smrg * Furthermore, because queries must be reset before use, and we set the
12627ec681f3Smrg * result to 0 in vkCmdResetQueryPool(), we just need to mark it as available.
12637ec681f3Smrg */
12647ec681f3Smrg
12657ec681f3Smrgstatic void
12667ec681f3Smrghandle_multiview_queries(struct tu_cmd_buffer *cmd,
12677ec681f3Smrg                         struct tu_query_pool *pool,
12687ec681f3Smrg                         uint32_t query)
12697ec681f3Smrg{
12707ec681f3Smrg   if (!cmd->state.pass || !cmd->state.subpass->multiview_mask)
12717ec681f3Smrg      return;
12727ec681f3Smrg
12737ec681f3Smrg   unsigned views = util_bitcount(cmd->state.subpass->multiview_mask);
12747ec681f3Smrg   struct tu_cs *cs = &cmd->draw_epilogue_cs;
12757ec681f3Smrg
12767ec681f3Smrg   for (uint32_t i = 1; i < views; i++) {
12777ec681f3Smrg      tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 4);
12787ec681f3Smrg      tu_cs_emit_qw(cs, query_available_iova(pool, query + i));
12797ec681f3Smrg      tu_cs_emit_qw(cs, 0x1);
12807ec681f3Smrg   }
12817ec681f3Smrg}
12827ec681f3Smrg
12837ec681f3SmrgVKAPI_ATTR void VKAPI_CALL
1284361fc4cbSmayatu_CmdEndQuery(VkCommandBuffer commandBuffer,
1285361fc4cbSmaya               VkQueryPool queryPool,
1286361fc4cbSmaya               uint32_t query)
1287361fc4cbSmaya{
12887ec681f3Smrg   TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer);
12897ec681f3Smrg   TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
12907ec681f3Smrg   assert(query < pool->size);
12917ec681f3Smrg
12927ec681f3Smrg   switch (pool->type) {
12937ec681f3Smrg   case VK_QUERY_TYPE_OCCLUSION:
12947ec681f3Smrg      emit_end_occlusion_query(cmdbuf, pool, query);
12957ec681f3Smrg      break;
12967ec681f3Smrg   case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
12977ec681f3Smrg      emit_end_xfb_query(cmdbuf, pool, query, 0);
12987ec681f3Smrg      break;
12997ec681f3Smrg   case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR:
13007ec681f3Smrg      emit_end_perf_query(cmdbuf, pool, query);
13017ec681f3Smrg      break;
13027ec681f3Smrg   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
13037ec681f3Smrg      emit_end_stat_query(cmdbuf, pool, query);
13047ec681f3Smrg      break;
13057ec681f3Smrg   case VK_QUERY_TYPE_TIMESTAMP:
13067ec681f3Smrg      unreachable("Unimplemented query type");
13077ec681f3Smrg   default:
13087ec681f3Smrg      assert(!"Invalid query type");
13097ec681f3Smrg   }
13107ec681f3Smrg
13117ec681f3Smrg   handle_multiview_queries(cmdbuf, pool, query);
1312361fc4cbSmaya}
1313361fc4cbSmaya
13147ec681f3SmrgVKAPI_ATTR void VKAPI_CALL
13157ec681f3Smrgtu_CmdEndQueryIndexedEXT(VkCommandBuffer commandBuffer,
13167ec681f3Smrg                         VkQueryPool queryPool,
13177ec681f3Smrg                         uint32_t query,
13187ec681f3Smrg                         uint32_t index)
13197ec681f3Smrg{
13207ec681f3Smrg   TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, commandBuffer);
13217ec681f3Smrg   TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
13227ec681f3Smrg   assert(query < pool->size);
13237ec681f3Smrg
13247ec681f3Smrg   switch (pool->type) {
13257ec681f3Smrg   case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
13267ec681f3Smrg      assert(index <= 4);
13277ec681f3Smrg      emit_end_xfb_query(cmdbuf, pool, query, index);
13287ec681f3Smrg      break;
13297ec681f3Smrg   default:
13307ec681f3Smrg      assert(!"Invalid query type");
13317ec681f3Smrg   }
13327ec681f3Smrg}
13337ec681f3Smrg
13347ec681f3SmrgVKAPI_ATTR void VKAPI_CALL
1335361fc4cbSmayatu_CmdWriteTimestamp(VkCommandBuffer commandBuffer,
1336361fc4cbSmaya                     VkPipelineStageFlagBits pipelineStage,
1337361fc4cbSmaya                     VkQueryPool queryPool,
1338361fc4cbSmaya                     uint32_t query)
1339361fc4cbSmaya{
13407ec681f3Smrg   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
13417ec681f3Smrg   TU_FROM_HANDLE(tu_query_pool, pool, queryPool);
13427ec681f3Smrg
13437ec681f3Smrg   /* Inside a render pass, just write the timestamp multiple times so that
13447ec681f3Smrg    * the user gets the last one if we use GMEM. There isn't really much
13457ec681f3Smrg    * better we can do, and this seems to be what the blob does too.
13467ec681f3Smrg    */
13477ec681f3Smrg   struct tu_cs *cs = cmd->state.pass ? &cmd->draw_cs : &cmd->cs;
13487ec681f3Smrg
13497ec681f3Smrg   /* Stages that will already have been executed by the time the CP executes
13507ec681f3Smrg    * the REG_TO_MEM. DrawIndirect parameters are read by the CP, so the draw
13517ec681f3Smrg    * indirect stage counts as top-of-pipe too.
13527ec681f3Smrg    */
13537ec681f3Smrg   VkPipelineStageFlags top_of_pipe_flags =
13547ec681f3Smrg      VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT |
13557ec681f3Smrg      VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
13567ec681f3Smrg
13577ec681f3Smrg   if (pipelineStage & ~top_of_pipe_flags) {
13587ec681f3Smrg      /* Execute a WFI so that all commands complete. Note that CP_REG_TO_MEM
13597ec681f3Smrg       * does CP_WAIT_FOR_ME internally, which will wait for the WFI to
13607ec681f3Smrg       * complete.
13617ec681f3Smrg       *
13627ec681f3Smrg       * Stalling the CP like this is really unfortunate, but I don't think
13637ec681f3Smrg       * there's a better solution that allows all 48 bits of precision
13647ec681f3Smrg       * because CP_EVENT_WRITE doesn't support 64-bit timestamps.
13657ec681f3Smrg       */
13667ec681f3Smrg      tu_cs_emit_wfi(cs);
13677ec681f3Smrg   }
13687ec681f3Smrg
13697ec681f3Smrg   tu_cs_emit_pkt7(cs, CP_REG_TO_MEM, 3);
13707ec681f3Smrg   tu_cs_emit(cs, CP_REG_TO_MEM_0_REG(REG_A6XX_CP_ALWAYS_ON_COUNTER_LO) |
13717ec681f3Smrg                  CP_REG_TO_MEM_0_CNT(2) |
13727ec681f3Smrg                  CP_REG_TO_MEM_0_64B);
13737ec681f3Smrg   tu_cs_emit_qw(cs, query_result_iova(pool, query, uint64_t, 0));
13747ec681f3Smrg
13757ec681f3Smrg   /* Only flag availability once the entire renderpass is done, similar to
13767ec681f3Smrg    * the begin/end path.
13777ec681f3Smrg    */
13787ec681f3Smrg   cs = cmd->state.pass ? &cmd->draw_epilogue_cs : &cmd->cs;
13797ec681f3Smrg
13807ec681f3Smrg   tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 4);
13817ec681f3Smrg   tu_cs_emit_qw(cs, query_available_iova(pool, query));
13827ec681f3Smrg   tu_cs_emit_qw(cs, 0x1);
13837ec681f3Smrg
13847ec681f3Smrg   /* From the spec for vkCmdWriteTimestamp:
13857ec681f3Smrg    *
13867ec681f3Smrg    *    If vkCmdWriteTimestamp is called while executing a render pass
13877ec681f3Smrg    *    instance that has multiview enabled, the timestamp uses N consecutive
13887ec681f3Smrg    *    query indices in the query pool (starting at query) where N is the
13897ec681f3Smrg    *    number of bits set in the view mask of the subpass the command is
13907ec681f3Smrg    *    executed in. The resulting query values are determined by an
13917ec681f3Smrg    *    implementation-dependent choice of one of the following behaviors:
13927ec681f3Smrg    *
13937ec681f3Smrg    *    -   The first query is a timestamp value and (if more than one bit is
13947ec681f3Smrg    *        set in the view mask) zero is written to the remaining queries.
13957ec681f3Smrg    *        If two timestamps are written in the same subpass, the sum of the
13967ec681f3Smrg    *        execution time of all views between those commands is the
13977ec681f3Smrg    *        difference between the first query written by each command.
13987ec681f3Smrg    *
13997ec681f3Smrg    *    -   All N queries are timestamp values. If two timestamps are written
14007ec681f3Smrg    *        in the same subpass, the sum of the execution time of all views
14017ec681f3Smrg    *        between those commands is the sum of the difference between
14027ec681f3Smrg    *        corresponding queries written by each command. The difference
14037ec681f3Smrg    *        between corresponding queries may be the execution time of a
14047ec681f3Smrg    *        single view.
14057ec681f3Smrg    *
14067ec681f3Smrg    * We execute all views in the same draw call, so we implement the first
14077ec681f3Smrg    * option, the same as regular queries.
14087ec681f3Smrg    */
14097ec681f3Smrg   handle_multiview_queries(cmd, pool, query);
14107ec681f3Smrg}
14117ec681f3Smrg
14127ec681f3SmrgVKAPI_ATTR VkResult VKAPI_CALL
14137ec681f3Smrgtu_EnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR(
14147ec681f3Smrg    VkPhysicalDevice                            physicalDevice,
14157ec681f3Smrg    uint32_t                                    queueFamilyIndex,
14167ec681f3Smrg    uint32_t*                                   pCounterCount,
14177ec681f3Smrg    VkPerformanceCounterKHR*                    pCounters,
14187ec681f3Smrg    VkPerformanceCounterDescriptionKHR*         pCounterDescriptions)
14197ec681f3Smrg{
14207ec681f3Smrg   TU_FROM_HANDLE(tu_physical_device, phydev, physicalDevice);
14217ec681f3Smrg
14227ec681f3Smrg   uint32_t desc_count = *pCounterCount;
14237ec681f3Smrg   uint32_t group_count;
14247ec681f3Smrg   const struct fd_perfcntr_group *group =
14257ec681f3Smrg         fd_perfcntrs(&phydev->dev_id, &group_count);
14267ec681f3Smrg
14277ec681f3Smrg   VK_OUTARRAY_MAKE(out, pCounters, pCounterCount);
14287ec681f3Smrg   VK_OUTARRAY_MAKE(out_desc, pCounterDescriptions, &desc_count);
14297ec681f3Smrg
14307ec681f3Smrg   for (int i = 0; i < group_count; i++) {
14317ec681f3Smrg      for (int j = 0; j < group[i].num_countables; j++) {
14327ec681f3Smrg
14337ec681f3Smrg         vk_outarray_append(&out, counter) {
14347ec681f3Smrg            counter->scope = VK_QUERY_SCOPE_COMMAND_BUFFER_KHR;
14357ec681f3Smrg            counter->unit =
14367ec681f3Smrg                  fd_perfcntr_type_to_vk_unit[group[i].countables[j].query_type];
14377ec681f3Smrg            counter->storage =
14387ec681f3Smrg                  fd_perfcntr_type_to_vk_storage[group[i].countables[j].query_type];
14397ec681f3Smrg
14407ec681f3Smrg            unsigned char sha1_result[20];
14417ec681f3Smrg            _mesa_sha1_compute(group[i].countables[j].name,
14427ec681f3Smrg                               strlen(group[i].countables[j].name),
14437ec681f3Smrg                               sha1_result);
14447ec681f3Smrg            memcpy(counter->uuid, sha1_result, sizeof(counter->uuid));
14457ec681f3Smrg         }
14467ec681f3Smrg
14477ec681f3Smrg         vk_outarray_append(&out_desc, desc) {
14487ec681f3Smrg            desc->flags = 0;
14497ec681f3Smrg
14507ec681f3Smrg            snprintf(desc->name, sizeof(desc->name),
14517ec681f3Smrg                     "%s", group[i].countables[j].name);
14527ec681f3Smrg            snprintf(desc->category, sizeof(desc->category), "%s", group[i].name);
14537ec681f3Smrg            snprintf(desc->description, sizeof(desc->description),
14547ec681f3Smrg                     "%s: %s performance counter",
14557ec681f3Smrg                     group[i].name, group[i].countables[j].name);
14567ec681f3Smrg         }
14577ec681f3Smrg      }
14587ec681f3Smrg   }
14597ec681f3Smrg
14607ec681f3Smrg   return vk_outarray_status(&out);
14617ec681f3Smrg}
14627ec681f3Smrg
14637ec681f3SmrgVKAPI_ATTR void VKAPI_CALL
14647ec681f3Smrgtu_GetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR(
14657ec681f3Smrg      VkPhysicalDevice                            physicalDevice,
14667ec681f3Smrg      const VkQueryPoolPerformanceCreateInfoKHR*  pPerformanceQueryCreateInfo,
14677ec681f3Smrg      uint32_t*                                   pNumPasses)
14687ec681f3Smrg{
14697ec681f3Smrg   TU_FROM_HANDLE(tu_physical_device, phydev, physicalDevice);
14707ec681f3Smrg   uint32_t group_count = 0;
14717ec681f3Smrg   uint32_t gid = 0, cid = 0, n_passes;
14727ec681f3Smrg   const struct fd_perfcntr_group *group =
14737ec681f3Smrg         fd_perfcntrs(&phydev->dev_id, &group_count);
14747ec681f3Smrg
14757ec681f3Smrg   uint32_t counters_requested[group_count];
14767ec681f3Smrg   memset(counters_requested, 0x0, sizeof(counters_requested));
14777ec681f3Smrg   *pNumPasses = 1;
14787ec681f3Smrg
14797ec681f3Smrg   for (unsigned i = 0; i < pPerformanceQueryCreateInfo->counterIndexCount; i++) {
14807ec681f3Smrg      perfcntr_index(group, group_count,
14817ec681f3Smrg                     pPerformanceQueryCreateInfo->pCounterIndices[i],
14827ec681f3Smrg                     &gid, &cid);
14837ec681f3Smrg
14847ec681f3Smrg      counters_requested[gid]++;
14857ec681f3Smrg   }
14867ec681f3Smrg
14877ec681f3Smrg   for (uint32_t i = 0; i < group_count; i++) {
14887ec681f3Smrg      n_passes = DIV_ROUND_UP(counters_requested[i], group[i].num_counters);
14897ec681f3Smrg      *pNumPasses = MAX2(*pNumPasses, n_passes);
14907ec681f3Smrg   }
14917ec681f3Smrg}
14927ec681f3Smrg
14937ec681f3SmrgVKAPI_ATTR VkResult VKAPI_CALL
14947ec681f3Smrgtu_AcquireProfilingLockKHR(VkDevice device,
14957ec681f3Smrg                           const VkAcquireProfilingLockInfoKHR* pInfo)
14967ec681f3Smrg{
14977ec681f3Smrg   /* TODO. Probably there's something to do for kgsl. */
14987ec681f3Smrg   return VK_SUCCESS;
14997ec681f3Smrg}
15007ec681f3Smrg
15017ec681f3SmrgVKAPI_ATTR void VKAPI_CALL
15027ec681f3Smrgtu_ReleaseProfilingLockKHR(VkDevice device)
15037ec681f3Smrg{
15047ec681f3Smrg   /* TODO. Probably there's something to do for kgsl. */
15057ec681f3Smrg   return;
1506361fc4cbSmaya}
1507