/*
 * Copyright © 2019 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <unistd.h>
#include <poll.h>

#include "common/intel_gem.h"

#include "dev/intel_debug.h"
#include "dev/intel_device_info.h"

#include "perf/intel_perf.h"
#include "perf/intel_perf_mdapi.h"
#include "perf/intel_perf_private.h"
#include "perf/intel_perf_query.h"
#include "perf/intel_perf_regs.h"

#include "drm-uapi/i915_drm.h"

#include "util/compiler.h"
#include "util/u_math.h"

#define FILE_DEBUG_FLAG DEBUG_PERFMON

#define MI_RPC_BO_SIZE                (4096)
#define MI_FREQ_OFFSET_BYTES          (256)
#define MI_PERF_COUNTERS_OFFSET_BYTES (260)

#define ALIGN(x, y) (((x) + (y)-1) & ~((y)-1))

/* Align to 64 bytes, a requirement for the OA report write address. */
#define TOTAL_QUERY_DATA_SIZE            \
   ALIGN(256 /* OA report */ +           \
         4  /* freq register */ +        \
         8 + 8 /* perf counter 1 & 2 */, \
         64)


static uint32_t field_offset(bool end, uint32_t offset)
{
   return (end ? TOTAL_QUERY_DATA_SIZE : 0) + offset;
}
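
/* Illustrative sketch (not part of the original file): how the begin/end
 * snapshot offsets inside a query BO follow from the macros above. The
 * begin snapshot starts at offset 0 and the end snapshot starts at
 * TOTAL_QUERY_DATA_SIZE, so a given field lives at field_offset(false, x)
 * in the begin copy and field_offset(true, x) in the end copy.
 *
 *   uint32_t begin_freq = field_offset(false, MI_FREQ_OFFSET_BYTES);
 *   uint32_t end_freq   = field_offset(true,  MI_FREQ_OFFSET_BYTES);
 *   assert(end_freq == TOTAL_QUERY_DATA_SIZE + MI_FREQ_OFFSET_BYTES);
 */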

#define MAP_READ  (1 << 0)
#define MAP_WRITE (1 << 1)

/**
 * Periodic OA samples are read() into these buffer structures via the
 * i915 perf kernel interface and appended to the
 * perf_ctx->sample_buffers linked list. When we process the
 * results of an OA metrics query we need to consider all the periodic
 * samples between the Begin and End MI_REPORT_PERF_COUNT command
 * markers.
 *
 * 'Periodic' is a simplification as there are other automatic reports
 * written by the hardware also buffered here.
 *
 * Considering three queries, A, B and C:
 *
 *  Time ---->
 *                ________________A_________________
 *                |                                |
 *                | ________B_________ _____C___________
 *                | |                | |           |   |
 *
 * And an illustration of sample buffers read over this time frame:
 * [HEAD ][     ][     ][     ][     ][     ][     ][     ][TAIL ]
 *
 * These nodes may hold samples for query A:
 * [     ][     ][  A  ][  A  ][  A  ][  A  ][  A  ][     ][     ]
 *
 * These nodes may hold samples for query B:
 * [     ][     ][  B  ][  B  ][  B  ][     ][     ][     ][     ]
 *
 * These nodes may hold samples for query C:
 * [     ][     ][     ][     ][     ][  C  ][  C  ][  C  ][     ]
 *
 * The illustration assumes we have an even distribution of periodic
 * samples so all nodes have the same size plotted against time.
 *
 * Note, to simplify code, the list is never empty.
 *
 * With overlapping queries we can see that periodic OA reports may
 * relate to multiple queries and care needs to be taken to keep
 * track of sample buffers until there are no queries that might
 * depend on their contents.
 *
 * We use a node ref counting system where a reference ensures that a
 * node and all following nodes can't be freed/recycled until the
 * reference drops to zero.
 *
 * E.g. with a ref of one here:
 * [  0  ][  0  ][  1  ][  0  ][  0  ][  0  ][  0  ][  0  ][  0  ]
 *
 * These nodes could be freed or recycled ("reaped"):
 * [  0  ][  0  ]
 *
 * These must be preserved until the leading ref drops to zero:
 *               [  1  ][  0  ][  0  ][  0  ][  0  ][  0  ][  0  ]
 *
 * When a query starts we take a reference on the current tail of
 * the list, knowing that no already-buffered samples can possibly
 * relate to the newly-started query. A pointer to this node is
 * also saved in the query object's ->oa.samples_head.
 *
 * E.g. starting query A while there are two nodes in .sample_buffers:
 *                ________________A________
 *                |
 *
 * [  0  ][  1  ]
 *           ^_______ Add a reference and store pointer to node in
 *                    A->oa.samples_head
 *
 * Moving forward to when the B query starts with no new buffer nodes:
 * (for reference, i915 perf reads() are only done when queries finish)
 *                ________________A_______
 *                | ________B___
 *                | |
 *
 * [  0  ][  2  ]
 *           ^_______ Add a reference and store pointer to
 *                    node in B->oa.samples_head
 *
 * Once a query is finished, after an OA query has become 'Ready',
 * once the End OA report has landed and after we have processed
 * all the intermediate periodic samples then we drop the
 * ->oa.samples_head reference we took at the start.
 *
 * So when the B query has finished we have:
 *                ________________A________
 *                | ______B___________
 *                | |                |
 * [  0  ][  1  ][  0  ][  0  ][  0  ]
 *           ^_______ Drop B->oa.samples_head reference
 *
 * We still can't free these due to the A->oa.samples_head ref:
 *        [  1  ][  0  ][  0  ][  0  ]
 *
 * When the A query finishes: (note there's a new ref for C's samples_head)
 *                ________________A_________________
 *                |                                |
 *                |                    _____C_________
 *                |                    |           |
 * [  0  ][  0  ][  0  ][  0  ][  1  ][  0  ][  0  ]
 *           ^_______ Drop A->oa.samples_head reference
 *
 * And we can now reap these nodes up to the C->oa.samples_head:
 * [  X  ][  X  ][  X  ][  X  ]
 *                  keeping -> [  1  ][  0  ][  0  ]
 *
 * We reap old sample buffers each time we finish processing an OA
 * query by iterating the sample_buffers list from the head until we
 * find a referenced node and stop.
 *
 * Reaped buffers move to a perfquery.free_sample_buffers list and
 * when we come to read() we first look to recycle a buffer from the
 * free_sample_buffers list before allocating a new buffer.
 */
struct oa_sample_buf {
   struct exec_node link;
   int refcount;
   int len;
   uint8_t buf[I915_PERF_OA_SAMPLE_SIZE * 10];
   uint32_t last_timestamp;
};
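
/* Minimal sketch of the ref-counting protocol described above (illustrative
 * only, not part of the original file). A query pins the current tail node
 * when it begins and releases it once its reports have been processed; the
 * helper names here are hypothetical.
 *
 *   static void example_pin_samples(struct intel_perf_context *perf_ctx,
 *                                   struct intel_perf_query_object *query)
 *   {
 *      struct exec_node *tail = exec_list_get_tail(&perf_ctx->sample_buffers);
 *      struct oa_sample_buf *buf =
 *         exec_node_data(struct oa_sample_buf, tail, link);
 *
 *      query->oa.samples_head = tail;
 *      buf->refcount++;        // keeps this node and all later nodes alive
 *   }
 *
 *   static void example_unpin_samples(struct intel_perf_query_object *query)
 *   {
 *      struct oa_sample_buf *buf =
 *         exec_node_data(struct oa_sample_buf, query->oa.samples_head, link);
 *
 *      buf->refcount--;        // node may now be reaped by reap_old_sample_buffers()
 *      query->oa.samples_head = NULL;
 *   }
 */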

/**
 * gen representation of a performance query object.
 *
 * NB: We want to keep this structure relatively lean considering that
 * applications may expect to allocate enough objects to be able to
 * query around all draw calls in a frame.
 */
struct intel_perf_query_object
{
   const struct intel_perf_query_info *queryinfo;

   /* See query->kind to know which state below is in use... */
   union {
      struct {

         /**
          * BO containing OA counter snapshots at query Begin/End time.
          */
         void *bo;

         /**
          * Address of the mapping of @bo
          */
         void *map;

         /**
          * The MI_REPORT_PERF_COUNT command lets us specify a unique
          * ID that will be reflected in the resulting OA report
          * that's written by the GPU. This is the ID we're expecting
          * in the begin report and the end report should be
          * @begin_report_id + 1.
          */
         int begin_report_id;

         /**
          * Reference the head of the brw->perfquery.sample_buffers
          * list at the time that the query started (so we only need
          * to look at nodes after this point when looking for samples
          * related to this query)
          *
          * (See struct brw_oa_sample_buf description for more details)
          */
         struct exec_node *samples_head;

         /**
          * false while in the unaccumulated_elements list, and set to
          * true when the final, end MI_RPC snapshot has been
          * accumulated.
          */
         bool results_accumulated;

         /**
          * Accumulated OA results between begin and end of the query.
          */
         struct intel_perf_query_result result;
      } oa;

      struct {
         /**
          * BO containing starting and ending snapshots for the
          * statistics counters.
          */
         void *bo;
      } pipeline_stats;
   };
};

struct intel_perf_context {
   struct intel_perf_config *perf;

   void * mem_ctx; /* ralloc context */
   void * ctx;  /* driver context (eg, brw_context) */
   void * bufmgr;
   const struct intel_device_info *devinfo;

   uint32_t hw_ctx;
   int drm_fd;

   /* The i915 perf stream we open to setup + enable the OA counters */
   int oa_stream_fd;

   /* An i915 perf stream fd gives exclusive access to the OA unit that will
    * report counter snapshots for a specific counter set/profile in a
    * specific layout/format so we can only start OA queries that are
    * compatible with the currently open fd...
    */
   int current_oa_metrics_set_id;
   int current_oa_format;

   /* List of buffers containing OA reports */
   struct exec_list sample_buffers;

   /* Cached list of empty sample buffers */
   struct exec_list free_sample_buffers;

   int n_active_oa_queries;
   int n_active_pipeline_stats_queries;

   /* The number of queries depending on running OA counters which
    * extends beyond brw_end_perf_query() since we need to wait until
    * the last MI_RPC command has been parsed by the GPU.
    *
    * Accurate accounting is important here as emitting an
    * MI_REPORT_PERF_COUNT command while the OA unit is disabled will
    * effectively hang the GPU.
    */
   int n_oa_users;

   /* To help catch a spurious problem with the hardware or perf
    * forwarding samples, we emit each MI_REPORT_PERF_COUNT command
    * with a unique ID that we can explicitly check for...
    */
   int next_query_start_report_id;

   /**
    * An array of queries whose results haven't yet been assembled
    * based on the data in buffer objects.
    *
    * These may be active, or have already ended.  However, the
    * results have not been requested.
    */
   struct intel_perf_query_object **unaccumulated;
   int unaccumulated_elements;
   int unaccumulated_array_size;

   /* The total number of query objects so we can relinquish
    * our exclusive access to perf if the application deletes
    * all of its objects. (NB: We only disable perf while
    * there are no active queries)
    */
   int n_query_instances;

   int period_exponent;
};

static bool
inc_n_users(struct intel_perf_context *perf_ctx)
{
   if (perf_ctx->n_oa_users == 0 &&
       intel_ioctl(perf_ctx->oa_stream_fd, I915_PERF_IOCTL_ENABLE, 0) < 0)
   {
      return false;
   }
   ++perf_ctx->n_oa_users;

   return true;
}

static void
dec_n_users(struct intel_perf_context *perf_ctx)
{
   /* Disabling the i915 perf stream will effectively disable the OA
    * counters.  Note it's important to be sure there are no outstanding
    * MI_RPC commands at this point since they could stall the CS
    * indefinitely once OACONTROL is disabled.
    */
   --perf_ctx->n_oa_users;
   if (perf_ctx->n_oa_users == 0 &&
       intel_ioctl(perf_ctx->oa_stream_fd, I915_PERF_IOCTL_DISABLE, 0) < 0)
   {
      DBG("WARNING: Error disabling gen perf stream: %m\n");
   }
}
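
/* Illustrative note (not part of the original file): inc_n_users() and
 * dec_n_users() bracket every OA-based query so the stream is only enabled
 * while at least one query still needs the OA unit running.
 *
 *   if (!inc_n_users(perf_ctx))          // enables the stream on 0 -> 1
 *      return false;
 *   // ... emit Begin/End snapshots, accumulate the results ...
 *   dec_n_users(perf_ctx);               // disables the stream on 1 -> 0
 */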

void
intel_perf_close(struct intel_perf_context *perfquery,
                 const struct intel_perf_query_info *query)
{
   if (perfquery->oa_stream_fd != -1) {
      close(perfquery->oa_stream_fd);
      perfquery->oa_stream_fd = -1;
   }
   if (query && query->kind == INTEL_PERF_QUERY_TYPE_RAW) {
      struct intel_perf_query_info *raw_query =
         (struct intel_perf_query_info *) query;
      raw_query->oa_metrics_set_id = 0;
   }
}

bool
intel_perf_open(struct intel_perf_context *perf_ctx,
                int metrics_set_id,
                int report_format,
                int period_exponent,
                int drm_fd,
                uint32_t ctx_id,
                bool enable)
{
   uint64_t properties[DRM_I915_PERF_PROP_MAX * 2];
   uint32_t p = 0;

   /* Single context sampling if valid context id. */
   if (ctx_id != INTEL_PERF_INVALID_CTX_ID) {
      properties[p++] = DRM_I915_PERF_PROP_CTX_HANDLE;
      properties[p++] = ctx_id;
   }

   /* Include OA reports in samples */
   properties[p++] = DRM_I915_PERF_PROP_SAMPLE_OA;
   properties[p++] = true;

   /* OA unit configuration */
   properties[p++] = DRM_I915_PERF_PROP_OA_METRICS_SET;
   properties[p++] = metrics_set_id;

   properties[p++] = DRM_I915_PERF_PROP_OA_FORMAT;
   properties[p++] = report_format;

   properties[p++] = DRM_I915_PERF_PROP_OA_EXPONENT;
   properties[p++] = period_exponent;

   /* SSEU configuration */
   if (intel_perf_has_global_sseu(perf_ctx->perf)) {
      properties[p++] = DRM_I915_PERF_PROP_GLOBAL_SSEU;
      properties[p++] = to_user_pointer(&perf_ctx->perf->sseu);
   }

   assert(p <= ARRAY_SIZE(properties));

   struct drm_i915_perf_open_param param = {
      .flags = I915_PERF_FLAG_FD_CLOEXEC |
               I915_PERF_FLAG_FD_NONBLOCK |
               (enable ? 0 : I915_PERF_FLAG_DISABLED),
      .num_properties = p / 2,
      .properties_ptr = (uintptr_t) properties,
   };
   int fd = intel_ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
   if (fd == -1) {
      DBG("Error opening gen perf OA stream: %m\n");
      return false;
   }

   perf_ctx->oa_stream_fd = fd;

   perf_ctx->current_oa_metrics_set_id = metrics_set_id;
   perf_ctx->current_oa_format = report_format;

   if (enable)
      ++perf_ctx->n_oa_users;

   return true;
}
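
/* Illustrative usage sketch (not part of the original file): a driver opening
 * the OA stream for a query's metric set before emitting its Begin snapshot.
 * The values all come from the perf context; error handling is simplified.
 *
 *   uint64_t metric_id = get_metric_id(perf_ctx->perf, queryinfo);
 *   if (perf_ctx->oa_stream_fd == -1 &&
 *       !intel_perf_open(perf_ctx, metric_id, queryinfo->oa_format,
 *                        perf_ctx->period_exponent, perf_ctx->drm_fd,
 *                        perf_ctx->hw_ctx, false))   // enabled later by inc_n_users()
 *      return false;
 */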

static uint64_t
get_metric_id(struct intel_perf_config *perf,
              const struct intel_perf_query_info *query)
{
   /* These queries are known to never change, their config ID has been
    * loaded upon the first query creation. No need to look them up again.
    */
   if (query->kind == INTEL_PERF_QUERY_TYPE_OA)
      return query->oa_metrics_set_id;

   assert(query->kind == INTEL_PERF_QUERY_TYPE_RAW);

   /* Raw queries can be reprogrammed by an external application/library.
    * When a raw query is used for the first time its ID is set to a value !=
    * 0. When it stops being used the ID returns to 0. No need to reload the
    * ID when it's already loaded.
    */
   if (query->oa_metrics_set_id != 0) {
      DBG("Raw query '%s' guid=%s using cached ID: %"PRIu64"\n",
          query->name, query->guid, query->oa_metrics_set_id);
      return query->oa_metrics_set_id;
   }

   struct intel_perf_query_info *raw_query = (struct intel_perf_query_info *)query;
   if (!intel_perf_load_metric_id(perf, query->guid,
                                &raw_query->oa_metrics_set_id)) {
      DBG("Unable to read query guid=%s ID, falling back to test config\n", query->guid);
      raw_query->oa_metrics_set_id = perf->fallback_raw_oa_metric;
   } else {
      DBG("Raw query '%s' guid=%s loaded ID: %"PRIu64"\n",
          query->name, query->guid, query->oa_metrics_set_id);
   }
   return query->oa_metrics_set_id;
}

static struct oa_sample_buf *
get_free_sample_buf(struct intel_perf_context *perf_ctx)
{
   struct exec_node *node = exec_list_pop_head(&perf_ctx->free_sample_buffers);
   struct oa_sample_buf *buf;

   if (node)
      buf = exec_node_data(struct oa_sample_buf, node, link);
   else {
      buf = ralloc_size(perf_ctx->perf, sizeof(*buf));

      exec_node_init(&buf->link);
      buf->refcount = 0;
   }
   buf->len = 0;

   return buf;
}

static void
reap_old_sample_buffers(struct intel_perf_context *perf_ctx)
{
   struct exec_node *tail_node =
      exec_list_get_tail(&perf_ctx->sample_buffers);
   struct oa_sample_buf *tail_buf =
      exec_node_data(struct oa_sample_buf, tail_node, link);

   /* Remove all old, unreferenced sample buffers walking forward from
    * the head of the list, except always leave at least one node in
    * the list so we always have a node to reference when we Begin
    * a new query.
    */
   foreach_list_typed_safe(struct oa_sample_buf, buf, link,
                           &perf_ctx->sample_buffers)
   {
      if (buf->refcount == 0 && buf != tail_buf) {
         exec_node_remove(&buf->link);
         exec_list_push_head(&perf_ctx->free_sample_buffers, &buf->link);
      } else
         return;
   }
}

static void
free_sample_bufs(struct intel_perf_context *perf_ctx)
{
   foreach_list_typed_safe(struct oa_sample_buf, buf, link,
                           &perf_ctx->free_sample_buffers)
      ralloc_free(buf);

   exec_list_make_empty(&perf_ctx->free_sample_buffers);
}


struct intel_perf_query_object *
intel_perf_new_query(struct intel_perf_context *perf_ctx, unsigned query_index)
{
   const struct intel_perf_query_info *query =
      &perf_ctx->perf->queries[query_index];

   switch (query->kind) {
   case INTEL_PERF_QUERY_TYPE_OA:
   case INTEL_PERF_QUERY_TYPE_RAW:
      if (perf_ctx->period_exponent == 0)
         return NULL;
      break;
   case INTEL_PERF_QUERY_TYPE_PIPELINE:
      break;
   }

   struct intel_perf_query_object *obj =
      calloc(1, sizeof(struct intel_perf_query_object));

   if (!obj)
      return NULL;

   obj->queryinfo = query;

   perf_ctx->n_query_instances++;
   return obj;
}
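
/* Illustrative usage sketch (not part of the original file): creating a query
 * object for one of the advertised queries. The index is only an example
 * value; NULL is returned for OA/RAW queries when no sampling exponent was
 * found during context init.
 *
 *   unsigned query_index = 0;
 *   struct intel_perf_query_object *q =
 *      intel_perf_new_query(perf_ctx, query_index);
 *   if (!q)
 *      return;
 */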

int
intel_perf_active_queries(struct intel_perf_context *perf_ctx,
                          const struct intel_perf_query_info *query)
{
   assert(perf_ctx->n_active_oa_queries == 0 || perf_ctx->n_active_pipeline_stats_queries == 0);

   switch (query->kind) {
   case INTEL_PERF_QUERY_TYPE_OA:
   case INTEL_PERF_QUERY_TYPE_RAW:
      return perf_ctx->n_active_oa_queries;
      break;

   case INTEL_PERF_QUERY_TYPE_PIPELINE:
      return perf_ctx->n_active_pipeline_stats_queries;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }
}

const struct intel_perf_query_info*
intel_perf_query_info(const struct intel_perf_query_object *query)
{
   return query->queryinfo;
}

struct intel_perf_context *
intel_perf_new_context(void *parent)
{
   struct intel_perf_context *ctx = rzalloc(parent, struct intel_perf_context);
   if (!ctx)
      fprintf(stderr, "%s: failed to alloc context\n", __func__);
   return ctx;
}

struct intel_perf_config *
intel_perf_config(struct intel_perf_context *ctx)
{
   return ctx->perf;
}

void
intel_perf_init_context(struct intel_perf_context *perf_ctx,
                        struct intel_perf_config *perf_cfg,
                        void * mem_ctx, /* ralloc context */
                        void * ctx,  /* driver context (eg, brw_context) */
                        void * bufmgr,  /* eg brw_bufmgr */
                        const struct intel_device_info *devinfo,
                        uint32_t hw_ctx,
                        int drm_fd)
{
   perf_ctx->perf = perf_cfg;
   perf_ctx->mem_ctx = mem_ctx;
   perf_ctx->ctx = ctx;
   perf_ctx->bufmgr = bufmgr;
   perf_ctx->drm_fd = drm_fd;
   perf_ctx->hw_ctx = hw_ctx;
   perf_ctx->devinfo = devinfo;

   perf_ctx->unaccumulated =
      ralloc_array(mem_ctx, struct intel_perf_query_object *, 2);
   perf_ctx->unaccumulated_elements = 0;
   perf_ctx->unaccumulated_array_size = 2;

   exec_list_make_empty(&perf_ctx->sample_buffers);
   exec_list_make_empty(&perf_ctx->free_sample_buffers);

   /* It's convenient to guarantee that this linked list of sample
    * buffers is never empty so we add an empty head so when we
    * Begin an OA query we can always take a reference on a buffer
    * in this list.
    */
   struct oa_sample_buf *buf = get_free_sample_buf(perf_ctx);
   exec_list_push_head(&perf_ctx->sample_buffers, &buf->link);

   perf_ctx->oa_stream_fd = -1;
   perf_ctx->next_query_start_report_id = 1000;

   /* The period_exponent gives a sampling period as follows:
    *   sample_period = timestamp_period * 2^(period_exponent + 1)
    *
    * The timestamp increments every 80ns (HSW), ~52ns (GFX9LP) or
    * ~83ns (GFX8/9).
    *
    * The counter overflow period is derived from the EuActive counter
    * which reads a counter that increments by the number of clock
    * cycles multiplied by the number of EUs. It can be calculated as:
    *
    * 2^(number of bits in A counter) / (n_eus * max_intel_freq * 2)
    *
    * (E.g. 40 EUs @ 1GHz = ~53ms)
    *
    * We select a sampling period shorter than that overflow period to
    * ensure we cannot see more than 1 counter overflow, otherwise we
    * could lose information.
    */

   int a_counter_in_bits = 32;
   if (devinfo->ver >= 8)
      a_counter_in_bits = 40;

   uint64_t overflow_period = pow(2, a_counter_in_bits) / (perf_cfg->sys_vars.n_eus *
       /* drop 1GHz freq to have units in nanoseconds */
       2);

   DBG("A counter overflow period: %"PRIu64"ns, %"PRIu64"ms (n_eus=%"PRIu64")\n",
       overflow_period, overflow_period / 1000000ul, perf_cfg->sys_vars.n_eus);

   int period_exponent = 0;
   uint64_t prev_sample_period, next_sample_period;
   for (int e = 0; e < 30; e++) {
      prev_sample_period = 1000000000ull * pow(2, e + 1) / devinfo->timestamp_frequency;
      next_sample_period = 1000000000ull * pow(2, e + 2) / devinfo->timestamp_frequency;

      /* Take the previous sampling period, lower than the overflow
       * period.
       */
      if (prev_sample_period < overflow_period &&
          next_sample_period > overflow_period)
         period_exponent = e + 1;
   }

   perf_ctx->period_exponent = period_exponent;

   if (period_exponent == 0) {
      DBG("WARNING: unable to find a sampling exponent\n");
   } else {
      DBG("OA sampling exponent: %i ~= %"PRIu64"ms\n", period_exponent,
            prev_sample_period / 1000000ul);
   }
}
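
/* Worked example for the overflow-period math above (hypothetical values,
 * not part of the original file): with 40-bit A counters and n_eus = 48,
 *
 *   overflow_period = 2^40 / (48 * 2) ns ~= 11.45e9 ns ~= 11.4 s
 *
 * and with a ~12MHz timestamp (~83.3ns per tick) the candidate sampling
 * periods scanned by the loop are 83.3ns * 2^(e + 1), so the selected
 * exponent is the largest one whose sampling period still stays below
 * that ~11.4 s overflow period.
 */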

/**
 * Add a query to the global list of "unaccumulated queries."
 *
 * Queries are tracked here until all the associated OA reports have
 * been accumulated via accumulate_oa_reports() after the end
 * MI_REPORT_PERF_COUNT has landed in query->oa.bo.
 */
static void
add_to_unaccumulated_query_list(struct intel_perf_context *perf_ctx,
                                struct intel_perf_query_object *obj)
{
   if (perf_ctx->unaccumulated_elements >=
       perf_ctx->unaccumulated_array_size)
   {
      perf_ctx->unaccumulated_array_size *= 1.5;
      perf_ctx->unaccumulated =
         reralloc(perf_ctx->mem_ctx, perf_ctx->unaccumulated,
                  struct intel_perf_query_object *,
                  perf_ctx->unaccumulated_array_size);
   }

   perf_ctx->unaccumulated[perf_ctx->unaccumulated_elements++] = obj;
}

/**
 * Emit MI_STORE_REGISTER_MEM commands to capture all of the
 * pipeline statistics for the performance query object.
 */
static void
snapshot_statistics_registers(struct intel_perf_context *ctx,
                              struct intel_perf_query_object *obj,
                              uint32_t offset_in_bytes)
{
   struct intel_perf_config *perf = ctx->perf;
   const struct intel_perf_query_info *query = obj->queryinfo;
   const int n_counters = query->n_counters;

   for (int i = 0; i < n_counters; i++) {
      const struct intel_perf_query_counter *counter = &query->counters[i];

      assert(counter->data_type == INTEL_PERF_COUNTER_DATA_TYPE_UINT64);

      perf->vtbl.store_register_mem(ctx->ctx, obj->pipeline_stats.bo,
                                    counter->pipeline_stat.reg, 8,
                                    offset_in_bytes + counter->offset);
   }
}

static void
snapshot_query_layout(struct intel_perf_context *perf_ctx,
                      struct intel_perf_query_object *query,
                      bool end_snapshot)
{
   struct intel_perf_config *perf_cfg = perf_ctx->perf;
   const struct intel_perf_query_field_layout *layout = &perf_cfg->query_layout;
   uint32_t offset = end_snapshot ? align(layout->size, layout->alignment) : 0;

   for (uint32_t f = 0; f < layout->n_fields; f++) {
      const struct intel_perf_query_field *field =
         &layout->fields[end_snapshot ? f : (layout->n_fields - 1 - f)];

      switch (field->type) {
      case INTEL_PERF_QUERY_FIELD_TYPE_MI_RPC:
         perf_cfg->vtbl.emit_mi_report_perf_count(perf_ctx->ctx, query->oa.bo,
                                                  offset + field->location,
                                                  query->oa.begin_report_id +
                                                  (end_snapshot ? 1 : 0));
         break;
      case INTEL_PERF_QUERY_FIELD_TYPE_SRM_PERFCNT:
      case INTEL_PERF_QUERY_FIELD_TYPE_SRM_RPSTAT:
      case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_B:
      case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_C:
         perf_cfg->vtbl.store_register_mem(perf_ctx->ctx, query->oa.bo,
                                           field->mmio_offset, field->size,
                                           offset + field->location);
         break;
      default:
         unreachable("Invalid field type");
      }
   }
}

bool
intel_perf_begin_query(struct intel_perf_context *perf_ctx,
                       struct intel_perf_query_object *query)
{
   struct intel_perf_config *perf_cfg = perf_ctx->perf;
   const struct intel_perf_query_info *queryinfo = query->queryinfo;

   /* XXX: We have to consider that the command parser unit that parses batch
    * buffer commands and is used to capture begin/end counter snapshots isn't
    * implicitly synchronized with what's currently running across other GPU
    * units (such as the EUs running shaders) that the performance counters are
    * associated with.
    *
    * The intention of performance queries is to measure the work associated
    * with commands between the begin/end delimiters and so for that to be the
    * case we need to explicitly synchronize the parsing of commands to capture
    * Begin/End counter snapshots with what's running across other parts of the
    * GPU.
    *
    * When the command parser reaches a Begin marker it effectively needs to
    * drain everything currently running on the GPU until the hardware is idle
    * before capturing the first snapshot of counters - otherwise the results
    * would also be measuring the effects of earlier commands.
    *
    * When the command parser reaches an End marker it needs to stall until
    * everything currently running on the GPU has finished before capturing the
    * end snapshot - otherwise the results won't be a complete representation
    * of the work.
    *
    * To achieve this, we stall the pipeline at pixel scoreboard (preventing any
    * additional work from being processed by the pipeline until all pixels of
    * the previous draw have been completed).
    *
    * N.B. The final results are based on deltas of counters between (inside)
    * Begin/End markers so even though the total wall clock time of the
    * workload is stretched by larger pipeline bubbles the bubbles themselves
    * are generally invisible to the query results. Whether that's a good or a
    * bad thing depends on the use case. For a lower real-time impact while
    * capturing metrics then periodic sampling may be a better choice than
    * INTEL_performance_query.
    *
    *
    * This is our Begin synchronization point to drain current work on the
    * GPU before we capture our first counter snapshot...
    */
   perf_cfg->vtbl.emit_stall_at_pixel_scoreboard(perf_ctx->ctx);

   switch (queryinfo->kind) {
   case INTEL_PERF_QUERY_TYPE_OA:
   case INTEL_PERF_QUERY_TYPE_RAW: {

      /* Opening an i915 perf stream implies exclusive access to the OA unit
       * which will generate counter reports for a specific counter set with a
       * specific layout/format so we can't begin any OA based queries that
       * require a different counter set or format unless we get an opportunity
       * to close the stream and open a new one...
       */
      uint64_t metric_id = get_metric_id(perf_ctx->perf, queryinfo);

      if (perf_ctx->oa_stream_fd != -1 &&
          perf_ctx->current_oa_metrics_set_id != metric_id) {

         if (perf_ctx->n_oa_users != 0) {
            DBG("WARNING: Begin failed already using perf config=%i/%"PRIu64"\n",
                perf_ctx->current_oa_metrics_set_id, metric_id);
            return false;
         } else
            intel_perf_close(perf_ctx, queryinfo);
      }

      /* If the OA counters aren't already on, enable them. */
      if (perf_ctx->oa_stream_fd == -1) {
         assert(perf_ctx->period_exponent != 0);

         if (!intel_perf_open(perf_ctx, metric_id, queryinfo->oa_format,
                            perf_ctx->period_exponent, perf_ctx->drm_fd,
                            perf_ctx->hw_ctx, false))
            return false;
      } else {
         assert(perf_ctx->current_oa_metrics_set_id == metric_id &&
                perf_ctx->current_oa_format == queryinfo->oa_format);
      }

      if (!inc_n_users(perf_ctx)) {
         DBG("WARNING: Error enabling i915 perf stream: %m\n");
         return false;
      }

      if (query->oa.bo) {
         perf_cfg->vtbl.bo_unreference(query->oa.bo);
         query->oa.bo = NULL;
      }

      query->oa.bo = perf_cfg->vtbl.bo_alloc(perf_ctx->bufmgr,
                                             "perf. query OA MI_RPC bo",
                                             MI_RPC_BO_SIZE);
#ifdef DEBUG
      /* Pre-filling the BO helps debug whether writes landed. */
      void *map = perf_cfg->vtbl.bo_map(perf_ctx->ctx, query->oa.bo, MAP_WRITE);
      memset(map, 0x80, MI_RPC_BO_SIZE);
      perf_cfg->vtbl.bo_unmap(query->oa.bo);
#endif

      query->oa.begin_report_id = perf_ctx->next_query_start_report_id;
      perf_ctx->next_query_start_report_id += 2;

      snapshot_query_layout(perf_ctx, query, false /* end_snapshot */);

      ++perf_ctx->n_active_oa_queries;

      /* No already-buffered samples can possibly be associated with this query
       * so create a marker within the list of sample buffers enabling us to
       * easily ignore earlier samples when processing this query after
       * completion.
       */
      assert(!exec_list_is_empty(&perf_ctx->sample_buffers));
      query->oa.samples_head = exec_list_get_tail(&perf_ctx->sample_buffers);

      struct oa_sample_buf *buf =
         exec_node_data(struct oa_sample_buf, query->oa.samples_head, link);

      /* This reference will ensure that future/following sample
       * buffers (that may relate to this query) can't be freed until
       * this drops to zero.
       */
      buf->refcount++;

      intel_perf_query_result_clear(&query->oa.result);
      query->oa.results_accumulated = false;

      add_to_unaccumulated_query_list(perf_ctx, query);
      break;
   }

   case INTEL_PERF_QUERY_TYPE_PIPELINE:
      if (query->pipeline_stats.bo) {
         perf_cfg->vtbl.bo_unreference(query->pipeline_stats.bo);
         query->pipeline_stats.bo = NULL;
      }

      query->pipeline_stats.bo =
         perf_cfg->vtbl.bo_alloc(perf_ctx->bufmgr,
                                 "perf. query pipeline stats bo",
                                 STATS_BO_SIZE);

      /* Take starting snapshots. */
      snapshot_statistics_registers(perf_ctx, query, 0);

      ++perf_ctx->n_active_pipeline_stats_queries;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }

   return true;
}

void
intel_perf_end_query(struct intel_perf_context *perf_ctx,
                     struct intel_perf_query_object *query)
{
   struct intel_perf_config *perf_cfg = perf_ctx->perf;

   /* Ensure that the work associated with the queried commands will have
    * finished before taking our query end counter readings.
    *
    * For more details see comment in brw_begin_perf_query for
    * corresponding flush.
    */
   perf_cfg->vtbl.emit_stall_at_pixel_scoreboard(perf_ctx->ctx);

   switch (query->queryinfo->kind) {
   case INTEL_PERF_QUERY_TYPE_OA:
   case INTEL_PERF_QUERY_TYPE_RAW:

      /* NB: It's possible that the query will have already been marked
       * as 'accumulated' if an error was seen while reading samples
       * from perf. In this case we mustn't try and emit a closing
       * MI_RPC command in case the OA unit has already been disabled
       */
      if (!query->oa.results_accumulated)
         snapshot_query_layout(perf_ctx, query, true /* end_snapshot */);

      --perf_ctx->n_active_oa_queries;

      /* NB: even though the query has now ended, it can't be accumulated
       * until the end MI_REPORT_PERF_COUNT snapshot has been written
       * to query->oa.bo
       */
      break;

   case INTEL_PERF_QUERY_TYPE_PIPELINE:
      snapshot_statistics_registers(perf_ctx, query,
                                    STATS_BO_END_OFFSET_BYTES);
      --perf_ctx->n_active_pipeline_stats_queries;
      break;

   default:
      unreachable("Unknown query type");
      break;
   }
}
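
/* Illustrative driver-side usage sketch (not part of the original file): the
 * expected call sequence around the commands being measured. current_batch is
 * whatever batch handle the driver's vtbl callbacks understand.
 *
 *   if (!intel_perf_begin_query(perf_ctx, query))
 *      return;
 *   // ... emit the draw calls to be measured ...
 *   intel_perf_end_query(perf_ctx, query);
 *
 *   if (!intel_perf_is_query_ready(perf_ctx, query, current_batch))
 *      intel_perf_wait_query(perf_ctx, query, current_batch);
 */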

bool intel_perf_oa_stream_ready(struct intel_perf_context *perf_ctx)
{
   struct pollfd pfd;

   pfd.fd = perf_ctx->oa_stream_fd;
   pfd.events = POLLIN;
   pfd.revents = 0;

   if (poll(&pfd, 1, 0) < 0) {
      DBG("Error polling OA stream\n");
      return false;
   }

   if (!(pfd.revents & POLLIN))
      return false;

   return true;
}

ssize_t
intel_perf_read_oa_stream(struct intel_perf_context *perf_ctx,
                          void* buf,
                          size_t nbytes)
{
   return read(perf_ctx->oa_stream_fd, buf, nbytes);
}
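
/* Illustrative usage sketch (not part of the original file): draining whatever
 * periodic samples are currently available without blocking, since the stream
 * is opened with I915_PERF_FLAG_FD_NONBLOCK.
 *
 *   uint8_t data[4096];
 *   while (intel_perf_oa_stream_ready(perf_ctx)) {
 *      ssize_t len = intel_perf_read_oa_stream(perf_ctx, data, sizeof(data));
 *      if (len <= 0)
 *         break;
 *      // ... walk the drm_i915_perf_record_header records in data[0..len) ...
 *   }
 */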
9947ec681f3Smrg
9957ec681f3Smrgenum OaReadStatus {
9967ec681f3Smrg   OA_READ_STATUS_ERROR,
9977ec681f3Smrg   OA_READ_STATUS_UNFINISHED,
9987ec681f3Smrg   OA_READ_STATUS_FINISHED,
9997ec681f3Smrg};
10007ec681f3Smrg
10017ec681f3Smrgstatic enum OaReadStatus
10027ec681f3Smrgread_oa_samples_until(struct intel_perf_context *perf_ctx,
10037ec681f3Smrg                      uint32_t start_timestamp,
10047ec681f3Smrg                      uint32_t end_timestamp)
10057ec681f3Smrg{
10067ec681f3Smrg   struct exec_node *tail_node =
10077ec681f3Smrg      exec_list_get_tail(&perf_ctx->sample_buffers);
10087ec681f3Smrg   struct oa_sample_buf *tail_buf =
10097ec681f3Smrg      exec_node_data(struct oa_sample_buf, tail_node, link);
10107ec681f3Smrg   uint32_t last_timestamp =
10117ec681f3Smrg      tail_buf->len == 0 ? start_timestamp : tail_buf->last_timestamp;
10127ec681f3Smrg
10137ec681f3Smrg   while (1) {
10147ec681f3Smrg      struct oa_sample_buf *buf = get_free_sample_buf(perf_ctx);
10157ec681f3Smrg      uint32_t offset;
10167ec681f3Smrg      int len;
10177ec681f3Smrg
10187ec681f3Smrg      while ((len = read(perf_ctx->oa_stream_fd, buf->buf,
10197ec681f3Smrg                         sizeof(buf->buf))) < 0 && errno == EINTR)
10207ec681f3Smrg         ;
10217ec681f3Smrg
10227ec681f3Smrg      if (len <= 0) {
10237ec681f3Smrg         exec_list_push_tail(&perf_ctx->free_sample_buffers, &buf->link);
10247ec681f3Smrg
10257ec681f3Smrg         if (len == 0) {
10267ec681f3Smrg            DBG("Spurious EOF reading i915 perf samples\n");
10277ec681f3Smrg            return OA_READ_STATUS_ERROR;
10287ec681f3Smrg         }
10297ec681f3Smrg
10307ec681f3Smrg         if (errno != EAGAIN) {
10317ec681f3Smrg            DBG("Error reading i915 perf samples: %m\n");
10327ec681f3Smrg            return OA_READ_STATUS_ERROR;
10337ec681f3Smrg         }
10347ec681f3Smrg
10357ec681f3Smrg         if ((last_timestamp - start_timestamp) >= INT32_MAX)
10367ec681f3Smrg            return OA_READ_STATUS_UNFINISHED;
10377ec681f3Smrg
10387ec681f3Smrg         if ((last_timestamp - start_timestamp) <
10397ec681f3Smrg              (end_timestamp - start_timestamp))
10407ec681f3Smrg            return OA_READ_STATUS_UNFINISHED;
10417ec681f3Smrg
10427ec681f3Smrg         return OA_READ_STATUS_FINISHED;
10437ec681f3Smrg      }
10447ec681f3Smrg
10457ec681f3Smrg      buf->len = len;
10467ec681f3Smrg      exec_list_push_tail(&perf_ctx->sample_buffers, &buf->link);
10477ec681f3Smrg
10487ec681f3Smrg      /* Go through the reports and update the last timestamp. */
10497ec681f3Smrg      offset = 0;
10507ec681f3Smrg      while (offset < buf->len) {
10517ec681f3Smrg         const struct drm_i915_perf_record_header *header =
10527ec681f3Smrg            (const struct drm_i915_perf_record_header *) &buf->buf[offset];
10537ec681f3Smrg         uint32_t *report = (uint32_t *) (header + 1);
10547ec681f3Smrg
10557ec681f3Smrg         if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
10567ec681f3Smrg            last_timestamp = report[1];
10577ec681f3Smrg
10587ec681f3Smrg         offset += header->size;
10597ec681f3Smrg      }
10607ec681f3Smrg
10617ec681f3Smrg      buf->last_timestamp = last_timestamp;
10627ec681f3Smrg   }
10637ec681f3Smrg
10647ec681f3Smrg   unreachable("not reached");
10657ec681f3Smrg   return OA_READ_STATUS_ERROR;
10667ec681f3Smrg}
10677ec681f3Smrg
10687ec681f3Smrg/**
10697ec681f3Smrg * Try to read all the reports until either the delimiting timestamp
10707ec681f3Smrg * or an error arises.
10717ec681f3Smrg */
10727ec681f3Smrgstatic bool
10737ec681f3Smrgread_oa_samples_for_query(struct intel_perf_context *perf_ctx,
10747ec681f3Smrg                          struct intel_perf_query_object *query,
10757ec681f3Smrg                          void *current_batch)
10767ec681f3Smrg{
10777ec681f3Smrg   uint32_t *start;
10787ec681f3Smrg   uint32_t *last;
10797ec681f3Smrg   uint32_t *end;
10807ec681f3Smrg   struct intel_perf_config *perf_cfg = perf_ctx->perf;
10817ec681f3Smrg
10827ec681f3Smrg   /* We need the MI_REPORT_PERF_COUNT to land before we can start
10837ec681f3Smrg    * accumulate. */
10847ec681f3Smrg   assert(!perf_cfg->vtbl.batch_references(current_batch, query->oa.bo) &&
10857ec681f3Smrg          !perf_cfg->vtbl.bo_busy(query->oa.bo));
10867ec681f3Smrg
10877ec681f3Smrg   /* Map the BO once here and let accumulate_oa_reports() unmap
10887ec681f3Smrg    * it. */
10897ec681f3Smrg   if (query->oa.map == NULL)
10907ec681f3Smrg      query->oa.map = perf_cfg->vtbl.bo_map(perf_ctx->ctx, query->oa.bo, MAP_READ);
10917ec681f3Smrg
10927ec681f3Smrg   start = last = query->oa.map + field_offset(false, 0);
10937ec681f3Smrg   end = query->oa.map + field_offset(true, 0);
10947ec681f3Smrg
10957ec681f3Smrg   if (start[0] != query->oa.begin_report_id) {
10967ec681f3Smrg      DBG("Spurious start report id=%"PRIu32"\n", start[0]);
10977ec681f3Smrg      return true;
10987ec681f3Smrg   }
10997ec681f3Smrg   if (end[0] != (query->oa.begin_report_id + 1)) {
11007ec681f3Smrg      DBG("Spurious end report id=%"PRIu32"\n", end[0]);
11017ec681f3Smrg      return true;
11027ec681f3Smrg   }
11037ec681f3Smrg
11047ec681f3Smrg   /* Read the reports until the end timestamp. */
11057ec681f3Smrg   switch (read_oa_samples_until(perf_ctx, start[1], end[1])) {
11067ec681f3Smrg   case OA_READ_STATUS_ERROR:
11077ec681f3Smrg      FALLTHROUGH; /* Let accumulate_oa_reports() deal with the error. */
11087ec681f3Smrg   case OA_READ_STATUS_FINISHED:
11097ec681f3Smrg      return true;
11107ec681f3Smrg   case OA_READ_STATUS_UNFINISHED:
11117ec681f3Smrg      return false;
11127ec681f3Smrg   }
11137ec681f3Smrg
11147ec681f3Smrg   unreachable("invalid read status");
11157ec681f3Smrg   return false;
11167ec681f3Smrg}
11177ec681f3Smrg
11187ec681f3Smrgvoid
11197ec681f3Smrgintel_perf_wait_query(struct intel_perf_context *perf_ctx,
11207ec681f3Smrg                      struct intel_perf_query_object *query,
11217ec681f3Smrg                      void *current_batch)
11227ec681f3Smrg{
11237ec681f3Smrg   struct intel_perf_config *perf_cfg = perf_ctx->perf;
11247ec681f3Smrg   struct brw_bo *bo = NULL;
11257ec681f3Smrg
11267ec681f3Smrg   switch (query->queryinfo->kind) {
11277ec681f3Smrg   case INTEL_PERF_QUERY_TYPE_OA:
11287ec681f3Smrg   case INTEL_PERF_QUERY_TYPE_RAW:
11297ec681f3Smrg      bo = query->oa.bo;
11307ec681f3Smrg      break;
11317ec681f3Smrg
11327ec681f3Smrg   case INTEL_PERF_QUERY_TYPE_PIPELINE:
11337ec681f3Smrg      bo = query->pipeline_stats.bo;
11347ec681f3Smrg      break;
11357ec681f3Smrg
11367ec681f3Smrg   default:
11377ec681f3Smrg      unreachable("Unknown query type");
11387ec681f3Smrg      break;
11397ec681f3Smrg   }
11407ec681f3Smrg
11417ec681f3Smrg   if (bo == NULL)
11427ec681f3Smrg      return;
11437ec681f3Smrg
11447ec681f3Smrg   /* If the current batch references our results bo then we need to
11457ec681f3Smrg    * flush first...
11467ec681f3Smrg    */
11477ec681f3Smrg   if (perf_cfg->vtbl.batch_references(current_batch, bo))
11487ec681f3Smrg      perf_cfg->vtbl.batchbuffer_flush(perf_ctx->ctx, __FILE__, __LINE__);
11497ec681f3Smrg
11507ec681f3Smrg   perf_cfg->vtbl.bo_wait_rendering(bo);
11517ec681f3Smrg}
11527ec681f3Smrg
11537ec681f3Smrgbool
11547ec681f3Smrgintel_perf_is_query_ready(struct intel_perf_context *perf_ctx,
11557ec681f3Smrg                          struct intel_perf_query_object *query,
11567ec681f3Smrg                          void *current_batch)
11577ec681f3Smrg{
11587ec681f3Smrg   struct intel_perf_config *perf_cfg = perf_ctx->perf;
11597ec681f3Smrg
11607ec681f3Smrg   switch (query->queryinfo->kind) {
11617ec681f3Smrg   case INTEL_PERF_QUERY_TYPE_OA:
11627ec681f3Smrg   case INTEL_PERF_QUERY_TYPE_RAW:
11637ec681f3Smrg      return (query->oa.results_accumulated ||
11647ec681f3Smrg              (query->oa.bo &&
11657ec681f3Smrg               !perf_cfg->vtbl.batch_references(current_batch, query->oa.bo) &&
11667ec681f3Smrg               !perf_cfg->vtbl.bo_busy(query->oa.bo)));
11677ec681f3Smrg
11687ec681f3Smrg   case INTEL_PERF_QUERY_TYPE_PIPELINE:
11697ec681f3Smrg      return (query->pipeline_stats.bo &&
11707ec681f3Smrg              !perf_cfg->vtbl.batch_references(current_batch, query->pipeline_stats.bo) &&
11717ec681f3Smrg              !perf_cfg->vtbl.bo_busy(query->pipeline_stats.bo));
11727ec681f3Smrg
11737ec681f3Smrg   default:
11747ec681f3Smrg      unreachable("Unknown query type");
11757ec681f3Smrg      break;
11767ec681f3Smrg   }
11777ec681f3Smrg
11787ec681f3Smrg   return false;
11797ec681f3Smrg}
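
/* Sketch of a non-blocking poll built on the helper above; the caller retries
 * on a later frame when this returns false. The function name and buffers are
 * hypothetical.
 *
 *    static bool
 *    frontend_try_read(struct intel_perf_context *perf_ctx,
 *                      struct intel_perf_query_object *query,
 *                      void *batch, unsigned *out, int out_size)
 *    {
 *       unsigned written = 0;
 *
 *       if (!intel_perf_is_query_ready(perf_ctx, query, batch))
 *          return false;
 *
 *       intel_perf_get_query_data(perf_ctx, query, batch,
 *                                 out_size, out, &written);
 *       return true;
 *    }
 */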
11807ec681f3Smrg
11817ec681f3Smrg/**
11827ec681f3Smrg * Remove a query from the global list of unaccumulated queries once
11837ec681f3Smrg * the OA reports associated with the query have been successfully
11847ec681f3Smrg * accumulated in accumulate_oa_reports(), or when discarding unwanted
11857ec681f3Smrg * query results.
11867ec681f3Smrg */
11877ec681f3Smrgstatic void
11887ec681f3Smrgdrop_from_unaccumulated_query_list(struct intel_perf_context *perf_ctx,
11897ec681f3Smrg                                   struct intel_perf_query_object *query)
11907ec681f3Smrg{
11917ec681f3Smrg   for (int i = 0; i < perf_ctx->unaccumulated_elements; i++) {
11927ec681f3Smrg      if (perf_ctx->unaccumulated[i] == query) {
11937ec681f3Smrg         int last_elt = --perf_ctx->unaccumulated_elements;
11947ec681f3Smrg
11957ec681f3Smrg         if (i == last_elt)
11967ec681f3Smrg            perf_ctx->unaccumulated[i] = NULL;
11977ec681f3Smrg         else {
11987ec681f3Smrg            perf_ctx->unaccumulated[i] =
11997ec681f3Smrg               perf_ctx->unaccumulated[last_elt];
12007ec681f3Smrg         }
12017ec681f3Smrg
12027ec681f3Smrg         break;
12037ec681f3Smrg      }
12047ec681f3Smrg   }
12057ec681f3Smrg
12067ec681f3Smrg   /* Drop our samples_head reference so that associated periodic
12077ec681f3Smrg    * sample data buffers can potentially be reaped if they aren't
12087ec681f3Smrg    * referenced by any other queries...
12097ec681f3Smrg    */
12107ec681f3Smrg
12117ec681f3Smrg   struct oa_sample_buf *buf =
12127ec681f3Smrg      exec_node_data(struct oa_sample_buf, query->oa.samples_head, link);
12137ec681f3Smrg
12147ec681f3Smrg   assert(buf->refcount > 0);
12157ec681f3Smrg   buf->refcount--;
12167ec681f3Smrg
12177ec681f3Smrg   query->oa.samples_head = NULL;
12187ec681f3Smrg
12197ec681f3Smrg   reap_old_sample_buffers(perf_ctx);
12207ec681f3Smrg}
12217ec681f3Smrg
12227ec681f3Smrg/* In general, if we see anything spurious while accumulating results,
12237ec681f3Smrg * we don't try to continue accumulating the current query and hope for
12247ec681f3Smrg * the best; we scrap anything outstanding and start fresh with new
12257ec681f3Smrg * queries.
12267ec681f3Smrg */
12277ec681f3Smrgstatic void
12287ec681f3Smrgdiscard_all_queries(struct intel_perf_context *perf_ctx)
12297ec681f3Smrg{
12307ec681f3Smrg   while (perf_ctx->unaccumulated_elements) {
12317ec681f3Smrg      struct intel_perf_query_object *query = perf_ctx->unaccumulated[0];
12327ec681f3Smrg
12337ec681f3Smrg      query->oa.results_accumulated = true;
12347ec681f3Smrg      drop_from_unaccumulated_query_list(perf_ctx, query);
12357ec681f3Smrg
12367ec681f3Smrg      dec_n_users(perf_ctx);
12377ec681f3Smrg   }
12387ec681f3Smrg}
12397ec681f3Smrg
12407ec681f3Smrg/* Checks the validity bit for the context ID (dword 2) of an OA report. */
12417ec681f3Smrgstatic bool
12427ec681f3Smrgoa_report_ctx_id_valid(const struct intel_device_info *devinfo,
12437ec681f3Smrg                       const uint32_t *report)
12447ec681f3Smrg{
12457ec681f3Smrg   assert(devinfo->ver >= 8);
12467ec681f3Smrg   if (devinfo->ver == 8)
12477ec681f3Smrg      return (report[0] & (1 << 25)) != 0;
12487ec681f3Smrg   return (report[0] & (1 << 16)) != 0;
12497ec681f3Smrg}
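
/* For reference, a sketch of the OA report dwords this code relies on,
 * derived from how they are used below rather than from a full description
 * of the hardware format:
 *
 *    report[0]   report ID / reason, including the context-ID-valid bit
 *                checked above (bit 25 on Gfx8, bit 16 on Gfx9+)
 *    report[1]   32-bit GPU timestamp
 *    report[2]   ID of the hardware context the report is associated with
 *
 * accumulate_oa_reports() uses report[1] to order reports relative to the
 * query's begin/end markers and report[2] to filter out deltas belonging to
 * other contexts.
 */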
12507ec681f3Smrg
12517ec681f3Smrg/**
12527ec681f3Smrg * Accumulate raw OA counter values based on deltas between pairs of
12537ec681f3Smrg * OA reports.
12547ec681f3Smrg *
12557ec681f3Smrg * Accumulation starts from the first report captured via
12567ec681f3Smrg * MI_REPORT_PERF_COUNT (MI_RPC) by brw_begin_perf_query() until the
12577ec681f3Smrg * last MI_RPC report requested by brw_end_perf_query(). Between these
12587ec681f3Smrg * two reports there may also be some number of periodically sampled OA
12597ec681f3Smrg * reports collected via the i915 perf interface, depending on the
12607ec681f3Smrg * duration of the query.
12617ec681f3Smrg *
12627ec681f3Smrg * These periodic snapshots help us handle counter overflow correctly
12637ec681f3Smrg * by being frequent enough that we don't miss multiple
12647ec681f3Smrg * overflows of a counter between snapshots. For Gfx8+ the i915 perf
12657ec681f3Smrg * snapshots provide the extra context-switch reports that let us
12667ec681f3Smrg * subtract out the progress of counters associated with other
12677ec681f3Smrg * contexts running on the system.
12687ec681f3Smrg */
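/* A worked sketch of the Gfx8+ filtering below, using a hypothetical report
 * sequence: with the begin snapshot B, a periodic sample P1 in our context,
 * a context-switch report X belonging to another context, a later sample P2
 * back in our context, and the end snapshot E, a counter accumulates as
 *
 *    (P1 - B) + (X - P1) + (E - P2)
 *
 * i.e. the delta (P2 - X), covering the time the other context was running,
 * is dropped and the query is marked disjoint.
 */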
12697ec681f3Smrgstatic void
12707ec681f3Smrgaccumulate_oa_reports(struct intel_perf_context *perf_ctx,
12717ec681f3Smrg                      struct intel_perf_query_object *query)
12727ec681f3Smrg{
12737ec681f3Smrg   const struct intel_device_info *devinfo = perf_ctx->devinfo;
12747ec681f3Smrg   uint32_t *start;
12757ec681f3Smrg   uint32_t *last;
12767ec681f3Smrg   uint32_t *end;
12777ec681f3Smrg   struct exec_node *first_samples_node;
12787ec681f3Smrg   bool last_report_ctx_match = true;
12797ec681f3Smrg   int out_duration = 0;
12807ec681f3Smrg
12817ec681f3Smrg   assert(query->oa.map != NULL);
12827ec681f3Smrg
12837ec681f3Smrg   start = last = query->oa.map + field_offset(false, 0);
12847ec681f3Smrg   end = query->oa.map + field_offset(true, 0);
12857ec681f3Smrg
12867ec681f3Smrg   if (start[0] != query->oa.begin_report_id) {
12877ec681f3Smrg      DBG("Spurious start report id=%"PRIu32"\n", start[0]);
12887ec681f3Smrg      goto error;
12897ec681f3Smrg   }
12907ec681f3Smrg   if (end[0] != (query->oa.begin_report_id + 1)) {
12917ec681f3Smrg      DBG("Spurious end report id=%"PRIu32"\n", end[0]);
12927ec681f3Smrg      goto error;
12937ec681f3Smrg   }
12947ec681f3Smrg
12957ec681f3Smrg   /* On Gfx12+ OA reports are sourced from per-context counters, so we never
12967ec681f3Smrg    * have to look at the global OA buffer. Yay \o/
12977ec681f3Smrg    */
12987ec681f3Smrg   if (perf_ctx->devinfo->ver >= 12) {
12997ec681f3Smrg      last = start;
13007ec681f3Smrg      goto end;
13017ec681f3Smrg   }
13027ec681f3Smrg
13037ec681f3Smrg   /* See if we have any periodic reports to accumulate too... */
13047ec681f3Smrg
13057ec681f3Smrg   /* N.B. The oa.samples_head was set when the query began and
13067ec681f3Smrg    * pointed to the tail of the perf_ctx->sample_buffers list at
13077ec681f3Smrg    * the time the query started. Since the buffer existed before the
13087ec681f3Smrg    * first MI_REPORT_PERF_COUNT command was emitted, we know that no
13097ec681f3Smrg    * data in this particular node's buffer can possibly be associated
13107ec681f3Smrg    * with the query - so skip ahead one...
13117ec681f3Smrg    */
13127ec681f3Smrg   first_samples_node = query->oa.samples_head->next;
13137ec681f3Smrg
13147ec681f3Smrg   foreach_list_typed_from(struct oa_sample_buf, buf, link,
13157ec681f3Smrg                           &perf_ctx->sample_buffers,
13167ec681f3Smrg                           first_samples_node)
13177ec681f3Smrg   {
13187ec681f3Smrg      int offset = 0;
13197ec681f3Smrg
13207ec681f3Smrg      while (offset < buf->len) {
13217ec681f3Smrg         const struct drm_i915_perf_record_header *header =
13227ec681f3Smrg            (const struct drm_i915_perf_record_header *)(buf->buf + offset);
13237ec681f3Smrg
13247ec681f3Smrg         assert(header->size != 0);
13257ec681f3Smrg         assert(header->size <= buf->len);
13267ec681f3Smrg
13277ec681f3Smrg         offset += header->size;
13287ec681f3Smrg
13297ec681f3Smrg         switch (header->type) {
13307ec681f3Smrg         case DRM_I915_PERF_RECORD_SAMPLE: {
13317ec681f3Smrg            uint32_t *report = (uint32_t *)(header + 1);
13327ec681f3Smrg            bool report_ctx_match = true;
13337ec681f3Smrg            bool add = true;
13347ec681f3Smrg
13357ec681f3Smrg            /* Ignore reports that come before the start marker.
13367ec681f3Smrg             * (Note: takes care to allow for overflow of the 32-bit timestamps)
13377ec681f3Smrg             */
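            /* A note on the arithmetic above and below (spelling out the
             * assumption behind the existing note): report[1], start[1] and
             * end[1] are unsigned 32-bit timestamps, so a report taken
             * shortly before a marker wraps around to a very large positive
             * delta. Scaling the delta to nanoseconds and comparing it
             * against 5 seconds separates such wrapped deltas from genuinely
             * later reports, assuming valid samples within a query are never
             * that far apart.
             */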
13387ec681f3Smrg            if (intel_device_info_timebase_scale(devinfo,
13397ec681f3Smrg                                               report[1] - start[1]) > 5000000000) {
13407ec681f3Smrg               continue;
13417ec681f3Smrg            }
13427ec681f3Smrg
13437ec681f3Smrg            /* Ignore reports that come after the end marker.
13447ec681f3Smrg             * (Note: takes care to allow for overflow of the 32-bit timestamps)
13457ec681f3Smrg             */
13467ec681f3Smrg            if (intel_device_info_timebase_scale(devinfo,
13477ec681f3Smrg                                               report[1] - end[1]) <= 5000000000) {
13487ec681f3Smrg               goto end;
13497ec681f3Smrg            }
13507ec681f3Smrg
13517ec681f3Smrg            /* For Gfx8+, since the counters continue while other
13527ec681f3Smrg             * contexts are running, we need to discount any unrelated
13537ec681f3Smrg             * deltas. The hardware automatically generates a report
13547ec681f3Smrg             * on context switch, which gives us a new reference point
13557ec681f3Smrg             * from which to continue adding deltas.
13567ec681f3Smrg             *
13577ec681f3Smrg             * For Haswell we can rely on the HW to stop the progress
13587ec681f3Smrg             * of OA counters while any other context is active.
13597ec681f3Smrg             */
13607ec681f3Smrg            if (devinfo->ver >= 8) {
13617ec681f3Smrg               /* Consider that the current report matches our context only if
13627ec681f3Smrg                * the report says the report ID is valid.
13637ec681f3Smrg                */
13647ec681f3Smrg               report_ctx_match = oa_report_ctx_id_valid(devinfo, report) &&
13657ec681f3Smrg                  report[2] == start[2];
13667ec681f3Smrg               if (report_ctx_match)
13677ec681f3Smrg                  out_duration = 0;
13687ec681f3Smrg               else
13697ec681f3Smrg                  out_duration++;
13707ec681f3Smrg
13717ec681f3Smrg               /* Only add the delta between <last, report> if the last report
13727ec681f3Smrg                * was clearly identified as our context, or if we have at most
13737ec681f3Smrg                * 1 report without a matching ID.
13747ec681f3Smrg                *
13757ec681f3Smrg                * The OA unit will sometimes label reports with an invalid
13767ec681f3Smrg                * context ID when i915 rewrites the execlist submit register
13777ec681f3Smrg                * with the same context as the one currently running. This
13787ec681f3Smrg                * happens when i915 wants to notify the HW of ringbuffer tail
13797ec681f3Smrg                * register update. We have to consider this report as part of
13807ec681f3Smrg                * our context as the 3d pipeline behind the OACS unit is still
13817ec681f3Smrg                * processing the operations started at the previous execlist
13827ec681f3Smrg                * submission.
13837ec681f3Smrg                */
13847ec681f3Smrg               add = last_report_ctx_match && out_duration < 2;
13857ec681f3Smrg            }
13867ec681f3Smrg
13877ec681f3Smrg            if (add) {
13887ec681f3Smrg               intel_perf_query_result_accumulate(&query->oa.result,
13897ec681f3Smrg                                                query->queryinfo,
13907ec681f3Smrg                                                devinfo,
13917ec681f3Smrg                                                last, report);
13927ec681f3Smrg            } else {
13937ec681f3Smrg               /* We're not adding the delta because we've identified that it's
13947ec681f3Smrg                * not for the context we're filtering for. We consider the
13957ec681f3Smrg                * query to have been split (hence query_disjoint below).
13967ec681f3Smrg                */
13977ec681f3Smrg               query->oa.result.query_disjoint = true;
13987ec681f3Smrg            }
13997ec681f3Smrg
14007ec681f3Smrg            last = report;
14017ec681f3Smrg            last_report_ctx_match = report_ctx_match;
14027ec681f3Smrg
14037ec681f3Smrg            break;
14047ec681f3Smrg         }
14057ec681f3Smrg
14067ec681f3Smrg         case DRM_I915_PERF_RECORD_OA_BUFFER_LOST:
14077ec681f3Smrg             DBG("i915 perf: OA error: all reports lost\n");
14087ec681f3Smrg             goto error;
14097ec681f3Smrg         case DRM_I915_PERF_RECORD_OA_REPORT_LOST:
14107ec681f3Smrg             DBG("i915 perf: OA report lost\n");
14117ec681f3Smrg             break;
14127ec681f3Smrg         }
14137ec681f3Smrg      }
14147ec681f3Smrg   }
14157ec681f3Smrg
14167ec681f3Smrgend:
14177ec681f3Smrg
14187ec681f3Smrg   intel_perf_query_result_accumulate(&query->oa.result, query->queryinfo,
14197ec681f3Smrg                                    devinfo, last, end);
14207ec681f3Smrg
14217ec681f3Smrg   query->oa.results_accumulated = true;
14227ec681f3Smrg   drop_from_unaccumulated_query_list(perf_ctx, query);
14237ec681f3Smrg   dec_n_users(perf_ctx);
14247ec681f3Smrg
14257ec681f3Smrg   return;
14267ec681f3Smrg
14277ec681f3Smrgerror:
14287ec681f3Smrg
14297ec681f3Smrg   discard_all_queries(perf_ctx);
14307ec681f3Smrg}
14317ec681f3Smrg
14327ec681f3Smrgvoid
14337ec681f3Smrgintel_perf_delete_query(struct intel_perf_context *perf_ctx,
14347ec681f3Smrg                        struct intel_perf_query_object *query)
14357ec681f3Smrg{
14367ec681f3Smrg   struct intel_perf_config *perf_cfg = perf_ctx->perf;
14377ec681f3Smrg
14387ec681f3Smrg   /* We can assume that the frontend waits for a query to complete
14397ec681f3Smrg    * before ever calling into here, so we don't have to worry about
14407ec681f3Smrg    * deleting an in-flight query object.
14417ec681f3Smrg    */
14427ec681f3Smrg   switch (query->queryinfo->kind) {
14437ec681f3Smrg   case INTEL_PERF_QUERY_TYPE_OA:
14447ec681f3Smrg   case INTEL_PERF_QUERY_TYPE_RAW:
14457ec681f3Smrg      if (query->oa.bo) {
14467ec681f3Smrg         if (!query->oa.results_accumulated) {
14477ec681f3Smrg            drop_from_unaccumulated_query_list(perf_ctx, query);
14487ec681f3Smrg            dec_n_users(perf_ctx);
14497ec681f3Smrg         }
14507ec681f3Smrg
14517ec681f3Smrg         perf_cfg->vtbl.bo_unreference(query->oa.bo);
14527ec681f3Smrg         query->oa.bo = NULL;
14537ec681f3Smrg      }
14547ec681f3Smrg
14557ec681f3Smrg      query->oa.results_accumulated = false;
14567ec681f3Smrg      break;
14577ec681f3Smrg
14587ec681f3Smrg   case INTEL_PERF_QUERY_TYPE_PIPELINE:
14597ec681f3Smrg      if (query->pipeline_stats.bo) {
14607ec681f3Smrg         perf_cfg->vtbl.bo_unreference(query->pipeline_stats.bo);
14617ec681f3Smrg         query->pipeline_stats.bo = NULL;
14627ec681f3Smrg      }
14637ec681f3Smrg      break;
14647ec681f3Smrg
14657ec681f3Smrg   default:
14667ec681f3Smrg      unreachable("Unknown query type");
14677ec681f3Smrg      break;
14687ec681f3Smrg   }
14697ec681f3Smrg
14707ec681f3Smrg   /* Once the last query instance is deleted, the INTEL_performance_query
14717ec681f3Smrg    * extension is no longer in use, so it's a good time to free our cache
14727ec681f3Smrg    * of sample buffers and close any current i915-perf stream.
14737ec681f3Smrg    */
14747ec681f3Smrg   if (--perf_ctx->n_query_instances == 0) {
14757ec681f3Smrg      free_sample_bufs(perf_ctx);
14767ec681f3Smrg      intel_perf_close(perf_ctx, query->queryinfo);
14777ec681f3Smrg   }
14787ec681f3Smrg
14797ec681f3Smrg   free(query);
14807ec681f3Smrg}
14817ec681f3Smrg
14827ec681f3Smrgstatic int
14837ec681f3Smrgget_oa_counter_data(struct intel_perf_context *perf_ctx,
14847ec681f3Smrg                    struct intel_perf_query_object *query,
14857ec681f3Smrg                    size_t data_size,
14867ec681f3Smrg                    uint8_t *data)
14877ec681f3Smrg{
14887ec681f3Smrg   struct intel_perf_config *perf_cfg = perf_ctx->perf;
14897ec681f3Smrg   const struct intel_perf_query_info *queryinfo = query->queryinfo;
14907ec681f3Smrg   int n_counters = queryinfo->n_counters;
14917ec681f3Smrg   int written = 0;
14927ec681f3Smrg
14937ec681f3Smrg   for (int i = 0; i < n_counters; i++) {
14947ec681f3Smrg      const struct intel_perf_query_counter *counter = &queryinfo->counters[i];
14957ec681f3Smrg      uint64_t *out_uint64;
14967ec681f3Smrg      float *out_float;
14977ec681f3Smrg      size_t counter_size = intel_perf_query_counter_get_size(counter);
14987ec681f3Smrg
14997ec681f3Smrg      if (counter_size) {
15007ec681f3Smrg         switch (counter->data_type) {
15017ec681f3Smrg         case INTEL_PERF_COUNTER_DATA_TYPE_UINT64:
15027ec681f3Smrg            out_uint64 = (uint64_t *)(data + counter->offset);
15037ec681f3Smrg            *out_uint64 =
15047ec681f3Smrg               counter->oa_counter_read_uint64(perf_cfg, queryinfo,
15057ec681f3Smrg                                               &query->oa.result);
15067ec681f3Smrg            break;
15077ec681f3Smrg         case INTEL_PERF_COUNTER_DATA_TYPE_FLOAT:
15087ec681f3Smrg            out_float = (float *)(data + counter->offset);
15097ec681f3Smrg            *out_float =
15107ec681f3Smrg               counter->oa_counter_read_float(perf_cfg, queryinfo,
15117ec681f3Smrg                                              &query->oa.result);
15127ec681f3Smrg            break;
15137ec681f3Smrg         default:
15147ec681f3Smrg            /* So far we aren't using uint32, double or bool32... */
15157ec681f3Smrg            unreachable("unexpected counter data type");
15167ec681f3Smrg         }
15177ec681f3Smrg
15187ec681f3Smrg         if (counter->offset + counter_size > written)
15197ec681f3Smrg            written = counter->offset + counter_size;
15207ec681f3Smrg      }
15217ec681f3Smrg   }
15227ec681f3Smrg
15237ec681f3Smrg   return written;
15247ec681f3Smrg}
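
/* Sketch of how a caller could pull one counter back out of the blob filled
 * in by get_oa_counter_data(); the queryinfo pointer, counter index and data
 * buffer are hypothetical, and the cast mirrors the data_type switch above.
 *
 *    const struct intel_perf_query_counter *c = &queryinfo->counters[i];
 *    uint64_t value = 0;
 *
 *    if (c->data_type == INTEL_PERF_COUNTER_DATA_TYPE_UINT64)
 *       value = *(const uint64_t *)(data + c->offset);
 */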
15257ec681f3Smrg
15267ec681f3Smrgstatic int
15277ec681f3Smrgget_pipeline_stats_data(struct intel_perf_context *perf_ctx,
15287ec681f3Smrg                        struct intel_perf_query_object *query,
15297ec681f3Smrg                        size_t data_size,
15307ec681f3Smrg                        uint8_t *data)
15317ec681f3Smrg
15327ec681f3Smrg{
15337ec681f3Smrg   struct intel_perf_config *perf_cfg = perf_ctx->perf;
15347ec681f3Smrg   const struct intel_perf_query_info *queryinfo = query->queryinfo;
15357ec681f3Smrg   int n_counters = queryinfo->n_counters;
15367ec681f3Smrg   uint8_t *p = data;
15377ec681f3Smrg
15387ec681f3Smrg   uint64_t *start = perf_cfg->vtbl.bo_map(perf_ctx->ctx, query->pipeline_stats.bo, MAP_READ);
15397ec681f3Smrg   uint64_t *end = start + (STATS_BO_END_OFFSET_BYTES / sizeof(uint64_t));
15407ec681f3Smrg
15417ec681f3Smrg   for (int i = 0; i < n_counters; i++) {
15427ec681f3Smrg      const struct intel_perf_query_counter *counter = &queryinfo->counters[i];
15437ec681f3Smrg      uint64_t value = end[i] - start[i];
15447ec681f3Smrg
15457ec681f3Smrg      if (counter->pipeline_stat.numerator !=
15467ec681f3Smrg          counter->pipeline_stat.denominator) {
15477ec681f3Smrg         value *= counter->pipeline_stat.numerator;
15487ec681f3Smrg         value /= counter->pipeline_stat.denominator;
15497ec681f3Smrg      }
15507ec681f3Smrg
15517ec681f3Smrg      *((uint64_t *)p) = value;
15527ec681f3Smrg      p += 8;
15537ec681f3Smrg   }
15547ec681f3Smrg
15557ec681f3Smrg   perf_cfg->vtbl.bo_unmap(query->pipeline_stats.bo);
15567ec681f3Smrg
15577ec681f3Smrg   return p - data;
15587ec681f3Smrg}
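
/* The numerator/denominator scaling above lets a raw begin/end delta be
 * reported in different units. As a small worked example with made-up
 * values: a raw delta of 12 with numerator = 1 and denominator = 4 is
 * written out as 12 * 1 / 4 = 3, while counters whose numerator equals
 * their denominator are passed through unchanged.
 */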
15597ec681f3Smrg
15607ec681f3Smrgvoid
15617ec681f3Smrgintel_perf_get_query_data(struct intel_perf_context *perf_ctx,
15627ec681f3Smrg                          struct intel_perf_query_object *query,
15637ec681f3Smrg                          void *current_batch,
15647ec681f3Smrg                          int data_size,
15657ec681f3Smrg                          unsigned *data,
15667ec681f3Smrg                          unsigned *bytes_written)
15677ec681f3Smrg{
15687ec681f3Smrg   struct intel_perf_config *perf_cfg = perf_ctx->perf;
15697ec681f3Smrg   int written = 0;
15707ec681f3Smrg
15717ec681f3Smrg   switch (query->queryinfo->kind) {
15727ec681f3Smrg   case INTEL_PERF_QUERY_TYPE_OA:
15737ec681f3Smrg   case INTEL_PERF_QUERY_TYPE_RAW:
15747ec681f3Smrg      if (!query->oa.results_accumulated) {
15757ec681f3Smrg         /* Due to the i915-perf driver's sampling frequency for the OA buffer,
15767ec681f3Smrg          * there can be a 5ms delay between Mesa seeing the query complete
15777ec681f3Smrg          * and i915 making all the OA buffer reports available to us.
15787ec681f3Smrg          * We need to wait for all the reports to come in before we can do
15797ec681f3Smrg          * the post-processing that removes unrelated deltas.
15807ec681f3Smrg          * There is an i915-perf series to address this issue, but it has
15817ec681f3Smrg          * not been merged upstream yet.
15827ec681f3Smrg          */
15837ec681f3Smrg         while (!read_oa_samples_for_query(perf_ctx, query, current_batch))
15847ec681f3Smrg            ;
15857ec681f3Smrg
15867ec681f3Smrg         uint32_t *begin_report = query->oa.map;
15877ec681f3Smrg         uint32_t *end_report = query->oa.map + perf_cfg->query_layout.size;
15887ec681f3Smrg         intel_perf_query_result_accumulate_fields(&query->oa.result,
15897ec681f3Smrg                                                 query->queryinfo,
15907ec681f3Smrg                                                 perf_ctx->devinfo,
15917ec681f3Smrg                                                 begin_report,
15927ec681f3Smrg                                                 end_report,
15937ec681f3Smrg                                                 true /* no_oa_accumulate */);
15947ec681f3Smrg         accumulate_oa_reports(perf_ctx, query);
15957ec681f3Smrg         assert(query->oa.results_accumulated);
15967ec681f3Smrg
15977ec681f3Smrg         perf_cfg->vtbl.bo_unmap(query->oa.bo);
15987ec681f3Smrg         query->oa.map = NULL;
15997ec681f3Smrg      }
16007ec681f3Smrg      if (query->queryinfo->kind == INTEL_PERF_QUERY_TYPE_OA) {
16017ec681f3Smrg         written = get_oa_counter_data(perf_ctx, query, data_size, (uint8_t *)data);
16027ec681f3Smrg      } else {
16037ec681f3Smrg         const struct intel_device_info *devinfo = perf_ctx->devinfo;
16047ec681f3Smrg
16057ec681f3Smrg         written = intel_perf_query_result_write_mdapi((uint8_t *)data, data_size,
16067ec681f3Smrg                                                     devinfo, query->queryinfo,
16077ec681f3Smrg                                                     &query->oa.result);
16087ec681f3Smrg      }
16097ec681f3Smrg      break;
16107ec681f3Smrg
16117ec681f3Smrg   case INTEL_PERF_QUERY_TYPE_PIPELINE:
16127ec681f3Smrg      written = get_pipeline_stats_data(perf_ctx, query, data_size, (uint8_t *)data);
16137ec681f3Smrg      break;
16147ec681f3Smrg
16157ec681f3Smrg   default:
16167ec681f3Smrg      unreachable("Unknown query type");
16177ec681f3Smrg      break;
16187ec681f3Smrg   }
16197ec681f3Smrg
16207ec681f3Smrg   if (bytes_written)
16217ec681f3Smrg      *bytes_written = written;
16227ec681f3Smrg}
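
/* Putting the pieces together, a hedged end-to-end sketch of driving one
 * query with the entry points in this file. The begin/end calls are assumed
 * counterparts to the wait/ready/get/delete entry points above and are named
 * intel_perf_begin_query()/intel_perf_end_query() here for illustration
 * only; the batch handle and result buffer are hypothetical.
 *
 *    unsigned results[1024];
 *    unsigned written = 0;
 *
 *    intel_perf_begin_query(perf_ctx, query);
 *    (emit the GPU work to be measured)
 *    intel_perf_end_query(perf_ctx, query);
 *
 *    intel_perf_wait_query(perf_ctx, query, batch);
 *    intel_perf_get_query_data(perf_ctx, query, batch,
 *                              sizeof(results), results, &written);
 *    intel_perf_delete_query(perf_ctx, query);
 */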
16237ec681f3Smrg
16247ec681f3Smrgvoid
16257ec681f3Smrgintel_perf_dump_query_count(struct intel_perf_context *perf_ctx)
16267ec681f3Smrg{
16277ec681f3Smrg   DBG("Queries: (Open queries = %d, OA users = %d)\n",
16287ec681f3Smrg       perf_ctx->n_active_oa_queries, perf_ctx->n_oa_users);
16297ec681f3Smrg}
16307ec681f3Smrg
16317ec681f3Smrgvoid
16327ec681f3Smrgintel_perf_dump_query(struct intel_perf_context *ctx,
16337ec681f3Smrg                      struct intel_perf_query_object *obj,
16347ec681f3Smrg                      void *current_batch)
16357ec681f3Smrg{
16367ec681f3Smrg   switch (obj->queryinfo->kind) {
16377ec681f3Smrg   case INTEL_PERF_QUERY_TYPE_OA:
16387ec681f3Smrg   case INTEL_PERF_QUERY_TYPE_RAW:
16397ec681f3Smrg      DBG("BO: %-4s OA data: %-10s %-15s\n",
16407ec681f3Smrg          obj->oa.bo ? "yes," : "no,",
16417ec681f3Smrg          intel_perf_is_query_ready(ctx, obj, current_batch) ? "ready," : "not ready,",
16427ec681f3Smrg          obj->oa.results_accumulated ? "accumulated" : "not accumulated");
16437ec681f3Smrg      break;
16447ec681f3Smrg   case INTEL_PERF_QUERY_TYPE_PIPELINE:
16457ec681f3Smrg      DBG("BO: %-4s\n",
16467ec681f3Smrg          obj->pipeline_stats.bo ? "yes" : "no");
16477ec681f3Smrg      break;
16487ec681f3Smrg   default:
16497ec681f3Smrg      unreachable("Unknown query type");
16507ec681f3Smrg      break;
16517ec681f3Smrg   }
16527ec681f3Smrg}
1653