17ec681f3Smrg/* 27ec681f3Smrg * Copyright © 2019 Intel Corporation 37ec681f3Smrg * 47ec681f3Smrg * Permission is hereby granted, free of charge, to any person obtaining a 57ec681f3Smrg * copy of this software and associated documentation files (the "Software"), 67ec681f3Smrg * to deal in the Software without restriction, including without limitation 77ec681f3Smrg * the rights to use, copy, modify, merge, publish, distribute, sublicense, 87ec681f3Smrg * and/or sell copies of the Software, and to permit persons to whom the 97ec681f3Smrg * Software is furnished to do so, subject to the following conditions: 107ec681f3Smrg * 117ec681f3Smrg * The above copyright notice and this permission notice shall be included 127ec681f3Smrg * in all copies or substantial portions of the Software. 137ec681f3Smrg * 147ec681f3Smrg * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 157ec681f3Smrg * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 167ec681f3Smrg * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 177ec681f3Smrg * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 187ec681f3Smrg * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 197ec681f3Smrg * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 207ec681f3Smrg * DEALINGS IN THE SOFTWARE. 217ec681f3Smrg */ 227ec681f3Smrg 237ec681f3Smrg/** 247ec681f3Smrg * @file iris_measure.c 257ec681f3Smrg */ 267ec681f3Smrg 277ec681f3Smrg#include <stdio.h> 287ec681f3Smrg#include "util/debug.h" 297ec681f3Smrg#include "util/list.h" 307ec681f3Smrg#include "util/crc32.h" 317ec681f3Smrg#include "iris_context.h" 327ec681f3Smrg#include "iris_defines.h" 337ec681f3Smrg 347ec681f3Smrgvoid 357ec681f3Smrgiris_init_screen_measure(struct iris_screen *screen) 367ec681f3Smrg{ 377ec681f3Smrg struct intel_measure_device *measure_device = &screen->measure; 387ec681f3Smrg 397ec681f3Smrg memset(measure_device, 0, sizeof(*measure_device)); 407ec681f3Smrg intel_measure_init(measure_device); 417ec681f3Smrg struct intel_measure_config *config = measure_device->config; 427ec681f3Smrg if (config == NULL) 437ec681f3Smrg return; 447ec681f3Smrg 457ec681f3Smrg /* the final member of intel_measure_ringbuffer is a zero-length array of 467ec681f3Smrg * intel_measure_buffered_result objects. Allocate additional space for 477ec681f3Smrg * the buffered objects based on the run-time configurable buffer_size 487ec681f3Smrg */ 497ec681f3Smrg const size_t rb_bytes = sizeof(struct intel_measure_ringbuffer) + 507ec681f3Smrg config->buffer_size * sizeof(struct intel_measure_buffered_result); 517ec681f3Smrg struct intel_measure_ringbuffer *rb = rzalloc_size(screen, rb_bytes); 527ec681f3Smrg measure_device->ringbuffer = rb; 537ec681f3Smrg} 547ec681f3Smrg 557ec681f3Smrgstatic struct intel_measure_config * 567ec681f3Smrgconfig_from_screen(struct iris_screen *screen) 577ec681f3Smrg{ 587ec681f3Smrg return screen->measure.config; 597ec681f3Smrg} 607ec681f3Smrg 617ec681f3Smrgstatic struct intel_measure_config * 627ec681f3Smrgconfig_from_context(struct iris_context *ice) 637ec681f3Smrg{ 647ec681f3Smrg return ((struct iris_screen *) ice->ctx.screen)->measure.config; 657ec681f3Smrg} 667ec681f3Smrg 677ec681f3Smrgvoid 687ec681f3Smrgiris_destroy_screen_measure(struct iris_screen *screen) 697ec681f3Smrg{ 707ec681f3Smrg if (!config_from_screen(screen)) 717ec681f3Smrg return; 727ec681f3Smrg 737ec681f3Smrg struct intel_measure_device *measure_device = &screen->measure; 747ec681f3Smrg 757ec681f3Smrg if (measure_device->config->file && 767ec681f3Smrg measure_device->config->file != stderr) 777ec681f3Smrg fclose(screen->measure.config->file); 787ec681f3Smrg 797ec681f3Smrg ralloc_free(measure_device->ringbuffer); 807ec681f3Smrg measure_device->ringbuffer = NULL; 817ec681f3Smrg} 827ec681f3Smrg 837ec681f3Smrg 847ec681f3Smrgvoid 857ec681f3Smrgiris_init_batch_measure(struct iris_context *ice, struct iris_batch *batch) 867ec681f3Smrg{ 877ec681f3Smrg const struct intel_measure_config *config = config_from_context(ice); 887ec681f3Smrg struct iris_screen *screen = batch->screen; 897ec681f3Smrg struct iris_bufmgr *bufmgr = screen->bufmgr; 907ec681f3Smrg 917ec681f3Smrg if (!config) 927ec681f3Smrg return; 937ec681f3Smrg 947ec681f3Smrg /* the final member of iris_measure_batch is a zero-length array of 957ec681f3Smrg * intel_measure_snapshot objects. Create additional space for the 967ec681f3Smrg * snapshot objects based on the run-time configurable batch_size 977ec681f3Smrg */ 987ec681f3Smrg const size_t batch_bytes = sizeof(struct iris_measure_batch) + 997ec681f3Smrg config->batch_size * sizeof(struct intel_measure_snapshot); 1007ec681f3Smrg assert(batch->measure == NULL); 1017ec681f3Smrg batch->measure = malloc(batch_bytes); 1027ec681f3Smrg memset(batch->measure, 0, batch_bytes); 1037ec681f3Smrg struct iris_measure_batch *measure = batch->measure; 1047ec681f3Smrg 1057ec681f3Smrg measure->bo = iris_bo_alloc(bufmgr, "measure", 1067ec681f3Smrg config->batch_size * sizeof(uint64_t), 1, 1077ec681f3Smrg IRIS_MEMZONE_OTHER, BO_ALLOC_ZEROED); 1087ec681f3Smrg measure->base.timestamps = iris_bo_map(NULL, measure->bo, MAP_READ); 1097ec681f3Smrg measure->base.framebuffer = 1107ec681f3Smrg (uintptr_t)util_hash_crc32(&ice->state.framebuffer, 1117ec681f3Smrg sizeof(ice->state.framebuffer)); 1127ec681f3Smrg} 1137ec681f3Smrg 1147ec681f3Smrgvoid 1157ec681f3Smrgiris_destroy_batch_measure(struct iris_measure_batch *batch) 1167ec681f3Smrg{ 1177ec681f3Smrg if (!batch) 1187ec681f3Smrg return; 1197ec681f3Smrg iris_bo_unmap(batch->bo); 1207ec681f3Smrg iris_bo_unreference(batch->bo); 1217ec681f3Smrg batch->bo = NULL; 1227ec681f3Smrg free(batch); 1237ec681f3Smrg} 1247ec681f3Smrg 1257ec681f3Smrgstatic void 1267ec681f3Smrgmeasure_start_snapshot(struct iris_context *ice, 1277ec681f3Smrg struct iris_batch *batch, 1287ec681f3Smrg enum intel_measure_snapshot_type type, 1297ec681f3Smrg const char *event_name, 1307ec681f3Smrg uint32_t count) 1317ec681f3Smrg{ 1327ec681f3Smrg struct intel_measure_batch *measure_batch = &batch->measure->base; 1337ec681f3Smrg const struct intel_measure_config *config = config_from_context(ice); 1347ec681f3Smrg const struct iris_screen *screen = (void *) ice->ctx.screen; 1357ec681f3Smrg const unsigned screen_frame = screen->measure.frame; 1367ec681f3Smrg 1377ec681f3Smrg /* if the command buffer is not associated with a frame, associate it with 1387ec681f3Smrg * the most recent acquired frame 1397ec681f3Smrg */ 1407ec681f3Smrg if (measure_batch->frame == 0) 1417ec681f3Smrg measure_batch->frame = screen_frame; 1427ec681f3Smrg 1437ec681f3Smrg uintptr_t framebuffer = measure_batch->framebuffer; 1447ec681f3Smrg 1457ec681f3Smrg if (measure_batch->index == config->batch_size) { 1467ec681f3Smrg /* Snapshot buffer is full. The batch must be flushed before additional 1477ec681f3Smrg * snapshots can be taken. 1487ec681f3Smrg */ 1497ec681f3Smrg static bool warned = false; 1507ec681f3Smrg if (unlikely(!warned)) { 1517ec681f3Smrg fprintf(config->file, 1527ec681f3Smrg "WARNING: batch size exceeds INTEL_MEASURE limit: %d. " 1537ec681f3Smrg "Data has been dropped. " 1547ec681f3Smrg "Increase setting with INTEL_MEASURE=batch_size={count}\n", 1557ec681f3Smrg config->batch_size); 1567ec681f3Smrg warned = true; 1577ec681f3Smrg } 1587ec681f3Smrg return; 1597ec681f3Smrg } 1607ec681f3Smrg 1617ec681f3Smrg unsigned index = measure_batch->index++; 1627ec681f3Smrg assert(index < config->batch_size); 1637ec681f3Smrg iris_emit_pipe_control_write(batch, "measurement snapshot", 1647ec681f3Smrg PIPE_CONTROL_WRITE_TIMESTAMP | 1657ec681f3Smrg PIPE_CONTROL_CS_STALL, 1667ec681f3Smrg batch->measure->bo, index * sizeof(uint64_t), 0ull); 1677ec681f3Smrg if (event_name == NULL) 1687ec681f3Smrg event_name = intel_measure_snapshot_string(type); 1697ec681f3Smrg 1707ec681f3Smrg struct intel_measure_snapshot *snapshot = &(measure_batch->snapshots[index]); 1717ec681f3Smrg memset(snapshot, 0, sizeof(*snapshot)); 1727ec681f3Smrg snapshot->type = type; 1737ec681f3Smrg snapshot->count = (unsigned) count; 1747ec681f3Smrg snapshot->event_count = measure_batch->event_count; 1757ec681f3Smrg snapshot->event_name = event_name; 1767ec681f3Smrg snapshot->framebuffer = framebuffer; 1777ec681f3Smrg 1787ec681f3Smrg if (type == INTEL_SNAPSHOT_COMPUTE) { 1797ec681f3Smrg snapshot->cs = (uintptr_t) ice->shaders.prog[MESA_SHADER_COMPUTE]; 1807ec681f3Smrg } else { 1817ec681f3Smrg snapshot->vs = (uintptr_t) ice->shaders.prog[MESA_SHADER_VERTEX]; 1827ec681f3Smrg snapshot->tcs = (uintptr_t) ice->shaders.prog[MESA_SHADER_TESS_CTRL]; 1837ec681f3Smrg snapshot->tes = (uintptr_t) ice->shaders.prog[MESA_SHADER_TESS_EVAL]; 1847ec681f3Smrg snapshot->gs = (uintptr_t) ice->shaders.prog[MESA_SHADER_GEOMETRY]; 1857ec681f3Smrg snapshot->fs = (uintptr_t) ice->shaders.prog[MESA_SHADER_FRAGMENT]; 1867ec681f3Smrg } 1877ec681f3Smrg} 1887ec681f3Smrg 1897ec681f3Smrgstatic void 1907ec681f3Smrgmeasure_end_snapshot(struct iris_batch *batch, 1917ec681f3Smrg uint32_t event_count) 1927ec681f3Smrg{ 1937ec681f3Smrg struct intel_measure_batch *measure_batch = &batch->measure->base; 1947ec681f3Smrg 1957ec681f3Smrg unsigned index = measure_batch->index++; 1967ec681f3Smrg assert(index % 2 == 1); 1977ec681f3Smrg 1987ec681f3Smrg iris_emit_pipe_control_write(batch, "measurement snapshot", 1997ec681f3Smrg PIPE_CONTROL_WRITE_TIMESTAMP | 2007ec681f3Smrg PIPE_CONTROL_CS_STALL, 2017ec681f3Smrg batch->measure->bo, 2027ec681f3Smrg index * sizeof(uint64_t), 0ull); 2037ec681f3Smrg 2047ec681f3Smrg struct intel_measure_snapshot *snapshot = &(measure_batch->snapshots[index]); 2057ec681f3Smrg memset(snapshot, 0, sizeof(*snapshot)); 2067ec681f3Smrg snapshot->type = INTEL_SNAPSHOT_END; 2077ec681f3Smrg snapshot->event_count = event_count; 2087ec681f3Smrg} 2097ec681f3Smrg 2107ec681f3Smrgstatic bool 2117ec681f3Smrgstate_changed(const struct iris_context *ice, 2127ec681f3Smrg const struct iris_batch *batch, 2137ec681f3Smrg enum intel_measure_snapshot_type type) 2147ec681f3Smrg{ 2157ec681f3Smrg uintptr_t vs=0, tcs=0, tes=0, gs=0, fs=0, cs=0; 2167ec681f3Smrg 2177ec681f3Smrg if (type == INTEL_SNAPSHOT_COMPUTE) { 2187ec681f3Smrg cs = (uintptr_t) ice->shaders.prog[MESA_SHADER_COMPUTE]; 2197ec681f3Smrg } else if (type == INTEL_SNAPSHOT_DRAW) { 2207ec681f3Smrg vs = (uintptr_t) ice->shaders.prog[MESA_SHADER_VERTEX]; 2217ec681f3Smrg tcs = (uintptr_t) ice->shaders.prog[MESA_SHADER_TESS_CTRL]; 2227ec681f3Smrg tes = (uintptr_t) ice->shaders.prog[MESA_SHADER_TESS_EVAL]; 2237ec681f3Smrg gs = (uintptr_t) ice->shaders.prog[MESA_SHADER_GEOMETRY]; 2247ec681f3Smrg fs = (uintptr_t) ice->shaders.prog[MESA_SHADER_FRAGMENT]; 2257ec681f3Smrg } 2267ec681f3Smrg /* else blorp, all programs NULL */ 2277ec681f3Smrg 2287ec681f3Smrg return intel_measure_state_changed(&batch->measure->base, 2297ec681f3Smrg vs, tcs, tes, gs, fs, cs); 2307ec681f3Smrg} 2317ec681f3Smrg 2327ec681f3Smrgstatic void 2337ec681f3Smrgiris_measure_renderpass(struct iris_context *ice) 2347ec681f3Smrg{ 2357ec681f3Smrg const struct intel_measure_config *config = config_from_context(ice); 2367ec681f3Smrg struct intel_measure_batch *batch = 2377ec681f3Smrg &ice->batches[IRIS_BATCH_RENDER].measure->base; 2387ec681f3Smrg 2397ec681f3Smrg if (!config) 2407ec681f3Smrg return; 2417ec681f3Smrg uint32_t framebuffer_crc = util_hash_crc32(&ice->state.framebuffer, 2427ec681f3Smrg sizeof(ice->state.framebuffer)); 2437ec681f3Smrg if (framebuffer_crc == batch->framebuffer) 2447ec681f3Smrg return; 2457ec681f3Smrg bool filtering = config->flags & INTEL_MEASURE_RENDERPASS; 2467ec681f3Smrg if (filtering && batch->index % 2 == 1) { 2477ec681f3Smrg /* snapshot for previous renderpass was not ended */ 2487ec681f3Smrg measure_end_snapshot(&ice->batches[IRIS_BATCH_RENDER], 2497ec681f3Smrg batch->event_count); 2507ec681f3Smrg batch->event_count = 0; 2517ec681f3Smrg } 2527ec681f3Smrg 2537ec681f3Smrg batch->framebuffer = framebuffer_crc; 2547ec681f3Smrg} 2557ec681f3Smrg 2567ec681f3Smrgvoid 2577ec681f3Smrg_iris_measure_snapshot(struct iris_context *ice, 2587ec681f3Smrg struct iris_batch *batch, 2597ec681f3Smrg enum intel_measure_snapshot_type type, 2607ec681f3Smrg const struct pipe_draw_info *draw, 2617ec681f3Smrg const struct pipe_draw_indirect_info *indirect, 2627ec681f3Smrg const struct pipe_draw_start_count_bias *sc) 2637ec681f3Smrg{ 2647ec681f3Smrg 2657ec681f3Smrg const struct intel_measure_config *config = config_from_context(ice); 2667ec681f3Smrg struct intel_measure_batch* measure_batch = &batch->measure->base; 2677ec681f3Smrg 2687ec681f3Smrg assert(config); 2697ec681f3Smrg if (!config->enabled) 2707ec681f3Smrg return; 2717ec681f3Smrg if (measure_batch == NULL) 2727ec681f3Smrg return; 2737ec681f3Smrg 2747ec681f3Smrg assert(type != INTEL_SNAPSHOT_END); 2757ec681f3Smrg iris_measure_renderpass(ice); 2767ec681f3Smrg 2777ec681f3Smrg if (!state_changed(ice, batch, type)) { 2787ec681f3Smrg /* filter out this event */ 2797ec681f3Smrg return; 2807ec681f3Smrg } 2817ec681f3Smrg 2827ec681f3Smrg /* increment event count */ 2837ec681f3Smrg ++measure_batch->event_count; 2847ec681f3Smrg if (measure_batch->event_count == 1 || 2857ec681f3Smrg measure_batch->event_count == config->event_interval + 1) { 2867ec681f3Smrg /* the first event of an interval */ 2877ec681f3Smrg if (measure_batch->index % 2) { 2887ec681f3Smrg /* end the previous event */ 2897ec681f3Smrg measure_end_snapshot(batch, measure_batch->event_count - 1); 2907ec681f3Smrg } 2917ec681f3Smrg measure_batch->event_count = 1; 2927ec681f3Smrg 2937ec681f3Smrg const char *event_name = NULL; 2947ec681f3Smrg int count = 0; 2957ec681f3Smrg if (sc) 2967ec681f3Smrg count = sc->count; 2977ec681f3Smrg 2987ec681f3Smrg if (draw != NULL) { 2997ec681f3Smrg const struct shader_info *fs_info = 3007ec681f3Smrg iris_get_shader_info(ice, MESA_SHADER_FRAGMENT); 3017ec681f3Smrg if (fs_info && fs_info->name && strncmp(fs_info->name, "st/", 2) == 0) { 3027ec681f3Smrg event_name = fs_info->name; 3037ec681f3Smrg } else if (indirect) { 3047ec681f3Smrg event_name = "DrawIndirect"; 3057ec681f3Smrg if (indirect->count_from_stream_output) { 3067ec681f3Smrg event_name = "DrawTransformFeedback"; 3077ec681f3Smrg } 3087ec681f3Smrg } 3097ec681f3Smrg else if (draw->index_size) 3107ec681f3Smrg event_name = "DrawElements"; 3117ec681f3Smrg else 3127ec681f3Smrg event_name = "DrawArrays"; 3137ec681f3Smrg count = count * (draw->instance_count ? draw->instance_count : 1); 3147ec681f3Smrg } 3157ec681f3Smrg 3167ec681f3Smrg measure_start_snapshot(ice, batch, type, event_name, count); 3177ec681f3Smrg return; 3187ec681f3Smrg } 3197ec681f3Smrg} 3207ec681f3Smrg 3217ec681f3Smrgvoid 3227ec681f3Smrgiris_destroy_ctx_measure(struct iris_context *ice) 3237ec681f3Smrg{ 3247ec681f3Smrg /* All outstanding snapshots must be collected before the context is 3257ec681f3Smrg * destroyed. 3267ec681f3Smrg */ 3277ec681f3Smrg struct iris_screen *screen = (struct iris_screen *) ice->ctx.screen; 3287ec681f3Smrg intel_measure_gather(&screen->measure, &screen->devinfo); 3297ec681f3Smrg} 3307ec681f3Smrg 3317ec681f3Smrgvoid 3327ec681f3Smrgiris_measure_batch_end(struct iris_context *ice, struct iris_batch *batch) 3337ec681f3Smrg{ 3347ec681f3Smrg const struct intel_measure_config *config = config_from_context(ice); 3357ec681f3Smrg struct iris_screen *screen = (struct iris_screen *) ice->ctx.screen; 3367ec681f3Smrg struct iris_measure_batch *iris_measure_batch = batch->measure; 3377ec681f3Smrg struct intel_measure_batch *measure_batch = &iris_measure_batch->base; 3387ec681f3Smrg struct intel_measure_device *measure_device = &screen->measure; 3397ec681f3Smrg 3407ec681f3Smrg if (!config) 3417ec681f3Smrg return; 3427ec681f3Smrg if (!config->enabled) 3437ec681f3Smrg return; 3447ec681f3Smrg 3457ec681f3Smrg assert(measure_batch); 3467ec681f3Smrg assert(measure_device); 3477ec681f3Smrg 3487ec681f3Smrg static unsigned batch_count = 0; 3497ec681f3Smrg measure_batch->batch_count = p_atomic_inc_return(&batch_count); 3507ec681f3Smrg 3517ec681f3Smrg if (measure_batch->index % 2) { 3527ec681f3Smrg /* We hit the end of the batch, but never terminated our section of 3537ec681f3Smrg * drawing with the same render target or shaders. End it now. 3547ec681f3Smrg */ 3557ec681f3Smrg measure_end_snapshot(batch, measure_batch->event_count); 3567ec681f3Smrg } 3577ec681f3Smrg 3587ec681f3Smrg if (measure_batch->index == 0) 3597ec681f3Smrg return; 3607ec681f3Smrg 3617ec681f3Smrg /* enqueue snapshot for gathering */ 3627ec681f3Smrg pthread_mutex_lock(&measure_device->mutex); 3637ec681f3Smrg list_addtail(&iris_measure_batch->base.link, &measure_device->queued_snapshots); 3647ec681f3Smrg batch->measure = NULL; 3657ec681f3Smrg pthread_mutex_unlock(&measure_device->mutex); 3667ec681f3Smrg /* init new measure_batch */ 3677ec681f3Smrg iris_init_batch_measure(ice, batch); 3687ec681f3Smrg 3697ec681f3Smrg static int interval = 0; 3707ec681f3Smrg if (++interval > 10) { 3717ec681f3Smrg intel_measure_gather(measure_device, &screen->devinfo); 3727ec681f3Smrg interval = 0; 3737ec681f3Smrg } 3747ec681f3Smrg} 3757ec681f3Smrg 3767ec681f3Smrgvoid 3777ec681f3Smrgiris_measure_frame_end(struct iris_context *ice) 3787ec681f3Smrg{ 3797ec681f3Smrg struct iris_screen *screen = (struct iris_screen *) ice->ctx.screen; 3807ec681f3Smrg struct intel_measure_device *measure_device = &screen->measure; 3817ec681f3Smrg const struct intel_measure_config *config = measure_device->config; 3827ec681f3Smrg 3837ec681f3Smrg if (!config) 3847ec681f3Smrg return; 3857ec681f3Smrg 3867ec681f3Smrg /* increment frame counter */ 3877ec681f3Smrg intel_measure_frame_transition(p_atomic_inc_return(&measure_device->frame)); 3887ec681f3Smrg 3897ec681f3Smrg intel_measure_gather(measure_device, &screen->devinfo); 3907ec681f3Smrg} 391