/*
 * Copyright © 2014-2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
2201e04c3fSmrg */ 2301e04c3fSmrg 2401e04c3fSmrg#ifndef _VC4_DRM_H_ 2501e04c3fSmrg#define _VC4_DRM_H_ 2601e04c3fSmrg 2701e04c3fSmrg#include "drm.h" 2801e04c3fSmrg 2901e04c3fSmrg#if defined(__cplusplus) 3001e04c3fSmrgextern "C" { 3101e04c3fSmrg#endif 3201e04c3fSmrg 3301e04c3fSmrg#define DRM_VC4_SUBMIT_CL 0x00 3401e04c3fSmrg#define DRM_VC4_WAIT_SEQNO 0x01 3501e04c3fSmrg#define DRM_VC4_WAIT_BO 0x02 3601e04c3fSmrg#define DRM_VC4_CREATE_BO 0x03 3701e04c3fSmrg#define DRM_VC4_MMAP_BO 0x04 3801e04c3fSmrg#define DRM_VC4_CREATE_SHADER_BO 0x05 3901e04c3fSmrg#define DRM_VC4_GET_HANG_STATE 0x06 4001e04c3fSmrg#define DRM_VC4_GET_PARAM 0x07 4101e04c3fSmrg#define DRM_VC4_SET_TILING 0x08 4201e04c3fSmrg#define DRM_VC4_GET_TILING 0x09 4301e04c3fSmrg#define DRM_VC4_LABEL_BO 0x0a 4401e04c3fSmrg#define DRM_VC4_GEM_MADVISE 0x0b 4501e04c3fSmrg#define DRM_VC4_PERFMON_CREATE 0x0c 4601e04c3fSmrg#define DRM_VC4_PERFMON_DESTROY 0x0d 4701e04c3fSmrg#define DRM_VC4_PERFMON_GET_VALUES 0x0e 4801e04c3fSmrg 4901e04c3fSmrg#define DRM_IOCTL_VC4_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl) 5001e04c3fSmrg#define DRM_IOCTL_VC4_WAIT_SEQNO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno) 5101e04c3fSmrg#define DRM_IOCTL_VC4_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_BO, struct drm_vc4_wait_bo) 5201e04c3fSmrg#define DRM_IOCTL_VC4_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_BO, struct drm_vc4_create_bo) 5301e04c3fSmrg#define DRM_IOCTL_VC4_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_MMAP_BO, struct drm_vc4_mmap_bo) 5401e04c3fSmrg#define DRM_IOCTL_VC4_CREATE_SHADER_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_SHADER_BO, struct drm_vc4_create_shader_bo) 5501e04c3fSmrg#define DRM_IOCTL_VC4_GET_HANG_STATE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_HANG_STATE, struct drm_vc4_get_hang_state) 5601e04c3fSmrg#define DRM_IOCTL_VC4_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_PARAM, struct drm_vc4_get_param) 5701e04c3fSmrg#define 
DRM_IOCTL_VC4_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SET_TILING, struct drm_vc4_set_tiling) 5801e04c3fSmrg#define DRM_IOCTL_VC4_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_TILING, struct drm_vc4_get_tiling) 5901e04c3fSmrg#define DRM_IOCTL_VC4_LABEL_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_LABEL_BO, struct drm_vc4_label_bo) 6001e04c3fSmrg#define DRM_IOCTL_VC4_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GEM_MADVISE, struct drm_vc4_gem_madvise) 6101e04c3fSmrg#define DRM_IOCTL_VC4_PERFMON_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_PERFMON_CREATE, struct drm_vc4_perfmon_create) 6201e04c3fSmrg#define DRM_IOCTL_VC4_PERFMON_DESTROY DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_PERFMON_DESTROY, struct drm_vc4_perfmon_destroy) 6301e04c3fSmrg#define DRM_IOCTL_VC4_PERFMON_GET_VALUES DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_PERFMON_GET_VALUES, struct drm_vc4_perfmon_get_values) 6401e04c3fSmrg 6501e04c3fSmrgstruct drm_vc4_submit_rcl_surface { 6601e04c3fSmrg __u32 hindex; /* Handle index, or ~0 if not present. */ 6701e04c3fSmrg __u32 offset; /* Offset to start of buffer. */ 6801e04c3fSmrg /* 6901e04c3fSmrg * Bits for either render config (color_write) or load/store packet. 7001e04c3fSmrg * Bits should all be 0 for MSAA load/stores. 7101e04c3fSmrg */ 7201e04c3fSmrg __u16 bits; 7301e04c3fSmrg 7401e04c3fSmrg#define VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES (1 << 0) 7501e04c3fSmrg __u16 flags; 7601e04c3fSmrg}; 7701e04c3fSmrg 7801e04c3fSmrg/** 7901e04c3fSmrg * struct drm_vc4_submit_cl - ioctl argument for submitting commands to the 3D 8001e04c3fSmrg * engine. 8101e04c3fSmrg * 8201e04c3fSmrg * Drivers typically use GPU BOs to store batchbuffers / command lists and 8301e04c3fSmrg * their associated state. However, because the VC4 lacks an MMU, we have to 8401e04c3fSmrg * do validation of memory accesses by the GPU commands. 
If we were to store 8501e04c3fSmrg * our commands in BOs, we'd need to do uncached readback from them to do the 8601e04c3fSmrg * validation process, which is too expensive. Instead, userspace accumulates 8701e04c3fSmrg * commands and associated state in plain memory, then the kernel copies the 8801e04c3fSmrg * data to its own address space, and then validates and stores it in a GPU 8901e04c3fSmrg * BO. 9001e04c3fSmrg */ 9101e04c3fSmrgstruct drm_vc4_submit_cl { 9201e04c3fSmrg /* Pointer to the binner command list. 9301e04c3fSmrg * 9401e04c3fSmrg * This is the first set of commands executed, which runs the 9501e04c3fSmrg * coordinate shader to determine where primitives land on the screen, 9601e04c3fSmrg * then writes out the state updates and draw calls necessary per tile 9701e04c3fSmrg * to the tile allocation BO. 9801e04c3fSmrg */ 9901e04c3fSmrg __u64 bin_cl; 10001e04c3fSmrg 10101e04c3fSmrg /* Pointer to the shader records. 10201e04c3fSmrg * 10301e04c3fSmrg * Shader records are the structures read by the hardware that contain 10401e04c3fSmrg * pointers to uniforms, shaders, and vertex attributes. The 10501e04c3fSmrg * reference to the shader record has enough information to determine 10601e04c3fSmrg * how many pointers are necessary (fixed number for shaders/uniforms, 10701e04c3fSmrg * and an attribute count), so those BO indices into bo_handles are 10801e04c3fSmrg * just stored as __u32s before each shader record passed in. 10901e04c3fSmrg */ 11001e04c3fSmrg __u64 shader_rec; 11101e04c3fSmrg 11201e04c3fSmrg /* Pointer to uniform data and texture handles for the textures 11301e04c3fSmrg * referenced by the shader. 11401e04c3fSmrg * 11501e04c3fSmrg * For each shader state record, there is a set of uniform data in the 11601e04c3fSmrg * order referenced by the record (FS, VS, then CS). 
Each set of 11701e04c3fSmrg * uniform data has a __u32 index into bo_handles per texture 11801e04c3fSmrg * sample operation, in the order the QPU_W_TMUn_S writes appear in 11901e04c3fSmrg * the program. Following the texture BO handle indices is the actual 12001e04c3fSmrg * uniform data. 12101e04c3fSmrg * 12201e04c3fSmrg * The individual uniform state blocks don't have sizes passed in, 12301e04c3fSmrg * because the kernel has to determine the sizes anyway during shader 12401e04c3fSmrg * code validation. 12501e04c3fSmrg */ 12601e04c3fSmrg __u64 uniforms; 12701e04c3fSmrg __u64 bo_handles; 12801e04c3fSmrg 12901e04c3fSmrg /* Size in bytes of the binner command list. */ 13001e04c3fSmrg __u32 bin_cl_size; 13101e04c3fSmrg /* Size in bytes of the set of shader records. */ 13201e04c3fSmrg __u32 shader_rec_size; 13301e04c3fSmrg /* Number of shader records. 13401e04c3fSmrg * 13501e04c3fSmrg * This could just be computed from the contents of shader_records and 13601e04c3fSmrg * the address bits of references to them from the bin CL, but it 13701e04c3fSmrg * keeps the kernel from having to resize some allocations it makes. 13801e04c3fSmrg */ 13901e04c3fSmrg __u32 shader_rec_count; 14001e04c3fSmrg /* Size in bytes of the uniform state. */ 14101e04c3fSmrg __u32 uniforms_size; 14201e04c3fSmrg 14301e04c3fSmrg /* Number of BO handles passed in (size is that times 4). 
*/ 14401e04c3fSmrg __u32 bo_handle_count; 14501e04c3fSmrg 14601e04c3fSmrg /* RCL setup: */ 14701e04c3fSmrg __u16 width; 14801e04c3fSmrg __u16 height; 14901e04c3fSmrg __u8 min_x_tile; 15001e04c3fSmrg __u8 min_y_tile; 15101e04c3fSmrg __u8 max_x_tile; 15201e04c3fSmrg __u8 max_y_tile; 15301e04c3fSmrg struct drm_vc4_submit_rcl_surface color_read; 15401e04c3fSmrg struct drm_vc4_submit_rcl_surface color_write; 15501e04c3fSmrg struct drm_vc4_submit_rcl_surface zs_read; 15601e04c3fSmrg struct drm_vc4_submit_rcl_surface zs_write; 15701e04c3fSmrg struct drm_vc4_submit_rcl_surface msaa_color_write; 15801e04c3fSmrg struct drm_vc4_submit_rcl_surface msaa_zs_write; 15901e04c3fSmrg __u32 clear_color[2]; 16001e04c3fSmrg __u32 clear_z; 16101e04c3fSmrg __u8 clear_s; 16201e04c3fSmrg 16301e04c3fSmrg __u32 pad:24; 16401e04c3fSmrg 16501e04c3fSmrg#define VC4_SUBMIT_CL_USE_CLEAR_COLOR (1 << 0) 16601e04c3fSmrg/* By default, the kernel gets to choose the order that the tiles are 16701e04c3fSmrg * rendered in. If this is set, then the tiles will be rendered in a 16801e04c3fSmrg * raster order, with the right-to-left vs left-to-right and 16901e04c3fSmrg * top-to-bottom vs bottom-to-top dictated by 17001e04c3fSmrg * VC4_SUBMIT_CL_RCL_ORDER_INCREASING_*. This allows overlapping 17101e04c3fSmrg * blits to be implemented using the 3D engine. 17201e04c3fSmrg */ 17301e04c3fSmrg#define VC4_SUBMIT_CL_FIXED_RCL_ORDER (1 << 1) 17401e04c3fSmrg#define VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X (1 << 2) 17501e04c3fSmrg#define VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y (1 << 3) 17601e04c3fSmrg __u32 flags; 17701e04c3fSmrg 17801e04c3fSmrg /* Returned value of the seqno of this render job (for the 17901e04c3fSmrg * wait ioctl). 18001e04c3fSmrg */ 18101e04c3fSmrg __u64 seqno; 18201e04c3fSmrg 18301e04c3fSmrg /* ID of the perfmon to attach to this job. 0 means no perfmon. */ 18401e04c3fSmrg __u32 perfmonid; 18501e04c3fSmrg 18601e04c3fSmrg /* Syncobj handle to wait on. 
If set, processing of this render job 18701e04c3fSmrg * will not start until the syncobj is signaled. 0 means ignore. 18801e04c3fSmrg */ 18901e04c3fSmrg __u32 in_sync; 19001e04c3fSmrg 19101e04c3fSmrg /* Syncobj handle to export fence to. If set, the fence in the syncobj 19201e04c3fSmrg * will be replaced with a fence that signals upon completion of this 19301e04c3fSmrg * render job. 0 means ignore. 19401e04c3fSmrg */ 19501e04c3fSmrg __u32 out_sync; 19601e04c3fSmrg 19701e04c3fSmrg __u32 pad2; 19801e04c3fSmrg}; 19901e04c3fSmrg 20001e04c3fSmrg/** 20101e04c3fSmrg * struct drm_vc4_wait_seqno - ioctl argument for waiting for 20201e04c3fSmrg * DRM_VC4_SUBMIT_CL completion using its returned seqno. 20301e04c3fSmrg * 20401e04c3fSmrg * timeout_ns is the timeout in nanoseconds, where "0" means "don't 20501e04c3fSmrg * block, just return the status." 20601e04c3fSmrg */ 20701e04c3fSmrgstruct drm_vc4_wait_seqno { 20801e04c3fSmrg __u64 seqno; 20901e04c3fSmrg __u64 timeout_ns; 21001e04c3fSmrg}; 21101e04c3fSmrg 21201e04c3fSmrg/** 21301e04c3fSmrg * struct drm_vc4_wait_bo - ioctl argument for waiting for 21401e04c3fSmrg * completion of the last DRM_VC4_SUBMIT_CL on a BO. 21501e04c3fSmrg * 21601e04c3fSmrg * This is useful for cases where multiple processes might be 21701e04c3fSmrg * rendering to a BO and you want to wait for all rendering to be 21801e04c3fSmrg * completed. 21901e04c3fSmrg */ 22001e04c3fSmrgstruct drm_vc4_wait_bo { 22101e04c3fSmrg __u32 handle; 22201e04c3fSmrg __u32 pad; 22301e04c3fSmrg __u64 timeout_ns; 22401e04c3fSmrg}; 22501e04c3fSmrg 22601e04c3fSmrg/** 22701e04c3fSmrg * struct drm_vc4_create_bo - ioctl argument for creating VC4 BOs. 22801e04c3fSmrg * 22901e04c3fSmrg * There are currently no values for the flags argument, but it may be 23001e04c3fSmrg * used in a future extension. 23101e04c3fSmrg */ 23201e04c3fSmrgstruct drm_vc4_create_bo { 23301e04c3fSmrg __u32 size; 23401e04c3fSmrg __u32 flags; 23501e04c3fSmrg /** Returned GEM handle for the BO. 
*/ 23601e04c3fSmrg __u32 handle; 23701e04c3fSmrg __u32 pad; 23801e04c3fSmrg}; 23901e04c3fSmrg 24001e04c3fSmrg/** 24101e04c3fSmrg * struct drm_vc4_mmap_bo - ioctl argument for mapping VC4 BOs. 24201e04c3fSmrg * 24301e04c3fSmrg * This doesn't actually perform an mmap. Instead, it returns the 24401e04c3fSmrg * offset you need to use in an mmap on the DRM device node. This 24501e04c3fSmrg * means that tools like valgrind end up knowing about the mapped 24601e04c3fSmrg * memory. 24701e04c3fSmrg * 24801e04c3fSmrg * There are currently no values for the flags argument, but it may be 24901e04c3fSmrg * used in a future extension. 25001e04c3fSmrg */ 25101e04c3fSmrgstruct drm_vc4_mmap_bo { 25201e04c3fSmrg /** Handle for the object being mapped. */ 25301e04c3fSmrg __u32 handle; 25401e04c3fSmrg __u32 flags; 25501e04c3fSmrg /** offset into the drm node to use for subsequent mmap call. */ 25601e04c3fSmrg __u64 offset; 25701e04c3fSmrg}; 25801e04c3fSmrg 25901e04c3fSmrg/** 26001e04c3fSmrg * struct drm_vc4_create_shader_bo - ioctl argument for creating VC4 26101e04c3fSmrg * shader BOs. 26201e04c3fSmrg * 26301e04c3fSmrg * Since allowing a shader to be overwritten while it's also being 26401e04c3fSmrg * executed from would allow privlege escalation, shaders must be 26501e04c3fSmrg * created using this ioctl, and they can't be mmapped later. 26601e04c3fSmrg */ 26701e04c3fSmrgstruct drm_vc4_create_shader_bo { 26801e04c3fSmrg /* Size of the data argument. */ 26901e04c3fSmrg __u32 size; 27001e04c3fSmrg /* Flags, currently must be 0. */ 27101e04c3fSmrg __u32 flags; 27201e04c3fSmrg 27301e04c3fSmrg /* Pointer to the data. */ 27401e04c3fSmrg __u64 data; 27501e04c3fSmrg 27601e04c3fSmrg /** Returned GEM handle for the BO. */ 27701e04c3fSmrg __u32 handle; 27801e04c3fSmrg /* Pad, must be 0. 
*/ 27901e04c3fSmrg __u32 pad; 28001e04c3fSmrg}; 28101e04c3fSmrg 28201e04c3fSmrgstruct drm_vc4_get_hang_state_bo { 28301e04c3fSmrg __u32 handle; 28401e04c3fSmrg __u32 paddr; 28501e04c3fSmrg __u32 size; 28601e04c3fSmrg __u32 pad; 28701e04c3fSmrg}; 28801e04c3fSmrg 28901e04c3fSmrg/** 29001e04c3fSmrg * struct drm_vc4_hang_state - ioctl argument for collecting state 29101e04c3fSmrg * from a GPU hang for analysis. 29201e04c3fSmrg*/ 29301e04c3fSmrgstruct drm_vc4_get_hang_state { 29401e04c3fSmrg /** Pointer to array of struct drm_vc4_get_hang_state_bo. */ 29501e04c3fSmrg __u64 bo; 29601e04c3fSmrg /** 29701e04c3fSmrg * On input, the size of the bo array. Output is the number 29801e04c3fSmrg * of bos to be returned. 29901e04c3fSmrg */ 30001e04c3fSmrg __u32 bo_count; 30101e04c3fSmrg 30201e04c3fSmrg __u32 start_bin, start_render; 30301e04c3fSmrg 30401e04c3fSmrg __u32 ct0ca, ct0ea; 30501e04c3fSmrg __u32 ct1ca, ct1ea; 30601e04c3fSmrg __u32 ct0cs, ct1cs; 30701e04c3fSmrg __u32 ct0ra0, ct1ra0; 30801e04c3fSmrg 30901e04c3fSmrg __u32 bpca, bpcs; 31001e04c3fSmrg __u32 bpoa, bpos; 31101e04c3fSmrg 31201e04c3fSmrg __u32 vpmbase; 31301e04c3fSmrg 31401e04c3fSmrg __u32 dbge; 31501e04c3fSmrg __u32 fdbgo; 31601e04c3fSmrg __u32 fdbgb; 31701e04c3fSmrg __u32 fdbgr; 31801e04c3fSmrg __u32 fdbgs; 31901e04c3fSmrg __u32 errstat; 32001e04c3fSmrg 32101e04c3fSmrg /* Pad that we may save more registers into in the future. 
*/ 32201e04c3fSmrg __u32 pad[16]; 32301e04c3fSmrg}; 32401e04c3fSmrg 32501e04c3fSmrg#define DRM_VC4_PARAM_V3D_IDENT0 0 32601e04c3fSmrg#define DRM_VC4_PARAM_V3D_IDENT1 1 32701e04c3fSmrg#define DRM_VC4_PARAM_V3D_IDENT2 2 32801e04c3fSmrg#define DRM_VC4_PARAM_SUPPORTS_BRANCHES 3 32901e04c3fSmrg#define DRM_VC4_PARAM_SUPPORTS_ETC1 4 33001e04c3fSmrg#define DRM_VC4_PARAM_SUPPORTS_THREADED_FS 5 33101e04c3fSmrg#define DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER 6 33201e04c3fSmrg#define DRM_VC4_PARAM_SUPPORTS_MADVISE 7 33301e04c3fSmrg#define DRM_VC4_PARAM_SUPPORTS_PERFMON 8 33401e04c3fSmrg 33501e04c3fSmrgstruct drm_vc4_get_param { 33601e04c3fSmrg __u32 param; 33701e04c3fSmrg __u32 pad; 33801e04c3fSmrg __u64 value; 33901e04c3fSmrg}; 34001e04c3fSmrg 34101e04c3fSmrgstruct drm_vc4_get_tiling { 34201e04c3fSmrg __u32 handle; 34301e04c3fSmrg __u32 flags; 34401e04c3fSmrg __u64 modifier; 34501e04c3fSmrg}; 34601e04c3fSmrg 34701e04c3fSmrgstruct drm_vc4_set_tiling { 34801e04c3fSmrg __u32 handle; 34901e04c3fSmrg __u32 flags; 35001e04c3fSmrg __u64 modifier; 35101e04c3fSmrg}; 35201e04c3fSmrg 35301e04c3fSmrg/** 35401e04c3fSmrg * struct drm_vc4_label_bo - Attach a name to a BO for debug purposes. 35501e04c3fSmrg */ 35601e04c3fSmrgstruct drm_vc4_label_bo { 35701e04c3fSmrg __u32 handle; 35801e04c3fSmrg __u32 len; 35901e04c3fSmrg __u64 name; 36001e04c3fSmrg}; 36101e04c3fSmrg 36201e04c3fSmrg/* 36301e04c3fSmrg * States prefixed with '__' are internal states and cannot be passed to the 36401e04c3fSmrg * DRM_IOCTL_VC4_GEM_MADVISE ioctl. 
36501e04c3fSmrg */ 36601e04c3fSmrg#define VC4_MADV_WILLNEED 0 36701e04c3fSmrg#define VC4_MADV_DONTNEED 1 36801e04c3fSmrg#define __VC4_MADV_PURGED 2 36901e04c3fSmrg#define __VC4_MADV_NOTSUPP 3 37001e04c3fSmrg 37101e04c3fSmrgstruct drm_vc4_gem_madvise { 37201e04c3fSmrg __u32 handle; 37301e04c3fSmrg __u32 madv; 37401e04c3fSmrg __u32 retained; 37501e04c3fSmrg __u32 pad; 37601e04c3fSmrg}; 37701e04c3fSmrg 37801e04c3fSmrgenum { 37901e04c3fSmrg VC4_PERFCNT_FEP_VALID_PRIMS_NO_RENDER, 38001e04c3fSmrg VC4_PERFCNT_FEP_VALID_PRIMS_RENDER, 38101e04c3fSmrg VC4_PERFCNT_FEP_CLIPPED_QUADS, 38201e04c3fSmrg VC4_PERFCNT_FEP_VALID_QUADS, 38301e04c3fSmrg VC4_PERFCNT_TLB_QUADS_NOT_PASSING_STENCIL, 38401e04c3fSmrg VC4_PERFCNT_TLB_QUADS_NOT_PASSING_Z_AND_STENCIL, 38501e04c3fSmrg VC4_PERFCNT_TLB_QUADS_PASSING_Z_AND_STENCIL, 38601e04c3fSmrg VC4_PERFCNT_TLB_QUADS_ZERO_COVERAGE, 38701e04c3fSmrg VC4_PERFCNT_TLB_QUADS_NON_ZERO_COVERAGE, 38801e04c3fSmrg VC4_PERFCNT_TLB_QUADS_WRITTEN_TO_COLOR_BUF, 38901e04c3fSmrg VC4_PERFCNT_PLB_PRIMS_OUTSIDE_VIEWPORT, 39001e04c3fSmrg VC4_PERFCNT_PLB_PRIMS_NEED_CLIPPING, 39101e04c3fSmrg VC4_PERFCNT_PSE_PRIMS_REVERSED, 39201e04c3fSmrg VC4_PERFCNT_QPU_TOTAL_IDLE_CYCLES, 39301e04c3fSmrg VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_VERTEX_COORD_SHADING, 39401e04c3fSmrg VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_FRAGMENT_SHADING, 39501e04c3fSmrg VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_EXEC_VALID_INST, 39601e04c3fSmrg VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_WAITING_TMUS, 39701e04c3fSmrg VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_WAITING_SCOREBOARD, 39801e04c3fSmrg VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_WAITING_VARYINGS, 39901e04c3fSmrg VC4_PERFCNT_QPU_TOTAL_INST_CACHE_HIT, 40001e04c3fSmrg VC4_PERFCNT_QPU_TOTAL_INST_CACHE_MISS, 40101e04c3fSmrg VC4_PERFCNT_QPU_TOTAL_UNIFORM_CACHE_HIT, 40201e04c3fSmrg VC4_PERFCNT_QPU_TOTAL_UNIFORM_CACHE_MISS, 40301e04c3fSmrg VC4_PERFCNT_TMU_TOTAL_TEXT_QUADS_PROCESSED, 40401e04c3fSmrg VC4_PERFCNT_TMU_TOTAL_TEXT_CACHE_MISS, 40501e04c3fSmrg VC4_PERFCNT_VPM_TOTAL_CLK_CYCLES_VDW_STALLED, 
40601e04c3fSmrg VC4_PERFCNT_VPM_TOTAL_CLK_CYCLES_VCD_STALLED, 40701e04c3fSmrg VC4_PERFCNT_L2C_TOTAL_L2_CACHE_HIT, 40801e04c3fSmrg VC4_PERFCNT_L2C_TOTAL_L2_CACHE_MISS, 40901e04c3fSmrg VC4_PERFCNT_NUM_EVENTS, 41001e04c3fSmrg}; 41101e04c3fSmrg 41201e04c3fSmrg#define DRM_VC4_MAX_PERF_COUNTERS 16 41301e04c3fSmrg 41401e04c3fSmrgstruct drm_vc4_perfmon_create { 41501e04c3fSmrg __u32 id; 41601e04c3fSmrg __u32 ncounters; 41701e04c3fSmrg __u8 events[DRM_VC4_MAX_PERF_COUNTERS]; 41801e04c3fSmrg}; 41901e04c3fSmrg 42001e04c3fSmrgstruct drm_vc4_perfmon_destroy { 42101e04c3fSmrg __u32 id; 42201e04c3fSmrg}; 42301e04c3fSmrg 42401e04c3fSmrg/* 42501e04c3fSmrg * Returns the values of the performance counters tracked by this 42601e04c3fSmrg * perfmon (as an array of ncounters u64 values). 42701e04c3fSmrg * 42801e04c3fSmrg * No implicit synchronization is performed, so the user has to 42901e04c3fSmrg * guarantee that any jobs using this perfmon have already been 43001e04c3fSmrg * completed (probably by blocking on the seqno returned by the 43101e04c3fSmrg * last exec that used the perfmon). 43201e04c3fSmrg */ 43301e04c3fSmrgstruct drm_vc4_perfmon_get_values { 43401e04c3fSmrg __u32 id; 43501e04c3fSmrg __u64 values_ptr; 43601e04c3fSmrg}; 43701e04c3fSmrg 43801e04c3fSmrg#if defined(__cplusplus) 43901e04c3fSmrg} 44001e04c3fSmrg#endif 44101e04c3fSmrg 44201e04c3fSmrg#endif /* _VC4_DRM_H_ */ 443