      1 /*	$NetBSD: i915_drm.h,v 1.2 2021/12/18 23:45:46 riastradh Exp $	*/
      2 
      3 /*
      4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
      5  * All Rights Reserved.
      6  *
      7  * Permission is hereby granted, free of charge, to any person obtaining a
      8  * copy of this software and associated documentation files (the
      9  * "Software"), to deal in the Software without restriction, including
     10  * without limitation the rights to use, copy, modify, merge, publish,
     11  * distribute, sub license, and/or sell copies of the Software, and to
     12  * permit persons to whom the Software is furnished to do so, subject to
     13  * the following conditions:
     14  *
     15  * The above copyright notice and this permission notice (including the
     16  * next paragraph) shall be included in all copies or substantial portions
     17  * of the Software.
     18  *
     19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
     20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
     21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
     22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
     23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
     24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
     25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
     26  *
     27  */
     28 
     29 #ifndef _UAPI_I915_DRM_H_
     30 #define _UAPI_I915_DRM_H_
     31 
     32 #include "drm.h"
     33 
     34 #if defined(__cplusplus)
     35 extern "C" {
     36 #endif
     37 
     38 /* Please note that modifications to all structs defined here are
     39  * subject to backwards-compatibility constraints.
     40  */
     41 
     42 /**
      43  * DOC: uevents generated by i915 on its device node
     44  *
     45  * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
     46  *	event from the gpu l3 cache. Additional information supplied is ROW,
     47  *	BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
     48  *	track of these events and if a specific cache-line seems to have a
      49  *	persistent error, remap it with the l3 remapping tool supplied in
     50  *	intel-gpu-tools.  The value supplied with the event is always 1.
     51  *
     52  * I915_ERROR_UEVENT - Generated upon error detection, currently only via
     53  *	hangcheck. The error detection event is a good indicator of when things
     54  *	began to go badly. The value supplied with the event is a 1 upon error
     55  *	detection, and a 0 upon reset completion, signifying no more error
     56  *	exists. NOTE: Disabling hangcheck or reset via module parameter will
     57  *	cause the related events to not be seen.
     58  *
     59  * I915_RESET_UEVENT - Event is generated just before an attempt to reset the
      60  *	GPU. The value supplied with the event is always 1. NOTE: Disabling
      61  *	reset via module parameter will cause this event to not be seen.
     62  */
     63 #define I915_L3_PARITY_UEVENT		"L3_PARITY_ERROR"
     64 #define I915_ERROR_UEVENT		"ERROR"
     65 #define I915_RESET_UEVENT		"RESET"
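
/*
 * A minimal sketch (not part of the uAPI) of listening for the uevents
 * above with libudev; the "drm" subsystem filter and the blocking loop
 * are assumptions, and in practice one would poll() on
 * udev_monitor_get_fd() before receiving.
 */
#if 0
#include <libudev.h>

static void example_watch_i915_uevents(void)
{
	struct udev *udev = udev_new();
	struct udev_monitor *mon =
	    udev_monitor_new_from_netlink(udev, "kernel");

	udev_monitor_filter_add_match_subsystem_devtype(mon, "drm", NULL);
	udev_monitor_enable_receiving(mon);
	for (;;) {
		struct udev_device *dev = udev_monitor_receive_device(mon);
		if (dev == NULL)
			continue;	/* poll() first in real code */
		/* "1" on error detection, "0" on reset completion */
		if (udev_device_get_property_value(dev, I915_ERROR_UEVENT))
			/* handle the error event */;
		udev_device_unref(dev);
	}
}
#endif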
     66 
     67 /*
     68  * i915_user_extension: Base class for defining a chain of extensions
     69  *
     70  * Many interfaces need to grow over time. In most cases we can simply
     71  * extend the struct and have userspace pass in more data. Another option,
     72  * as demonstrated by Vulkan's approach to providing extensions for forward
     73  * and backward compatibility, is to use a list of optional structs to
     74  * provide those extra details.
     75  *
     76  * The key advantage to using an extension chain is that it allows us to
     77  * redefine the interface more easily than an ever growing struct of
     78  * increasing complexity, and for large parts of that interface to be
     79  * entirely optional. The downside is more pointer chasing; chasing across
     80  * the __user boundary with pointers encapsulated inside u64.
     81  */
     82 struct i915_user_extension {
     83 	__u64 next_extension;
     84 	__u32 name;
     85 	__u32 flags; /* All undefined bits must be zero. */
     86 	__u32 rsvd[4]; /* Reserved for future use; must be zero. */
     87 };
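
/*
 * A minimal sketch of chaining two extensions; the example_ext wrapper
 * and the extension names are hypothetical, not defined by this uAPI.
 */
#if 0
#include <stdint.h>

struct example_ext {
	struct i915_user_extension base;
	__u64 value;
};

static void example_chain(struct example_ext *first,
			  struct example_ext *second)
{
	/* Pointers cross the __user boundary encapsulated inside u64. */
	second->base.next_extension = 0;	/* end of chain */
	first->base.next_extension = (__u64)(uintptr_t)second;
	/* An ioctl taking extensions would then be handed
	 * (__u64)(uintptr_t)first in its extensions field. */
}
#endif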
     88 
     89 /*
     90  * MOCS indexes used for GPU surfaces, defining the cacheability of the
     91  * surface data and the coherency for this data wrt. CPU vs. GPU accesses.
     92  */
     93 enum i915_mocs_table_index {
     94 	/*
     95 	 * Not cached anywhere, coherency between CPU and GPU accesses is
     96 	 * guaranteed.
     97 	 */
     98 	I915_MOCS_UNCACHED,
     99 	/*
    100 	 * Cacheability and coherency controlled by the kernel automatically
    101 	 * based on the DRM_I915_GEM_SET_CACHING IOCTL setting and the current
    102 	 * usage of the surface (used for display scanout or not).
    103 	 */
    104 	I915_MOCS_PTE,
    105 	/*
    106 	 * Cached in all GPU caches available on the platform.
    107 	 * Coherency between CPU and GPU accesses to the surface is not
    108 	 * guaranteed without extra synchronization.
    109 	 */
    110 	I915_MOCS_CACHED,
    111 };
    112 
    113 /*
    114  * Different engines serve different roles, and there may be more than one
    115  * engine serving each role. enum drm_i915_gem_engine_class provides a
    116  * classification of the role of the engine, which may be used when requesting
    117  * operations to be performed on a certain subset of engines, or for providing
    118  * information about that group.
    119  */
    120 enum drm_i915_gem_engine_class {
    121 	I915_ENGINE_CLASS_RENDER	= 0,
    122 	I915_ENGINE_CLASS_COPY		= 1,
    123 	I915_ENGINE_CLASS_VIDEO		= 2,
    124 	I915_ENGINE_CLASS_VIDEO_ENHANCE	= 3,
    125 
    126 	/* should be kept compact */
    127 
    128 	I915_ENGINE_CLASS_INVALID	= -1
    129 };
    130 
    131 /*
    132  * There may be more than one engine fulfilling any role within the system.
    133  * Each engine of a class is given a unique instance number and therefore
     134  * any engine can be specified by its class:instance tuple. APIs that allow
    135  * access to any engine in the system will use struct i915_engine_class_instance
    136  * for this identification.
    137  */
    138 struct i915_engine_class_instance {
    139 	__u16 engine_class; /* see enum drm_i915_gem_engine_class */
    140 	__u16 engine_instance;
    141 #define I915_ENGINE_CLASS_INVALID_NONE -1
    142 #define I915_ENGINE_CLASS_INVALID_VIRTUAL -2
    143 };
    144 
    145 /**
    146  * DOC: perf_events exposed by i915 through /sys/bus/event_sources/drivers/i915
    147  *
    148  */
    149 
    150 enum drm_i915_pmu_engine_sample {
    151 	I915_SAMPLE_BUSY = 0,
    152 	I915_SAMPLE_WAIT = 1,
    153 	I915_SAMPLE_SEMA = 2
    154 };
    155 
    156 #define I915_PMU_SAMPLE_BITS (4)
    157 #define I915_PMU_SAMPLE_MASK (0xf)
    158 #define I915_PMU_SAMPLE_INSTANCE_BITS (8)
    159 #define I915_PMU_CLASS_SHIFT \
    160 	(I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS)
    161 
    162 #define __I915_PMU_ENGINE(class, instance, sample) \
    163 	((class) << I915_PMU_CLASS_SHIFT | \
    164 	(instance) << I915_PMU_SAMPLE_BITS | \
    165 	(sample))
    166 
    167 #define I915_PMU_ENGINE_BUSY(class, instance) \
    168 	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY)
    169 
    170 #define I915_PMU_ENGINE_WAIT(class, instance) \
    171 	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT)
    172 
    173 #define I915_PMU_ENGINE_SEMA(class, instance) \
    174 	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)
    175 
    176 #define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x))
    177 
    178 #define I915_PMU_ACTUAL_FREQUENCY	__I915_PMU_OTHER(0)
    179 #define I915_PMU_REQUESTED_FREQUENCY	__I915_PMU_OTHER(1)
    180 #define I915_PMU_INTERRUPTS		__I915_PMU_OTHER(2)
    181 #define I915_PMU_RC6_RESIDENCY		__I915_PMU_OTHER(3)
    182 
    183 #define I915_PMU_LAST I915_PMU_RC6_RESIDENCY
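
/*
 * Sketch: the config values one would place in a struct perf_event_attr
 * to sample render engine busyness and the actual frequency
 * (illustrative only; the perf_event_open() plumbing is omitted).
 */
#if 0
__u64 busy_config = I915_PMU_ENGINE_BUSY(I915_ENGINE_CLASS_RENDER, 0);
__u64 freq_config = I915_PMU_ACTUAL_FREQUENCY;
#endif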
    184 
    185 /* Each region is a minimum of 16k, and there are at most 255 of them.
    186  */
    187 #define I915_NR_TEX_REGIONS 255	/* table size 2k - maximum due to use
    188 				 * of chars for next/prev indices */
    189 #define I915_LOG_MIN_TEX_REGION_SIZE 14
    190 
    191 typedef struct _drm_i915_init {
    192 	enum {
    193 		I915_INIT_DMA = 0x01,
    194 		I915_CLEANUP_DMA = 0x02,
    195 		I915_RESUME_DMA = 0x03
    196 	} func;
    197 	unsigned int mmio_offset;
    198 	int sarea_priv_offset;
    199 	unsigned int ring_start;
    200 	unsigned int ring_end;
    201 	unsigned int ring_size;
    202 	unsigned int front_offset;
    203 	unsigned int back_offset;
    204 	unsigned int depth_offset;
    205 	unsigned int w;
    206 	unsigned int h;
    207 	unsigned int pitch;
    208 	unsigned int pitch_bits;
    209 	unsigned int back_pitch;
    210 	unsigned int depth_pitch;
    211 	unsigned int cpp;
    212 	unsigned int chipset;
    213 } drm_i915_init_t;
    214 
    215 typedef struct _drm_i915_sarea {
    216 	struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
    217 	int last_upload;	/* last time texture was uploaded */
    218 	int last_enqueue;	/* last time a buffer was enqueued */
    219 	int last_dispatch;	/* age of the most recently dispatched buffer */
    220 	int ctxOwner;		/* last context to upload state */
    221 	int texAge;
    222 	int pf_enabled;		/* is pageflipping allowed? */
    223 	int pf_active;
    224 	int pf_current_page;	/* which buffer is being displayed? */
    225 	int perf_boxes;		/* performance boxes to be displayed */
    226 	int width, height;      /* screen size in pixels */
    227 
    228 	drm_handle_t front_handle;
    229 	int front_offset;
    230 	int front_size;
    231 
    232 	drm_handle_t back_handle;
    233 	int back_offset;
    234 	int back_size;
    235 
    236 	drm_handle_t depth_handle;
    237 	int depth_offset;
    238 	int depth_size;
    239 
    240 	drm_handle_t tex_handle;
    241 	int tex_offset;
    242 	int tex_size;
    243 	int log_tex_granularity;
    244 	int pitch;
    245 	int rotation;           /* 0, 90, 180 or 270 */
    246 	int rotated_offset;
    247 	int rotated_size;
    248 	int rotated_pitch;
    249 	int virtualX, virtualY;
    250 
    251 	unsigned int front_tiled;
    252 	unsigned int back_tiled;
    253 	unsigned int depth_tiled;
    254 	unsigned int rotated_tiled;
    255 	unsigned int rotated2_tiled;
    256 
    257 	int pipeA_x;
    258 	int pipeA_y;
    259 	int pipeA_w;
    260 	int pipeA_h;
    261 	int pipeB_x;
    262 	int pipeB_y;
    263 	int pipeB_w;
    264 	int pipeB_h;
    265 
    266 	/* fill out some space for old userspace triple buffer */
    267 	drm_handle_t unused_handle;
    268 	__u32 unused1, unused2, unused3;
    269 
    270 	/* buffer object handles for static buffers. May change
    271 	 * over the lifetime of the client.
    272 	 */
    273 	__u32 front_bo_handle;
    274 	__u32 back_bo_handle;
    275 	__u32 unused_bo_handle;
    276 	__u32 depth_bo_handle;
    277 
    278 } drm_i915_sarea_t;
    279 
    280 /* due to userspace building against these headers we need some compat here */
    281 #define planeA_x pipeA_x
    282 #define planeA_y pipeA_y
    283 #define planeA_w pipeA_w
    284 #define planeA_h pipeA_h
    285 #define planeB_x pipeB_x
    286 #define planeB_y pipeB_y
    287 #define planeB_w pipeB_w
    288 #define planeB_h pipeB_h
    289 
    290 /* Flags for perf_boxes
    291  */
    292 #define I915_BOX_RING_EMPTY    0x1
    293 #define I915_BOX_FLIP          0x2
    294 #define I915_BOX_WAIT          0x4
    295 #define I915_BOX_TEXTURE_LOAD  0x8
    296 #define I915_BOX_LOST_CONTEXT  0x10
    297 
    298 /*
    299  * i915 specific ioctls.
    300  *
     301  * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END), i.e.
     302  * [0x40, 0xa0) (0xa0 is excluded). The numbers below are defined as offsets
    303  * against DRM_COMMAND_BASE and should be between [0x0, 0x60).
    304  */
    305 #define DRM_I915_INIT		0x00
    306 #define DRM_I915_FLUSH		0x01
    307 #define DRM_I915_FLIP		0x02
    308 #define DRM_I915_BATCHBUFFER	0x03
    309 #define DRM_I915_IRQ_EMIT	0x04
    310 #define DRM_I915_IRQ_WAIT	0x05
    311 #define DRM_I915_GETPARAM	0x06
    312 #define DRM_I915_SETPARAM	0x07
    313 #define DRM_I915_ALLOC		0x08
    314 #define DRM_I915_FREE		0x09
    315 #define DRM_I915_INIT_HEAP	0x0a
    316 #define DRM_I915_CMDBUFFER	0x0b
    317 #define DRM_I915_DESTROY_HEAP	0x0c
    318 #define DRM_I915_SET_VBLANK_PIPE	0x0d
    319 #define DRM_I915_GET_VBLANK_PIPE	0x0e
    320 #define DRM_I915_VBLANK_SWAP	0x0f
    321 #define DRM_I915_HWS_ADDR	0x11
    322 #define DRM_I915_GEM_INIT	0x13
    323 #define DRM_I915_GEM_EXECBUFFER	0x14
    324 #define DRM_I915_GEM_PIN	0x15
    325 #define DRM_I915_GEM_UNPIN	0x16
    326 #define DRM_I915_GEM_BUSY	0x17
    327 #define DRM_I915_GEM_THROTTLE	0x18
    328 #define DRM_I915_GEM_ENTERVT	0x19
    329 #define DRM_I915_GEM_LEAVEVT	0x1a
    330 #define DRM_I915_GEM_CREATE	0x1b
    331 #define DRM_I915_GEM_PREAD	0x1c
    332 #define DRM_I915_GEM_PWRITE	0x1d
    333 #define DRM_I915_GEM_MMAP	0x1e
    334 #define DRM_I915_GEM_SET_DOMAIN	0x1f
    335 #define DRM_I915_GEM_SW_FINISH	0x20
    336 #define DRM_I915_GEM_SET_TILING	0x21
    337 #define DRM_I915_GEM_GET_TILING	0x22
    338 #define DRM_I915_GEM_GET_APERTURE 0x23
    339 #define DRM_I915_GEM_MMAP_GTT	0x24
    340 #define DRM_I915_GET_PIPE_FROM_CRTC_ID	0x25
    341 #define DRM_I915_GEM_MADVISE	0x26
    342 #define DRM_I915_OVERLAY_PUT_IMAGE	0x27
    343 #define DRM_I915_OVERLAY_ATTRS	0x28
    344 #define DRM_I915_GEM_EXECBUFFER2	0x29
    345 #define DRM_I915_GEM_EXECBUFFER2_WR	DRM_I915_GEM_EXECBUFFER2
    346 #define DRM_I915_GET_SPRITE_COLORKEY	0x2a
    347 #define DRM_I915_SET_SPRITE_COLORKEY	0x2b
    348 #define DRM_I915_GEM_WAIT	0x2c
    349 #define DRM_I915_GEM_CONTEXT_CREATE	0x2d
    350 #define DRM_I915_GEM_CONTEXT_DESTROY	0x2e
    351 #define DRM_I915_GEM_SET_CACHING	0x2f
    352 #define DRM_I915_GEM_GET_CACHING	0x30
    353 #define DRM_I915_REG_READ		0x31
    354 #define DRM_I915_GET_RESET_STATS	0x32
    355 #define DRM_I915_GEM_USERPTR		0x33
    356 #define DRM_I915_GEM_CONTEXT_GETPARAM	0x34
    357 #define DRM_I915_GEM_CONTEXT_SETPARAM	0x35
    358 #define DRM_I915_PERF_OPEN		0x36
    359 #define DRM_I915_PERF_ADD_CONFIG	0x37
    360 #define DRM_I915_PERF_REMOVE_CONFIG	0x38
    361 #define DRM_I915_QUERY			0x39
    362 #define DRM_I915_GEM_VM_CREATE		0x3a
    363 #define DRM_I915_GEM_VM_DESTROY		0x3b
    364 /* Must be kept compact -- no holes */
    365 
    366 #define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
    367 #define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
    368 #define DRM_IOCTL_I915_FLIP		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
    369 #define DRM_IOCTL_I915_BATCHBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
    370 #define DRM_IOCTL_I915_IRQ_EMIT         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
    371 #define DRM_IOCTL_I915_IRQ_WAIT         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
    372 #define DRM_IOCTL_I915_GETPARAM         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
    373 #define DRM_IOCTL_I915_SETPARAM         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
    374 #define DRM_IOCTL_I915_ALLOC            DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
    375 #define DRM_IOCTL_I915_FREE             DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
    376 #define DRM_IOCTL_I915_INIT_HEAP        DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
    377 #define DRM_IOCTL_I915_CMDBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
    378 #define DRM_IOCTL_I915_DESTROY_HEAP	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
    379 #define DRM_IOCTL_I915_SET_VBLANK_PIPE	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
    380 #define DRM_IOCTL_I915_GET_VBLANK_PIPE	DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
    381 #define DRM_IOCTL_I915_VBLANK_SWAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
    382 #define DRM_IOCTL_I915_HWS_ADDR		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
    383 #define DRM_IOCTL_I915_GEM_INIT		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
    384 #define DRM_IOCTL_I915_GEM_EXECBUFFER	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
    385 #define DRM_IOCTL_I915_GEM_EXECBUFFER2	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
    386 #define DRM_IOCTL_I915_GEM_EXECBUFFER2_WR	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2_WR, struct drm_i915_gem_execbuffer2)
    387 #define DRM_IOCTL_I915_GEM_PIN		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
    388 #define DRM_IOCTL_I915_GEM_UNPIN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
    389 #define DRM_IOCTL_I915_GEM_BUSY		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
    390 #define DRM_IOCTL_I915_GEM_SET_CACHING		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
    391 #define DRM_IOCTL_I915_GEM_GET_CACHING		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
    392 #define DRM_IOCTL_I915_GEM_THROTTLE	DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
    393 #define DRM_IOCTL_I915_GEM_ENTERVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
    394 #define DRM_IOCTL_I915_GEM_LEAVEVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
    395 #define DRM_IOCTL_I915_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
    396 #define DRM_IOCTL_I915_GEM_PREAD	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
    397 #define DRM_IOCTL_I915_GEM_PWRITE	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
    398 #define DRM_IOCTL_I915_GEM_MMAP		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
    399 #define DRM_IOCTL_I915_GEM_MMAP_GTT	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
    400 #define DRM_IOCTL_I915_GEM_MMAP_OFFSET	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_offset)
    401 #define DRM_IOCTL_I915_GEM_SET_DOMAIN	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
    402 #define DRM_IOCTL_I915_GEM_SW_FINISH	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
    403 #define DRM_IOCTL_I915_GEM_SET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
    404 #define DRM_IOCTL_I915_GEM_GET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
    405 #define DRM_IOCTL_I915_GEM_GET_APERTURE	DRM_IOR  (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
    406 #define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
    407 #define DRM_IOCTL_I915_GEM_MADVISE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
    408 #define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
    409 #define DRM_IOCTL_I915_OVERLAY_ATTRS	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
    410 #define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
    411 #define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
    412 #define DRM_IOCTL_I915_GEM_WAIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
    413 #define DRM_IOCTL_I915_GEM_CONTEXT_CREATE	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
    414 #define DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create_ext)
    415 #define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
    416 #define DRM_IOCTL_I915_REG_READ			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
    417 #define DRM_IOCTL_I915_GET_RESET_STATS		DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
    418 #define DRM_IOCTL_I915_GEM_USERPTR			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
    419 #define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
    420 #define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
    421 #define DRM_IOCTL_I915_PERF_OPEN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)
    422 #define DRM_IOCTL_I915_PERF_ADD_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
    423 #define DRM_IOCTL_I915_PERF_REMOVE_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
    424 #define DRM_IOCTL_I915_QUERY			DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
    425 #define DRM_IOCTL_I915_GEM_VM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_CREATE, struct drm_i915_gem_vm_control)
    426 #define DRM_IOCTL_I915_GEM_VM_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control)
    427 
    428 /* Allow drivers to submit batchbuffers directly to hardware, relying
    429  * on the security mechanisms provided by hardware.
    430  */
    431 typedef struct drm_i915_batchbuffer {
    432 	int start;		/* agp offset */
    433 	int used;		/* nr bytes in use */
    434 	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
    435 	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
     436 	int num_cliprects;	/* multipass with multiple cliprects? */
    437 	struct drm_clip_rect __user *cliprects;	/* pointer to userspace cliprects */
    438 } drm_i915_batchbuffer_t;
    439 
    440 /* As above, but pass a pointer to userspace buffer which can be
    441  * validated by the kernel prior to sending to hardware.
    442  */
    443 typedef struct _drm_i915_cmdbuffer {
    444 	char __user *buf;	/* pointer to userspace command buffer */
    445 	int sz;			/* nr bytes in buf */
    446 	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
    447 	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
     448 	int num_cliprects;	/* multipass with multiple cliprects? */
    449 	struct drm_clip_rect __user *cliprects;	/* pointer to userspace cliprects */
    450 } drm_i915_cmdbuffer_t;
    451 
     452 /* Userspace can request & wait on IRQs:
    453  */
    454 typedef struct drm_i915_irq_emit {
    455 	int __user *irq_seq;
    456 } drm_i915_irq_emit_t;
    457 
    458 typedef struct drm_i915_irq_wait {
    459 	int irq_seq;
    460 } drm_i915_irq_wait_t;
    461 
    462 /*
    463  * Different modes of per-process Graphics Translation Table,
    464  * see I915_PARAM_HAS_ALIASING_PPGTT
    465  */
    466 #define I915_GEM_PPGTT_NONE	0
    467 #define I915_GEM_PPGTT_ALIASING	1
    468 #define I915_GEM_PPGTT_FULL	2
    469 
    470 /* Ioctl to query kernel params:
    471  */
    472 #define I915_PARAM_IRQ_ACTIVE            1
    473 #define I915_PARAM_ALLOW_BATCHBUFFER     2
    474 #define I915_PARAM_LAST_DISPATCH         3
    475 #define I915_PARAM_CHIPSET_ID            4
    476 #define I915_PARAM_HAS_GEM               5
    477 #define I915_PARAM_NUM_FENCES_AVAIL      6
    478 #define I915_PARAM_HAS_OVERLAY           7
    479 #define I915_PARAM_HAS_PAGEFLIPPING	 8
    480 #define I915_PARAM_HAS_EXECBUF2          9
    481 #define I915_PARAM_HAS_BSD		 10
    482 #define I915_PARAM_HAS_BLT		 11
    483 #define I915_PARAM_HAS_RELAXED_FENCING	 12
    484 #define I915_PARAM_HAS_COHERENT_RINGS	 13
    485 #define I915_PARAM_HAS_EXEC_CONSTANTS	 14
    486 #define I915_PARAM_HAS_RELAXED_DELTA	 15
    487 #define I915_PARAM_HAS_GEN7_SOL_RESET	 16
    488 #define I915_PARAM_HAS_LLC     	 	 17
    489 #define I915_PARAM_HAS_ALIASING_PPGTT	 18
    490 #define I915_PARAM_HAS_WAIT_TIMEOUT	 19
    491 #define I915_PARAM_HAS_SEMAPHORES	 20
    492 #define I915_PARAM_HAS_PRIME_VMAP_FLUSH	 21
    493 #define I915_PARAM_HAS_VEBOX		 22
    494 #define I915_PARAM_HAS_SECURE_BATCHES	 23
    495 #define I915_PARAM_HAS_PINNED_BATCHES	 24
    496 #define I915_PARAM_HAS_EXEC_NO_RELOC	 25
    497 #define I915_PARAM_HAS_EXEC_HANDLE_LUT   26
    498 #define I915_PARAM_HAS_WT     	 	 27
    499 #define I915_PARAM_CMD_PARSER_VERSION	 28
    500 #define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
    501 #define I915_PARAM_MMAP_VERSION          30
    502 #define I915_PARAM_HAS_BSD2		 31
    503 #define I915_PARAM_REVISION              32
    504 #define I915_PARAM_SUBSLICE_TOTAL	 33
    505 #define I915_PARAM_EU_TOTAL		 34
    506 #define I915_PARAM_HAS_GPU_RESET	 35
    507 #define I915_PARAM_HAS_RESOURCE_STREAMER 36
    508 #define I915_PARAM_HAS_EXEC_SOFTPIN	 37
    509 #define I915_PARAM_HAS_POOLED_EU	 38
    510 #define I915_PARAM_MIN_EU_IN_POOL	 39
    511 #define I915_PARAM_MMAP_GTT_VERSION	 40
    512 
    513 /*
    514  * Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
    515  * priorities and the driver will attempt to execute batches in priority order.
    516  * The param returns a capability bitmask, nonzero implies that the scheduler
    517  * is enabled, with different features present according to the mask.
    518  *
    519  * The initial priority for each batch is supplied by the context and is
    520  * controlled via I915_CONTEXT_PARAM_PRIORITY.
    521  */
    522 #define I915_PARAM_HAS_SCHEDULER	 41
    523 #define   I915_SCHEDULER_CAP_ENABLED	(1ul << 0)
    524 #define   I915_SCHEDULER_CAP_PRIORITY	(1ul << 1)
    525 #define   I915_SCHEDULER_CAP_PREEMPTION	(1ul << 2)
    526 #define   I915_SCHEDULER_CAP_SEMAPHORES	(1ul << 3)
    527 #define   I915_SCHEDULER_CAP_ENGINE_BUSY_STATS	(1ul << 4)
    528 
    529 #define I915_PARAM_HUC_STATUS		 42
    530 
    531 /* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
    532  * synchronisation with implicit fencing on individual objects.
    533  * See EXEC_OBJECT_ASYNC.
    534  */
    535 #define I915_PARAM_HAS_EXEC_ASYNC	 43
    536 
    537 /* Query whether DRM_I915_GEM_EXECBUFFER2 supports explicit fence support -
    538  * both being able to pass in a sync_file fd to wait upon before executing,
    539  * and being able to return a new sync_file fd that is signaled when the
    540  * current request is complete. See I915_EXEC_FENCE_IN and I915_EXEC_FENCE_OUT.
    541  */
    542 #define I915_PARAM_HAS_EXEC_FENCE	 44
    543 
    544 /* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture
     545  * user-specified buffers for post-mortem debugging of GPU hangs. See
    546  * EXEC_OBJECT_CAPTURE.
    547  */
    548 #define I915_PARAM_HAS_EXEC_CAPTURE	 45
    549 
    550 #define I915_PARAM_SLICE_MASK		 46
    551 
    552 /* Assuming it's uniform for each slice, this queries the mask of subslices
    553  * per-slice for this system.
    554  */
    555 #define I915_PARAM_SUBSLICE_MASK	 47
    556 
    557 /*
    558  * Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying the batch buffer
    559  * as the first execobject as opposed to the last. See I915_EXEC_BATCH_FIRST.
    560  */
    561 #define I915_PARAM_HAS_EXEC_BATCH_FIRST	 48
    562 
    563 /* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
    564  * drm_i915_gem_exec_fence structures.  See I915_EXEC_FENCE_ARRAY.
    565  */
    566 #define I915_PARAM_HAS_EXEC_FENCE_ARRAY  49
    567 
    568 /*
    569  * Query whether every context (both per-file default and user created) is
    570  * isolated (insofar as HW supports). If this parameter is not true, then
    571  * freshly created contexts may inherit values from an existing context,
    572  * rather than default HW values. If true, it also ensures (insofar as HW
    573  * supports) that all state set by this context will not leak to any other
    574  * context.
    575  *
     576  * As not every engine across every gen supports contexts, the returned
    577  * value reports the support of context isolation for individual engines by
    578  * returning a bitmask of each engine class set to true if that class supports
    579  * isolation.
    580  */
    581 #define I915_PARAM_HAS_CONTEXT_ISOLATION 50
    582 
    583 /* Frequency of the command streamer timestamps given by the *_TIMESTAMP
     584  * registers. This used to be fixed per platform, but from CNL onwards it
     585  * might vary depending on the part.
    586  */
    587 #define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51
    588 
    589 /*
     590  * Once upon a time we supposed that writes through the GGTT would land
     591  * immediately in physical memory (once flushed out of the CPU path). However,
    592  * on a few different processors and chipsets, this is not necessarily the case
    593  * as the writes appear to be buffered internally. Thus a read of the backing
    594  * storage (physical memory) via a different path (with different physical tags
    595  * to the indirect write via the GGTT) will see stale values from before
    596  * the GGTT write. Inside the kernel, we can for the most part keep track of
    597  * the different read/write domains in use (e.g. set-domain), but the assumption
    598  * of coherency is baked into the ABI, hence reporting its true state in this
    599  * parameter.
    600  *
    601  * Reports true when writes via mmap_gtt are immediately visible following an
    602  * lfence to flush the WCB.
    603  *
     604  * Reports false when writes via mmap_gtt are indeterminately delayed in an
     605  * internal buffer and are _not_ immediately visible to third parties accessing
     606  * directly via mmap_cpu/mmap_wc. Use of mmap_gtt as part of an IPC
     607  * communications channel when reporting false is strongly discouraged.
    608  */
    609 #define I915_PARAM_MMAP_GTT_COHERENT	52
    610 
    611 /*
    612  * Query whether DRM_I915_GEM_EXECBUFFER2 supports coordination of parallel
    613  * execution through use of explicit fence support.
    614  * See I915_EXEC_FENCE_OUT and I915_EXEC_FENCE_SUBMIT.
    615  */
    616 #define I915_PARAM_HAS_EXEC_SUBMIT_FENCE 53
    617 
    618 /*
    619  * Revision of the i915-perf uAPI. The value returned helps determine what
    620  * i915-perf features are available. See drm_i915_perf_property_id.
    621  */
    622 #define I915_PARAM_PERF_REVISION	54
    623 
    624 /* Must be kept compact -- no holes and well documented */
    625 
    626 typedef struct drm_i915_getparam {
    627 	__s32 param;
    628 	/*
    629 	 * WARNING: Using pointers instead of fixed-size u64 means we need to write
    630 	 * compat32 code. Don't repeat this mistake.
    631 	 */
    632 	int __user *value;
    633 } drm_i915_getparam_t;
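
/*
 * Sketch: querying a parameter, e.g. whether explicit fencing is
 * supported (illustrative only; assumes <sys/ioctl.h> and an open
 * DRM fd).
 */
#if 0
static int example_getparam(int fd, int param)
{
	int value = 0;
	drm_i915_getparam_t gp = { .param = param, .value = &value };

	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return -1;
	return value;	/* e.g. param == I915_PARAM_HAS_EXEC_FENCE */
}
#endif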
    634 
    635 /* Ioctl to set kernel params:
    636  */
    637 #define I915_SETPARAM_USE_MI_BATCHBUFFER_START            1
    638 #define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY             2
    639 #define I915_SETPARAM_ALLOW_BATCHBUFFER                   3
    640 #define I915_SETPARAM_NUM_USED_FENCES                     4
    641 /* Must be kept compact -- no holes */
    642 
    643 typedef struct drm_i915_setparam {
    644 	int param;
    645 	int value;
    646 } drm_i915_setparam_t;
    647 
    648 /* A memory manager for regions of shared memory:
    649  */
    650 #define I915_MEM_REGION_AGP 1
    651 
    652 typedef struct drm_i915_mem_alloc {
    653 	int region;
    654 	int alignment;
    655 	int size;
    656 	int __user *region_offset;	/* offset from start of fb or agp */
    657 } drm_i915_mem_alloc_t;
    658 
    659 typedef struct drm_i915_mem_free {
    660 	int region;
    661 	int region_offset;
    662 } drm_i915_mem_free_t;
    663 
    664 typedef struct drm_i915_mem_init_heap {
    665 	int region;
    666 	int size;
    667 	int start;
    668 } drm_i915_mem_init_heap_t;
    669 
     670 /* Allow memory manager to be torn down and re-initialized (e.g. on
    671  * rotate):
    672  */
    673 typedef struct drm_i915_mem_destroy_heap {
    674 	int region;
    675 } drm_i915_mem_destroy_heap_t;
    676 
    677 /* Allow X server to configure which pipes to monitor for vblank signals
    678  */
    679 #define	DRM_I915_VBLANK_PIPE_A	1
    680 #define	DRM_I915_VBLANK_PIPE_B	2
    681 
    682 typedef struct drm_i915_vblank_pipe {
    683 	int pipe;
    684 } drm_i915_vblank_pipe_t;
    685 
    686 /* Schedule buffer swap at given vertical blank:
    687  */
    688 typedef struct drm_i915_vblank_swap {
    689 	drm_drawable_t drawable;
    690 	enum drm_vblank_seq_type seqtype;
    691 	unsigned int sequence;
    692 } drm_i915_vblank_swap_t;
    693 
    694 typedef struct drm_i915_hws_addr {
    695 	__u64 addr;
    696 } drm_i915_hws_addr_t;
    697 
    698 struct drm_i915_gem_init {
    699 	/**
    700 	 * Beginning offset in the GTT to be managed by the DRM memory
    701 	 * manager.
    702 	 */
    703 	__u64 gtt_start;
    704 	/**
    705 	 * Ending offset in the GTT to be managed by the DRM memory
    706 	 * manager.
    707 	 */
    708 	__u64 gtt_end;
    709 };
    710 
    711 struct drm_i915_gem_create {
    712 	/**
    713 	 * Requested size for the object.
    714 	 *
    715 	 * The (page-aligned) allocated size for the object will be returned.
    716 	 */
    717 	__u64 size;
    718 	/**
    719 	 * Returned handle for the object.
    720 	 *
    721 	 * Object handles are nonzero.
    722 	 */
    723 	__u32 handle;
    724 	__u32 pad;
    725 };
    726 
    727 struct drm_i915_gem_pread {
    728 	/** Handle for the object being read. */
    729 	__u32 handle;
    730 	__u32 pad;
    731 	/** Offset into the object to read from */
    732 	__u64 offset;
    733 	/** Length of data to read */
    734 	__u64 size;
    735 	/**
    736 	 * Pointer to write the data into.
    737 	 *
    738 	 * This is a fixed-size type for 32/64 compatibility.
    739 	 */
    740 	__u64 data_ptr;
    741 };
    742 
    743 struct drm_i915_gem_pwrite {
    744 	/** Handle for the object being written to. */
    745 	__u32 handle;
    746 	__u32 pad;
    747 	/** Offset into the object to write to */
    748 	__u64 offset;
    749 	/** Length of data to write */
    750 	__u64 size;
    751 	/**
    752 	 * Pointer to read the data from.
    753 	 *
    754 	 * This is a fixed-size type for 32/64 compatibility.
    755 	 */
    756 	__u64 data_ptr;
    757 };
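
/*
 * Sketch: uploading bytes into an object with pwrite (illustrative
 * only; assumes <stdint.h>, <sys/ioctl.h> and an open DRM fd).
 */
#if 0
static int example_upload(int fd, __u32 handle, const void *data,
			  __u64 len)
{
	struct drm_i915_gem_pwrite pw = {
		.handle = handle,
		.offset = 0,
		.size = len,
		.data_ptr = (__u64)(uintptr_t)data,
	};

	return ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pw);
}
#endif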
    758 
    759 struct drm_i915_gem_mmap {
    760 	/** Handle for the object being mapped. */
    761 	__u32 handle;
    762 	__u32 pad;
    763 	/** Offset in the object to map. */
    764 	__u64 offset;
    765 	/**
    766 	 * Length of data to map.
    767 	 *
    768 	 * The value will be page-aligned.
    769 	 */
    770 	__u64 size;
    771 	/**
    772 	 * Returned pointer the data was mapped at.
    773 	 *
    774 	 * This is a fixed-size type for 32/64 compatibility.
    775 	 */
    776 	__u64 addr_ptr;
    777 
    778 	/**
    779 	 * Flags for extended behaviour.
    780 	 *
    781 	 * Added in version 2.
    782 	 */
    783 	__u64 flags;
    784 #define I915_MMAP_WC 0x1
    785 };
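
/*
 * Sketch: creating an object and mapping it through the CPU path
 * (illustrative only; error handling trimmed, <stdint.h> and
 * <sys/ioctl.h> assumed).
 */
#if 0
static void *example_create_and_map(int fd, __u64 size, __u32 *handle)
{
	struct drm_i915_gem_create create = { .size = size };
	struct drm_i915_gem_mmap map;

	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create))
		return NULL;
	*handle = create.handle;

	map = (struct drm_i915_gem_mmap) {
		.handle = create.handle,
		.size = create.size,
	};
	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &map))
		return NULL;
	return (void *)(uintptr_t)map.addr_ptr;
}
#endif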
    786 
    787 struct drm_i915_gem_mmap_gtt {
    788 	/** Handle for the object being mapped. */
    789 	__u32 handle;
    790 	__u32 pad;
    791 	/**
    792 	 * Fake offset to use for subsequent mmap call
    793 	 *
    794 	 * This is a fixed-size type for 32/64 compatibility.
    795 	 */
    796 	__u64 offset;
    797 };
    798 
    799 struct drm_i915_gem_mmap_offset {
    800 	/** Handle for the object being mapped. */
    801 	__u32 handle;
    802 	__u32 pad;
    803 	/**
    804 	 * Fake offset to use for subsequent mmap call
    805 	 *
    806 	 * This is a fixed-size type for 32/64 compatibility.
    807 	 */
    808 	__u64 offset;
    809 
    810 	/**
    811 	 * Flags for extended behaviour.
    812 	 *
     813 	 * One of the MMAP_OFFSET types
     814 	 * (GTT, WC, WB, UC, etc.) must be included.
    815 	 */
    816 	__u64 flags;
    817 #define I915_MMAP_OFFSET_GTT 0
    818 #define I915_MMAP_OFFSET_WC  1
    819 #define I915_MMAP_OFFSET_WB  2
    820 #define I915_MMAP_OFFSET_UC  3
    821 
    822 	/*
    823 	 * Zero-terminated chain of extensions.
    824 	 *
    825 	 * No current extensions defined; mbz.
    826 	 */
    827 	__u64 extensions;
    828 };
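
/*
 * Sketch: the two-step mapping -- the ioctl returns a fake offset,
 * which is then passed to a regular mmap() of the DRM fd
 * (illustrative only; <sys/mman.h> assumed).
 */
#if 0
static void *example_map_wc(int fd, __u32 handle, size_t size)
{
	struct drm_i915_gem_mmap_offset arg = {
		.handle = handle,
		.flags = I915_MMAP_OFFSET_WC,
	};

	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg))
		return NULL;
	/* mmap() returns MAP_FAILED, not NULL, on error */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
	    fd, arg.offset);
}
#endif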
    829 
    830 struct drm_i915_gem_set_domain {
    831 	/** Handle for the object */
    832 	__u32 handle;
    833 
    834 	/** New read domains */
    835 	__u32 read_domains;
    836 
    837 	/** New write domain */
    838 	__u32 write_domain;
    839 };
    840 
    841 struct drm_i915_gem_sw_finish {
    842 	/** Handle for the object */
    843 	__u32 handle;
    844 };
    845 
    846 struct drm_i915_gem_relocation_entry {
    847 	/**
    848 	 * Handle of the buffer being pointed to by this relocation entry.
    849 	 *
     850 	 * It's appealing to make this an index into the mm_validate_entry
    851 	 * list to refer to the buffer, but this allows the driver to create
    852 	 * a relocation list for state buffers and not re-write it per
    853 	 * exec using the buffer.
    854 	 */
    855 	__u32 target_handle;
    856 
    857 	/**
    858 	 * Value to be added to the offset of the target buffer to make up
    859 	 * the relocation entry.
    860 	 */
    861 	__u32 delta;
    862 
    863 	/** Offset in the buffer the relocation entry will be written into */
    864 	__u64 offset;
    865 
    866 	/**
    867 	 * Offset value of the target buffer that the relocation entry was last
    868 	 * written as.
    869 	 *
    870 	 * If the buffer has the same offset as last time, we can skip syncing
    871 	 * and writing the relocation.  This value is written back out by
    872 	 * the execbuffer ioctl when the relocation is written.
    873 	 */
    874 	__u64 presumed_offset;
    875 
    876 	/**
    877 	 * Target memory domains read by this operation.
    878 	 */
    879 	__u32 read_domains;
    880 
    881 	/**
    882 	 * Target memory domains written by this operation.
    883 	 *
    884 	 * Note that only one domain may be written by the whole
    885 	 * execbuffer operation, so that where there are conflicts,
    886 	 * the application will get -EINVAL back.
    887 	 */
    888 	__u32 write_domain;
    889 };
    890 
    891 /** @{
    892  * Intel memory domains
    893  *
    894  * Most of these just align with the various caches in
    895  * the system and are used to flush and invalidate as
    896  * objects end up cached in different domains.
    897  */
    898 /** CPU cache */
    899 #define I915_GEM_DOMAIN_CPU		0x00000001
    900 /** Render cache, used by 2D and 3D drawing */
    901 #define I915_GEM_DOMAIN_RENDER		0x00000002
    902 /** Sampler cache, used by texture engine */
    903 #define I915_GEM_DOMAIN_SAMPLER		0x00000004
    904 /** Command queue, used to load batch buffers */
    905 #define I915_GEM_DOMAIN_COMMAND		0x00000008
    906 /** Instruction cache, used by shader programs */
    907 #define I915_GEM_DOMAIN_INSTRUCTION	0x00000010
    908 /** Vertex address cache */
    909 #define I915_GEM_DOMAIN_VERTEX		0x00000020
    910 /** GTT domain - aperture and scanout */
    911 #define I915_GEM_DOMAIN_GTT		0x00000040
    912 /** WC domain - uncached access */
    913 #define I915_GEM_DOMAIN_WC		0x00000080
    914 /** @} */
    915 
    916 struct drm_i915_gem_exec_object {
    917 	/**
    918 	 * User's handle for a buffer to be bound into the GTT for this
    919 	 * operation.
    920 	 */
    921 	__u32 handle;
    922 
    923 	/** Number of relocations to be performed on this buffer */
    924 	__u32 relocation_count;
    925 	/**
    926 	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
    927 	 * the relocations to be performed in this buffer.
    928 	 */
    929 	__u64 relocs_ptr;
    930 
    931 	/** Required alignment in graphics aperture */
    932 	__u64 alignment;
    933 
    934 	/**
    935 	 * Returned value of the updated offset of the object, for future
    936 	 * presumed_offset writes.
    937 	 */
    938 	__u64 offset;
    939 };
    940 
    941 struct drm_i915_gem_execbuffer {
    942 	/**
    943 	 * List of buffers to be validated with their relocations to be
     944 	 * performed on them.
    945 	 *
    946 	 * This is a pointer to an array of struct drm_i915_gem_validate_entry.
    947 	 *
    948 	 * These buffers must be listed in an order such that all relocations
    949 	 * a buffer is performing refer to buffers that have already appeared
    950 	 * in the validate list.
    951 	 */
    952 	__u64 buffers_ptr;
    953 	__u32 buffer_count;
    954 
    955 	/** Offset in the batchbuffer to start execution from. */
    956 	__u32 batch_start_offset;
    957 	/** Bytes used in batchbuffer from batch_start_offset */
    958 	__u32 batch_len;
    959 	__u32 DR1;
    960 	__u32 DR4;
    961 	__u32 num_cliprects;
    962 	/** This is a struct drm_clip_rect *cliprects */
    963 	__u64 cliprects_ptr;
    964 };
    965 
    966 struct drm_i915_gem_exec_object2 {
    967 	/**
    968 	 * User's handle for a buffer to be bound into the GTT for this
    969 	 * operation.
    970 	 */
    971 	__u32 handle;
    972 
    973 	/** Number of relocations to be performed on this buffer */
    974 	__u32 relocation_count;
    975 	/**
    976 	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
    977 	 * the relocations to be performed in this buffer.
    978 	 */
    979 	__u64 relocs_ptr;
    980 
    981 	/** Required alignment in graphics aperture */
    982 	__u64 alignment;
    983 
    984 	/**
    985 	 * When the EXEC_OBJECT_PINNED flag is specified this is populated by
    986 	 * the user with the GTT offset at which this object will be pinned.
    987 	 * When the I915_EXEC_NO_RELOC flag is specified this must contain the
    988 	 * presumed_offset of the object.
    989 	 * During execbuffer2 the kernel populates it with the value of the
    990 	 * current GTT offset of the object, for future presumed_offset writes.
    991 	 */
    992 	__u64 offset;
    993 
    994 #define EXEC_OBJECT_NEEDS_FENCE		 (1<<0)
    995 #define EXEC_OBJECT_NEEDS_GTT		 (1<<1)
    996 #define EXEC_OBJECT_WRITE		 (1<<2)
    997 #define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
    998 #define EXEC_OBJECT_PINNED		 (1<<4)
    999 #define EXEC_OBJECT_PAD_TO_SIZE		 (1<<5)
   1000 /* The kernel implicitly tracks GPU activity on all GEM objects, and
   1001  * synchronises operations with outstanding rendering. This includes
   1002  * rendering on other devices if exported via dma-buf. However, sometimes
   1003  * this tracking is too coarse and the user knows better. For example,
   1004  * if the object is split into non-overlapping ranges shared between different
   1005  * clients or engines (i.e. suballocating objects), the implicit tracking
    1006  * by the kernel assumes that each operation affects the whole object rather
   1007  * than an individual range, causing needless synchronisation between clients.
   1008  * The kernel will also forgo any CPU cache flushes prior to rendering from
   1009  * the object as the client is expected to be also handling such domain
   1010  * tracking.
   1011  *
   1012  * The kernel maintains the implicit tracking in order to manage resources
   1013  * used by the GPU - this flag only disables the synchronisation prior to
   1014  * rendering with this object in this execbuf.
   1015  *
    1016  * Opting out of implicit synchronisation requires the user to do its own
   1017  * explicit tracking to avoid rendering corruption. See, for example,
   1018  * I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously.
   1019  */
   1020 #define EXEC_OBJECT_ASYNC		(1<<6)
   1021 /* Request that the contents of this execobject be copied into the error
   1022  * state upon a GPU hang involving this batch for post-mortem debugging.
   1023  * These buffers are recorded in no particular order as "user" in
   1024  * /sys/class/drm/cardN/error. Query I915_PARAM_HAS_EXEC_CAPTURE to see
   1025  * if the kernel supports this flag.
   1026  */
   1027 #define EXEC_OBJECT_CAPTURE		(1<<7)
   1028 /* All remaining bits are MBZ and RESERVED FOR FUTURE USE */
   1029 #define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_CAPTURE<<1)
   1030 	__u64 flags;
   1031 
   1032 	union {
   1033 		__u64 rsvd1;
   1034 		__u64 pad_to_size;
   1035 	};
   1036 	__u64 rsvd2;
   1037 };
   1038 
   1039 struct drm_i915_gem_exec_fence {
   1040 	/**
   1041 	 * User's handle for a drm_syncobj to wait on or signal.
   1042 	 */
   1043 	__u32 handle;
   1044 
   1045 #define I915_EXEC_FENCE_WAIT            (1<<0)
   1046 #define I915_EXEC_FENCE_SIGNAL          (1<<1)
   1047 #define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1))
   1048 	__u32 flags;
   1049 };
   1050 
   1051 struct drm_i915_gem_execbuffer2 {
   1052 	/**
   1053 	 * List of gem_exec_object2 structs
   1054 	 */
   1055 	__u64 buffers_ptr;
   1056 	__u32 buffer_count;
   1057 
   1058 	/** Offset in the batchbuffer to start execution from. */
   1059 	__u32 batch_start_offset;
   1060 	/** Bytes used in batchbuffer from batch_start_offset */
   1061 	__u32 batch_len;
   1062 	__u32 DR1;
   1063 	__u32 DR4;
   1064 	__u32 num_cliprects;
   1065 	/**
   1066 	 * This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY
   1067 	 * is not set.  If I915_EXEC_FENCE_ARRAY is set, then this is a
   1068 	 * struct drm_i915_gem_exec_fence *fences.
   1069 	 */
   1070 	__u64 cliprects_ptr;
   1071 #define I915_EXEC_RING_MASK              (0x3f)
   1072 #define I915_EXEC_DEFAULT                (0<<0)
   1073 #define I915_EXEC_RENDER                 (1<<0)
   1074 #define I915_EXEC_BSD                    (2<<0)
   1075 #define I915_EXEC_BLT                    (3<<0)
   1076 #define I915_EXEC_VEBOX                  (4<<0)
   1077 
   1078 /* Used for switching the constants addressing mode on gen4+ RENDER ring.
   1079  * Gen6+ only supports relative addressing to dynamic state (default) and
   1080  * absolute addressing.
   1081  *
   1082  * These flags are ignored for the BSD and BLT rings.
   1083  */
   1084 #define I915_EXEC_CONSTANTS_MASK 	(3<<6)
   1085 #define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
   1086 #define I915_EXEC_CONSTANTS_ABSOLUTE 	(1<<6)
   1087 #define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
   1088 	__u64 flags;
   1089 	__u64 rsvd1; /* now used for context info */
   1090 	__u64 rsvd2;
   1091 };
   1092 
   1093 /** Resets the SO write offset registers for transform feedback on gen7. */
   1094 #define I915_EXEC_GEN7_SOL_RESET	(1<<8)
   1095 
   1096 /** Request a privileged ("secure") batch buffer. Note only available for
   1097  * DRM_ROOT_ONLY | DRM_MASTER processes.
   1098  */
   1099 #define I915_EXEC_SECURE		(1<<9)
   1100 
   1101 /** Inform the kernel that the batch is and will always be pinned. This
   1102  * negates the requirement for a workaround to be performed to avoid
   1103  * an incoherent CS (such as can be found on 830/845). If this flag is
   1104  * not passed, the kernel will endeavour to make sure the batch is
   1105  * coherent with the CS before execution. If this flag is passed,
   1106  * userspace assumes the responsibility for ensuring the same.
   1107  */
   1108 #define I915_EXEC_IS_PINNED		(1<<10)
   1109 
   1110 /** Provide a hint to the kernel that the command stream and auxiliary
    1111  * state buffers already hold the correct presumed addresses and so the
   1112  * relocation process may be skipped if no buffers need to be moved in
   1113  * preparation for the execbuffer.
   1114  */
   1115 #define I915_EXEC_NO_RELOC		(1<<11)
   1116 
   1117 /** Use the reloc.handle as an index into the exec object array rather
   1118  * than as the per-file handle.
   1119  */
   1120 #define I915_EXEC_HANDLE_LUT		(1<<12)
   1121 
   1122 /** Used for switching BSD rings on the platforms with two BSD rings */
   1123 #define I915_EXEC_BSD_SHIFT	 (13)
   1124 #define I915_EXEC_BSD_MASK	 (3 << I915_EXEC_BSD_SHIFT)
   1125 /* default ping-pong mode */
   1126 #define I915_EXEC_BSD_DEFAULT	 (0 << I915_EXEC_BSD_SHIFT)
   1127 #define I915_EXEC_BSD_RING1	 (1 << I915_EXEC_BSD_SHIFT)
   1128 #define I915_EXEC_BSD_RING2	 (2 << I915_EXEC_BSD_SHIFT)
   1129 
   1130 /** Tell the kernel that the batchbuffer is processed by
   1131  *  the resource streamer.
   1132  */
   1133 #define I915_EXEC_RESOURCE_STREAMER     (1<<15)
   1134 
    1135 /* Setting I915_EXEC_FENCE_IN implies that lower_32_bits(rsvd2) represents
   1136  * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
   1137  * the batch.
   1138  *
   1139  * Returns -EINVAL if the sync_file fd cannot be found.
   1140  */
   1141 #define I915_EXEC_FENCE_IN		(1<<16)
   1142 
   1143 /* Setting I915_EXEC_FENCE_OUT causes the ioctl to return a sync_file fd
   1144  * in the upper_32_bits(rsvd2) upon success. Ownership of the fd is given
    1145  * to the caller, and it should be closed after use. (The fd is a regular
   1146  * file descriptor and will be cleaned up on process termination. It holds
   1147  * a reference to the request, but nothing else.)
   1148  *
    1149  * The sync_file fd can be combined with other sync_file fds and passed either
   1150  * to execbuf using I915_EXEC_FENCE_IN, to atomic KMS ioctls (so that a flip
   1151  * will only occur after this request completes), or to other devices.
   1152  *
    1153  * Using I915_EXEC_FENCE_OUT requires use of the
   1154  * DRM_IOCTL_I915_GEM_EXECBUFFER2_WR ioctl so that the result is written
   1155  * back to userspace. Failure to do so will cause the out-fence to always
   1156  * be reported as zero, and the real fence fd to be leaked.
   1157  */
   1158 #define I915_EXEC_FENCE_OUT		(1<<17)
   1159 
   1160 /*
   1161  * Traditionally the execbuf ioctl has only considered the final element in
   1162  * the execobject[] to be the executable batch. Often though, the client
    1163  * will know the batch object prior to construction, and being able to place
    1164  * it into the execobject[] array first can simplify the relocation tracking.
    1165  * Setting I915_EXEC_BATCH_FIRST tells execbuf to use element 0 of the
    1166  * execobject[] as the batch instead (the default is to use the last
   1167  * element).
   1168  */
   1169 #define I915_EXEC_BATCH_FIRST		(1<<18)
   1170 
    1171 /* Setting I915_EXEC_FENCE_ARRAY implies that num_cliprects and cliprects_ptr
   1172  * define an array of i915_gem_exec_fence structures which specify a set of
   1173  * dma fences to wait upon or signal.
   1174  */
   1175 #define I915_EXEC_FENCE_ARRAY   (1<<19)
   1176 
   1177 /*
   1178  * Setting I915_EXEC_FENCE_SUBMIT implies that lower_32_bits(rsvd2) represent
   1179  * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
   1180  * the batch.
   1181  *
   1182  * Returns -EINVAL if the sync_file fd cannot be found.
   1183  */
   1184 #define I915_EXEC_FENCE_SUBMIT		(1 << 20)
   1185 
   1186 #define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SUBMIT << 1))
   1187 
   1188 #define I915_EXEC_CONTEXT_ID_MASK	(0xffffffff)
   1189 #define i915_execbuffer2_set_context_id(eb2, context) \
    1190 	(eb2).rsvd1 = (context) & I915_EXEC_CONTEXT_ID_MASK
   1191 #define i915_execbuffer2_get_context_id(eb2) \
   1192 	((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
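
/*
 * Sketch: a minimal submission of one batch object on the render ring
 * under the default context (illustrative only; the batch is assumed
 * to already end in MI_BATCH_BUFFER_END).
 */
#if 0
static int example_submit(int fd, __u32 batch_handle, __u32 batch_len)
{
	struct drm_i915_gem_exec_object2 obj = { .handle = batch_handle };
	struct drm_i915_gem_execbuffer2 eb = {
		.buffers_ptr = (__u64)(uintptr_t)&obj,
		.buffer_count = 1,
		.batch_start_offset = 0,
		.batch_len = batch_len,
		.flags = I915_EXEC_RENDER,
	};

	i915_execbuffer2_set_context_id(eb, 0);	/* default context */
	return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &eb);
}
#endif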
   1193 
   1194 struct drm_i915_gem_pin {
   1195 	/** Handle of the buffer to be pinned. */
   1196 	__u32 handle;
   1197 	__u32 pad;
   1198 
   1199 	/** alignment required within the aperture */
   1200 	__u64 alignment;
   1201 
   1202 	/** Returned GTT offset of the buffer. */
   1203 	__u64 offset;
   1204 };
   1205 
   1206 struct drm_i915_gem_unpin {
   1207 	/** Handle of the buffer to be unpinned. */
   1208 	__u32 handle;
   1209 	__u32 pad;
   1210 };
   1211 
   1212 struct drm_i915_gem_busy {
   1213 	/** Handle of the buffer to check for busy */
   1214 	__u32 handle;
   1215 
   1216 	/** Return busy status
   1217 	 *
   1218 	 * A return of 0 implies that the object is idle (after
   1219 	 * having flushed any pending activity), and a non-zero return that
   1220 	 * the object is still in-flight on the GPU. (The GPU has not yet
   1221 	 * signaled completion for all pending requests that reference the
   1222 	 * object.) An object is guaranteed to become idle eventually (so
   1223 	 * long as no new GPU commands are executed upon it). Due to the
   1224 	 * asynchronous nature of the hardware, an object reported
   1225 	 * as busy may become idle before the ioctl is completed.
   1226 	 *
   1227 	 * Furthermore, if the object is busy, which engine is busy is only
   1228 	 * provided as a guide and only indirectly by reporting its class
   1229 	 * (there may be more than one engine in each class). There are race
   1230 	 * conditions which prevent the report of which engines are busy from
   1231 	 * being always accurate.  However, the converse is not true. If the
   1232 	 * object is idle, the result of the ioctl, that all engines are idle,
   1233 	 * is accurate.
   1234 	 *
   1235 	 * The returned dword is split into two fields to indicate both
    1236 	 * the engine classes on which the object is being read, and the
   1237 	 * engine class on which it is currently being written (if any).
   1238 	 *
    1239 	 * The low word (bits 0:15) indicates if the object is being written
   1240 	 * to by any engine (there can only be one, as the GEM implicit
   1241 	 * synchronisation rules force writes to be serialised). Only the
   1242 	 * engine class (offset by 1, I915_ENGINE_CLASS_RENDER is reported as
   1243 	 * 1 not 0 etc) for the last write is reported.
   1244 	 *
    1245 	 * The high word (bits 16:31) is a bitmask of which engine classes
   1246 	 * are currently reading from the object. Multiple engines may be
   1247 	 * reading from the object simultaneously.
   1248 	 *
   1249 	 * The value of each engine class is the same as specified in the
   1250 	 * I915_CONTEXT_SET_ENGINES parameter and via perf, i.e.
   1251 	 * I915_ENGINE_CLASS_RENDER, I915_ENGINE_CLASS_COPY, etc.
    1252 	 * Some hardware may have parallel
   1253 	 * execution engines, e.g. multiple media engines, which are
   1254 	 * mapped to the same class identifier and so are not separately
   1255 	 * reported for busyness.
   1256 	 *
   1257 	 * Caveat emptor:
   1258 	 * Only the boolean result of this query is reliable; that is whether
   1259 	 * the object is idle or busy. The report of which engines are busy
    1260 	 * should only be used as a heuristic.
   1261 	 */
   1262 	__u32 busy;
   1263 };
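
/*
 * Sketch: decoding the busy dword as documented above (illustrative
 * only).
 */
#if 0
static void example_decode_busy(__u32 busy)
{
	__u16 write_class = busy & 0xffff;	/* engine class + 1; 0 = none */
	__u16 read_mask = busy >> 16;		/* one bit per engine class */

	if (write_class != 0)
		/* written by engine class (write_class - 1) */;
	if (read_mask & (1u << I915_ENGINE_CLASS_RENDER))
		/* read by at least one render-class engine */;
}
#endif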

/**
 * I915_CACHING_NONE
 *
 * GPU access is not coherent with cpu caches. Default for machines without an
 * LLC.
 */
#define I915_CACHING_NONE		0
/**
 * I915_CACHING_CACHED
 *
 * GPU access is coherent with cpu caches and furthermore the data is cached in
 * last-level caches shared between cpu cores and the gpu GT. Default on
 * machines with HAS_LLC.
 */
#define I915_CACHING_CACHED		1
/**
 * I915_CACHING_DISPLAY
 *
 * Special GPU caching mode which is coherent with the scanout engines.
 * Transparently falls back to I915_CACHING_NONE on platforms where no special
 * cache mode (like write-through or gfdt flushing) is available. The kernel
 * automatically sets this mode when using a buffer as a scanout target.
 * Userspace can manually set this mode to avoid a costly stall and clflush in
 * the hotpath of drawing the first frame.
 */
#define I915_CACHING_DISPLAY		2

struct drm_i915_gem_caching {
	/**
	 * Handle of the buffer to set/get the caching level of. */
	__u32 handle;

	/**
	 * Caching level to apply or return value
	 *
	 * bits 0-15 are for generic caching control (i.e. the above defined
	 * values). bits 16-31 are reserved for platform-specific variations
	 * (e.g. l3$ caching on gen7). */
	__u32 caching;
};
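
/*
 * Example (illustrative sketch, not part of the uAPI): requesting LLC
 * caching for an object. Assumes libdrm's drmIoctl(), an open DRM fd and a
 * GEM handle; error handling elided.
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle = handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
 *
 * The current level can be read back with DRM_IOCTL_I915_GEM_GET_CACHING,
 * which fills in @caching on return.
 */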

#define I915_TILING_NONE	0
#define I915_TILING_X		1
#define I915_TILING_Y		2
#define I915_TILING_LAST	I915_TILING_Y

#define I915_BIT_6_SWIZZLE_NONE		0
#define I915_BIT_6_SWIZZLE_9		1
#define I915_BIT_6_SWIZZLE_9_10		2
#define I915_BIT_6_SWIZZLE_9_11		3
#define I915_BIT_6_SWIZZLE_9_10_11	4
/* Not seen by userland */
#define I915_BIT_6_SWIZZLE_UNKNOWN	5
/* Seen by userland. */
#define I915_BIT_6_SWIZZLE_9_17		6
#define I915_BIT_6_SWIZZLE_9_10_17	7

struct drm_i915_gem_set_tiling {
	/** Handle of the buffer to have its tiling state updated */
	__u32 handle;

	/**
	 * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
	 * I915_TILING_Y).
	 *
	 * This value is to be set on request, and will be updated by the
	 * kernel on successful return with the actual chosen tiling layout.
	 *
	 * The tiling mode may be demoted to I915_TILING_NONE when the system
	 * has bit 6 swizzling that can't be managed correctly by GEM.
	 *
	 * Buffer contents become undefined when changing tiling_mode.
	 */
	__u32 tiling_mode;

	/**
	 * Stride in bytes for the object when in I915_TILING_X or
	 * I915_TILING_Y.
	 */
	__u32 stride;

	/**
	 * Returned address bit 6 swizzling required for CPU access through
	 * mmap mapping.
	 */
	__u32 swizzle_mode;
};
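
/*
 * Example (illustrative sketch, not part of the uAPI): requesting X tiling
 * for a buffer used as a scanout surface. Assumes libdrm's drmIoctl(), an
 * open DRM fd, a GEM handle and a row stride in bytes; error handling elided.
 *
 *	struct drm_i915_gem_set_tiling st = {
 *		.handle = handle,
 *		.tiling_mode = I915_TILING_X,
 *		.stride = stride,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &st) == 0) {
 *		// st.tiling_mode holds the mode actually chosen by the
 *		// kernel and st.swizzle_mode the swizzling needed for CPU
 *		// access; both may differ from what was requested.
 *	}
 */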

struct drm_i915_gem_get_tiling {
	/** Handle of the buffer to get tiling state for. */
	__u32 handle;

	/**
	 * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
	 * I915_TILING_Y).
	 */
	__u32 tiling_mode;

	/**
	 * Returned address bit 6 swizzling required for CPU access through
	 * mmap mapping.
	 */
	__u32 swizzle_mode;

	/**
	 * Returned address bit 6 swizzling required for CPU access through
	 * mmap mapping whilst bound.
	 */
	__u32 phys_swizzle_mode;
};

struct drm_i915_gem_get_aperture {
	/** Total size of the aperture used by i915_gem_execbuffer, in bytes */
	__u64 aper_size;

	/**
	 * Available space in the aperture used by i915_gem_execbuffer, in
	 * bytes
	 */
	__u64 aper_available_size;
};

struct drm_i915_get_pipe_from_crtc_id {
	/** ID of CRTC being requested **/
	__u32 crtc_id;

	/** pipe of requested CRTC **/
	__u32 pipe;
};

#define I915_MADV_WILLNEED 0
#define I915_MADV_DONTNEED 1
#define __I915_MADV_PURGED 2 /* internal state */

struct drm_i915_gem_madvise {
	/** Handle of the buffer to change the backing store advice */
	__u32 handle;

	/* Advice: either the buffer will be needed again in the near future,
	 *         or won't be and could be discarded under memory pressure.
	 */
	__u32 madv;

	/** Whether the backing store still exists. */
	__u32 retained;
};
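
/*
 * Example (illustrative sketch, not part of the uAPI): marking a cached
 * buffer as purgeable, then reclaiming it. Assumes libdrm's drmIoctl(), an
 * open DRM fd and a GEM handle; error handling elided.
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = handle,
 *		.madv = I915_MADV_DONTNEED,  // may be discarded under pressure
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *
 *	// Later, before reusing the buffer:
 *	madv.madv = I915_MADV_WILLNEED;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	if (!madv.retained)
 *		;  // backing store was purged; contents must be regenerated
 */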

/* flags */
#define I915_OVERLAY_TYPE_MASK 		0xff
#define I915_OVERLAY_YUV_PLANAR 	0x01
#define I915_OVERLAY_YUV_PACKED 	0x02
#define I915_OVERLAY_RGB		0x03

#define I915_OVERLAY_DEPTH_MASK		0xff00
#define I915_OVERLAY_RGB24		0x1000
#define I915_OVERLAY_RGB16		0x2000
#define I915_OVERLAY_RGB15		0x3000
#define I915_OVERLAY_YUV422		0x0100
#define I915_OVERLAY_YUV411		0x0200
#define I915_OVERLAY_YUV420		0x0300
#define I915_OVERLAY_YUV410		0x0400

#define I915_OVERLAY_SWAP_MASK		0xff0000
#define I915_OVERLAY_NO_SWAP		0x000000
#define I915_OVERLAY_UV_SWAP		0x010000
#define I915_OVERLAY_Y_SWAP		0x020000
#define I915_OVERLAY_Y_AND_UV_SWAP	0x030000

#define I915_OVERLAY_FLAGS_MASK		0xff000000
#define I915_OVERLAY_ENABLE		0x01000000

struct drm_intel_overlay_put_image {
	/* various flags and src format description */
	__u32 flags;
	/* source picture description */
	__u32 bo_handle;
	/* stride values and offsets are in bytes, buffer relative */
	__u16 stride_Y; /* stride for packed formats */
	__u16 stride_UV;
	__u32 offset_Y; /* offset for packed formats */
	__u32 offset_U;
	__u32 offset_V;
	/* in pixels */
	__u16 src_width;
	__u16 src_height;
	/* to compensate the scaling factors for partially covered surfaces */
	__u16 src_scan_width;
	__u16 src_scan_height;
	/* output crtc description */
	__u32 crtc_id;
	__u16 dst_x;
	__u16 dst_y;
	__u16 dst_width;
	__u16 dst_height;
};

/* flags */
#define I915_OVERLAY_UPDATE_ATTRS	(1<<0)
#define I915_OVERLAY_UPDATE_GAMMA	(1<<1)
#define I915_OVERLAY_DISABLE_DEST_COLORKEY	(1<<2)
struct drm_intel_overlay_attrs {
	__u32 flags;
	__u32 color_key;
	__s32 brightness;
	__u32 contrast;
	__u32 saturation;
	__u32 gamma0;
	__u32 gamma1;
	__u32 gamma2;
	__u32 gamma3;
	__u32 gamma4;
	__u32 gamma5;
};

/*
 * Intel sprite handling
 *
 * Color keying works with a min/mask/max tuple.  Both source and destination
 * color keying is allowed.
 *
 * Source keying:
 * Sprite pixels within the min & max values, masked against the color channels
 * specified in the mask field, will be transparent.  All other pixels will
 * be displayed on top of the primary plane.  For RGB surfaces, only the min
 * and mask fields will be used; ranged compares are not allowed.
 *
 * Destination keying:
 * Primary plane pixels that match the min value, masked against the color
 * channels specified in the mask field, will be replaced by corresponding
 * pixels from the sprite plane.
 *
 * Note that source & destination keying are exclusive; only one can be
 * active on a given plane.
 */

#define I915_SET_COLORKEY_NONE		(1<<0) /* Deprecated. Instead set
						* flags==0 to disable colorkeying.
						*/
#define I915_SET_COLORKEY_DESTINATION	(1<<1)
#define I915_SET_COLORKEY_SOURCE	(1<<2)
struct drm_intel_sprite_colorkey {
	__u32 plane_id;
	__u32 min_value;
	__u32 channel_mask;
	__u32 max_value;
	__u32 flags;
};

struct drm_i915_gem_wait {
	/** Handle of BO we shall wait on */
	__u32 bo_handle;
	__u32 flags;
	/** Number of nanoseconds to wait; returns time remaining. */
	__s64 timeout_ns;
};
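
/*
 * Example (illustrative sketch, not part of the uAPI): waiting up to one
 * second for a buffer to become idle. Assumes libdrm's drmIoctl(), an open
 * DRM fd and a GEM handle; error handling elided.
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 1000000000,  // 1s; updated with time remaining
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) != 0)
 *		;  // errno == ETIME means the object was still busy
 */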

struct drm_i915_gem_context_create {
	__u32 ctx_id; /* output: id of new context */
	__u32 pad;
};

struct drm_i915_gem_context_create_ext {
	__u32 ctx_id; /* output: id of new context */
	__u32 flags;
#define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS	(1u << 0)
#define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE	(1u << 1)
#define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \
	(-(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE << 1))
	__u64 extensions;
};

struct drm_i915_gem_context_param {
	__u32 ctx_id;
	__u32 size;
	__u64 param;
#define I915_CONTEXT_PARAM_BAN_PERIOD	0x1
#define I915_CONTEXT_PARAM_NO_ZEROMAP	0x2
#define I915_CONTEXT_PARAM_GTT_SIZE	0x3
#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE	0x4
#define I915_CONTEXT_PARAM_BANNABLE	0x5
#define I915_CONTEXT_PARAM_PRIORITY	0x6
#define   I915_CONTEXT_MAX_USER_PRIORITY	1023 /* inclusive */
#define   I915_CONTEXT_DEFAULT_PRIORITY		0
#define   I915_CONTEXT_MIN_USER_PRIORITY	-1023 /* inclusive */
	/*
	 * When using the following param, value should be a pointer to
	 * drm_i915_gem_context_param_sseu.
	 */
#define I915_CONTEXT_PARAM_SSEU		0x7

/*
 * Not all clients may want to attempt automatic recovery of a context after
 * a hang (for example, some clients may only submit very small incremental
 * batches relying on the known logical state of previous batches, which will
 * never recover correctly and each attempt will hang), and so would prefer
 * that the context is banned forever instead.
 *
 * If set to false (0), after a reset, subsequent (and in flight) rendering
 * from this context is discarded, and the client will need to create a new
 * context to use instead.
 *
 * If set to true (1), the kernel will automatically attempt to recover the
 * context by skipping the hanging batch and executing the next batch starting
 * from the default context state (discarding the incomplete logical context
 * state lost due to the reset).
 *
 * On creation, all new contexts are marked as recoverable.
 */
#define I915_CONTEXT_PARAM_RECOVERABLE	0x8

	/*
	 * The id of the associated virtual memory address space (ppGTT) of
	 * this context. Can be retrieved and passed to another context
	 * (on the same fd) for both to use the same ppGTT and so share
	 * address layouts, and avoid reloading the page tables on context
	 * switches between themselves.
	 *
	 * See DRM_I915_GEM_VM_CREATE and DRM_I915_GEM_VM_DESTROY.
	 */
#define I915_CONTEXT_PARAM_VM		0x9

/*
 * I915_CONTEXT_PARAM_ENGINES:
 *
 * Bind this context to operate on this subset of available engines. Henceforth,
 * the I915_EXEC_RING selector for DRM_IOCTL_I915_GEM_EXECBUFFER2 operates as
 * an index into this array of engines; I915_EXEC_DEFAULT selecting engine[0]
 * and upwards. Slots 0...N are filled in using the specified (class, instance).
 * Use
 *	engine_class: I915_ENGINE_CLASS_INVALID,
 *	engine_instance: I915_ENGINE_CLASS_INVALID_NONE
 * to specify a gap in the array that can be filled in later, e.g. by a
 * virtual engine used for load balancing.
 *
 * Setting the number of engines bound to the context to 0, by passing a zero
 * sized argument, will revert to the default settings.
 *
 * See struct i915_context_param_engines.
 *
 * Extensions:
 *   i915_context_engines_load_balance (I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE)
 *   i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND)
 */
#define I915_CONTEXT_PARAM_ENGINES	0xa

/*
 * I915_CONTEXT_PARAM_PERSISTENCE:
 *
 * Allow the context and active rendering to survive the process until
 * completion. Persistence allows fire-and-forget clients to queue up a
 * bunch of work, hand the output over to a display server and then quit.
 * If the context is marked as not persistent, upon closing (either via
 * an explicit DRM_I915_GEM_CONTEXT_DESTROY or implicitly from file closure
 * or process termination), the context and any outstanding requests will be
 * cancelled (and exported fences for cancelled requests marked as -EIO).
 *
 * By default, new contexts allow persistence.
 */
#define I915_CONTEXT_PARAM_PERSISTENCE	0xb
/* Must be kept compact -- no holes and well documented */

	__u64 value;
};

/**
 * Context SSEU programming
 *
 * It may be necessary for either functional or performance reasons to
 * configure a context to run with a reduced number of SSEU (where SSEU
 * stands for Slice/Sub-slice/EU).
 *
 * This is done by submitting an SSEU configuration, using the below
 * struct drm_i915_gem_context_param_sseu, for every supported engine which
 * userspace intends to use.
 *
 * Not all GPUs or engines support this functionality, in which case an error
 * code of -ENODEV will be returned.
 *
 * Also, the flexibility of possible SSEU configuration permutations varies
 * between GPU generations and is further limited by software; requesting an
 * unsupported combination will return an error code of -EINVAL.
 *
 * NOTE: When perf/OA is active the context's SSEU configuration is ignored in
 * favour of a single global setting.
 */
struct drm_i915_gem_context_param_sseu {
	/*
	 * Engine class & instance to be configured or queried.
	 */
	struct i915_engine_class_instance engine;

	/*
	 * Unknown flags must be cleared to zero.
	 */
	__u32 flags;
#define I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX (1u << 0)

	/*
	 * Mask of slices to enable for the context. Valid values are a subset
	 * of the bitmask value returned for I915_PARAM_SLICE_MASK.
	 */
	__u64 slice_mask;

	/*
	 * Mask of subslices to enable for the context. Valid values are a
	 * subset of the bitmask value returned by I915_PARAM_SUBSLICE_MASK.
	 */
	__u64 subslice_mask;

	/*
	 * Minimum/Maximum number of EUs to enable per subslice for the
	 * context. min_eus_per_subslice must be less than or equal to
	 * max_eus_per_subslice.
	 */
	__u16 min_eus_per_subslice;
	__u16 max_eus_per_subslice;

	/*
	 * Unused for now. Must be cleared to zero.
	 */
	__u32 rsvd;
};
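
/*
 * Example (illustrative sketch, not part of the uAPI): reading the current
 * SSEU configuration of the render engine via the context-param interface.
 * Assumes libdrm's drmIoctl(), an open DRM fd and a context id; error
 * handling elided.
 *
 *	struct drm_i915_gem_context_param_sseu sseu = {
 *		.engine = {
 *			.engine_class = I915_ENGINE_CLASS_RENDER,
 *			.engine_instance = 0,
 *		},
 *	};
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_SSEU,
 *		.size = sizeof(sseu),
 *		.value = (__u64)(uintptr_t)&sseu,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
 *	// sseu.slice_mask etc. now reflect the current configuration; a
 *	// modified copy can be applied with DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM.
 */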

/*
 * i915_context_engines_load_balance:
 *
 * Enable load balancing across this set of engines.
 *
 * Into the I915_EXEC_DEFAULT slot [0], a virtual engine is created that,
 * when used, will proxy the execbuffer request onto one of the set of engines
 * in such a way as to distribute the load evenly across the set.
 *
 * The set of engines must be compatible (e.g. the same HW class) as they
 * will share the same logical GPU context and ring.
 *
 * To intermix rendering with the virtual engine and direct rendering onto
 * the backing engines (bypassing the load balancing proxy), the context must
 * be defined to use a single timeline for all engines.
 */
struct i915_context_engines_load_balance {
	struct i915_user_extension base;

	__u16 engine_index;
	__u16 num_siblings;
	__u32 flags; /* all undefined flags must be zero */

	__u64 mbz64; /* reserved for future use; must be zero */

	struct i915_engine_class_instance engines[0];
} __attribute__((packed));

#define I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(name__, N__) struct { \
	struct i915_user_extension base; \
	__u16 engine_index; \
	__u16 num_siblings; \
	__u32 flags; \
	__u64 mbz64; \
	struct i915_engine_class_instance engines[N__]; \
} __attribute__((packed)) name__

/*
 * i915_context_engines_bond:
 *
 * Construct bonded pairs for execution within a virtual engine.
 *
 * All engines are equal, but some are more equal than others. Given
 * the distribution of resources in the HW, it may be preferable to run
 * a request on a given subset of engines in parallel to a request on a
 * specific engine. We enable this selection of engines within a virtual
 * engine by specifying bonding pairs: for any given master engine, we will
 * only execute on one of the corresponding siblings within the virtual engine.
 *
 * To execute a request in parallel on the master engine and a sibling requires
 * coordination with an I915_EXEC_FENCE_SUBMIT.
 */
struct i915_context_engines_bond {
	struct i915_user_extension base;

	struct i915_engine_class_instance master;

	__u16 virtual_index; /* index of virtual engine in ctx->engines[] */
	__u16 num_bonds;

	__u64 flags; /* all undefined flags must be zero */
	__u64 mbz64[4]; /* reserved for future use; must be zero */

	struct i915_engine_class_instance engines[0];
} __attribute__((packed));

#define I915_DEFINE_CONTEXT_ENGINES_BOND(name__, N__) struct { \
	struct i915_user_extension base; \
	struct i915_engine_class_instance master; \
	__u16 virtual_index; \
	__u16 num_bonds; \
	__u64 flags; \
	__u64 mbz64[4]; \
	struct i915_engine_class_instance engines[N__]; \
} __attribute__((packed)) name__

struct i915_context_param_engines {
	__u64 extensions; /* linked chain of extension blocks, 0 terminates */
#define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */
#define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */
	struct i915_engine_class_instance engines[0];
} __attribute__((packed));

#define I915_DEFINE_CONTEXT_PARAM_ENGINES(name__, N__) struct { \
	__u64 extensions; \
	struct i915_engine_class_instance engines[N__]; \
} __attribute__((packed)) name__
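
/*
 * Example (illustrative sketch, not part of the uAPI): binding a context to
 * a render engine plus a copy engine using the sized-define helper above.
 * Assumes libdrm's drmIoctl(), an open DRM fd and a context id; error
 * handling elided.
 *
 *	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = {
 *		.extensions = 0,
 *		.engines = {
 *			{ I915_ENGINE_CLASS_RENDER, 0 },
 *			{ I915_ENGINE_CLASS_COPY, 0 },
 *		},
 *	};
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_ENGINES,
 *		.size = sizeof(engines),
 *		.value = (__u64)(uintptr_t)&engines,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 *	// After this, I915_EXEC_DEFAULT targets engines[0] (render) and
 *	// execbuffer ring index 1 targets engines[1] (copy).
 */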

struct drm_i915_gem_context_create_ext_setparam {
#define I915_CONTEXT_CREATE_EXT_SETPARAM 0
	struct i915_user_extension base;
	struct drm_i915_gem_context_param param;
};
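
/*
 * Example (illustrative sketch, not part of the uAPI): creating a context
 * with an elevated priority in one step, by chaining a SETPARAM extension
 * onto DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT. Assumes libdrm's drmIoctl()
 * and an open DRM fd; error handling elided.
 *
 *	struct drm_i915_gem_context_create_ext_setparam ext = {
 *		.base = { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
 *		.param = {
 *			.param = I915_CONTEXT_PARAM_PRIORITY,
 *			.value = 512,
 *		},
 *	};
 *	struct drm_i915_gem_context_create_ext create = {
 *		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
 *		.extensions = (__u64)(uintptr_t)&ext,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
 *	// create.ctx_id now holds the id of the new context.
 */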

struct drm_i915_gem_context_create_ext_clone {
#define I915_CONTEXT_CREATE_EXT_CLONE 1
	struct i915_user_extension base;
	__u32 clone_id;
	__u32 flags;
#define I915_CONTEXT_CLONE_ENGINES	(1u << 0)
#define I915_CONTEXT_CLONE_FLAGS	(1u << 1)
#define I915_CONTEXT_CLONE_SCHEDATTR	(1u << 2)
#define I915_CONTEXT_CLONE_SSEU		(1u << 3)
#define I915_CONTEXT_CLONE_TIMELINE	(1u << 4)
#define I915_CONTEXT_CLONE_VM		(1u << 5)
#define I915_CONTEXT_CLONE_UNKNOWN -(I915_CONTEXT_CLONE_VM << 1)
	__u64 rsvd;
};

struct drm_i915_gem_context_destroy {
	__u32 ctx_id;
	__u32 pad;
};

/*
 * DRM_I915_GEM_VM_CREATE -
 *
 * Create a new virtual memory address space (ppGTT) for use within a context
 * on the same file. Extensions can be provided to configure exactly how the
 * address space is set up upon creation.
 *
 * The id of the new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM
 * is returned in the outparam @vm_id.
 *
 * No flags are currently defined; all bits are reserved and must be zero.
 *
 * An extension chain may be provided, starting with @extensions, and
 * terminated by the @next_extension being 0. Currently, no extensions are
 * defined.
 *
 * DRM_I915_GEM_VM_DESTROY -
 *
 * Destroys a previously created VM id, specified in @vm_id.
 *
 * No extensions or flags are currently allowed, and so must be zero.
 */
struct drm_i915_gem_vm_control {
	__u64 extensions;
	__u32 flags;
	__u32 vm_id;
};
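
/*
 * Example (illustrative sketch, not part of the uAPI): creating a ppGTT and
 * sharing it between two contexts so they see the same address layout.
 * Assumes libdrm's drmIoctl(), an open DRM fd and two context ids; error
 * handling elided.
 *
 *	struct drm_i915_gem_vm_control vm = { 0 };
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &vm);  // vm.vm_id is set
 *
 *	struct drm_i915_gem_context_param p = {
 *		.param = I915_CONTEXT_PARAM_VM,
 *		.value = vm.vm_id,
 *	};
 *	p.ctx_id = ctx_a;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 *	p.ctx_id = ctx_b;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */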

struct drm_i915_reg_read {
	/*
	 * Register offset.
	 * For 64-bit wide registers where the upper 32 bits don't immediately
	 * follow the lower 32 bits, the offset of the lower 32 bits must
	 * be specified.
	 */
	__u64 offset;
#define I915_REG_READ_8B_WA (1ul << 0)

	__u64 val; /* Return value */
};

/* Known registers:
 *
 * Render engine timestamp - 0x2358 + 64bit - gen7+
 * - Note this register returns an invalid value if read using the default
 *   single-instruction 8-byte read; to work around that, pass the flag
 *   I915_REG_READ_8B_WA in the offset field.
 *
 */
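
/*
 * Example (illustrative sketch, not part of the uAPI): reading the render
 * engine timestamp with the 8-byte-read workaround. Assumes libdrm's
 * drmIoctl() and an open DRM fd; error handling elided.
 *
 *	struct drm_i915_reg_read rr = {
 *		.offset = 0x2358 | I915_REG_READ_8B_WA,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &rr) == 0)
 *		;  // rr.val holds the 64-bit timestamp
 */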

struct drm_i915_reset_stats {
	__u32 ctx_id;
	__u32 flags;

	/* All resets since boot/module reload, for all contexts */
	__u32 reset_count;

	/* Number of batches lost when active in GPU, for this context */
	__u32 batch_active;

	/* Number of batches lost pending for execution, for this context */
	__u32 batch_pending;

	__u32 pad;
};

struct drm_i915_gem_userptr {
	__u64 user_ptr;
	__u64 user_size;
	__u32 flags;
#define I915_USERPTR_READ_ONLY 0x1
#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
	/**
	 * Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;
};
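
/*
 * Example (illustrative sketch, not part of the uAPI): wrapping anonymous
 * page-aligned memory in a GEM object. Assumes libdrm's drmIoctl(), an open
 * DRM fd and a 4 KiB page size; error handling elided.
 *
 *	size_t sz = 64 * 4096;
 *	void *mem = NULL;
 *
 *	posix_memalign(&mem, 4096, sz);  // pointer and size must be page-aligned
 *
 *	struct drm_i915_gem_userptr up = {
 *		.user_ptr = (__u64)(uintptr_t)mem,
 *		.user_size = sz,
 *		.flags = 0,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &up) == 0)
 *		;  // up.handle is a GEM handle backed by `mem`
 */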

enum drm_i915_oa_format {
	I915_OA_FORMAT_A13 = 1,	    /* HSW only */
	I915_OA_FORMAT_A29,	    /* HSW only */
	I915_OA_FORMAT_A13_B8_C8,   /* HSW only */
	I915_OA_FORMAT_B4_C8,	    /* HSW only */
	I915_OA_FORMAT_A45_B8_C8,   /* HSW only */
	I915_OA_FORMAT_B4_C8_A16,   /* HSW only */
	I915_OA_FORMAT_C4_B8,	    /* HSW+ */

	/* Gen8+ */
	I915_OA_FORMAT_A12,
	I915_OA_FORMAT_A12_B8_C8,
	I915_OA_FORMAT_A32u40_A4u32_B8_C8,

	I915_OA_FORMAT_MAX	    /* non-ABI */
};

enum drm_i915_perf_property_id {
	/**
	 * Open the stream for a specific context handle (as used with
	 * execbuffer2). A stream opened for a specific context this way
	 * won't typically require root privileges.
	 *
	 * This property is available in perf revision 1.
	 */
	DRM_I915_PERF_PROP_CTX_HANDLE = 1,

	/**
	 * A value of 1 requests the inclusion of raw OA unit reports as
	 * part of stream samples.
	 *
	 * This property is available in perf revision 1.
	 */
	DRM_I915_PERF_PROP_SAMPLE_OA,

	/**
	 * The value specifies which set of OA unit metrics should be
	 * configured, defining the contents of any OA unit reports.
	 *
	 * This property is available in perf revision 1.
	 */
	DRM_I915_PERF_PROP_OA_METRICS_SET,

	/**
	 * The value specifies the size and layout of OA unit reports.
	 *
	 * This property is available in perf revision 1.
	 */
	DRM_I915_PERF_PROP_OA_FORMAT,

	/**
	 * Specifying this property implicitly requests periodic OA unit
	 * sampling and (at least on Haswell) the sampling frequency is derived
	 * from this exponent as follows:
	 *
	 *   80ns * 2^(period_exponent + 1)
	 *
	 * This property is available in perf revision 1.
	 */
	DRM_I915_PERF_PROP_OA_EXPONENT,

	/**
	 * Specifying this property is only valid when specifying a context to
	 * filter with DRM_I915_PERF_PROP_CTX_HANDLE. Specifying this property
	 * will hold preemption of the particular context we want to gather
	 * performance data about. The execbuf2 submissions must include a
	 * drm_i915_gem_execbuffer_ext_perf parameter for this to apply.
	 *
	 * This property is available in perf revision 3.
	 */
	DRM_I915_PERF_PROP_HOLD_PREEMPTION,

	DRM_I915_PERF_PROP_MAX /* non-ABI */
};

struct drm_i915_perf_open_param {
	__u32 flags;
#define I915_PERF_FLAG_FD_CLOEXEC	(1<<0)
#define I915_PERF_FLAG_FD_NONBLOCK	(1<<1)
#define I915_PERF_FLAG_DISABLED		(1<<2)

	/** The number of u64 (id, value) pairs */
	__u32 num_properties;

	/**
	 * Pointer to array of u64 (id, value) pairs configuring the stream
	 * to open.
	 */
	__u64 properties_ptr;
};
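
/*
 * Example (illustrative sketch, not part of the uAPI): opening an OA perf
 * stream with periodic sampling. Assumes libdrm's drmIoctl(), an open DRM fd
 * and a metrics set id `metrics_set` obtained elsewhere (e.g. via sysfs);
 * error handling elided.
 *
 *	__u64 properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = sizeof(properties) / (2 * sizeof(__u64)),
 *		.properties_ptr = (__u64)(uintptr_t)properties,
 *	};
 *
 *	int stream_fd = drmIoctl(fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *	// On success stream_fd is a new fd; samples are read(2) from it.
 */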

/**
 * Enable data capture for a stream that was either opened in a disabled state
 * via I915_PERF_FLAG_DISABLED or was later disabled via
 * I915_PERF_IOCTL_DISABLE.
 *
 * It is intended to be cheaper to disable and enable a stream than it may be
 * to close and re-open a stream with the same configuration.
 *
 * It's undefined whether any pending data for the stream will be lost.
 *
 * This ioctl is available in perf revision 1.
 */
#define I915_PERF_IOCTL_ENABLE	_IO('i', 0x0)

/**
 * Disable data capture for a stream.
 *
 * It is an error to try and read a stream that is disabled.
 *
 * This ioctl is available in perf revision 1.
 */
#define I915_PERF_IOCTL_DISABLE	_IO('i', 0x1)

/**
 * Change metrics_set captured by a stream.
 *
 * If the stream is bound to a specific context, the configuration change
 * will be performed inline with that context such that it takes effect before
 * the next execbuf submission.
 *
 * Returns the previously bound metrics set id, or a negative error code.
 *
 * This ioctl is available in perf revision 2.
 */
#define I915_PERF_IOCTL_CONFIG	_IO('i', 0x2)

/**
 * Common to all i915 perf records
 */
struct drm_i915_perf_record_header {
	__u32 type;
	__u16 pad;
	__u16 size;
};

enum drm_i915_perf_record_type {

	/**
	 * Samples are the work horse record type whose contents are extensible
	 * and defined when opening an i915 perf stream based on the given
	 * properties.
	 *
	 * Boolean properties following the naming convention
	 * DRM_I915_PERF_SAMPLE_xyz_PROP request the inclusion of 'xyz' data in
	 * every sample.
	 *
	 * The order of these sample properties given by userspace has no
	 * effect on the ordering of data within a sample. The order is
	 * documented here.
	 *
	 * struct {
	 *     struct drm_i915_perf_record_header header;
	 *
	 *     { u32 oa_report[]; } && DRM_I915_PERF_PROP_SAMPLE_OA
	 * };
	 */
	DRM_I915_PERF_RECORD_SAMPLE = 1,

	/*
	 * Indicates that one or more OA reports were not written by the
	 * hardware. This can happen for example if an MI_REPORT_PERF_COUNT
	 * command collides with periodic sampling - which would be more likely
	 * at higher sampling frequencies.
	 */
	DRM_I915_PERF_RECORD_OA_REPORT_LOST = 2,

	/**
	 * An error occurred that resulted in all pending OA reports being lost.
	 */
	DRM_I915_PERF_RECORD_OA_BUFFER_LOST = 3,

	DRM_I915_PERF_RECORD_MAX /* non-ABI */
};
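
/*
 * Example (illustrative sketch, not part of the uAPI): walking the records
 * returned by a read(2) on a perf stream fd. Assumes `buf` holds `n` bytes
 * read from the stream; error handling elided.
 *
 *	const __u8 *p = buf, *end = buf + n;
 *
 *	while (p + sizeof(struct drm_i915_perf_record_header) <= end) {
 *		const struct drm_i915_perf_record_header *h =
 *			(const struct drm_i915_perf_record_header *)p;
 *
 *		if (h->type == DRM_I915_PERF_RECORD_SAMPLE)
 *			;  // payload follows the header, h->size bytes total
 *
 *		p += h->size;
 *	}
 */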

/**
 * Structure to upload perf dynamic configuration into the kernel.
 */
struct drm_i915_perf_oa_config {
	/** String formatted like "%08x-%04x-%04x-%04x-%012x" */
	char uuid[36];

	__u32 n_mux_regs;
	__u32 n_boolean_regs;
	__u32 n_flex_regs;

	/*
	 * These fields are pointers to tuples of u32 values (register address,
	 * value). For example the expected length of the buffer pointed by
	 * mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
	 */
	__u64 mux_regs_ptr;
	__u64 boolean_regs_ptr;
	__u64 flex_regs_ptr;
};

struct drm_i915_query_item {
	__u64 query_id;
#define DRM_I915_QUERY_TOPOLOGY_INFO    1
#define DRM_I915_QUERY_ENGINE_INFO	2
#define DRM_I915_QUERY_PERF_CONFIG      3
/* Must be kept compact -- no holes and well documented */

	/*
	 * When set to zero by userspace, this is filled with the size of the
	 * data to be written at the data_ptr pointer. The kernel sets this
	 * value to a negative value to signal an error on a particular query
	 * item.
	 */
	__s32 length;

	/*
	 * When query_id == DRM_I915_QUERY_TOPOLOGY_INFO, must be 0.
	 *
	 * When query_id == DRM_I915_QUERY_PERF_CONFIG, must be one of the
	 * following:
	 *         - DRM_I915_QUERY_PERF_CONFIG_LIST
	 *         - DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
	 *         - DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID
	 */
	__u32 flags;
#define DRM_I915_QUERY_PERF_CONFIG_LIST          1
#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID 2
#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID   3

	/*
	 * Data will be written at the location pointed by data_ptr when the
	 * value of length matches the length of the data to be written by the
	 * kernel.
	 */
	__u64 data_ptr;
};

struct drm_i915_query {
	__u32 num_items;

	/*
	 * Unused for now. Must be cleared to zero.
	 */
	__u32 flags;

	/*
	 * This points to an array of num_items drm_i915_query_item structures.
	 */
	__u64 items_ptr;
};
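
/*
 * Example (illustrative sketch, not part of the uAPI): the usual two-pass
 * query pattern, here enumerating engines. The first call with length == 0
 * returns the required buffer size; the second fills the buffer. Assumes
 * libdrm's drmIoctl() and an open DRM fd; error handling elided.
 *
 *	struct drm_i915_query_item item = {
 *		.query_id = DRM_I915_QUERY_ENGINE_INFO,
 *	};
 *	struct drm_i915_query q = {
 *		.num_items = 1,
 *		.items_ptr = (__u64)(uintptr_t)&item,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_QUERY, &q);      // item.length = size needed
 *
 *	struct drm_i915_query_engine_info *info = calloc(1, item.length);
 *	item.data_ptr = (__u64)(uintptr_t)info;
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_QUERY, &q);      // buffer is filled in
 *	// info->num_engines entries follow in info->engines[].
 */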

/*
 * Data written by the kernel with query DRM_I915_QUERY_TOPOLOGY_INFO:
 *
 * data: contains the 3 pieces of information:
 *
 * - the slice mask with one bit per slice telling whether a slice is
 *   available. The availability of slice X can be queried with the following
 *   formula:
 *
 *           (data[X / 8] >> (X % 8)) & 1
 *
 * - the subslice mask for each slice with one bit per subslice telling
 *   whether a subslice is available. Gen12 has dual-subslices, which are
 *   similar to two gen11 subslices. For gen12, this array represents dual-
 *   subslices. The availability of subslice Y in slice X can be queried
 *   with the following formula:
 *
 *           (data[subslice_offset +
 *                 X * subslice_stride +
 *                 Y / 8] >> (Y % 8)) & 1
 *
 * - the EU mask for each subslice in each slice with one bit per EU telling
 *   whether an EU is available. The availability of EU Z in subslice Y in
 *   slice X can be queried with the following formula:
 *
 *           (data[eu_offset +
 *                 (X * max_subslices + Y) * eu_stride +
 *                 Z / 8] >> (Z % 8)) & 1
 */
struct drm_i915_query_topology_info {
	/*
	 * Unused for now. Must be cleared to zero.
	 */
	__u16 flags;

	__u16 max_slices;
	__u16 max_subslices;
	__u16 max_eus_per_subslice;

	/*
	 * Offset in data[] at which the subslice masks are stored.
	 */
	__u16 subslice_offset;

	/*
	 * Stride at which each of the subslice masks for each slice are
	 * stored.
	 */
	__u16 subslice_stride;

	/*
	 * Offset in data[] at which the EU masks are stored.
	 */
	__u16 eu_offset;

	/*
	 * Stride at which each of the EU masks for each subslice are stored.
	 */
	__u16 eu_stride;

	__u8 data[];
};
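
/*
 * Example (illustrative sketch, not part of the uAPI): applying the three
 * formulas above to a struct drm_i915_query_topology_info fetched with the
 * two-pass query pattern. `topo` is assumed to point at the filled buffer.
 *
 *	int slice_avail(const struct drm_i915_query_topology_info *topo, int x)
 *	{
 *		return (topo->data[x / 8] >> (x % 8)) & 1;
 *	}
 *
 *	int subslice_avail(const struct drm_i915_query_topology_info *topo,
 *			   int x, int y)
 *	{
 *		return (topo->data[topo->subslice_offset +
 *				   x * topo->subslice_stride +
 *				   y / 8] >> (y % 8)) & 1;
 *	}
 *
 *	int eu_avail(const struct drm_i915_query_topology_info *topo,
 *		     int x, int y, int z)
 *	{
 *		return (topo->data[topo->eu_offset +
 *				   (x * topo->max_subslices + y) * topo->eu_stride +
 *				   z / 8] >> (z % 8)) & 1;
 *	}
 */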

/**
 * struct drm_i915_engine_info
 *
 * Describes one engine and its capabilities as known to the driver.
 */
struct drm_i915_engine_info {
	/** Engine class and instance. */
	struct i915_engine_class_instance engine;

	/** Reserved field. */
	__u32 rsvd0;

	/** Engine flags. */
	__u64 flags;

	/** Capabilities of this engine. */
	__u64 capabilities;
#define I915_VIDEO_CLASS_CAPABILITY_HEVC		(1 << 0)
#define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC	(1 << 1)

	/** Reserved fields. */
	__u64 rsvd1[4];
};

/**
 * struct drm_i915_query_engine_info
 *
 * Engine info query enumerates all engines known to the driver by filling in
 * an array of struct drm_i915_engine_info structures.
 */
struct drm_i915_query_engine_info {
	/** Number of struct drm_i915_engine_info structs following. */
	__u32 num_engines;

	/** MBZ */
	__u32 rsvd[3];

	/** Marker for drm_i915_engine_info structures. */
	struct drm_i915_engine_info engines[];
};

/*
 * Data written by the kernel with query DRM_I915_QUERY_PERF_CONFIG.
 */
struct drm_i915_query_perf_config {
	union {
		/*
		 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 sets
		 * this field to the number of configurations available.
		 */
		__u64 n_configs;

		/*
		 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID,
		 * i915 will use the value in this field as a configuration
		 * identifier to decide what data to write into data[].
		 */
		__u64 config;

		/*
		 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID,
		 * i915 will use the value in this field as a configuration
		 * identifier to decide what data to write into data[].
		 *
		 * String formatted like "%08x-%04x-%04x-%04x-%012x"
		 */
		char uuid[36];
	};

	/*
	 * Unused for now. Must be cleared to zero.
	 */
	__u32 flags;

	/*
	 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 will
	 * write an array of __u64 configuration identifiers.
	 *
	 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID or
	 * DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID, i915 will write a struct
	 * drm_i915_perf_oa_config. If the following fields of
	 * drm_i915_perf_oa_config are not set to 0, i915 will write into the
	 * associated pointers the values submitted when the configuration was
	 * created:
	 *
	 *         - n_mux_regs
	 *         - n_boolean_regs
	 *         - n_flex_regs
	 */
	__u8 data[];
};

#if defined(__cplusplus)
}
#endif

#endif /* _UAPI_I915_DRM_H_ */