/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRM_H_
#define _I915_DRM_H_

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

/* Please note that modifications to all structs defined here are
 * subject to backwards-compatibility constraints.
 */

/**
 * DOC: uevents generated by i915 on its device node
 *
 * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
 *	event from the GPU L3 cache. Additional information supplied is ROW,
 *	BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
 *	track of these events, and if a specific cacheline seems to have a
 *	persistent error, remap it with the L3 remapping tool supplied in
 *	intel-gpu-tools.  The value supplied with the event is always 1.
 *
 * I915_ERROR_UEVENT - Generated upon error detection, currently only via
 *	hangcheck. The error detection event is a good indicator of when things
 *	began to go badly. The value supplied with the event is a 1 upon error
 *	detection, and a 0 upon reset completion, signifying that no more errors
 *	exist. NOTE: Disabling hangcheck or reset via module parameter will
 *	cause the related events not to be seen.
 *
 * I915_RESET_UEVENT - Event is generated just before an attempt to reset the
 *	GPU. The value supplied with the event is always 1. NOTE: Disabling
 *	reset via module parameter will cause this event not to be seen.
 */
#define I915_L3_PARITY_UEVENT		"L3_PARITY_ERROR"
#define I915_ERROR_UEVENT		"ERROR"
#define I915_RESET_UEVENT		"RESET"

/**
 * struct i915_user_extension - Base class for defining a chain of extensions
 *
 * Many interfaces need to grow over time. In most cases we can simply
 * extend the struct and have userspace pass in more data. Another option,
 * as demonstrated by Vulkan's approach to providing extensions for forward
 * and backward compatibility, is to use a list of optional structs to
 * provide those extra details.
 *
 * The key advantage to using an extension chain is that it allows us to
 * redefine the interface more easily than an ever growing struct of
 * increasing complexity, and for large parts of that interface to be
 * entirely optional. The downside is more pointer chasing; chasing across
 * the boundary with pointers encapsulated inside u64.
 *
 * Example chaining:
 *
 * .. code-block:: C
 *
 *	struct i915_user_extension ext3 {
 *		.next_extension = 0, // end
 *		.name = ...,
 *	};
 *	struct i915_user_extension ext2 {
 *		.next_extension = (uintptr_t)&ext3,
 *		.name = ...,
 *	};
 *	struct i915_user_extension ext1 {
 *		.next_extension = (uintptr_t)&ext2,
 *		.name = ...,
 *	};
 *
 * Typically the struct i915_user_extension would be embedded in some uAPI
 * struct, and in this case we would feed it the head of the chain (i.e. ext1),
 * which would then apply all of the above extensions.
 *
 */
struct i915_user_extension {
	/**
	 * @next_extension:
	 *
	 * Pointer to the next struct i915_user_extension, or zero if the end.
	 */
	__u64 next_extension;
	/**
	 * @name: Name of the extension.
	 *
	 * Note that the name here is just some integer.
	 *
	 * Also note that the name space for this is not global for the whole
	 * driver, but rather its scope/meaning is limited to the specific piece
	 * of uAPI which has embedded the struct i915_user_extension.
	 */
	__u32 name;
	/**
	 * @flags: MBZ
	 *
	 * All undefined bits must be zero.
	 */
	__u32 flags;
	/**
	 * @rsvd: MBZ
	 *
	 * Reserved for future use; must be zero.
	 */
	__u32 rsvd[4];
};
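
/*
 * Example embedding (an illustrative sketch, not itself part of the uAPI):
 * an extension struct derived from i915_user_extension is fed to an
 * extension-aware ioctl argument through its extensions field, here assuming
 * struct drm_i915_gem_create_ext (declared for DRM_IOCTL_I915_GEM_CREATE_EXT
 * below); the extension name and payload field are hypothetical.
 *
 * .. code-block:: C
 *
 *	struct hypothetical_ext {
 *		struct i915_user_extension base;
 *		__u64 payload;
 *	} ext = {
 *		.base = {
 *			.next_extension = 0, // end of the chain
 *			.name = HYPOTHETICAL_EXT_NAME, // hypothetical
 *		},
 *	};
 *	struct drm_i915_gem_create_ext create = {
 *		.size = 4096,
 *		.extensions = (uintptr_t)&ext, // head of the chain
 *	};
 */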

/*
 * MOCS indexes used for GPU surfaces, defining the cacheability of the
 * surface data and the coherency for this data wrt. CPU vs. GPU accesses.
 */
enum i915_mocs_table_index {
	/*
	 * Not cached anywhere, coherency between CPU and GPU accesses is
	 * guaranteed.
	 */
	I915_MOCS_UNCACHED,
	/*
	 * Cacheability and coherency controlled by the kernel automatically
	 * based on the DRM_I915_GEM_SET_CACHING IOCTL setting and the current
	 * usage of the surface (used for display scanout or not).
	 */
	I915_MOCS_PTE,
	/*
	 * Cached in all GPU caches available on the platform.
	 * Coherency between CPU and GPU accesses to the surface is not
	 * guaranteed without extra synchronization.
	 */
	I915_MOCS_CACHED,
};

/*
 * Different engines serve different roles, and there may be more than one
 * engine serving each role. enum drm_i915_gem_engine_class provides a
 * classification of the role of the engine, which may be used when requesting
 * operations to be performed on a certain subset of engines, or for providing
 * information about that group.
 */
enum drm_i915_gem_engine_class {
	I915_ENGINE_CLASS_RENDER	= 0,
	I915_ENGINE_CLASS_COPY		= 1,
	I915_ENGINE_CLASS_VIDEO		= 2,
	I915_ENGINE_CLASS_VIDEO_ENHANCE	= 3,

	/* should be kept compact */

	I915_ENGINE_CLASS_INVALID	= -1
};

/*
 * There may be more than one engine fulfilling any role within the system.
 * Each engine of a class is given a unique instance number and therefore
 * any engine can be specified by its class:instance tuple. APIs that allow
 * access to any engine in the system will use struct i915_engine_class_instance
 * for this identification.
 */
struct i915_engine_class_instance {
	__u16 engine_class; /* see enum drm_i915_gem_engine_class */
	__u16 engine_instance;
#define I915_ENGINE_CLASS_INVALID_NONE -1
#define I915_ENGINE_CLASS_INVALID_VIRTUAL -2
};
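
/*
 * Example (illustrative only): selecting the second video engine. Instance
 * numbering is platform specific; this merely sketches how the tuple is
 * filled in.
 *
 * .. code-block:: C
 *
 *	struct i915_engine_class_instance vcs1 = {
 *		.engine_class = I915_ENGINE_CLASS_VIDEO,
 *		.engine_instance = 1,
 *	};
 */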

/**
 * DOC: perf_events exposed by i915 through /sys/bus/event_sources/drivers/i915
 *
 */

enum drm_i915_pmu_engine_sample {
	I915_SAMPLE_BUSY = 0,
	I915_SAMPLE_WAIT = 1,
	I915_SAMPLE_SEMA = 2
};

#define I915_PMU_SAMPLE_BITS (4)
#define I915_PMU_SAMPLE_MASK (0xf)
#define I915_PMU_SAMPLE_INSTANCE_BITS (8)
#define I915_PMU_CLASS_SHIFT \
	(I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS)

#define __I915_PMU_ENGINE(class, instance, sample) \
	((class) << I915_PMU_CLASS_SHIFT | \
	(instance) << I915_PMU_SAMPLE_BITS | \
	(sample))

#define I915_PMU_ENGINE_BUSY(class, instance) \
	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY)

#define I915_PMU_ENGINE_WAIT(class, instance) \
	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT)

#define I915_PMU_ENGINE_SEMA(class, instance) \
	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)

#define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x))

#define I915_PMU_ACTUAL_FREQUENCY	__I915_PMU_OTHER(0)
#define I915_PMU_REQUESTED_FREQUENCY	__I915_PMU_OTHER(1)
#define I915_PMU_INTERRUPTS		__I915_PMU_OTHER(2)
#define I915_PMU_RC6_RESIDENCY		__I915_PMU_OTHER(3)
#define I915_PMU_SOFTWARE_GT_AWAKE_TIME	__I915_PMU_OTHER(4)

#define I915_PMU_LAST /* Deprecated - do not use */ I915_PMU_RC6_RESIDENCY
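
/*
 * Example (a minimal sketch, error handling omitted): opening a perf counter
 * for render engine busyness. The i915 PMU is an uncore PMU, so the event is
 * opened against a CPU rather than a task, and i915_pmu_type stands for the
 * dynamic event source type, assumed to have been read beforehand from the
 * "type" file in the i915 sysfs directory named above.
 *
 * .. code-block:: C
 *
 *	struct perf_event_attr attr = {
 *		.type = i915_pmu_type, // dynamic type read from sysfs
 *		.size = sizeof(attr),
 *		.config = I915_PMU_ENGINE_BUSY(I915_ENGINE_CLASS_RENDER, 0),
 *	};
 *	// args: attr, pid = -1 (system wide), cpu = 0, group = -1, flags = 0
 *	int fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
 *	// a read() of a u64 from fd then yields the accumulated busy time
 */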

/* Each region is a minimum of 16k, and there are at most 255 of them.
 */
#define I915_NR_TEX_REGIONS 255	/* table size 2k - maximum due to use
				 * of chars for next/prev indices */
#define I915_LOG_MIN_TEX_REGION_SIZE 14

typedef struct _drm_i915_init {
	enum {
		I915_INIT_DMA = 0x01,
		I915_CLEANUP_DMA = 0x02,
		I915_RESUME_DMA = 0x03
	} func;
	unsigned int mmio_offset;
	int sarea_priv_offset;
	unsigned int ring_start;
	unsigned int ring_end;
	unsigned int ring_size;
	unsigned int front_offset;
	unsigned int back_offset;
	unsigned int depth_offset;
	unsigned int w;
	unsigned int h;
	unsigned int pitch;
	unsigned int pitch_bits;
	unsigned int back_pitch;
	unsigned int depth_pitch;
	unsigned int cpp;
	unsigned int chipset;
} drm_i915_init_t;

typedef struct _drm_i915_sarea {
	struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
	int last_upload;	/* last time texture was uploaded */
	int last_enqueue;	/* last time a buffer was enqueued */
	int last_dispatch;	/* age of the most recently dispatched buffer */
	int ctxOwner;		/* last context to upload state */
	int texAge;
	int pf_enabled;		/* is pageflipping allowed? */
	int pf_active;
	int pf_current_page;	/* which buffer is being displayed? */
	int perf_boxes;		/* performance boxes to be displayed */
	int width, height;      /* screen size in pixels */

	drm_handle_t front_handle;
	int front_offset;
	int front_size;

	drm_handle_t back_handle;
	int back_offset;
	int back_size;

	drm_handle_t depth_handle;
	int depth_offset;
	int depth_size;

	drm_handle_t tex_handle;
	int tex_offset;
	int tex_size;
	int log_tex_granularity;
	int pitch;
	int rotation;           /* 0, 90, 180 or 270 */
	int rotated_offset;
	int rotated_size;
	int rotated_pitch;
	int virtualX, virtualY;

	unsigned int front_tiled;
	unsigned int back_tiled;
	unsigned int depth_tiled;
	unsigned int rotated_tiled;
	unsigned int rotated2_tiled;

	int pipeA_x;
	int pipeA_y;
	int pipeA_w;
	int pipeA_h;
	int pipeB_x;
	int pipeB_y;
	int pipeB_w;
	int pipeB_h;

	/* fill out some space for old userspace triple buffer */
	drm_handle_t unused_handle;
	__u32 unused1, unused2, unused3;

	/* buffer object handles for static buffers. May change
	 * over the lifetime of the client.
	 */
	__u32 front_bo_handle;
	__u32 back_bo_handle;
	__u32 unused_bo_handle;
	__u32 depth_bo_handle;

} drm_i915_sarea_t;

/* due to userspace building against these headers we need some compat here */
#define planeA_x pipeA_x
#define planeA_y pipeA_y
#define planeA_w pipeA_w
#define planeA_h pipeA_h
#define planeB_x pipeB_x
#define planeB_y pipeB_y
#define planeB_w pipeB_w
#define planeB_h pipeB_h

/* Flags for perf_boxes
 */
#define I915_BOX_RING_EMPTY    0x1
#define I915_BOX_FLIP          0x2
#define I915_BOX_WAIT          0x4
#define I915_BOX_TEXTURE_LOAD  0x8
#define I915_BOX_LOST_CONTEXT  0x10

/*
 * i915 specific ioctls.
 *
 * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END), i.e.
 * [0x40, 0xa0) (0xa0 is excluded). The numbers below are defined as offsets
 * against DRM_COMMAND_BASE and should be between [0x0, 0x60).
 */
#define DRM_I915_INIT		0x00
#define DRM_I915_FLUSH		0x01
#define DRM_I915_FLIP		0x02
#define DRM_I915_BATCHBUFFER	0x03
#define DRM_I915_IRQ_EMIT	0x04
#define DRM_I915_IRQ_WAIT	0x05
#define DRM_I915_GETPARAM	0x06
#define DRM_I915_SETPARAM	0x07
#define DRM_I915_ALLOC		0x08
#define DRM_I915_FREE		0x09
#define DRM_I915_INIT_HEAP	0x0a
#define DRM_I915_CMDBUFFER	0x0b
#define DRM_I915_DESTROY_HEAP	0x0c
#define DRM_I915_SET_VBLANK_PIPE	0x0d
#define DRM_I915_GET_VBLANK_PIPE	0x0e
#define DRM_I915_VBLANK_SWAP	0x0f
#define DRM_I915_HWS_ADDR	0x11
#define DRM_I915_GEM_INIT	0x13
#define DRM_I915_GEM_EXECBUFFER	0x14
#define DRM_I915_GEM_PIN	0x15
#define DRM_I915_GEM_UNPIN	0x16
#define DRM_I915_GEM_BUSY	0x17
#define DRM_I915_GEM_THROTTLE	0x18
#define DRM_I915_GEM_ENTERVT	0x19
#define DRM_I915_GEM_LEAVEVT	0x1a
#define DRM_I915_GEM_CREATE	0x1b
#define DRM_I915_GEM_PREAD	0x1c
#define DRM_I915_GEM_PWRITE	0x1d
#define DRM_I915_GEM_MMAP	0x1e
#define DRM_I915_GEM_SET_DOMAIN	0x1f
#define DRM_I915_GEM_SW_FINISH	0x20
#define DRM_I915_GEM_SET_TILING	0x21
#define DRM_I915_GEM_GET_TILING	0x22
#define DRM_I915_GEM_GET_APERTURE 0x23
#define DRM_I915_GEM_MMAP_GTT	0x24
#define DRM_I915_GET_PIPE_FROM_CRTC_ID	0x25
#define DRM_I915_GEM_MADVISE	0x26
#define DRM_I915_OVERLAY_PUT_IMAGE	0x27
#define DRM_I915_OVERLAY_ATTRS	0x28
#define DRM_I915_GEM_EXECBUFFER2	0x29
#define DRM_I915_GEM_EXECBUFFER2_WR	DRM_I915_GEM_EXECBUFFER2
#define DRM_I915_GET_SPRITE_COLORKEY	0x2a
#define DRM_I915_SET_SPRITE_COLORKEY	0x2b
#define DRM_I915_GEM_WAIT	0x2c
#define DRM_I915_GEM_CONTEXT_CREATE	0x2d
#define DRM_I915_GEM_CONTEXT_DESTROY	0x2e
#define DRM_I915_GEM_SET_CACHING	0x2f
#define DRM_I915_GEM_GET_CACHING	0x30
#define DRM_I915_REG_READ		0x31
#define DRM_I915_GET_RESET_STATS	0x32
#define DRM_I915_GEM_USERPTR		0x33
#define DRM_I915_GEM_CONTEXT_GETPARAM	0x34
#define DRM_I915_GEM_CONTEXT_SETPARAM	0x35
#define DRM_I915_PERF_OPEN		0x36
#define DRM_I915_PERF_ADD_CONFIG	0x37
#define DRM_I915_PERF_REMOVE_CONFIG	0x38
#define DRM_I915_QUERY			0x39
#define DRM_I915_GEM_VM_CREATE		0x3a
#define DRM_I915_GEM_VM_DESTROY		0x3b
#define DRM_I915_GEM_CREATE_EXT		0x3c
/* Must be kept compact -- no holes */

#define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
#define DRM_IOCTL_I915_FLIP		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
#define DRM_IOCTL_I915_BATCHBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
#define DRM_IOCTL_I915_IRQ_EMIT         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
#define DRM_IOCTL_I915_IRQ_WAIT         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
#define DRM_IOCTL_I915_GETPARAM         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
#define DRM_IOCTL_I915_SETPARAM         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
#define DRM_IOCTL_I915_ALLOC            DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
#define DRM_IOCTL_I915_FREE             DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
#define DRM_IOCTL_I915_INIT_HEAP        DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
#define DRM_IOCTL_I915_CMDBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
#define DRM_IOCTL_I915_DESTROY_HEAP	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
#define DRM_IOCTL_I915_SET_VBLANK_PIPE	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_GET_VBLANK_PIPE	DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_VBLANK_SWAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
#define DRM_IOCTL_I915_HWS_ADDR		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_INIT		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_EXECBUFFER	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2_WR	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2_WR, struct drm_i915_gem_execbuffer2)
#define DRM_IOCTL_I915_GEM_PIN		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
#define DRM_IOCTL_I915_GEM_UNPIN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
#define DRM_IOCTL_I915_GEM_BUSY		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
#define DRM_IOCTL_I915_GEM_SET_CACHING		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_GET_CACHING		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_THROTTLE	DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
#define DRM_IOCTL_I915_GEM_ENTERVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
#define DRM_IOCTL_I915_GEM_LEAVEVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
#define DRM_IOCTL_I915_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
#define DRM_IOCTL_I915_GEM_CREATE_EXT	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE_EXT, struct drm_i915_gem_create_ext)
#define DRM_IOCTL_I915_GEM_PREAD	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
#define DRM_IOCTL_I915_GEM_PWRITE	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
#define DRM_IOCTL_I915_GEM_MMAP		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
#define DRM_IOCTL_I915_GEM_MMAP_GTT	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
#define DRM_IOCTL_I915_GEM_MMAP_OFFSET	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_offset)
#define DRM_IOCTL_I915_GEM_SET_DOMAIN	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
#define DRM_IOCTL_I915_GEM_SW_FINISH	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
#define DRM_IOCTL_I915_GEM_SET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
#define DRM_IOCTL_I915_GEM_GET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
#define DRM_IOCTL_I915_GEM_GET_APERTURE	DRM_IOR  (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
#define DRM_IOCTL_I915_GEM_MADVISE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
#define DRM_IOCTL_I915_OVERLAY_ATTRS	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GEM_WAIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create_ext)
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
#define DRM_IOCTL_I915_REG_READ			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
#define DRM_IOCTL_I915_GET_RESET_STATS		DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
#define DRM_IOCTL_I915_GEM_USERPTR			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
#define DRM_IOCTL_I915_PERF_OPEN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)
#define DRM_IOCTL_I915_PERF_ADD_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
#define DRM_IOCTL_I915_PERF_REMOVE_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
#define DRM_IOCTL_I915_QUERY			DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
#define DRM_IOCTL_I915_GEM_VM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_CREATE, struct drm_i915_gem_vm_control)
#define DRM_IOCTL_I915_GEM_VM_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control)

/* Allow drivers to submit batchbuffers directly to hardware, relying
 * on the security mechanisms provided by hardware.
 */
typedef struct drm_i915_batchbuffer {
	int start;		/* agp offset */
	int used;		/* nr bytes in use */
	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects;	/* multipass with multiple cliprects? */
	struct drm_clip_rect *cliprects;	/* pointer to userspace cliprects */
} drm_i915_batchbuffer_t;

/* As above, but pass a pointer to a userspace buffer which can be
 * validated by the kernel prior to sending to hardware.
 */
typedef struct _drm_i915_cmdbuffer {
	char *buf;	/* pointer to userspace command buffer */
	int sz;			/* nr bytes in buf */
	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects;	/* multipass with multiple cliprects? */
	struct drm_clip_rect *cliprects;	/* pointer to userspace cliprects */
} drm_i915_cmdbuffer_t;

/* Userspace can request & wait on IRQs:
 */
typedef struct drm_i915_irq_emit {
	int *irq_seq;
} drm_i915_irq_emit_t;

typedef struct drm_i915_irq_wait {
	int irq_seq;
} drm_i915_irq_wait_t;

/*
 * Different modes of per-process Graphics Translation Table,
 * see I915_PARAM_HAS_ALIASING_PPGTT
 */
#define I915_GEM_PPGTT_NONE	0
#define I915_GEM_PPGTT_ALIASING	1
#define I915_GEM_PPGTT_FULL	2

/* Ioctl to query kernel params:
 */
#define I915_PARAM_IRQ_ACTIVE            1
#define I915_PARAM_ALLOW_BATCHBUFFER     2
#define I915_PARAM_LAST_DISPATCH         3
#define I915_PARAM_CHIPSET_ID            4
#define I915_PARAM_HAS_GEM               5
#define I915_PARAM_NUM_FENCES_AVAIL      6
#define I915_PARAM_HAS_OVERLAY           7
#define I915_PARAM_HAS_PAGEFLIPPING	 8
#define I915_PARAM_HAS_EXECBUF2          9
#define I915_PARAM_HAS_BSD		 10
#define I915_PARAM_HAS_BLT		 11
#define I915_PARAM_HAS_RELAXED_FENCING	 12
#define I915_PARAM_HAS_COHERENT_RINGS	 13
#define I915_PARAM_HAS_EXEC_CONSTANTS	 14
#define I915_PARAM_HAS_RELAXED_DELTA	 15
#define I915_PARAM_HAS_GEN7_SOL_RESET	 16
#define I915_PARAM_HAS_LLC		 17
#define I915_PARAM_HAS_ALIASING_PPGTT	 18
#define I915_PARAM_HAS_WAIT_TIMEOUT	 19
#define I915_PARAM_HAS_SEMAPHORES	 20
#define I915_PARAM_HAS_PRIME_VMAP_FLUSH	 21
#define I915_PARAM_HAS_VEBOX		 22
#define I915_PARAM_HAS_SECURE_BATCHES	 23
#define I915_PARAM_HAS_PINNED_BATCHES	 24
#define I915_PARAM_HAS_EXEC_NO_RELOC	 25
#define I915_PARAM_HAS_EXEC_HANDLE_LUT   26
#define I915_PARAM_HAS_WT		 27
#define I915_PARAM_CMD_PARSER_VERSION	 28
#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
#define I915_PARAM_MMAP_VERSION          30
#define I915_PARAM_HAS_BSD2		 31
#define I915_PARAM_REVISION              32
#define I915_PARAM_SUBSLICE_TOTAL	 33
#define I915_PARAM_EU_TOTAL		 34
#define I915_PARAM_HAS_GPU_RESET	 35
#define I915_PARAM_HAS_RESOURCE_STREAMER 36
#define I915_PARAM_HAS_EXEC_SOFTPIN	 37
#define I915_PARAM_HAS_POOLED_EU	 38
#define I915_PARAM_MIN_EU_IN_POOL	 39
#define I915_PARAM_MMAP_GTT_VERSION	 40

/*
 * Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
 * priorities and the driver will attempt to execute batches in priority order.
 * The param returns a capability bitmask; a nonzero value implies that the
 * scheduler is enabled, with different features present according to the mask.
 *
 * The initial priority for each batch is supplied by the context and is
 * controlled via I915_CONTEXT_PARAM_PRIORITY.
 */
#define I915_PARAM_HAS_SCHEDULER	 41
#define   I915_SCHEDULER_CAP_ENABLED	(1ul << 0)
#define   I915_SCHEDULER_CAP_PRIORITY	(1ul << 1)
#define   I915_SCHEDULER_CAP_PREEMPTION	(1ul << 2)
#define   I915_SCHEDULER_CAP_SEMAPHORES	(1ul << 3)
#define   I915_SCHEDULER_CAP_ENGINE_BUSY_STATS	(1ul << 4)
/*
 * Indicates the 2k user priority levels are statically mapped into 3 buckets as
 * follows:
 *
 * -1k to -1	Low priority
 * 0		Normal priority
 * 1 to 1k	Highest priority
 */
#define   I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP	(1ul << 5)

#define I915_PARAM_HUC_STATUS		 42

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
 * synchronisation with implicit fencing on individual objects.
 * See EXEC_OBJECT_ASYNC.
 */
#define I915_PARAM_HAS_EXEC_ASYNC	 43

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports explicit fence support -
 * both being able to pass in a sync_file fd to wait upon before executing,
 * and being able to return a new sync_file fd that is signaled when the
 * current request is complete. See I915_EXEC_FENCE_IN and I915_EXEC_FENCE_OUT.
 */
#define I915_PARAM_HAS_EXEC_FENCE	 44

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture
 * user specified buffers for post-mortem debugging of GPU hangs. See
 * EXEC_OBJECT_CAPTURE.
 */
#define I915_PARAM_HAS_EXEC_CAPTURE	 45

#define I915_PARAM_SLICE_MASK		 46

/* Assuming it's uniform for each slice, this queries the mask of subslices
 * per-slice for this system.
 */
#define I915_PARAM_SUBSLICE_MASK	 47

/*
 * Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying the batch buffer
 * as the first execobject as opposed to the last. See I915_EXEC_BATCH_FIRST.
 */
#define I915_PARAM_HAS_EXEC_BATCH_FIRST	 48

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
 * drm_i915_gem_exec_fence structures.  See I915_EXEC_FENCE_ARRAY.
 */
#define I915_PARAM_HAS_EXEC_FENCE_ARRAY  49

/*
 * Query whether every context (both per-file default and user created) is
 * isolated (insofar as HW supports). If this parameter is not true, then
 * freshly created contexts may inherit values from an existing context,
 * rather than default HW values. If true, it also ensures (insofar as HW
 * supports) that all state set by this context will not leak to any other
 * context.
 *
 * As not every engine across every gen supports contexts, the returned
 * value reports the support of context isolation for individual engines by
 * returning a bitmask of each engine class set to true if that class supports
 * isolation.
 */
#define I915_PARAM_HAS_CONTEXT_ISOLATION 50

/* Frequency of the command streamer timestamps given by the *_TIMESTAMP
 * registers. This used to be fixed per platform, but from CNL onwards it
 * might vary depending on the parts.
 */
#define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51

/*
 * Once upon a time we supposed that writes through the GGTT would be
 * immediately in physical memory (once flushed out of the CPU path). However,
 * on a few different processors and chipsets, this is not necessarily the case
 * as the writes appear to be buffered internally. Thus a read of the backing
 * storage (physical memory) via a different path (with different physical tags
 * to the indirect write via the GGTT) will see stale values from before
 * the GGTT write. Inside the kernel, we can for the most part keep track of
 * the different read/write domains in use (e.g. set-domain), but the assumption
 * of coherency is baked into the ABI, hence reporting its true state in this
 * parameter.
 *
 * Reports true when writes via mmap_gtt are immediately visible following an
 * lfence to flush the WCB.
 *
 * Reports false when writes via mmap_gtt are indeterminately delayed in an
 * internal buffer and are _not_ immediately visible to third parties accessing
 * directly via mmap_cpu/mmap_wc. Use of mmap_gtt as part of an IPC
 * communications channel when reporting false is strongly discouraged.
 */
#define I915_PARAM_MMAP_GTT_COHERENT	52

/*
 * Query whether DRM_I915_GEM_EXECBUFFER2 supports coordination of parallel
 * execution through use of explicit fence support.
 * See I915_EXEC_FENCE_OUT and I915_EXEC_FENCE_SUBMIT.
 */
#define I915_PARAM_HAS_EXEC_SUBMIT_FENCE 53

/*
 * Revision of the i915-perf uAPI. The value returned helps determine what
 * i915-perf features are available. See drm_i915_perf_property_id.
 */
#define I915_PARAM_PERF_REVISION	54

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
 * timeline syncobj through drm_i915_gem_execbuffer_ext_timeline_fences. See
 * I915_EXEC_USE_EXTENSIONS.
 */
#define I915_PARAM_HAS_EXEC_TIMELINE_FENCES 55

/* Query if the kernel supports the I915_USERPTR_PROBE flag. */
#define I915_PARAM_HAS_USERPTR_PROBE 56

/* Must be kept compact -- no holes and well documented */

typedef struct drm_i915_getparam {
	__s32 param;
	/*
	 * WARNING: Using pointers instead of fixed-size u64 means we need to write
	 * compat32 code. Don't repeat this mistake.
	 */
	int *value;
} drm_i915_getparam_t;
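
/*
 * Example (illustrative, error handling elided; fd is assumed to be an open
 * DRM device file descriptor): probing a single parameter. An ioctl failure
 * (typically EINVAL) means the running kernel does not know the parameter,
 * so callers should treat errors as "feature absent".
 *
 * .. code-block:: C
 *
 *	int value = 0;
 *	drm_i915_getparam_t gp = {
 *		.param = I915_PARAM_HAS_EXEC_FENCE,
 *		.value = &value,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0 && value)
 *		; // explicit fencing can be used with execbuffer2
 */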

/* Ioctl to set kernel params:
 */
#define I915_SETPARAM_USE_MI_BATCHBUFFER_START            1
#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY             2
#define I915_SETPARAM_ALLOW_BATCHBUFFER                   3
#define I915_SETPARAM_NUM_USED_FENCES                     4
/* Must be kept compact -- no holes */

typedef struct drm_i915_setparam {
	int param;
	int value;
} drm_i915_setparam_t;

/* A memory manager for regions of shared memory:
 */
#define I915_MEM_REGION_AGP 1

typedef struct drm_i915_mem_alloc {
	int region;
	int alignment;
	int size;
	int *region_offset;	/* offset from start of fb or agp */
} drm_i915_mem_alloc_t;

typedef struct drm_i915_mem_free {
	int region;
	int region_offset;
} drm_i915_mem_free_t;

typedef struct drm_i915_mem_init_heap {
	int region;
	int size;
	int start;
} drm_i915_mem_init_heap_t;

/* Allow memory manager to be torn down and re-initialized (e.g. on
 * rotate):
 */
typedef struct drm_i915_mem_destroy_heap {
	int region;
} drm_i915_mem_destroy_heap_t;

/* Allow X server to configure which pipes to monitor for vblank signals
 */
#define	DRM_I915_VBLANK_PIPE_A	1
#define	DRM_I915_VBLANK_PIPE_B	2

typedef struct drm_i915_vblank_pipe {
	int pipe;
} drm_i915_vblank_pipe_t;

/* Schedule buffer swap at given vertical blank:
 */
typedef struct drm_i915_vblank_swap {
	drm_drawable_t drawable;
	enum drm_vblank_seq_type seqtype;
	unsigned int sequence;
} drm_i915_vblank_swap_t;

typedef struct drm_i915_hws_addr {
	__u64 addr;
} drm_i915_hws_addr_t;

struct drm_i915_gem_init {
	/**
	 * Beginning offset in the GTT to be managed by the DRM memory
	 * manager.
	 */
	__u64 gtt_start;
	/**
	 * Ending offset in the GTT to be managed by the DRM memory
	 * manager.
	 */
	__u64 gtt_end;
};

struct drm_i915_gem_create {
	/**
	 * Requested size for the object.
	 *
	 * The (page-aligned) allocated size for the object will be returned.
	 */
	__u64 size;
	/**
	 * Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;
	__u32 pad;
};

struct drm_i915_gem_pread {
	/** Handle for the object being read. */
	__u32 handle;
	__u32 pad;
	/** Offset into the object to read from */
	__u64 offset;
	/** Length of data to read */
	__u64 size;
	/**
	 * Pointer to write the data into.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 data_ptr;
};

struct drm_i915_gem_pwrite {
	/** Handle for the object being written to. */
	__u32 handle;
	__u32 pad;
	/** Offset into the object to write to */
	__u64 offset;
	/** Length of data to write */
	__u64 size;
	/**
	 * Pointer to read the data from.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 data_ptr;
};

struct drm_i915_gem_mmap {
	/** Handle for the object being mapped. */
	__u32 handle;
	__u32 pad;
	/** Offset in the object to map. */
	__u64 offset;
	/**
	 * Length of data to map.
	 *
	 * The value will be page-aligned.
	 */
	__u64 size;
	/**
	 * Returned pointer the data was mapped at.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 addr_ptr;

	/**
	 * Flags for extended behaviour.
	 *
	 * Added in version 2.
	 */
	__u64 flags;
#define I915_MMAP_WC 0x1
};

struct drm_i915_gem_mmap_gtt {
	/** Handle for the object being mapped. */
	__u32 handle;
	__u32 pad;
	/**
	 * Fake offset to use for subsequent mmap call
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 offset;
};

/**
 * struct drm_i915_gem_mmap_offset - Retrieve an offset so we can mmap this buffer object.
 *
 * This struct is passed as argument to the `DRM_IOCTL_I915_GEM_MMAP_OFFSET` ioctl,
 * and is used to retrieve the fake offset to mmap an object specified by &handle.
 *
 * The legacy way of using `DRM_IOCTL_I915_GEM_MMAP` is removed on gen12+.
 * `DRM_IOCTL_I915_GEM_MMAP_GTT` is an older supported alias to this struct, but will behave
 * as setting the &extensions to 0, and &flags to `I915_MMAP_OFFSET_GTT`.
 */
struct drm_i915_gem_mmap_offset {
	/** @handle: Handle for the object being mapped. */
	__u32 handle;
	/** @pad: Must be zero */
	__u32 pad;
	/**
	 * @offset: The fake offset to use for subsequent mmap call
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 offset;

	/**
	 * @flags: Flags for extended behaviour.
	 *
	 * It is mandatory that one of the `MMAP_OFFSET` types
	 * should be included:
	 *
	 * - `I915_MMAP_OFFSET_GTT`: Use mmap with the object bound to GTT. (Write-Combined)
	 * - `I915_MMAP_OFFSET_WC`: Use Write-Combined caching.
	 * - `I915_MMAP_OFFSET_WB`: Use Write-Back caching.
	 * - `I915_MMAP_OFFSET_FIXED`: Use object placement to determine caching.
	 *
	 * On devices with local memory `I915_MMAP_OFFSET_FIXED` is the only valid
	 * type. On devices without local memory, this caching mode is invalid.
	 *
	 * As caching mode when specifying `I915_MMAP_OFFSET_FIXED`, WC or WB will
	 * be used, depending on the object placement on creation. WB will be used
	 * when the object can only exist in system memory, WC otherwise.
	 */
	__u64 flags;

#define I915_MMAP_OFFSET_GTT	0
#define I915_MMAP_OFFSET_WC	1
#define I915_MMAP_OFFSET_WB	2
#define I915_MMAP_OFFSET_UC	3
#define I915_MMAP_OFFSET_FIXED	4

	/**
	 * @extensions: Zero-terminated chain of extensions.
	 *
	 * No current extensions defined; mbz.
	 */
	__u64 extensions;
};
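
/*
 * Example flow (a minimal sketch, error handling elided; fd, handle, size
 * and ptr are assumed to come from the caller): fetching the fake offset
 * for an existing GEM handle and mapping the object write-combined. On
 * devices with local memory, I915_MMAP_OFFSET_FIXED would have to be used
 * instead, as documented above.
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_mmap_offset mmo = {
 *		.handle = handle, // e.g. from DRM_IOCTL_I915_GEM_CREATE
 *		.flags = I915_MMAP_OFFSET_WC,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &mmo);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, mmo.offset);
 */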

/**
 * struct drm_i915_gem_set_domain - Adjust the object's write or read domain, in
 * preparation for accessing the pages via some CPU domain.
 *
 * Specifying a new write or read domain will flush the object out of the
 * previous domain (if required), before then updating the object's domain
 * tracking with the new domain.
 *
 * Note this might involve waiting for the object first if it is still active on
 * the GPU.
 *
 * Supported values for @read_domains and @write_domain:
 *
 *	- I915_GEM_DOMAIN_WC: Uncached write-combined domain
 *	- I915_GEM_DOMAIN_CPU: CPU cache domain
 *	- I915_GEM_DOMAIN_GTT: Mappable aperture domain
 *
 * All other domains are rejected.
 *
 * Note that for discrete, starting from DG1, this is no longer supported, and
 * is instead rejected. On such platforms the CPU domain is effectively static,
 * where we also only support a single &drm_i915_gem_mmap_offset cache mode,
 * which can't be set explicitly and instead depends on the object placements,
 * as per the below.
 *
 * Implicit caching rules, starting from DG1:
 *
 *	- If any of the object placements (see &drm_i915_gem_create_ext_memory_regions)
 *	  contain I915_MEMORY_CLASS_DEVICE then the object will be allocated and
 *	  mapped as write-combined only.
 *
 *	- Everything else is always allocated and mapped as write-back, with the
 *	  guarantee that everything is also coherent with the GPU.
 *
 * Note that this is likely to change in the future again, where we might need
 * more flexibility on future devices, so making this all explicit as part of a
 * new &drm_i915_gem_create_ext extension is probable.
 */
struct drm_i915_gem_set_domain {
	/** @handle: Handle for the object. */
	__u32 handle;

	/** @read_domains: New read domains. */
	__u32 read_domains;

	/**
	 * @write_domain: New write domain.
	 *
	 * Note that having something in the write domain implies it's in the
	 * read domain, and only that read domain.
	 */
	__u32 write_domain;
};
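
/*
 * Example (illustrative, error handling elided; fd and handle assumed from
 * the caller): moving an object into the CPU domain for both reads and
 * writes before accessing it through a CPU mapping.
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_CPU,
 *		.write_domain = I915_GEM_DOMAIN_CPU,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 */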

struct drm_i915_gem_sw_finish {
	/** Handle for the object */
	__u32 handle;
};

struct drm_i915_gem_relocation_entry {
	/**
	 * Handle of the buffer being pointed to by this relocation entry.
	 *
	 * It's appealing to make this be an index into the mm_validate_entry
	 * list to refer to the buffer, but this allows the driver to create
	 * a relocation list for state buffers and not re-write it per
	 * exec using the buffer.
	 */
	__u32 target_handle;

	/**
	 * Value to be added to the offset of the target buffer to make up
	 * the relocation entry.
	 */
	__u32 delta;

	/** Offset in the buffer the relocation entry will be written into */
	__u64 offset;

	/**
	 * Offset value of the target buffer that the relocation entry was last
	 * written as.
	 *
	 * If the buffer has the same offset as last time, we can skip syncing
	 * and writing the relocation.  This value is written back out by
	 * the execbuffer ioctl when the relocation is written.
	 */
	__u64 presumed_offset;

	/**
	 * Target memory domains read by this operation.
	 */
	__u32 read_domains;

	/**
	 * Target memory domains written by this operation.
	 *
	 * Note that only one domain may be written by the whole
	 * execbuffer operation, so that where there are conflicts,
	 * the application will get -EINVAL back.
	 */
	__u32 write_domain;
};

/** @{
 * Intel memory domains
 *
 * Most of these just align with the various caches in
 * the system and are used to flush and invalidate as
 * objects end up cached in different domains.
 */
/** CPU cache */
#define I915_GEM_DOMAIN_CPU		0x00000001
/** Render cache, used by 2D and 3D drawing */
#define I915_GEM_DOMAIN_RENDER		0x00000002
/** Sampler cache, used by texture engine */
#define I915_GEM_DOMAIN_SAMPLER		0x00000004
/** Command queue, used to load batch buffers */
#define I915_GEM_DOMAIN_COMMAND		0x00000008
/** Instruction cache, used by shader programs */
#define I915_GEM_DOMAIN_INSTRUCTION	0x00000010
/** Vertex address cache */
#define I915_GEM_DOMAIN_VERTEX		0x00000020
/** GTT domain - aperture and scanout */
#define I915_GEM_DOMAIN_GTT		0x00000040
/** WC domain - uncached access */
#define I915_GEM_DOMAIN_WC		0x00000080
/** @} */

struct drm_i915_gem_exec_object {
	/**
	 * User's handle for a buffer to be bound into the GTT for this
	 * operation.
	 */
	__u32 handle;

	/** Number of relocations to be performed on this buffer */
	__u32 relocation_count;
	/**
	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
	 * the relocations to be performed in this buffer.
	 */
	__u64 relocs_ptr;

	/** Required alignment in graphics aperture */
	__u64 alignment;

	/**
	 * Returned value of the updated offset of the object, for future
	 * presumed_offset writes.
	 */
	__u64 offset;
};

/* DRM_IOCTL_I915_GEM_EXECBUFFER was removed in Linux 5.13 */
struct drm_i915_gem_execbuffer {
	/**
	 * List of buffers to be validated with their relocations to be
	 * performed on them.
	 *
	 * This is a pointer to an array of struct drm_i915_gem_validate_entry.
	 *
	 * These buffers must be listed in an order such that all relocations
	 * a buffer is performing refer to buffers that have already appeared
	 * in the validate list.
	 */
	__u64 buffers_ptr;
	__u32 buffer_count;

	/** Offset in the batchbuffer to start execution from. */
	__u32 batch_start_offset;
	/** Bytes used in batchbuffer from batch_start_offset */
	__u32 batch_len;
	__u32 DR1;
	__u32 DR4;
	__u32 num_cliprects;
	/** This is a struct drm_clip_rect *cliprects */
	__u64 cliprects_ptr;
};

struct drm_i915_gem_exec_object2 {
	/**
	 * User's handle for a buffer to be bound into the GTT for this
	 * operation.
	 */
	__u32 handle;

	/** Number of relocations to be performed on this buffer */
	__u32 relocation_count;
	/**
	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
	 * the relocations to be performed in this buffer.
	 */
	__u64 relocs_ptr;

	/** Required alignment in graphics aperture */
	__u64 alignment;

	/**
	 * When the EXEC_OBJECT_PINNED flag is specified this is populated by
	 * the user with the GTT offset at which this object will be pinned.
	 * When the I915_EXEC_NO_RELOC flag is specified this must contain the
	 * presumed_offset of the object.
	 * During execbuffer2 the kernel populates it with the value of the
	 * current GTT offset of the object, for future presumed_offset writes.
	 */
	__u64 offset;

#define EXEC_OBJECT_NEEDS_FENCE		 (1<<0)
#define EXEC_OBJECT_NEEDS_GTT		 (1<<1)
#define EXEC_OBJECT_WRITE		 (1<<2)
#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
#define EXEC_OBJECT_PINNED		 (1<<4)
#define EXEC_OBJECT_PAD_TO_SIZE		 (1<<5)
113401e04c3fSmrg/* The kernel implicitly tracks GPU activity on all GEM objects, and
113501e04c3fSmrg * synchronises operations with outstanding rendering. This includes
113601e04c3fSmrg * rendering on other devices if exported via dma-buf. However, sometimes
113701e04c3fSmrg * this tracking is too coarse and the user knows better. For example,
113801e04c3fSmrg * if the object is split into non-overlapping ranges shared between different
113901e04c3fSmrg * clients or engines (i.e. suballocating objects), the implicit tracking
114001e04c3fSmrg * by the kernel assumes that each operation affects the whole object rather
114101e04c3fSmrg * than an individual range, causing needless synchronisation between clients.
114201e04c3fSmrg * The kernel will also forgo any CPU cache flushes prior to rendering from
114301e04c3fSmrg * the object as the client is expected to be also handling such domain
114401e04c3fSmrg * tracking.
114501e04c3fSmrg *
114601e04c3fSmrg * The kernel maintains the implicit tracking in order to manage resources
114701e04c3fSmrg * used by the GPU - this flag only disables the synchronisation prior to
114801e04c3fSmrg * rendering with this object in this execbuf.
114901e04c3fSmrg *
115001e04c3fSmrg * Opting out of implicit synchronisation requires the user to do its own
115101e04c3fSmrg * explicit tracking to avoid rendering corruption. See, for example,
115201e04c3fSmrg * I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously.
115301e04c3fSmrg */
115401e04c3fSmrg#define EXEC_OBJECT_ASYNC		(1<<6)
115501e04c3fSmrg/* Request that the contents of this execobject be copied into the error
115601e04c3fSmrg * state upon a GPU hang involving this batch for post-mortem debugging.
115701e04c3fSmrg * These buffers are recorded in no particular order as "user" in
115801e04c3fSmrg * /sys/class/drm/cardN/error. Query I915_PARAM_HAS_EXEC_CAPTURE to see
115901e04c3fSmrg * if the kernel supports this flag.
116001e04c3fSmrg */
116101e04c3fSmrg#define EXEC_OBJECT_CAPTURE		(1<<7)
116201e04c3fSmrg/* All remaining bits are MBZ and RESERVED FOR FUTURE USE */
116301e04c3fSmrg#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_CAPTURE<<1)
116401e04c3fSmrg	__u64 flags;
116501e04c3fSmrg
116601e04c3fSmrg	union {
116701e04c3fSmrg		__u64 rsvd1;
116801e04c3fSmrg		__u64 pad_to_size;
116901e04c3fSmrg	};
117001e04c3fSmrg	__u64 rsvd2;
117101e04c3fSmrg};
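
/*
 * Example (editor's sketch, not upstream documentation): populating a
 * drm_i915_gem_exec_object2 for softpin. The names "bo_handle" and
 * "gtt_addr" are placeholders; with EXEC_OBJECT_PINNED set, the kernel
 * binds the object at the offset chosen by userspace, so the relocation
 * fields can be left zero.
 *
 * .. code-block:: C
 *
 * 	struct drm_i915_gem_exec_object2 obj = {
 * 		.handle = bo_handle, // GEM handle from buffer creation
 * 		.offset = gtt_addr,  // GTT address chosen by userspace
 * 		.flags = EXEC_OBJECT_PINNED |
 * 			 EXEC_OBJECT_SUPPORTS_48B_ADDRESS,
 * 	};
 */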
117201e04c3fSmrg
117301e04c3fSmrgstruct drm_i915_gem_exec_fence {
117401e04c3fSmrg	/**
117501e04c3fSmrg	 * User's handle for a drm_syncobj to wait on or signal.
117601e04c3fSmrg	 */
117701e04c3fSmrg	__u32 handle;
117801e04c3fSmrg
117901e04c3fSmrg#define I915_EXEC_FENCE_WAIT            (1<<0)
118001e04c3fSmrg#define I915_EXEC_FENCE_SIGNAL          (1<<1)
118101e04c3fSmrg#define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1))
118201e04c3fSmrg	__u32 flags;
118301e04c3fSmrg};
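
/*
 * Example (editor's sketch): building a two-entry fence array for the
 * I915_EXEC_FENCE_ARRAY flag defined further below. "wait_syncobj" and
 * "signal_syncobj" are assumed to be handles previously created with
 * DRM_IOCTL_SYNCOBJ_CREATE.
 *
 * .. code-block:: C
 *
 * 	struct drm_i915_gem_exec_fence fences[2] = {
 * 		{ .handle = wait_syncobj,   .flags = I915_EXEC_FENCE_WAIT },
 * 		{ .handle = signal_syncobj, .flags = I915_EXEC_FENCE_SIGNAL },
 * 	};
 *
 * 	execbuf.cliprects_ptr = (uintptr_t)fences;
 * 	execbuf.num_cliprects = 2;
 * 	execbuf.flags |= I915_EXEC_FENCE_ARRAY;
 */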
118401e04c3fSmrg
11857ec681f3Smrg/*
11867ec681f3Smrg * See drm_i915_gem_execbuffer_ext_timeline_fences.
11877ec681f3Smrg */
11887ec681f3Smrg#define DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES 0
11897ec681f3Smrg
11907ec681f3Smrg/*
11917ec681f3Smrg * This structure describes an array of drm_syncobj and associated points for
11927ec681f3Smrg * timeline variants of drm_syncobj. It is invalid to append this structure to
11937ec681f3Smrg * the execbuf if I915_EXEC_FENCE_ARRAY is set.
11947ec681f3Smrg */
11957ec681f3Smrgstruct drm_i915_gem_execbuffer_ext_timeline_fences {
11967ec681f3Smrg	struct i915_user_extension base;
11977ec681f3Smrg
11987ec681f3Smrg	/**
11997ec681f3Smrg	 * Number of elements in the handles_ptr & values_ptr arrays.
12007ec681f3Smrg	 */
12017ec681f3Smrg	__u64 fence_count;
12027ec681f3Smrg
12037ec681f3Smrg	/**
12047ec681f3Smrg	 * Pointer to an array of struct drm_i915_gem_exec_fence of length
12057ec681f3Smrg	 * fence_count.
12067ec681f3Smrg	 */
12077ec681f3Smrg	__u64 handles_ptr;
12087ec681f3Smrg
12097ec681f3Smrg	/**
12107ec681f3Smrg	 * Pointer to an array of u64 values of length fence_count. Values
12117ec681f3Smrg	 * must be 0 for a binary drm_syncobj. A value of 0 for a timeline
12127ec681f3Smrg	 * drm_syncobj is invalid as it turns a drm_syncobj into a binary one.
12137ec681f3Smrg	 */
12147ec681f3Smrg	__u64 values_ptr;
12157ec681f3Smrg};
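
/*
 * Example (editor's sketch): attaching a timeline fence through the
 * I915_EXEC_USE_EXTENSIONS mechanism described below. "timeline_syncobj"
 * and the point value are placeholders.
 *
 * .. code-block:: C
 *
 * 	struct drm_i915_gem_exec_fence fence = {
 * 		.handle = timeline_syncobj,
 * 		.flags = I915_EXEC_FENCE_SIGNAL,
 * 	};
 * 	__u64 point = 3; // timeline point to signal on completion
 * 	struct drm_i915_gem_execbuffer_ext_timeline_fences ext = {
 * 		.base.name = DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES,
 * 		.fence_count = 1,
 * 		.handles_ptr = (uintptr_t)&fence,
 * 		.values_ptr = (uintptr_t)&point,
 * 	};
 *
 * 	execbuf.cliprects_ptr = (uintptr_t)&ext;
 * 	execbuf.flags |= I915_EXEC_USE_EXTENSIONS;
 */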
12167ec681f3Smrg
121701e04c3fSmrgstruct drm_i915_gem_execbuffer2 {
121801e04c3fSmrg	/**
121901e04c3fSmrg	 * List of gem_exec_object2 structs
122001e04c3fSmrg	 */
122101e04c3fSmrg	__u64 buffers_ptr;
122201e04c3fSmrg	__u32 buffer_count;
122301e04c3fSmrg
122401e04c3fSmrg	/** Offset in the batchbuffer to start execution from. */
122501e04c3fSmrg	__u32 batch_start_offset;
122601e04c3fSmrg	/** Bytes used in batchbuffer from batch_start_offset */
122701e04c3fSmrg	__u32 batch_len;
122801e04c3fSmrg	__u32 DR1;
122901e04c3fSmrg	__u32 DR4;
123001e04c3fSmrg	__u32 num_cliprects;
123101e04c3fSmrg	/**
123201e04c3fSmrg	 * This is a struct drm_clip_rect *cliprects if neither
12337ec681f3Smrg	 * I915_EXEC_FENCE_ARRAY nor I915_EXEC_USE_EXTENSIONS is set.
12347ec681f3Smrg	 *
12357ec681f3Smrg	 * If I915_EXEC_FENCE_ARRAY is set, then this is a pointer to an array
12367ec681f3Smrg	 * of struct drm_i915_gem_exec_fence and num_cliprects is the length
12377ec681f3Smrg	 * of the array.
12387ec681f3Smrg	 *
12397ec681f3Smrg	 * If I915_EXEC_USE_EXTENSIONS is set, then this is a pointer to a
12407ec681f3Smrg	 * single struct i915_user_extension and num_cliprects is 0.
124101e04c3fSmrg	 */
124201e04c3fSmrg	__u64 cliprects_ptr;
12437ec681f3Smrg#define I915_EXEC_RING_MASK              (0x3f)
124401e04c3fSmrg#define I915_EXEC_DEFAULT                (0<<0)
124501e04c3fSmrg#define I915_EXEC_RENDER                 (1<<0)
124601e04c3fSmrg#define I915_EXEC_BSD                    (2<<0)
124701e04c3fSmrg#define I915_EXEC_BLT                    (3<<0)
124801e04c3fSmrg#define I915_EXEC_VEBOX                  (4<<0)
124901e04c3fSmrg
125001e04c3fSmrg/* Used for switching the constants addressing mode on gen4+ RENDER ring.
125101e04c3fSmrg * Gen6+ only supports relative addressing to dynamic state (default) and
125201e04c3fSmrg * absolute addressing.
125301e04c3fSmrg *
125401e04c3fSmrg * These flags are ignored for the BSD and BLT rings.
125501e04c3fSmrg */
125601e04c3fSmrg#define I915_EXEC_CONSTANTS_MASK 	(3<<6)
125701e04c3fSmrg#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
125801e04c3fSmrg#define I915_EXEC_CONSTANTS_ABSOLUTE 	(1<<6)
125901e04c3fSmrg#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
126001e04c3fSmrg	__u64 flags;
126101e04c3fSmrg	__u64 rsvd1; /* now used for context info */
126201e04c3fSmrg	__u64 rsvd2;
126301e04c3fSmrg};
126401e04c3fSmrg
126501e04c3fSmrg/** Resets the SO write offset registers for transform feedback on gen7. */
126601e04c3fSmrg#define I915_EXEC_GEN7_SOL_RESET	(1<<8)
126701e04c3fSmrg
126801e04c3fSmrg/** Request a privileged ("secure") batch buffer. Note only available for
126901e04c3fSmrg * DRM_ROOT_ONLY | DRM_MASTER processes.
127001e04c3fSmrg */
127101e04c3fSmrg#define I915_EXEC_SECURE		(1<<9)
127201e04c3fSmrg
127301e04c3fSmrg/** Inform the kernel that the batch is and will always be pinned. This
127401e04c3fSmrg * negates the requirement for a workaround to be performed to avoid
127501e04c3fSmrg * an incoherent CS (such as can be found on 830/845). If this flag is
127601e04c3fSmrg * not passed, the kernel will endeavour to make sure the batch is
127701e04c3fSmrg * coherent with the CS before execution. If this flag is passed,
127801e04c3fSmrg * userspace assumes the responsibility for ensuring the same.
127901e04c3fSmrg */
128001e04c3fSmrg#define I915_EXEC_IS_PINNED		(1<<10)
128101e04c3fSmrg
128201e04c3fSmrg/** Provide a hint to the kernel that the command stream and auxiliary
128301e04c3fSmrg * state buffers already hold the correct presumed addresses and so the
128401e04c3fSmrg * relocation process may be skipped if no buffers need to be moved in
128501e04c3fSmrg * preparation for the execbuffer.
128601e04c3fSmrg */
128701e04c3fSmrg#define I915_EXEC_NO_RELOC		(1<<11)
128801e04c3fSmrg
128901e04c3fSmrg/** Use the reloc.handle as an index into the exec object array rather
129001e04c3fSmrg * than as the per-file handle.
129101e04c3fSmrg */
129201e04c3fSmrg#define I915_EXEC_HANDLE_LUT		(1<<12)
129301e04c3fSmrg
129401e04c3fSmrg/** Used for switching BSD rings on the platforms with two BSD rings */
129501e04c3fSmrg#define I915_EXEC_BSD_SHIFT	 (13)
129601e04c3fSmrg#define I915_EXEC_BSD_MASK	 (3 << I915_EXEC_BSD_SHIFT)
129701e04c3fSmrg/* default ping-pong mode */
129801e04c3fSmrg#define I915_EXEC_BSD_DEFAULT	 (0 << I915_EXEC_BSD_SHIFT)
129901e04c3fSmrg#define I915_EXEC_BSD_RING1	 (1 << I915_EXEC_BSD_SHIFT)
130001e04c3fSmrg#define I915_EXEC_BSD_RING2	 (2 << I915_EXEC_BSD_SHIFT)
130101e04c3fSmrg
130201e04c3fSmrg/** Tell the kernel that the batchbuffer is processed by
130301e04c3fSmrg *  the resource streamer.
130401e04c3fSmrg */
130501e04c3fSmrg#define I915_EXEC_RESOURCE_STREAMER     (1<<15)
130601e04c3fSmrg
130701e04c3fSmrg/* Setting I915_EXEC_FENCE_IN implies that lower_32_bits(rsvd2) represent
130801e04c3fSmrg * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
130901e04c3fSmrg * the batch.
131001e04c3fSmrg *
131101e04c3fSmrg * Returns -EINVAL if the sync_file fd cannot be found.
131201e04c3fSmrg */
131301e04c3fSmrg#define I915_EXEC_FENCE_IN		(1<<16)
131401e04c3fSmrg
131501e04c3fSmrg/* Setting I915_EXEC_FENCE_OUT causes the ioctl to return a sync_file fd
131601e04c3fSmrg * in the upper_32_bits(rsvd2) upon success. Ownership of the fd is given
131701e04c3fSmrg * to the caller, and it should be closed after use. (The fd is a regular
131801e04c3fSmrg * file descriptor and will be cleaned up on process termination. It holds
131901e04c3fSmrg * a reference to the request, but nothing else.)
132001e04c3fSmrg *
132101e04c3fSmrg * The sync_file fd can be combined with other sync_files and passed either
132201e04c3fSmrg * to execbuf using I915_EXEC_FENCE_IN, to atomic KMS ioctls (so that a flip
132301e04c3fSmrg * will only occur after this request completes), or to other devices.
132401e04c3fSmrg *
132501e04c3fSmrg * Using I915_EXEC_FENCE_OUT requires use of
132601e04c3fSmrg * DRM_IOCTL_I915_GEM_EXECBUFFER2_WR ioctl so that the result is written
132701e04c3fSmrg * back to userspace. Failure to do so will cause the out-fence to always
132801e04c3fSmrg * be reported as zero, and the real fence fd to be leaked.
132901e04c3fSmrg */
133001e04c3fSmrg#define I915_EXEC_FENCE_OUT		(1<<17)
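
/*
 * Example (editor's sketch): combining an in-fence with an out-fence.
 * Note the use of DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, as required above;
 * "in_fd" is an existing sync_file fd and drmIoctl() is the libdrm
 * wrapper around ioctl().
 *
 * .. code-block:: C
 *
 * 	execbuf.flags |= I915_EXEC_FENCE_IN | I915_EXEC_FENCE_OUT;
 * 	execbuf.rsvd2 = in_fd; // lower 32 bits: fence to wait on
 * 	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, &execbuf) == 0) {
 * 		int out_fd = execbuf.rsvd2 >> 32; // upper 32 bits
 * 		// ... hand out_fd to a consumer, then close(out_fd);
 * 	}
 */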
133101e04c3fSmrg
133201e04c3fSmrg/*
133301e04c3fSmrg * Traditionally the execbuf ioctl has only considered the final element in
133401e04c3fSmrg * the execobject[] to be the executable batch. Often though, the client
133501e04c3fSmrg * will know the batch object prior to construction, and being able to place
133601e04c3fSmrg * it into the execobject[] array first can simplify the relocation tracking.
133701e04c3fSmrg * Setting I915_EXEC_BATCH_FIRST tells execbuf to use element 0 of the
133801e04c3fSmrg * execobject[] as the batch instead (the default is to use the last
133901e04c3fSmrg * element).
134001e04c3fSmrg */
134101e04c3fSmrg#define I915_EXEC_BATCH_FIRST		(1<<18)
134201e04c3fSmrg
134301e04c3fSmrg/* Setting I915_EXEC_FENCE_ARRAY implies that num_cliprects and cliprects_ptr
134401e04c3fSmrg * define an array of struct drm_i915_gem_exec_fence which specifies a set of
134501e04c3fSmrg * dma fences to wait upon or signal.
134601e04c3fSmrg */
134701e04c3fSmrg#define I915_EXEC_FENCE_ARRAY   (1<<19)
134801e04c3fSmrg
13497ec681f3Smrg/*
13507ec681f3Smrg * Setting I915_EXEC_FENCE_SUBMIT implies that lower_32_bits(rsvd2) represent
13517ec681f3Smrg * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
13527ec681f3Smrg * the batch.
13537ec681f3Smrg *
13547ec681f3Smrg * Returns -EINVAL if the sync_file fd cannot be found.
13557ec681f3Smrg */
13567ec681f3Smrg#define I915_EXEC_FENCE_SUBMIT		(1 << 20)
13577ec681f3Smrg
13587ec681f3Smrg/*
13597ec681f3Smrg * Setting I915_EXEC_USE_EXTENSIONS implies that
13607ec681f3Smrg * drm_i915_gem_execbuffer2.cliprects_ptr is treated as a pointer to a linked
13617ec681f3Smrg * list of i915_user_extension. Each i915_user_extension node is the base of a
13627ec681f3Smrg * larger structure. The supported structures are listed in the
13637ec681f3Smrg * drm_i915_gem_execbuffer_ext enum.
13647ec681f3Smrg */
13657ec681f3Smrg#define I915_EXEC_USE_EXTENSIONS	(1 << 21)
13667ec681f3Smrg
13677ec681f3Smrg#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_USE_EXTENSIONS << 1))
136801e04c3fSmrg
136901e04c3fSmrg#define I915_EXEC_CONTEXT_ID_MASK	(0xffffffff)
137001e04c3fSmrg#define i915_execbuffer2_set_context_id(eb2, context) \
137101e04c3fSmrg	(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
137201e04c3fSmrg#define i915_execbuffer2_get_context_id(eb2) \
137301e04c3fSmrg	((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
137401e04c3fSmrg
137501e04c3fSmrgstruct drm_i915_gem_pin {
137601e04c3fSmrg	/** Handle of the buffer to be pinned. */
137701e04c3fSmrg	__u32 handle;
137801e04c3fSmrg	__u32 pad;
137901e04c3fSmrg
138001e04c3fSmrg	/** alignment required within the aperture */
138101e04c3fSmrg	__u64 alignment;
138201e04c3fSmrg
138301e04c3fSmrg	/** Returned GTT offset of the buffer. */
138401e04c3fSmrg	__u64 offset;
138501e04c3fSmrg};
138601e04c3fSmrg
138701e04c3fSmrgstruct drm_i915_gem_unpin {
138801e04c3fSmrg	/** Handle of the buffer to be unpinned. */
138901e04c3fSmrg	__u32 handle;
139001e04c3fSmrg	__u32 pad;
139101e04c3fSmrg};
139201e04c3fSmrg
139301e04c3fSmrgstruct drm_i915_gem_busy {
139401e04c3fSmrg	/** Handle of the buffer to check for busy */
139501e04c3fSmrg	__u32 handle;
139601e04c3fSmrg
139701e04c3fSmrg	/** Return busy status
139801e04c3fSmrg	 *
139901e04c3fSmrg	 * A return of 0 implies that the object is idle (after
140001e04c3fSmrg	 * having flushed any pending activity), and a non-zero return that
140101e04c3fSmrg	 * the object is still in-flight on the GPU. (The GPU has not yet
140201e04c3fSmrg	 * signaled completion for all pending requests that reference the
140301e04c3fSmrg	 * object.) An object is guaranteed to become idle eventually (so
140401e04c3fSmrg	 * long as no new GPU commands are executed upon it). Due to the
140501e04c3fSmrg	 * asynchronous nature of the hardware, an object reported
140601e04c3fSmrg	 * as busy may become idle before the ioctl is completed.
140701e04c3fSmrg	 *
140801e04c3fSmrg	 * Furthermore, if the object is busy, the report of which engine is
14097ec681f3Smrg	 * busy is only a guide, given indirectly by reporting its class
14107ec681f3Smrg	 * (there may be more than one engine in each class). There are race
14117ec681f3Smrg	 * conditions which prevent the report of which engines are busy from
14127ec681f3Smrg	 * being always accurate.  However, the converse is not true. If the
14137ec681f3Smrg	 * object is idle, the result of the ioctl, that all engines are idle,
14147ec681f3Smrg	 * is accurate.
141501e04c3fSmrg	 *
141601e04c3fSmrg	 * The returned dword is split into two fields to indicate both
14177ec681f3Smrg	 * the engine classes on which the object is being read, and the
14187ec681f3Smrg	 * engine class on which it is currently being written (if any).
141901e04c3fSmrg	 *
142001e04c3fSmrg	 * The low word (bits 0:15) indicates whether the object is being written
142101e04c3fSmrg	 * to by any engine (there can only be one, as the GEM implicit
142201e04c3fSmrg	 * synchronisation rules force writes to be serialised). Only the
14237ec681f3Smrg	 * engine class (offset by 1, I915_ENGINE_CLASS_RENDER is reported as
14247ec681f3Smrg	 * 1 not 0 etc) for the last write is reported.
142501e04c3fSmrg	 *
14267ec681f3Smrg	 * The high word (bits 16:31) is a bitmask of which engine classes
14277ec681f3Smrg	 * are currently reading from the object. Multiple engines may be
142801e04c3fSmrg	 * reading from the object simultaneously.
142901e04c3fSmrg	 *
14307ec681f3Smrg	 * The value of each engine class is the same as specified in the
14317ec681f3Smrg	 * I915_CONTEXT_PARAM_ENGINES context parameter and via perf, i.e.
14327ec681f3Smrg	 * I915_ENGINE_CLASS_RENDER, I915_ENGINE_CLASS_COPY, etc.
14337ec681f3Smrg	 * Some hardware may have parallel execution engines, e.g. multiple
14347ec681f3Smrg	 * media engines, which are mapped to the same class identifier and so
14357ec681f3Smrg	 * are not separately reported for busyness.
143601e04c3fSmrg	 *
143701e04c3fSmrg	 * Caveat emptor:
143801e04c3fSmrg	 * Only the boolean result of this query is reliable; that is whether
143901e04c3fSmrg	 * the object is idle or busy. The report of which engines are busy
144001e04c3fSmrg	 * should be only used as a heuristic.
144101e04c3fSmrg	 */
144201e04c3fSmrg	__u32 busy;
144301e04c3fSmrg};
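
/*
 * Example (editor's sketch): decoding the @busy word per the layout
 * documented above. "bo_handle" and "fd" are placeholders.
 *
 * .. code-block:: C
 *
 * 	struct drm_i915_gem_busy busy = { .handle = bo_handle };
 * 	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy) == 0 && busy.busy) {
 * 		__u16 read_mask = busy.busy >> 16; // bit per engine class
 * 		if (busy.busy & 0xffff) {
 * 			// writer class, undoing the offset-by-1 encoding
 * 			__u16 write_class = (busy.busy & 0xffff) - 1;
 * 		}
 * 	}
 */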
144401e04c3fSmrg
144501e04c3fSmrg/**
14467ec681f3Smrg * struct drm_i915_gem_caching - Set or get the caching for given object
14477ec681f3Smrg * handle.
144801e04c3fSmrg *
14497ec681f3Smrg * Allow userspace to control the GTT caching bits for a given object when the
14507ec681f3Smrg * object is later mapped through the ppGTT (or GGTT on older platforms lacking
14517ec681f3Smrg * ppGTT support, or if the object is used for scanout). Note that this might
14527ec681f3Smrg * require unbinding the object from the GTT first, if its current caching value
14537ec681f3Smrg * doesn't match.
145401e04c3fSmrg *
14557ec681f3Smrg * Note that this all changes on discrete platforms, starting from DG1, the
14567ec681f3Smrg * set/get caching is no longer supported, and is now rejected.  Instead the CPU
14577ec681f3Smrg * caching attributes (WB vs WC) will become an immutable creation time property
14587ec681f3Smrg * for the object, along with the GTT caching level. For now we don't expose any
14597ec681f3Smrg * new uAPI for this, instead on DG1 this is all implicit, although this largely
14607ec681f3Smrg * shouldn't matter since DG1 is coherent by default (without any way of
14617ec681f3Smrg * controlling it).
14627ec681f3Smrg *
14637ec681f3Smrg * Implicit caching rules, starting from DG1:
14647ec681f3Smrg *
14657ec681f3Smrg *     - If any of the object placements (see &drm_i915_gem_create_ext_memory_regions)
14667ec681f3Smrg *       contain I915_MEMORY_CLASS_DEVICE then the object will be allocated and
14677ec681f3Smrg *       mapped as write-combined only.
14687ec681f3Smrg *
14697ec681f3Smrg *     - Everything else is always allocated and mapped as write-back, with the
14707ec681f3Smrg *       guarantee that everything is also coherent with the GPU.
147101e04c3fSmrg *
14727ec681f3Smrg * Note that this is likely to change in the future again, where we might need
14737ec681f3Smrg * more flexibility on future devices, so making this all explicit as part of a
14747ec681f3Smrg * new &drm_i915_gem_create_ext extension is probable.
14757ec681f3Smrg *
14767ec681f3Smrg * Side note: Part of the reason for this is that changing the at-allocation-time CPU
14777ec681f3Smrg * caching attributes for the pages might be required (and is expensive) if we
14787ec681f3Smrg * need to then CPU map the pages later with different caching attributes. This
14797ec681f3Smrg * inconsistent caching behaviour, while supported on x86, is not universally
14807ec681f3Smrg * supported on other architectures. So for simplicity we opt for setting
14817ec681f3Smrg * everything at creation time, whilst also making it immutable, on discrete
14827ec681f3Smrg * platforms.
148301e04c3fSmrg */
148401e04c3fSmrgstruct drm_i915_gem_caching {
148501e04c3fSmrg	/**
14867ec681f3Smrg	 * @handle: Handle of the buffer to set/get the caching level.
14877ec681f3Smrg	 */
148801e04c3fSmrg	__u32 handle;
148901e04c3fSmrg
149001e04c3fSmrg	/**
14917ec681f3Smrg	 * @caching: The GTT caching level to apply or possible return value.
149201e04c3fSmrg	 *
14937ec681f3Smrg	 * The supported @caching values:
14947ec681f3Smrg	 *
14957ec681f3Smrg	 * I915_CACHING_NONE:
14967ec681f3Smrg	 *
14977ec681f3Smrg	 * GPU access is not coherent with CPU caches.  Default for machines
14987ec681f3Smrg	 * without an LLC. This means manual flushing might be needed if we
14997ec681f3Smrg	 * want GPU access to be coherent.
15007ec681f3Smrg	 *
15017ec681f3Smrg	 * I915_CACHING_CACHED:
15027ec681f3Smrg	 *
15037ec681f3Smrg	 * GPU access is coherent with CPU caches and furthermore the data is
15047ec681f3Smrg	 * cached in last-level caches shared between CPU cores and the GPU GT.
15057ec681f3Smrg	 *
15067ec681f3Smrg	 * I915_CACHING_DISPLAY:
15077ec681f3Smrg	 *
15087ec681f3Smrg	 * Special GPU caching mode which is coherent with the scanout engines.
15097ec681f3Smrg	 * Transparently falls back to I915_CACHING_NONE on platforms where no
15107ec681f3Smrg	 * special cache mode (like write-through or gfdt flushing) is
15117ec681f3Smrg	 * available. The kernel automatically sets this mode when using a
15127ec681f3Smrg	 * buffer as a scanout target.  Userspace can manually set this mode to
15137ec681f3Smrg	 * avoid a costly stall and clflush in the hotpath of drawing the first
15147ec681f3Smrg	 * frame.
15157ec681f3Smrg	 */
15167ec681f3Smrg#define I915_CACHING_NONE		0
15177ec681f3Smrg#define I915_CACHING_CACHED		1
15187ec681f3Smrg#define I915_CACHING_DISPLAY		2
151901e04c3fSmrg	__u32 caching;
152001e04c3fSmrg};
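
/*
 * Example (editor's sketch): requesting LLC-cached mode for an object on
 * a pre-DG1 platform. On discrete platforms this ioctl is rejected, as
 * described above.
 *
 * .. code-block:: C
 *
 * 	struct drm_i915_gem_caching arg = {
 * 		.handle = bo_handle, // placeholder GEM handle
 * 		.caching = I915_CACHING_CACHED,
 * 	};
 * 	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
 */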
152101e04c3fSmrg
152201e04c3fSmrg#define I915_TILING_NONE	0
152301e04c3fSmrg#define I915_TILING_X		1
152401e04c3fSmrg#define I915_TILING_Y		2
152501e04c3fSmrg#define I915_TILING_LAST	I915_TILING_Y
152601e04c3fSmrg
152701e04c3fSmrg#define I915_BIT_6_SWIZZLE_NONE		0
152801e04c3fSmrg#define I915_BIT_6_SWIZZLE_9		1
152901e04c3fSmrg#define I915_BIT_6_SWIZZLE_9_10		2
153001e04c3fSmrg#define I915_BIT_6_SWIZZLE_9_11		3
153101e04c3fSmrg#define I915_BIT_6_SWIZZLE_9_10_11	4
153201e04c3fSmrg/* Not seen by userland */
153301e04c3fSmrg#define I915_BIT_6_SWIZZLE_UNKNOWN	5
153401e04c3fSmrg/* Seen by userland. */
153501e04c3fSmrg#define I915_BIT_6_SWIZZLE_9_17		6
153601e04c3fSmrg#define I915_BIT_6_SWIZZLE_9_10_17	7
153701e04c3fSmrg
153801e04c3fSmrgstruct drm_i915_gem_set_tiling {
153901e04c3fSmrg	/** Handle of the buffer to have its tiling state updated */
154001e04c3fSmrg	__u32 handle;
154101e04c3fSmrg
154201e04c3fSmrg	/**
154301e04c3fSmrg	 * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
154401e04c3fSmrg	 * I915_TILING_Y).
154501e04c3fSmrg	 *
154601e04c3fSmrg	 * This value is to be set on request, and will be updated by the
154701e04c3fSmrg	 * kernel on successful return with the actual chosen tiling layout.
154801e04c3fSmrg	 *
154901e04c3fSmrg	 * The tiling mode may be demoted to I915_TILING_NONE when the system
155001e04c3fSmrg	 * has bit 6 swizzling that can't be managed correctly by GEM.
155101e04c3fSmrg	 *
155201e04c3fSmrg	 * Buffer contents become undefined when changing tiling_mode.
155301e04c3fSmrg	 */
155401e04c3fSmrg	__u32 tiling_mode;
155501e04c3fSmrg
155601e04c3fSmrg	/**
155701e04c3fSmrg	 * Stride in bytes for the object when in I915_TILING_X or
155801e04c3fSmrg	 * I915_TILING_Y.
155901e04c3fSmrg	 */
156001e04c3fSmrg	__u32 stride;
156101e04c3fSmrg
156201e04c3fSmrg	/**
156301e04c3fSmrg	 * Returned address bit 6 swizzling required for CPU access through
156401e04c3fSmrg	 * mmap mapping.
156501e04c3fSmrg	 */
156601e04c3fSmrg	__u32 swizzle_mode;
156701e04c3fSmrg};
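
/*
 * Example (editor's sketch): requesting X tiling. The stride is
 * illustrative only; valid strides depend on the hardware generation.
 * Remember the kernel may write back a demoted tiling_mode, as noted
 * above.
 *
 * .. code-block:: C
 *
 * 	struct drm_i915_gem_set_tiling arg = {
 * 		.handle = bo_handle,
 * 		.tiling_mode = I915_TILING_X,
 * 		.stride = 4096, // bytes per tiled row (placeholder)
 * 	};
 * 	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &arg);
 * 	// arg.tiling_mode/arg.swizzle_mode now hold the chosen values.
 */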
156801e04c3fSmrg
156901e04c3fSmrgstruct drm_i915_gem_get_tiling {
157001e04c3fSmrg	/** Handle of the buffer to get tiling state for. */
157101e04c3fSmrg	__u32 handle;
157201e04c3fSmrg
157301e04c3fSmrg	/**
157401e04c3fSmrg	 * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
157501e04c3fSmrg	 * I915_TILING_Y).
157601e04c3fSmrg	 */
157701e04c3fSmrg	__u32 tiling_mode;
157801e04c3fSmrg
157901e04c3fSmrg	/**
158001e04c3fSmrg	 * Returned address bit 6 swizzling required for CPU access through
158101e04c3fSmrg	 * mmap mapping.
158201e04c3fSmrg	 */
158301e04c3fSmrg	__u32 swizzle_mode;
158401e04c3fSmrg
158501e04c3fSmrg	/**
158601e04c3fSmrg	 * Returned address bit 6 swizzling required for CPU access through
158701e04c3fSmrg	 * mmap mapping whilst bound.
158801e04c3fSmrg	 */
158901e04c3fSmrg	__u32 phys_swizzle_mode;
159001e04c3fSmrg};
159101e04c3fSmrg
159201e04c3fSmrgstruct drm_i915_gem_get_aperture {
159301e04c3fSmrg	/** Total size of the aperture used by i915_gem_execbuffer, in bytes */
159401e04c3fSmrg	__u64 aper_size;
159501e04c3fSmrg
159601e04c3fSmrg	/**
159701e04c3fSmrg	 * Available space in the aperture used by i915_gem_execbuffer, in
159801e04c3fSmrg	 * bytes
159901e04c3fSmrg	 */
160001e04c3fSmrg	__u64 aper_available_size;
160101e04c3fSmrg};
160201e04c3fSmrg
160301e04c3fSmrgstruct drm_i915_get_pipe_from_crtc_id {
160401e04c3fSmrg	/** ID of CRTC being requested **/
160501e04c3fSmrg	__u32 crtc_id;
160601e04c3fSmrg
160701e04c3fSmrg	/** pipe of requested CRTC **/
160801e04c3fSmrg	__u32 pipe;
160901e04c3fSmrg};
161001e04c3fSmrg
161101e04c3fSmrg#define I915_MADV_WILLNEED 0
161201e04c3fSmrg#define I915_MADV_DONTNEED 1
161301e04c3fSmrg#define __I915_MADV_PURGED 2 /* internal state */
161401e04c3fSmrg
161501e04c3fSmrgstruct drm_i915_gem_madvise {
161601e04c3fSmrg	/** Handle of the buffer to change the backing store advice */
161701e04c3fSmrg	__u32 handle;
161801e04c3fSmrg
161901e04c3fSmrg	/* Advice: either the buffer will be needed again in the near future,
162001e04c3fSmrg	 *         or won't be and could be discarded under memory pressure.
162101e04c3fSmrg	 */
162201e04c3fSmrg	__u32 madv;
162301e04c3fSmrg
162401e04c3fSmrg	/** Whether the backing store still exists. */
162501e04c3fSmrg	__u32 retained;
162601e04c3fSmrg};
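
/*
 * Example (editor's sketch): marking a cached buffer purgeable and later
 * reclaiming it. If @retained comes back 0 the pages were discarded and
 * the contents must be regenerated; "reupload_contents" is a
 * hypothetical helper.
 *
 * .. code-block:: C
 *
 * 	struct drm_i915_gem_madvise arg = {
 * 		.handle = bo_handle,
 * 		.madv = I915_MADV_DONTNEED, // discardable under pressure
 * 	};
 * 	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg);
 *
 * 	arg.madv = I915_MADV_WILLNEED; // take the buffer back into use
 * 	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg);
 * 	if (!arg.retained)
 * 		reupload_contents(bo_handle);
 */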
162701e04c3fSmrg
162801e04c3fSmrg/* flags */
162901e04c3fSmrg#define I915_OVERLAY_TYPE_MASK 		0xff
163001e04c3fSmrg#define I915_OVERLAY_YUV_PLANAR 	0x01
163101e04c3fSmrg#define I915_OVERLAY_YUV_PACKED 	0x02
163201e04c3fSmrg#define I915_OVERLAY_RGB		0x03
163301e04c3fSmrg
163401e04c3fSmrg#define I915_OVERLAY_DEPTH_MASK		0xff00
163501e04c3fSmrg#define I915_OVERLAY_RGB24		0x1000
163601e04c3fSmrg#define I915_OVERLAY_RGB16		0x2000
163701e04c3fSmrg#define I915_OVERLAY_RGB15		0x3000
163801e04c3fSmrg#define I915_OVERLAY_YUV422		0x0100
163901e04c3fSmrg#define I915_OVERLAY_YUV411		0x0200
164001e04c3fSmrg#define I915_OVERLAY_YUV420		0x0300
164101e04c3fSmrg#define I915_OVERLAY_YUV410		0x0400
164201e04c3fSmrg
164301e04c3fSmrg#define I915_OVERLAY_SWAP_MASK		0xff0000
164401e04c3fSmrg#define I915_OVERLAY_NO_SWAP		0x000000
164501e04c3fSmrg#define I915_OVERLAY_UV_SWAP		0x010000
164601e04c3fSmrg#define I915_OVERLAY_Y_SWAP		0x020000
164701e04c3fSmrg#define I915_OVERLAY_Y_AND_UV_SWAP	0x030000
164801e04c3fSmrg
164901e04c3fSmrg#define I915_OVERLAY_FLAGS_MASK		0xff000000
165001e04c3fSmrg#define I915_OVERLAY_ENABLE		0x01000000
165101e04c3fSmrg
165201e04c3fSmrgstruct drm_intel_overlay_put_image {
165301e04c3fSmrg	/* various flags and src format description */
165401e04c3fSmrg	__u32 flags;
165501e04c3fSmrg	/* source picture description */
165601e04c3fSmrg	__u32 bo_handle;
165701e04c3fSmrg	/* stride values and offsets are in bytes, buffer relative */
165801e04c3fSmrg	__u16 stride_Y; /* stride for packed formats */
165901e04c3fSmrg	__u16 stride_UV;
166001e04c3fSmrg	__u32 offset_Y; /* offset for packed formats */
166101e04c3fSmrg	__u32 offset_U;
166201e04c3fSmrg	__u32 offset_V;
166301e04c3fSmrg	/* in pixels */
166401e04c3fSmrg	__u16 src_width;
166501e04c3fSmrg	__u16 src_height;
166601e04c3fSmrg	/* to compensate for the scaling factors of partially covered surfaces */
166701e04c3fSmrg	__u16 src_scan_width;
166801e04c3fSmrg	__u16 src_scan_height;
166901e04c3fSmrg	/* output crtc description */
167001e04c3fSmrg	__u32 crtc_id;
167101e04c3fSmrg	__u16 dst_x;
167201e04c3fSmrg	__u16 dst_y;
167301e04c3fSmrg	__u16 dst_width;
167401e04c3fSmrg	__u16 dst_height;
167501e04c3fSmrg};
167601e04c3fSmrg
167701e04c3fSmrg/* flags */
167801e04c3fSmrg#define I915_OVERLAY_UPDATE_ATTRS	(1<<0)
167901e04c3fSmrg#define I915_OVERLAY_UPDATE_GAMMA	(1<<1)
168001e04c3fSmrg#define I915_OVERLAY_DISABLE_DEST_COLORKEY	(1<<2)
168101e04c3fSmrgstruct drm_intel_overlay_attrs {
168201e04c3fSmrg	__u32 flags;
168301e04c3fSmrg	__u32 color_key;
168401e04c3fSmrg	__s32 brightness;
168501e04c3fSmrg	__u32 contrast;
168601e04c3fSmrg	__u32 saturation;
168701e04c3fSmrg	__u32 gamma0;
168801e04c3fSmrg	__u32 gamma1;
168901e04c3fSmrg	__u32 gamma2;
169001e04c3fSmrg	__u32 gamma3;
169101e04c3fSmrg	__u32 gamma4;
169201e04c3fSmrg	__u32 gamma5;
169301e04c3fSmrg};
169401e04c3fSmrg
169501e04c3fSmrg/*
169601e04c3fSmrg * Intel sprite handling
169701e04c3fSmrg *
169801e04c3fSmrg * Color keying works with a min/mask/max tuple.  Both source and destination
169901e04c3fSmrg * color keying is allowed.
170001e04c3fSmrg *
170101e04c3fSmrg * Source keying:
170201e04c3fSmrg * Sprite pixels within the min & max values, masked against the color channels
170301e04c3fSmrg * specified in the mask field, will be transparent.  All other pixels will
170401e04c3fSmrg * be displayed on top of the primary plane.  For RGB surfaces, only the min
170501e04c3fSmrg * and mask fields will be used; ranged compares are not allowed.
170601e04c3fSmrg *
170701e04c3fSmrg * Destination keying:
170801e04c3fSmrg * Primary plane pixels that match the min value, masked against the color
170901e04c3fSmrg * channels specified in the mask field, will be replaced by corresponding
171001e04c3fSmrg * pixels from the sprite plane.
171101e04c3fSmrg *
171201e04c3fSmrg * Note that source & destination keying are exclusive; only one can be
171301e04c3fSmrg * active on a given plane.
171401e04c3fSmrg */
171501e04c3fSmrg
171601e04c3fSmrg#define I915_SET_COLORKEY_NONE		(1<<0) /* Deprecated. Instead set
171701e04c3fSmrg						* flags==0 to disable colorkeying.
171801e04c3fSmrg						*/
171901e04c3fSmrg#define I915_SET_COLORKEY_DESTINATION	(1<<1)
172001e04c3fSmrg#define I915_SET_COLORKEY_SOURCE	(1<<2)
172101e04c3fSmrgstruct drm_intel_sprite_colorkey {
172201e04c3fSmrg	__u32 plane_id;
172301e04c3fSmrg	__u32 min_value;
172401e04c3fSmrg	__u32 channel_mask;
172501e04c3fSmrg	__u32 max_value;
172601e04c3fSmrg	__u32 flags;
172701e04c3fSmrg};
172801e04c3fSmrg
172901e04c3fSmrgstruct drm_i915_gem_wait {
173001e04c3fSmrg	/** Handle of BO we shall wait on */
173101e04c3fSmrg	__u32 bo_handle;
173201e04c3fSmrg	__u32 flags;
173301e04c3fSmrg	/** Number of nanoseconds to wait; returns the time remaining. */
173401e04c3fSmrg	__s64 timeout_ns;
173501e04c3fSmrg};
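
/*
 * Example (editor's sketch): a bounded wait for a buffer to go idle.
 * @flags must be zero; to the editor's understanding a negative
 * timeout_ns waits indefinitely.
 *
 * .. code-block:: C
 *
 * 	struct drm_i915_gem_wait wait = {
 * 		.bo_handle = bo_handle,
 * 		.timeout_ns = 1000000000, // wait up to one second
 * 	};
 * 	int ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 * 	// ret == 0: idle, wait.timeout_ns holds the time remaining;
 * 	// ret < 0 with errno == ETIME: still busy after one second.
 */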
173601e04c3fSmrg
173701e04c3fSmrgstruct drm_i915_gem_context_create {
17387ec681f3Smrg	__u32 ctx_id; /* output: id of new context */
173901e04c3fSmrg	__u32 pad;
174001e04c3fSmrg};
174101e04c3fSmrg
17427ec681f3Smrgstruct drm_i915_gem_context_create_ext {
17437ec681f3Smrg	__u32 ctx_id; /* output: id of new context */
174401e04c3fSmrg	__u32 flags;
17457ec681f3Smrg#define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS	(1u << 0)
17467ec681f3Smrg#define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE	(1u << 1)
17477ec681f3Smrg#define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \
17487ec681f3Smrg	(-(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE << 1))
17497ec681f3Smrg	__u64 extensions;
175001e04c3fSmrg};
175101e04c3fSmrg
175201e04c3fSmrgstruct drm_i915_gem_context_param {
175301e04c3fSmrg	__u32 ctx_id;
175401e04c3fSmrg	__u32 size;
175501e04c3fSmrg	__u64 param;
175601e04c3fSmrg#define I915_CONTEXT_PARAM_BAN_PERIOD	0x1
17577ec681f3Smrg/* I915_CONTEXT_PARAM_NO_ZEROMAP has been removed.  On the off chance
17587ec681f3Smrg * someone somewhere has attempted to use it, never re-use this context
17597ec681f3Smrg * param number.
17607ec681f3Smrg */
176101e04c3fSmrg#define I915_CONTEXT_PARAM_NO_ZEROMAP	0x2
176201e04c3fSmrg#define I915_CONTEXT_PARAM_GTT_SIZE	0x3
176301e04c3fSmrg#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE	0x4
176401e04c3fSmrg#define I915_CONTEXT_PARAM_BANNABLE	0x5
176501e04c3fSmrg#define I915_CONTEXT_PARAM_PRIORITY	0x6
176601e04c3fSmrg#define   I915_CONTEXT_MAX_USER_PRIORITY	1023 /* inclusive */
176701e04c3fSmrg#define   I915_CONTEXT_DEFAULT_PRIORITY		0
176801e04c3fSmrg#define   I915_CONTEXT_MIN_USER_PRIORITY	-1023 /* inclusive */
176953c12917Smaya	/*
177053c12917Smaya	 * When using the following param, value should be a pointer to
177153c12917Smaya	 * drm_i915_gem_context_param_sseu.
177253c12917Smaya	 */
177353c12917Smaya#define I915_CONTEXT_PARAM_SSEU		0x7
17747ec681f3Smrg
17757ec681f3Smrg/*
17767ec681f3Smrg * Not all clients may want to attempt automatic recover of a context after
17777ec681f3Smrg * a hang (for example, some clients may only submit very small incremental
17787ec681f3Smrg * batches relying on known logical state of previous batches which will never
17797ec681f3Smrg * recover correctly and each attempt will hang), and so would prefer that
17807ec681f3Smrg * the context is forever banned instead.
17817ec681f3Smrg *
17827ec681f3Smrg * If set to false (0), after a reset, subsequent (and in flight) rendering
17837ec681f3Smrg * from this context is discarded, and the client will need to create a new
17847ec681f3Smrg * context to use instead.
17857ec681f3Smrg *
17867ec681f3Smrg * If set to true (1), the kernel will automatically attempt to recover the
17877ec681f3Smrg * context by skipping the hanging batch and executing the next batch starting
17887ec681f3Smrg * from the default context state (discarding the incomplete logical context
17897ec681f3Smrg * state lost due to the reset).
17907ec681f3Smrg *
17917ec681f3Smrg * On creation, all new contexts are marked as recoverable.
17927ec681f3Smrg */
17937ec681f3Smrg#define I915_CONTEXT_PARAM_RECOVERABLE	0x8
17947ec681f3Smrg
17957ec681f3Smrg	/*
17967ec681f3Smrg	 * The id of the associated virtual memory address space (ppGTT) of
17977ec681f3Smrg	 * this context. Can be retrieved and passed to another context
17987ec681f3Smrg	 * (on the same fd) for both to use the same ppGTT and so share
17997ec681f3Smrg	 * address layouts, and avoid reloading the page tables on context
18007ec681f3Smrg	 * switches between themselves.
18017ec681f3Smrg	 *
18027ec681f3Smrg	 * See DRM_I915_GEM_VM_CREATE and DRM_I915_GEM_VM_DESTROY.
18037ec681f3Smrg	 */
18047ec681f3Smrg#define I915_CONTEXT_PARAM_VM		0x9
18057ec681f3Smrg
18067ec681f3Smrg/*
18077ec681f3Smrg * I915_CONTEXT_PARAM_ENGINES:
18087ec681f3Smrg *
18097ec681f3Smrg * Bind this context to operate on this subset of available engines. Henceforth,
18107ec681f3Smrg * the I915_EXEC_RING selector for DRM_IOCTL_I915_GEM_EXECBUFFER2 operates as
18117ec681f3Smrg * an index into this array of engines; I915_EXEC_DEFAULT selecting engine[0]
18127ec681f3Smrg * and upwards. Slots 0...N are filled in using the specified (class, instance).
18137ec681f3Smrg * Use
18147ec681f3Smrg *	engine_class: I915_ENGINE_CLASS_INVALID,
18157ec681f3Smrg *	engine_instance: I915_ENGINE_CLASS_INVALID_NONE
18167ec681f3Smrg * to specify a gap in the array that can be filled in later, e.g. by a
18177ec681f3Smrg * virtual engine used for load balancing.
18187ec681f3Smrg *
18197ec681f3Smrg * Setting the number of engines bound to the context to 0, by passing a zero
18207ec681f3Smrg * sized argument, will revert to the default settings.
18217ec681f3Smrg *
18227ec681f3Smrg * See struct i915_context_param_engines.
18237ec681f3Smrg *
18247ec681f3Smrg * Extensions:
18257ec681f3Smrg *   i915_context_engines_load_balance (I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE)
18267ec681f3Smrg *   i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND)
18277ec681f3Smrg */
18287ec681f3Smrg#define I915_CONTEXT_PARAM_ENGINES	0xa
18297ec681f3Smrg
18307ec681f3Smrg/*
18317ec681f3Smrg * I915_CONTEXT_PARAM_PERSISTENCE:
18327ec681f3Smrg *
18337ec681f3Smrg * Allow the context and active rendering to survive the process until
18347ec681f3Smrg * completion. Persistence allows fire-and-forget clients to queue up a
18357ec681f3Smrg * bunch of work, hand the output over to a display server and then quit.
18367ec681f3Smrg * If the context is marked as not persistent, upon closing (either via
18377ec681f3Smrg * an explicit DRM_I915_GEM_CONTEXT_DESTROY or implicitly from file closure
18387ec681f3Smrg * or process termination), the context and any outstanding requests will be
18397ec681f3Smrg * cancelled (and exported fences for cancelled requests marked as -EIO).
18407ec681f3Smrg *
18417ec681f3Smrg * By default, new contexts allow persistence.
18427ec681f3Smrg */
18437ec681f3Smrg#define I915_CONTEXT_PARAM_PERSISTENCE	0xb
18447ec681f3Smrg
18457ec681f3Smrg/* This API has been removed.  On the off chance someone somewhere has
18467ec681f3Smrg * attempted to use it, never re-use this context param number.
18477ec681f3Smrg */
18487ec681f3Smrg#define I915_CONTEXT_PARAM_RINGSIZE	0xc
18497ec681f3Smrg/* Must be kept compact -- no holes and well documented */
18507ec681f3Smrg
185101e04c3fSmrg	__u64 value;
185201e04c3fSmrg};
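
/*
 * Example (editor's sketch): raising the scheduling priority of a
 * context. Values outside [I915_CONTEXT_MIN_USER_PRIORITY,
 * I915_CONTEXT_MAX_USER_PRIORITY] are rejected, and raising the
 * priority above the default may require elevated privileges.
 *
 * .. code-block:: C
 *
 * 	struct drm_i915_gem_context_param p = {
 * 		.ctx_id = ctx_id, // placeholder context id
 * 		.param = I915_CONTEXT_PARAM_PRIORITY,
 * 		.value = 512, // between default (0) and max (1023)
 * 	};
 * 	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */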
185301e04c3fSmrg
18547ec681f3Smrg/*
185553c12917Smaya * Context SSEU programming
185653c12917Smaya *
185753c12917Smaya * It may be necessary for either functional or performance reasons to configure
185853c12917Smaya * a context to run with a reduced number of SSEU (where SSEU stands for Slice/
185953c12917Smaya * Sub-slice/EU).
186053c12917Smaya *
186153c12917Smaya * This is done by specifying the SSEU configuration through the below
186253c12917Smaya * @struct drm_i915_gem_context_param_sseu for every supported engine which
186353c12917Smaya * userspace intends to use.
186453c12917Smaya *
186553c12917Smaya * Not all GPUs or engines support this functionality in which case an error
186653c12917Smaya * code -ENODEV will be returned.
186753c12917Smaya *
186853c12917Smaya * Also, the flexibility of possible SSEU configuration permutations varies
186953c12917Smaya * between GPU generations and is subject to software-imposed limitations.
187053c12917Smaya * Requesting an unsupported combination will return an error code of -EINVAL.
187153c12917Smaya *
187253c12917Smaya * NOTE: When perf/OA is active the context's SSEU configuration is ignored in
187353c12917Smaya * favour of a single global setting.
187453c12917Smaya */
187553c12917Smayastruct drm_i915_gem_context_param_sseu {
187653c12917Smaya	/*
187753c12917Smaya	 * Engine class & instance to be configured or queried.
187853c12917Smaya	 */
18797ec681f3Smrg	struct i915_engine_class_instance engine;
188053c12917Smaya
188153c12917Smaya	/*
18827ec681f3Smrg	 * Unknown flags must be cleared to zero.
188353c12917Smaya	 */
188453c12917Smaya	__u32 flags;
18857ec681f3Smrg#define I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX (1u << 0)
188653c12917Smaya
188753c12917Smaya	/*
188853c12917Smaya	 * Mask of slices to enable for the context. Valid values are a subset
188953c12917Smaya	 * of the bitmask value returned for I915_PARAM_SLICE_MASK.
189053c12917Smaya	 */
189153c12917Smaya	__u64 slice_mask;
189253c12917Smaya
189353c12917Smaya	/*
189453c12917Smaya	 * Mask of subslices to enable for the context. Valid values are a
189553c12917Smaya	 * subset of the bitmask value return by I915_PARAM_SUBSLICE_MASK.
189653c12917Smaya	 */
189753c12917Smaya	__u64 subslice_mask;
189853c12917Smaya
189953c12917Smaya	/*
190053c12917Smaya	 * Minimum/Maximum number of EUs to enable per subslice for the
190153c12917Smaya	 * context. min_eus_per_subslice must be less than or equal to
190253c12917Smaya	 * max_eus_per_subslice.
190353c12917Smaya	 */
190453c12917Smaya	__u16 min_eus_per_subslice;
190553c12917Smaya	__u16 max_eus_per_subslice;
190653c12917Smaya
190753c12917Smaya	/*
190853c12917Smaya	 * Unused for now. Must be cleared to zero.
190953c12917Smaya	 */
191053c12917Smaya	__u32 rsvd;
191153c12917Smaya};
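
/*
 * Example (editor's sketch): querying the render engine's current SSEU
 * configuration for a context. To the editor's understanding, @size must
 * be set to sizeof(sseu) since the param value is a pointer.
 *
 * .. code-block:: C
 *
 * 	struct drm_i915_gem_context_param_sseu sseu = {
 * 		.engine = { I915_ENGINE_CLASS_RENDER, 0 },
 * 	};
 * 	struct drm_i915_gem_context_param p = {
 * 		.ctx_id = ctx_id,
 * 		.param = I915_CONTEXT_PARAM_SSEU,
 * 		.size = sizeof(sseu),
 * 		.value = (uintptr_t)&sseu,
 * 	};
 * 	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
 * 	// sseu.slice_mask / sseu.subslice_mask describe the configuration.
 */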
191253c12917Smaya
19137ec681f3Smrg/**
19147ec681f3Smrg * DOC: Virtual Engine uAPI
19157ec681f3Smrg *
19167ec681f3Smrg * Virtual engine is a concept where userspace is able to configure a set of
19177ec681f3Smrg * physical engines, submit a batch buffer, and let the driver execute it on any
19187ec681f3Smrg * engine from the set as it sees fit.
19197ec681f3Smrg *
19207ec681f3Smrg * This is primarily useful on parts which have multiple instances of a same
19217ec681f3Smrg * class engine, like for example GT3+ Skylake parts with their two VCS engines.
19227ec681f3Smrg *
19237ec681f3Smrg * For instance userspace can enumerate all engines of a certain class using the
19247ec681f3Smrg * previously described `Engine Discovery uAPI`_. After that userspace can
19257ec681f3Smrg * create a GEM context with a placeholder slot for the virtual engine (using
19267ec681f3Smrg * `I915_ENGINE_CLASS_INVALID` and `I915_ENGINE_CLASS_INVALID_NONE` for class
19277ec681f3Smrg * and instance respectively) and finally using the
19287ec681f3Smrg * `I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE` extension place a virtual engine in
19297ec681f3Smrg * the same reserved slot.
19307ec681f3Smrg *
19317ec681f3Smrg * Example of creating a virtual engine and submitting a batch buffer to it:
19327ec681f3Smrg *
19337ec681f3Smrg * .. code-block:: C
19347ec681f3Smrg *
19357ec681f3Smrg * 	I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(virtual, 2) = {
19367ec681f3Smrg * 		.base.name = I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE,
19377ec681f3Smrg * 		.engine_index = 0, // Place this virtual engine into engine map slot 0
19387ec681f3Smrg * 		.num_siblings = 2,
19397ec681f3Smrg * 		.engines = { { I915_ENGINE_CLASS_VIDEO, 0 },
19407ec681f3Smrg * 			     { I915_ENGINE_CLASS_VIDEO, 1 }, },
19417ec681f3Smrg * 	};
19427ec681f3Smrg * 	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1) = {
19437ec681f3Smrg * 		.engines = { { I915_ENGINE_CLASS_INVALID,
19447ec681f3Smrg * 			       I915_ENGINE_CLASS_INVALID_NONE } },
19457ec681f3Smrg * 		.extensions = to_user_pointer(&virtual), // Chains after load_balance extension
19467ec681f3Smrg * 	};
19477ec681f3Smrg * 	struct drm_i915_gem_context_create_ext_setparam p_engines = {
19487ec681f3Smrg * 		.base = {
19497ec681f3Smrg * 			.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
19507ec681f3Smrg * 		},
19517ec681f3Smrg * 		.param = {
19527ec681f3Smrg * 			.param = I915_CONTEXT_PARAM_ENGINES,
19537ec681f3Smrg * 			.value = to_user_pointer(&engines),
19547ec681f3Smrg * 			.size = sizeof(engines),
19557ec681f3Smrg * 		},
19567ec681f3Smrg * 	};
19577ec681f3Smrg * 	struct drm_i915_gem_context_create_ext create = {
19587ec681f3Smrg * 		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
19597ec681f3Smrg * 		.extensions = to_user_pointer(&p_engines),
19607ec681f3Smrg * 	};
19617ec681f3Smrg *
19627ec681f3Smrg * 	ctx_id = gem_context_create_ext(drm_fd, &create);
19637ec681f3Smrg *
19647ec681f3Smrg * 	// Now we have created a GEM context with its engine map containing a
19657ec681f3Smrg * 	// single virtual engine. Submissions to this slot can go either to
19667ec681f3Smrg * 	// vcs0 or vcs1, depending on the load balancing algorithm used inside
19677ec681f3Smrg * 	// the driver. The load balancing is dynamic from one batch buffer to
19687ec681f3Smrg * 	// another and transparent to userspace.
19697ec681f3Smrg *
19707ec681f3Smrg * 	...
19717ec681f3Smrg * 	execbuf.rsvd1 = ctx_id;
19727ec681f3Smrg * 	execbuf.flags = 0; // Submits to index 0 which is the virtual engine
19737ec681f3Smrg * 	gem_execbuf(drm_fd, &execbuf);
19747ec681f3Smrg */
197501e04c3fSmrg
19767ec681f3Smrg/*
19777ec681f3Smrg * i915_context_engines_load_balance:
19787ec681f3Smrg *
19797ec681f3Smrg * Enable load balancing across this set of engines.
19807ec681f3Smrg *
19817ec681f3Smrg * Into the I915_EXEC_DEFAULT slot [0], a virtual engine is created that when
19827ec681f3Smrg * used will proxy the execbuffer request onto one of the set of engines
19837ec681f3Smrg * in such a way as to distribute the load evenly across the set.
19847ec681f3Smrg *
19857ec681f3Smrg * The set of engines must be compatible (e.g. the same HW class) as they
19867ec681f3Smrg * will share the same logical GPU context and ring.
19877ec681f3Smrg *
19887ec681f3Smrg * To intermix rendering with the virtual engine and direct rendering onto
19897ec681f3Smrg * the backing engines (bypassing the load balancing proxy), the context must
19907ec681f3Smrg * be defined to use a single timeline for all engines.
19917ec681f3Smrg */
19927ec681f3Smrgstruct i915_context_engines_load_balance {
19937ec681f3Smrg	struct i915_user_extension base;
199401e04c3fSmrg
19957ec681f3Smrg	__u16 engine_index;
19967ec681f3Smrg	__u16 num_siblings;
19977ec681f3Smrg	__u32 flags; /* all undefined flags must be zero */
199801e04c3fSmrg
19997ec681f3Smrg	__u64 mbz64; /* reserved for future use; must be zero */
20007ec681f3Smrg
20017ec681f3Smrg	struct i915_engine_class_instance engines[0];
20027ec681f3Smrg} __attribute__((packed));
20037ec681f3Smrg
20047ec681f3Smrg#define I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(name__, N__) struct { \
20057ec681f3Smrg	struct i915_user_extension base; \
20067ec681f3Smrg	__u16 engine_index; \
20077ec681f3Smrg	__u16 num_siblings; \
20087ec681f3Smrg	__u32 flags; \
20097ec681f3Smrg	__u64 mbz64; \
20107ec681f3Smrg	struct i915_engine_class_instance engines[N__]; \
20117ec681f3Smrg} __attribute__((packed)) name__
20127ec681f3Smrg
20137ec681f3Smrg/*
20147ec681f3Smrg * i915_context_engines_bond:
20157ec681f3Smrg *
20167ec681f3Smrg * Constructed bonded pairs for execution within a virtual engine.
20177ec681f3Smrg *
20187ec681f3Smrg * All engines are equal, but some are more equal than others. Given
20197ec681f3Smrg * the distribution of resources in the HW, it may be preferable to run
20207ec681f3Smrg * a request on a given subset of engines in parallel to a request on a
20217ec681f3Smrg * specific engine. We enable this selection of engines within a virtual
20227ec681f3Smrg * engine by specifying bonding pairs, for any given master engine we will
20237ec681f3Smrg * only execute on one of the corresponding siblings within the virtual engine.
20247ec681f3Smrg *
20257ec681f3Smrg * To execute a request in parallel on the master engine and a sibling requires
20267ec681f3Smrg * coordination with a I915_EXEC_FENCE_SUBMIT.
20277ec681f3Smrg */
20287ec681f3Smrgstruct i915_context_engines_bond {
20297ec681f3Smrg	struct i915_user_extension base;
20307ec681f3Smrg
20317ec681f3Smrg	struct i915_engine_class_instance master;
20327ec681f3Smrg
20337ec681f3Smrg	__u16 virtual_index; /* index of virtual engine in ctx->engines[] */
20347ec681f3Smrg	__u16 num_bonds;
20357ec681f3Smrg
20367ec681f3Smrg	__u64 flags; /* all undefined flags must be zero */
20377ec681f3Smrg	__u64 mbz64[4]; /* reserved for future use; must be zero */
20387ec681f3Smrg
20397ec681f3Smrg	struct i915_engine_class_instance engines[0];
20407ec681f3Smrg} __attribute__((packed));
20417ec681f3Smrg
20427ec681f3Smrg#define I915_DEFINE_CONTEXT_ENGINES_BOND(name__, N__) struct { \
20437ec681f3Smrg	struct i915_user_extension base; \
20447ec681f3Smrg	struct i915_engine_class_instance master; \
20457ec681f3Smrg	__u16 virtual_index; \
20467ec681f3Smrg	__u16 num_bonds; \
20477ec681f3Smrg	__u64 flags; \
20487ec681f3Smrg	__u64 mbz64[4]; \
20497ec681f3Smrg	struct i915_engine_class_instance engines[N__]; \
20507ec681f3Smrg} __attribute__((packed)) name__
20517ec681f3Smrg
20527ec681f3Smrg/**
20537ec681f3Smrg * DOC: Context Engine Map uAPI
20547ec681f3Smrg *
20557ec681f3Smrg * Context engine map is a new way of addressing engines when submitting batch-
20567ec681f3Smrg * buffers, replacing the existing way of using identifiers like `I915_EXEC_BLT`
20577ec681f3Smrg * inside the flags field of `struct drm_i915_gem_execbuffer2`.
20587ec681f3Smrg *
20597ec681f3Smrg * To use it, created GEM contexts need to be configured with a list of engines
20607ec681f3Smrg * the user is intending to submit to. This is accomplished using the
20617ec681f3Smrg * `I915_CONTEXT_PARAM_ENGINES` parameter and `struct
20627ec681f3Smrg * i915_context_param_engines`.
20637ec681f3Smrg *
20647ec681f3Smrg * For such contexts the `I915_EXEC_RING_MASK` field becomes an index into the
20657ec681f3Smrg * configured map.
20667ec681f3Smrg *
20677ec681f3Smrg * Example of creating such context and submitting against it:
20687ec681f3Smrg *
20697ec681f3Smrg * .. code-block:: C
20707ec681f3Smrg *
20717ec681f3Smrg * 	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = {
20727ec681f3Smrg * 		.engines = { { I915_ENGINE_CLASS_RENDER, 0 },
20737ec681f3Smrg * 			     { I915_ENGINE_CLASS_COPY, 0 } }
20747ec681f3Smrg * 	};
20757ec681f3Smrg * 	struct drm_i915_gem_context_create_ext_setparam p_engines = {
20767ec681f3Smrg * 		.base = {
20777ec681f3Smrg * 			.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
20787ec681f3Smrg * 		},
20797ec681f3Smrg * 		.param = {
20807ec681f3Smrg * 			.param = I915_CONTEXT_PARAM_ENGINES,
20817ec681f3Smrg * 			.value = to_user_pointer(&engines),
20827ec681f3Smrg * 			.size = sizeof(engines),
20837ec681f3Smrg * 		},
20847ec681f3Smrg * 	};
20857ec681f3Smrg * 	struct drm_i915_gem_context_create_ext create = {
20867ec681f3Smrg * 		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
20877ec681f3Smrg * 		.extensions = to_user_pointer(&p_engines),
20887ec681f3Smrg * 	};
20897ec681f3Smrg *
20907ec681f3Smrg * 	ctx_id = gem_context_create_ext(drm_fd, &create);
20917ec681f3Smrg *
20927ec681f3Smrg * 	// We have now created a GEM context with two engines in the map:
20937ec681f3Smrg * 	// Index 0 points to rcs0 while index 1 points to bcs0. Other engines
20947ec681f3Smrg * 	// will not be accessible from this context.
20957ec681f3Smrg *
20967ec681f3Smrg * 	...
20977ec681f3Smrg * 	execbuf.rsvd1 = ctx_id;
20987ec681f3Smrg * 	execbuf.flags = 0; // Submits to index 0, which is rcs0 for this context
20997ec681f3Smrg * 	gem_execbuf(drm_fd, &execbuf);
21007ec681f3Smrg *
21017ec681f3Smrg * 	...
21027ec681f3Smrg * 	execbuf.rsvd1 = ctx_id;
21037ec681f3Smrg * 	execbuf.flags = 1; // Submits to index 1, which is bcs0 for this context
21047ec681f3Smrg * 	gem_execbuf(drm_fd, &execbuf);
21057ec681f3Smrg */
21067ec681f3Smrg
21077ec681f3Smrgstruct i915_context_param_engines {
21087ec681f3Smrg	__u64 extensions; /* linked chain of extension blocks, 0 terminates */
21097ec681f3Smrg#define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */
21107ec681f3Smrg#define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */
21117ec681f3Smrg	struct i915_engine_class_instance engines[0];
21127ec681f3Smrg} __attribute__((packed));
21137ec681f3Smrg
21147ec681f3Smrg#define I915_DEFINE_CONTEXT_PARAM_ENGINES(name__, N__) struct { \
21157ec681f3Smrg	__u64 extensions; \
21167ec681f3Smrg	struct i915_engine_class_instance engines[N__]; \
21177ec681f3Smrg} __attribute__((packed)) name__
21187ec681f3Smrg
21197ec681f3Smrgstruct drm_i915_gem_context_create_ext_setparam {
21207ec681f3Smrg#define I915_CONTEXT_CREATE_EXT_SETPARAM 0
21217ec681f3Smrg	struct i915_user_extension base;
21227ec681f3Smrg	struct drm_i915_gem_context_param param;
21237ec681f3Smrg};
21247ec681f3Smrg
21257ec681f3Smrg/* This API has been removed.  On the off chance someone somewhere has
21267ec681f3Smrg * attempted to use it, never re-use this extension number.
21277ec681f3Smrg */
21287ec681f3Smrg#define I915_CONTEXT_CREATE_EXT_CLONE 1
21297ec681f3Smrg
21307ec681f3Smrgstruct drm_i915_gem_context_destroy {
21317ec681f3Smrg	__u32 ctx_id;
21327ec681f3Smrg	__u32 pad;
21337ec681f3Smrg};
21347ec681f3Smrg
21357ec681f3Smrg/*
21367ec681f3Smrg * DRM_I915_GEM_VM_CREATE -
21377ec681f3Smrg *
21387ec681f3Smrg * Create a new virtual memory address space (ppGTT) for use within a context
21397ec681f3Smrg * on the same file. Extensions can be provided to configure exactly how the
21407ec681f3Smrg * address space is set up upon creation.
21417ec681f3Smrg *
21427ec681f3Smrg * The id of new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM is
21437ec681f3Smrg * returned in the outparam @id.
21447ec681f3Smrg *
21457ec681f3Smrg * No flags are currently defined; all bits are reserved and must be zero.
21467ec681f3Smrg *
21477ec681f3Smrg * An extension chain may be provided, starting with @extensions, and terminated
21487ec681f3Smrg * by the @next_extension being 0. Currently, no extensions are defined.
21497ec681f3Smrg *
21507ec681f3Smrg * DRM_I915_GEM_VM_DESTROY -
21517ec681f3Smrg *
21527ec681f3Smrg * Destroys a previously created VM id, specified in @id.
21537ec681f3Smrg *
21547ec681f3Smrg * No extensions or flags are allowed currently, and so must be zero.
21557ec681f3Smrg */
21567ec681f3Smrgstruct drm_i915_gem_vm_control {
21577ec681f3Smrg	__u64 extensions;
21587ec681f3Smrg	__u32 flags;
21597ec681f3Smrg	__u32 vm_id;
21607ec681f3Smrg};
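
/*
 * Example (editor's sketch): creating a ppGTT and attaching it to an
 * existing context via I915_CONTEXT_PARAM_VM, so that two contexts can
 * share one address space.
 *
 * .. code-block:: C
 *
 * 	struct drm_i915_gem_vm_control vm = { 0 };
 * 	drmIoctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &vm);
 *
 * 	struct drm_i915_gem_context_param p = {
 * 		.ctx_id = ctx_id,
 * 		.param = I915_CONTEXT_PARAM_VM,
 * 		.value = vm.vm_id,
 * 	};
 * 	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */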
21617ec681f3Smrg
21627ec681f3Smrgstruct drm_i915_reg_read {
21637ec681f3Smrg	/*
21647ec681f3Smrg	 * Register offset.
21657ec681f3Smrg	 * For 64bit wide registers where the upper 32bits don't immediately
21667ec681f3Smrg	 * follow the lower 32bits, the offset of the lower 32bits must
21677ec681f3Smrg	 * be specified.
21687ec681f3Smrg	 */
21697ec681f3Smrg	__u64 offset;
21707ec681f3Smrg#define I915_REG_READ_8B_WA (1ul << 0)
21717ec681f3Smrg
21727ec681f3Smrg	__u64 val; /* Return value */
21737ec681f3Smrg};
21747ec681f3Smrg
21757ec681f3Smrg/* Known registers:
21767ec681f3Smrg *
21777ec681f3Smrg * Render engine timestamp - 0x2358 + 64bit - gen7+
21787ec681f3Smrg * - Note this register returns an invalid value if read using the default
21797ec681f3Smrg *   single-instruction 8-byte read; to work around that, pass the
21807ec681f3Smrg *   I915_REG_READ_8B_WA flag in the offset field.
21817ec681f3Smrg *
21827ec681f3Smrg */
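
/*
 * Example (editor's sketch): reading the gen7+ render engine timestamp
 * listed above with the 8-byte-read workaround applied;
 * "use_timestamp" is a hypothetical consumer.
 *
 * .. code-block:: C
 *
 * 	struct drm_i915_reg_read rr = {
 * 		.offset = 0x2358 | I915_REG_READ_8B_WA,
 * 	};
 * 	if (drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &rr) == 0)
 * 		use_timestamp(rr.val);
 */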
21837ec681f3Smrg
21847ec681f3Smrgstruct drm_i915_reset_stats {
21857ec681f3Smrg	__u32 ctx_id;
21867ec681f3Smrg	__u32 flags;
21877ec681f3Smrg
21887ec681f3Smrg	/* All resets since boot/module reload, for all contexts */
21897ec681f3Smrg	__u32 reset_count;
21907ec681f3Smrg
21917ec681f3Smrg	/* Number of batches lost when active in GPU, for this context */
21927ec681f3Smrg	__u32 batch_active;
21937ec681f3Smrg
21947ec681f3Smrg	/* Number of batches lost pending for execution, for this context */
21957ec681f3Smrg	__u32 batch_pending;
21967ec681f3Smrg
21977ec681f3Smrg	__u32 pad;
21987ec681f3Smrg};
21997ec681f3Smrg
22007ec681f3Smrg/**
22017ec681f3Smrg * struct drm_i915_gem_userptr - Create GEM object from user allocated memory.
22027ec681f3Smrg *
22037ec681f3Smrg * Userptr objects have several restrictions on what ioctls can be used with the
22047ec681f3Smrg * object handle.
22057ec681f3Smrg */
22067ec681f3Smrgstruct drm_i915_gem_userptr {
22077ec681f3Smrg	/**
22087ec681f3Smrg	 * @user_ptr: The pointer to the allocated memory.
22097ec681f3Smrg	 *
22107ec681f3Smrg	 * Needs to be aligned to PAGE_SIZE.
22117ec681f3Smrg	 */
22127ec681f3Smrg	__u64 user_ptr;
22137ec681f3Smrg
22147ec681f3Smrg	/**
22157ec681f3Smrg	 * @user_size:
22167ec681f3Smrg	 *
22177ec681f3Smrg	 * The size in bytes for the allocated memory. This will also become the
22187ec681f3Smrg	 * object size.
22197ec681f3Smrg	 *
22207ec681f3Smrg	 * Needs to be aligned to PAGE_SIZE, and must be at least PAGE_SIZE
22217ec681f3Smrg	 * in size.
22227ec681f3Smrg	 */
22237ec681f3Smrg	__u64 user_size;
22247ec681f3Smrg
22257ec681f3Smrg	/**
22267ec681f3Smrg	 * @flags:
22277ec681f3Smrg	 *
22287ec681f3Smrg	 * Supported flags:
22297ec681f3Smrg	 *
22307ec681f3Smrg	 * I915_USERPTR_READ_ONLY:
22317ec681f3Smrg	 *
22327ec681f3Smrg	 * Mark the object as readonly; this also means GPU access can only be
22337ec681f3Smrg	 * readonly. This is only supported on HW which supports readonly access
22347ec681f3Smrg	 * through the GTT. If the HW can't support readonly access, an error is
22357ec681f3Smrg	 * returned.
22367ec681f3Smrg	 *
22377ec681f3Smrg	 * I915_USERPTR_PROBE:
22387ec681f3Smrg	 *
22397ec681f3Smrg	 * Probe the provided @user_ptr range and validate that the @user_ptr is
22407ec681f3Smrg	 * indeed pointing to normal memory and that the range is also valid.
22417ec681f3Smrg	 * For example, if some garbage address is given to the kernel, then
22427ec681f3Smrg	 * this should complain.
22437ec681f3Smrg	 *
22447ec681f3Smrg	 * Returns -EFAULT if the probe failed.
22457ec681f3Smrg	 *
22467ec681f3Smrg	 * Note that this doesn't populate the backing pages, and also doesn't
22477ec681f3Smrg	 * guarantee that the object will remain valid when the object is
22487ec681f3Smrg	 * eventually used.
22497ec681f3Smrg	 *
22507ec681f3Smrg	 * The kernel supports this feature if I915_PARAM_HAS_USERPTR_PROBE
22517ec681f3Smrg	 * returns a non-zero value.
22527ec681f3Smrg	 *
22537ec681f3Smrg	 * I915_USERPTR_UNSYNCHRONIZED:
22547ec681f3Smrg	 *
22557ec681f3Smrg	 * NOT USED. Setting this flag will result in an error.
22567ec681f3Smrg	 */
22577ec681f3Smrg	__u32 flags;
22587ec681f3Smrg#define I915_USERPTR_READ_ONLY 0x1
22597ec681f3Smrg#define I915_USERPTR_PROBE 0x2
22607ec681f3Smrg#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
22617ec681f3Smrg	/**
22627ec681f3Smrg	 * @handle: Returned handle for the object.
22637ec681f3Smrg	 *
22647ec681f3Smrg	 * Object handles are nonzero.
22657ec681f3Smrg	 */
22667ec681f3Smrg	__u32 handle;
22677ec681f3Smrg};
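
/*
 * A minimal sketch of creating a userptr object; it assumes the
 * DRM_IOCTL_I915_GEM_USERPTR wrapper defined elsewhere in this header and a
 * PAGE_SIZE constant. use_object() is a hypothetical consumer and error
 * handling is elided.
 *
 * .. code-block:: C
 *
 *	void *mem;
 *	struct drm_i915_gem_userptr userptr = { .flags = I915_USERPTR_PROBE };
 *
 *	// Both the pointer and the size must be PAGE_SIZE aligned.
 *	posix_memalign(&mem, PAGE_SIZE, 16 * PAGE_SIZE);
 *	userptr.user_ptr = (uintptr_t)mem;
 *	userptr.user_size = 16 * PAGE_SIZE;
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr) == 0)
 *		use_object(userptr.handle);	// handle now names the object
 */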

enum drm_i915_oa_format {
	I915_OA_FORMAT_A13 = 1,	    /* HSW only */
	I915_OA_FORMAT_A29,	    /* HSW only */
	I915_OA_FORMAT_A13_B8_C8,   /* HSW only */
	I915_OA_FORMAT_B4_C8,	    /* HSW only */
	I915_OA_FORMAT_A45_B8_C8,   /* HSW only */
	I915_OA_FORMAT_B4_C8_A16,   /* HSW only */
	I915_OA_FORMAT_C4_B8,	    /* HSW+ */

	/* Gen8+ */
	I915_OA_FORMAT_A12,
	I915_OA_FORMAT_A12_B8_C8,
	I915_OA_FORMAT_A32u40_A4u32_B8_C8,

	I915_OA_FORMAT_MAX	    /* non-ABI */
};

enum drm_i915_perf_property_id {
	/**
	 * Open the stream for a specific context handle (as used with
	 * execbuffer2). A stream opened for a specific context this way
	 * won't typically require root privileges.
	 *
	 * This property is available in perf revision 1.
	 */
	DRM_I915_PERF_PROP_CTX_HANDLE = 1,

	/**
	 * A value of 1 requests the inclusion of raw OA unit reports as
	 * part of stream samples.
	 *
	 * This property is available in perf revision 1.
	 */
	DRM_I915_PERF_PROP_SAMPLE_OA,

	/**
	 * The value specifies which set of OA unit metrics should be
	 * configured, defining the contents of any OA unit reports.
	 *
	 * This property is available in perf revision 1.
	 */
	DRM_I915_PERF_PROP_OA_METRICS_SET,

	/**
	 * The value specifies the size and layout of OA unit reports.
	 *
	 * This property is available in perf revision 1.
	 */
	DRM_I915_PERF_PROP_OA_FORMAT,

	/**
	 * Specifying this property implicitly requests periodic OA unit
	 * sampling and (at least on Haswell) the sampling frequency is derived
	 * from this exponent as follows:
	 *
	 *   80ns * 2^(period_exponent + 1)
	 *
	 * For example, an exponent of 12 gives a sampling period of
	 * 80ns * 2^13 = 655360ns, roughly 655 microseconds.
	 *
	 * This property is available in perf revision 1.
	 */
	DRM_I915_PERF_PROP_OA_EXPONENT,

	/**
	 * Specifying this property is only valid when also specifying a
	 * context to filter with DRM_I915_PERF_PROP_CTX_HANDLE. Specifying
	 * this property will hold off preemption of the particular context we
	 * want to gather performance data about. The execbuf2 submissions must
	 * include a drm_i915_gem_execbuffer_ext_perf parameter for this to
	 * apply.
	 *
	 * This property is available in perf revision 3.
	 */
	DRM_I915_PERF_PROP_HOLD_PREEMPTION,

	/**
	 * Specifying this pins all contexts to the specified SSEU power
	 * configuration for the duration of the recording.
	 *
	 * This parameter's value is a pointer to a struct
	 * drm_i915_gem_context_param_sseu.
	 *
	 * This property is available in perf revision 4.
	 */
	DRM_I915_PERF_PROP_GLOBAL_SSEU,

	/**
	 * This optional parameter specifies the timer interval in nanoseconds
	 * at which the i915 driver will check the OA buffer for available data.
	 * Minimum allowed value is 100 microseconds. A default value is used by
	 * the driver if this parameter is not specified. Note that larger timer
	 * values will reduce CPU consumption during OA perf captures. However,
	 * excessively large values would potentially result in OA buffer
	 * overwrites as captures reach the end of the OA buffer.
	 *
	 * This property is available in perf revision 5.
	 */
	DRM_I915_PERF_PROP_POLL_OA_PERIOD,

	DRM_I915_PERF_PROP_MAX /* non-ABI */
};

struct drm_i915_perf_open_param {
	__u32 flags;
#define I915_PERF_FLAG_FD_CLOEXEC	(1<<0)
#define I915_PERF_FLAG_FD_NONBLOCK	(1<<1)
#define I915_PERF_FLAG_DISABLED		(1<<2)

	/** The number of u64 (id, value) pairs */
	__u32 num_properties;

	/**
	 * Pointer to array of u64 (id, value) pairs configuring the stream
	 * to open.
	 */
	__u64 properties_ptr;
};
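
/*
 * A minimal sketch of opening an OA perf stream; it assumes the
 * DRM_IOCTL_I915_PERF_OPEN wrapper defined elsewhere in this header, and
 * metrics_set is an assumed, previously obtained configuration id. Error
 * handling is elided.
 *
 * .. code-block:: C
 *
 *	uint64_t properties[] = {
 *		// (id, value) pairs, as counted by num_properties.
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 12,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC | I915_PERF_FLAG_DISABLED,
 *		.num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
 *		.properties_ptr = (uintptr_t)properties,
 *	};
 *	// On success the ioctl returns a new fd for the stream.
 *	int stream_fd = ioctl(fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 */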

/*
 * Enable data capture for a stream that was either opened in a disabled state
 * via I915_PERF_FLAG_DISABLED or was later disabled via
 * I915_PERF_IOCTL_DISABLE.
 *
 * It is intended to be cheaper to disable and enable a stream than it may be
 * to close and re-open a stream with the same configuration.
 *
 * It's undefined whether any pending data for the stream will be lost.
 *
 * This ioctl is available in perf revision 1.
 */
#define I915_PERF_IOCTL_ENABLE	_IO('i', 0x0)

/*
 * Disable data capture for a stream.
 *
 * It is an error to try and read a stream that is disabled.
 *
 * This ioctl is available in perf revision 1.
 */
#define I915_PERF_IOCTL_DISABLE	_IO('i', 0x1)

/*
 * Change the metrics_set captured by a stream.
 *
 * If the stream is bound to a specific context, the configuration change
 * will be performed inline with that context such that it takes effect before
 * the next execbuf submission.
 *
 * Returns the previously bound metrics set id, or a negative error code.
 *
 * This ioctl is available in perf revision 2.
 */
#define I915_PERF_IOCTL_CONFIG	_IO('i', 0x2)
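
/*
 * A brief sketch of driving a stream that was opened with
 * I915_PERF_FLAG_DISABLED; stream_fd is the fd returned by
 * DRM_IOCTL_I915_PERF_OPEN and new_metrics_set is an assumed, previously
 * created configuration id passed as the ioctl argument.
 *
 * .. code-block:: C
 *
 *	ioctl(stream_fd, I915_PERF_IOCTL_ENABLE, 0);	// start capturing
 *
 *	// Switch metrics without reopening; returns the previous set id.
 *	ioctl(stream_fd, I915_PERF_IOCTL_CONFIG, new_metrics_set);
 *
 *	ioctl(stream_fd, I915_PERF_IOCTL_DISABLE, 0);	// stop capturing
 */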

/*
 * Common to all i915 perf records
 */
struct drm_i915_perf_record_header {
	__u32 type;
	__u16 pad;
	__u16 size;
};

enum drm_i915_perf_record_type {

	/**
	 * Samples are the workhorse record type whose contents are extensible
	 * and defined when opening an i915 perf stream based on the given
	 * properties.
	 *
	 * Boolean properties following the naming convention
	 * DRM_I915_PERF_SAMPLE_xyz_PROP request the inclusion of 'xyz' data in
	 * every sample.
	 *
	 * The order of these sample properties given by userspace has no
	 * effect on the ordering of data within a sample. The order is
	 * documented here.
	 *
	 * struct {
	 *     struct drm_i915_perf_record_header header;
	 *
	 *     { u32 oa_report[]; } && DRM_I915_PERF_PROP_SAMPLE_OA
	 * };
	 */
	DRM_I915_PERF_RECORD_SAMPLE = 1,

	/*
	 * Indicates that one or more OA reports were not written by the
	 * hardware. This can happen for example if an MI_REPORT_PERF_COUNT
	 * command collides with periodic sampling - which would be more likely
	 * at higher sampling frequencies.
	 */
	DRM_I915_PERF_RECORD_OA_REPORT_LOST = 2,

	/**
	 * An error occurred that resulted in all pending OA reports being lost.
	 */
	DRM_I915_PERF_RECORD_OA_BUFFER_LOST = 3,

	DRM_I915_PERF_RECORD_MAX /* non-ABI */
};
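
/*
 * A minimal sketch of consuming records from an enabled stream; header.size
 * covers the header plus its payload, so it can be used to walk the buffer.
 * handle_sample() is a hypothetical consumer, the buffer size is illustrative
 * and error handling is elided.
 *
 * .. code-block:: C
 *
 *	uint8_t buf[128 * 1024];
 *	ssize_t len = read(stream_fd, buf, sizeof(buf));
 *
 *	for (ssize_t off = 0; off < len;) {
 *		const struct drm_i915_perf_record_header *header =
 *			(const void *)(buf + off);
 *
 *		if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
 *			handle_sample(header);
 *
 *		off += header->size;
 *	}
 */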

/*
 * Structure to upload perf dynamic configuration into the kernel.
 */
struct drm_i915_perf_oa_config {
	/** String formatted like "%08x-%04x-%04x-%04x-%012x" */
	char uuid[36];

	__u32 n_mux_regs;
	__u32 n_boolean_regs;
	__u32 n_flex_regs;

	/*
	 * These fields are pointers to tuples of u32 values (register address,
	 * value). For example the expected length of the buffer pointed to by
	 * mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
	 */
	__u64 mux_regs_ptr;
	__u64 boolean_regs_ptr;
	__u64 flex_regs_ptr;
};
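
/*
 * A minimal sketch of uploading a dynamic configuration; it assumes the
 * DRM_IOCTL_I915_PERF_ADD_CONFIG wrapper defined elsewhere in this header,
 * and the register (address, value) pair and the uuid are placeholders only.
 *
 * .. code-block:: C
 *
 *	uint32_t mux_regs[] = { 0x9888, 0x15050000 };	// placeholder pair
 *	struct drm_i915_perf_oa_config config = {
 *		.uuid = "01234567-0123-0123-0123-0123456789ab",
 *		.n_mux_regs = 1,
 *		.mux_regs_ptr = (uintptr_t)mux_regs,
 *	};
 *	// On success, returns a metrics set id usable with
 *	// DRM_I915_PERF_PROP_OA_METRICS_SET or I915_PERF_IOCTL_CONFIG.
 *	int metrics_set = ioctl(fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
 */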

/**
 * struct drm_i915_query_item - An individual query for the kernel to process.
 *
 * The behaviour is determined by the @query_id. Note that exactly what data
 * gets written at @data_ptr also depends on the specific @query_id.
 */
struct drm_i915_query_item {
	/** @query_id: The id for this query */
	__u64 query_id;
#define DRM_I915_QUERY_TOPOLOGY_INFO    1
#define DRM_I915_QUERY_ENGINE_INFO	2
#define DRM_I915_QUERY_PERF_CONFIG      3
#define DRM_I915_QUERY_MEMORY_REGIONS   4
/* Must be kept compact -- no holes and well documented */

	/**
	 * @length:
	 *
	 * When set to zero by userspace, this is filled with the size of the
	 * data to be written at the @data_ptr pointer. The kernel sets this
	 * value to a negative value to signal an error on a particular query
	 * item.
	 */
	__s32 length;

	/**
	 * @flags:
	 *
	 * When query_id == DRM_I915_QUERY_TOPOLOGY_INFO, must be 0.
	 *
	 * When query_id == DRM_I915_QUERY_PERF_CONFIG, must be one of the
	 * following:
	 *
	 *	- DRM_I915_QUERY_PERF_CONFIG_LIST
	 *	- DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
	 *	- DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID
	 */
	__u32 flags;
#define DRM_I915_QUERY_PERF_CONFIG_LIST          1
#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID 2
#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID   3

	/**
	 * @data_ptr:
	 *
	 * Data will be written at the location pointed to by @data_ptr when
	 * the value of @length matches the length of the data to be written by
	 * the kernel.
	 */
	__u64 data_ptr;
};

/**
 * struct drm_i915_query - Supply an array of struct drm_i915_query_item for the
 * kernel to fill out.
 *
 * Note that this is generally a two step process for each struct
 * drm_i915_query_item in the array:
 *
 * 1. Call the DRM_IOCTL_I915_QUERY, giving it our array of struct
 *    drm_i915_query_item, with &drm_i915_query_item.length set to zero. The
 *    kernel will then fill in the size, in bytes, which tells userspace how
 *    much memory it needs to allocate for the blob (say for an array of
 *    properties).
 *
 * 2. Next we call DRM_IOCTL_I915_QUERY again, this time with the
 *    &drm_i915_query_item.data_ptr equal to our newly allocated blob. Note that
 *    the &drm_i915_query_item.length should still be the same as what the
 *    kernel previously set. At this point the kernel can fill in the blob.
 *
 * Note that for some query items it can make sense for userspace to just pass
 * in a buffer/blob equal to or larger than the required size. In this case only
 * a single ioctl call is needed. For some smaller query items this can work
 * quite well.
 *
 * A sketch of the two step process is shown after this struct.
 */
struct drm_i915_query {
	/** @num_items: The number of elements in the @items_ptr array */
	__u32 num_items;

	/**
	 * @flags: Unused for now. Must be cleared to zero.
	 */
	__u32 flags;

	/**
	 * @items_ptr:
	 *
	 * Pointer to an array of struct drm_i915_query_item. The number of
	 * array elements is @num_items.
	 */
	__u64 items_ptr;
};
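
/*
 * A minimal sketch of the two step process for a single query item; the query
 * id is illustrative and error handling is elided.
 *
 * .. code-block:: C
 *
 *	struct drm_i915_query_item item = {
 *		.query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
 *	};
 *	struct drm_i915_query query = {
 *		.num_items = 1,
 *		.items_ptr = (uintptr_t)&item,
 *	};
 *	void *blob;
 *
 *	// Step 1: item.length == 0, so the kernel reports the required size.
 *	ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
 *
 *	// Step 2: allocate item.length bytes and let the kernel fill them in.
 *	blob = calloc(1, item.length);
 *	item.data_ptr = (uintptr_t)blob;
 *	ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
 */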

/*
 * Data written by the kernel with query DRM_I915_QUERY_TOPOLOGY_INFO:
 *
 * data: contains the 3 pieces of information:
 *
 * - the slice mask with one bit per slice telling whether a slice is
 *   available. The availability of slice X can be queried with the following
 *   formula:
 *
 *           (data[X / 8] >> (X % 8)) & 1
 *
 * - the subslice mask for each slice with one bit per subslice telling
 *   whether a subslice is available. Gen12 has dual-subslices, which are
 *   similar to two gen11 subslices. For gen12, this array represents dual-
 *   subslices. The availability of subslice Y in slice X can be queried
 *   with the following formula:
 *
 *           (data[subslice_offset +
 *                 X * subslice_stride +
 *                 Y / 8] >> (Y % 8)) & 1
 *
 * - the EU mask for each subslice in each slice with one bit per EU telling
 *   whether an EU is available. The availability of EU Z in subslice Y in
 *   slice X can be queried with the following formula:
 *
 *           (data[eu_offset +
 *                 (X * max_subslices + Y) * eu_stride +
 *                 Z / 8] >> (Z % 8)) & 1
 *
 * A sketch applying these formulas follows the struct definition below.
 */
struct drm_i915_query_topology_info {
	/*
	 * Unused for now. Must be cleared to zero.
	 */
	__u16 flags;

	__u16 max_slices;
	__u16 max_subslices;
	__u16 max_eus_per_subslice;

	/*
	 * Offset in data[] at which the subslice masks are stored.
	 */
	__u16 subslice_offset;

	/*
	 * Stride at which each of the subslice masks for each slice are
	 * stored.
	 */
	__u16 subslice_stride;

	/*
	 * Offset in data[] at which the EU masks are stored.
	 */
	__u16 eu_offset;

	/*
	 * Stride at which each of the EU masks for each subslice are stored.
	 */
	__u16 eu_stride;

	__u8 data[];
};
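
/*
 * A minimal sketch of helpers applying the availability formulas above to a
 * drm_i915_query_topology_info blob obtained via DRM_IOCTL_I915_QUERY; the
 * helper names are illustrative only.
 *
 * .. code-block:: C
 *
 *	static bool slice_available(const struct drm_i915_query_topology_info *info,
 *				    int x)
 *	{
 *		return (info->data[x / 8] >> (x % 8)) & 1;
 *	}
 *
 *	static bool subslice_available(const struct drm_i915_query_topology_info *info,
 *				       int x, int y)
 *	{
 *		return (info->data[info->subslice_offset +
 *				   x * info->subslice_stride +
 *				   y / 8] >> (y % 8)) & 1;
 *	}
 *
 *	static bool eu_available(const struct drm_i915_query_topology_info *info,
 *				 int x, int y, int z)
 *	{
 *		return (info->data[info->eu_offset +
 *				   (x * info->max_subslices + y) * info->eu_stride +
 *				   z / 8] >> (z % 8)) & 1;
 *	}
 */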

/**
 * DOC: Engine Discovery uAPI
 *
 * Engine discovery uAPI is a way of enumerating physical engines present in a
 * GPU associated with an open i915 DRM file descriptor. This supersedes the old
 * way of using `DRM_IOCTL_I915_GETPARAM` and engine identifiers like
 * `I915_PARAM_HAS_BLT`.
 *
 * The need for this interface arose with Icelake and newer GPUs, which
 * started to establish a pattern of having multiple engines of the same class,
 * where not all instances were always completely functionally equivalent.
 *
 * Entry point for this uapi is `DRM_IOCTL_I915_QUERY` with the
 * `DRM_I915_QUERY_ENGINE_INFO` as the queried item id.
 *
 * Example for getting the list of engines:
 *
 * .. code-block:: C
 *
 * 	struct drm_i915_query_engine_info *info;
 * 	struct drm_i915_query_item item = {
 * 		.query_id = DRM_I915_QUERY_ENGINE_INFO,
 * 	};
 * 	struct drm_i915_query query = {
 * 		.num_items = 1,
 * 		.items_ptr = (uintptr_t)&item,
 * 	};
 * 	int err, i;
 *
 * 	// First query the size of the blob we need, this needs to be large
 * 	// enough to hold our array of engines. The kernel will fill out the
 * 	// item.length for us, which is the number of bytes we need.
 * 	//
 * 	// Alternatively a large buffer can be allocated straight away enabling
 * 	// querying in one pass, in which case item.length should contain the
 * 	// length of the provided buffer.
 * 	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
 * 	if (err) ...
 *
 * 	info = calloc(1, item.length);
 * 	// Now that we allocated the required number of bytes, we call the ioctl
 * 	// again, this time with the data_ptr pointing to our newly allocated
 * 	// blob, which the kernel can then populate with info on all engines.
 * 	item.data_ptr = (uintptr_t)info;
 *
 * 	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
 * 	if (err) ...
 *
 * 	// We can now access each engine in the array
 * 	for (i = 0; i < info->num_engines; i++) {
 * 		struct drm_i915_engine_info einfo = info->engines[i];
 * 		u16 class = einfo.engine.engine_class;
 * 		u16 instance = einfo.engine.engine_instance;
 * 		....
 * 	}
 *
 * 	free(info);
 *
 * Each of the enumerated engines, apart from being defined by its class and
 * instance (see `struct i915_engine_class_instance`), also can have flags and
 * capabilities defined as documented in i915_drm.h.
 *
 * For instance video engines which support HEVC encoding will have the
 * `I915_VIDEO_CLASS_CAPABILITY_HEVC` capability bit set.
 *
 * Engine discovery only fully comes into its own when combined with the new
 * way of addressing engines when submitting batch buffers using contexts with
 * engine maps configured.
 */

/**
 * struct drm_i915_engine_info
 *
 * Describes one engine and its capabilities as known to the driver.
 */
struct drm_i915_engine_info {
	/** @engine: Engine class and instance. */
	struct i915_engine_class_instance engine;

	/** @rsvd0: Reserved field. */
	__u32 rsvd0;

	/** @flags: Engine flags. */
	__u64 flags;

	/** @capabilities: Capabilities of this engine. */
	__u64 capabilities;
#define I915_VIDEO_CLASS_CAPABILITY_HEVC		(1 << 0)
#define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC	(1 << 1)

	/** @rsvd1: Reserved fields. */
	__u64 rsvd1[4];
};
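
/*
 * A brief sketch of testing a capability bit on one entry of the enumerated
 * engines array from the example above; it assumes the I915_ENGINE_CLASS_VIDEO
 * identifier defined elsewhere in this header, and use_engine() is a
 * hypothetical consumer.
 *
 * .. code-block:: C
 *
 *	if (einfo.engine.engine_class == I915_ENGINE_CLASS_VIDEO &&
 *	    (einfo.capabilities & I915_VIDEO_CLASS_CAPABILITY_HEVC))
 *		use_engine(einfo.engine);	// instance supports HEVC
 */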

/**
 * struct drm_i915_query_engine_info
 *
 * Engine info query enumerates all engines known to the driver by filling in
 * an array of struct drm_i915_engine_info structures.
 */
struct drm_i915_query_engine_info {
	/** @num_engines: Number of struct drm_i915_engine_info structs following. */
	__u32 num_engines;

	/** @rsvd: MBZ */
	__u32 rsvd[3];

	/** @engines: Marker for drm_i915_engine_info structures. */
	struct drm_i915_engine_info engines[];
};

/*
 * Data written by the kernel with query DRM_I915_QUERY_PERF_CONFIG.
 */
struct drm_i915_query_perf_config {
	union {
		/*
		 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 sets
		 * this field to the number of configurations available.
		 */
		__u64 n_configs;

		/*
		 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID,
		 * i915 will use the value in this field as configuration
		 * identifier to decide what data to write into config_ptr.
		 */
		__u64 config;

		/*
		 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID,
		 * i915 will use the value in this field as configuration
		 * identifier to decide what data to write into config_ptr.
		 *
		 * String formatted like "%08x-%04x-%04x-%04x-%012x"
		 */
		char uuid[36];
	};

	/*
	 * Unused for now. Must be cleared to zero.
	 */
	__u32 flags;

	/*
	 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 will
	 * write an array of __u64 of configuration identifiers.
	 *
	 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID or
	 * DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID, i915 will write a struct
	 * drm_i915_perf_oa_config. If the following fields of
	 * drm_i915_perf_oa_config are not set to 0, i915 will write into
	 * the associated pointers the values submitted when the
	 * configuration was created:
	 *
	 *         - n_mux_regs
	 *         - n_boolean_regs
	 *         - n_flex_regs
	 */
	__u8 data[];
};
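
/*
 * A minimal sketch of listing the available perf configurations with
 * DRM_I915_QUERY_PERF_CONFIG_LIST; the returned blob is a struct
 * drm_i915_query_perf_config followed by the identifier array in data[].
 * Error handling is elided.
 *
 * .. code-block:: C
 *
 *	struct drm_i915_query_item item = {
 *		.query_id = DRM_I915_QUERY_PERF_CONFIG,
 *		.flags = DRM_I915_QUERY_PERF_CONFIG_LIST,
 *	};
 *	struct drm_i915_query query = {
 *		.num_items = 1,
 *		.items_ptr = (uintptr_t)&item,
 *	};
 *	struct drm_i915_query_perf_config *list;
 *
 *	ioctl(fd, DRM_IOCTL_I915_QUERY, &query);	// sizes the blob
 *	list = calloc(1, item.length);
 *	item.data_ptr = (uintptr_t)list;
 *	ioctl(fd, DRM_IOCTL_I915_QUERY, &query);	// fills the blob
 *
 *	// list->n_configs identifiers of type __u64 follow the struct.
 *	const __u64 *ids = (const __u64 *)list->data;
 */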

/**
 * enum drm_i915_gem_memory_class - Supported memory classes
 */
enum drm_i915_gem_memory_class {
	/** @I915_MEMORY_CLASS_SYSTEM: System memory */
	I915_MEMORY_CLASS_SYSTEM = 0,
	/** @I915_MEMORY_CLASS_DEVICE: Device local-memory */
	I915_MEMORY_CLASS_DEVICE,
};

/**
 * struct drm_i915_gem_memory_class_instance - Identify particular memory region
 */
struct drm_i915_gem_memory_class_instance {
	/** @memory_class: See enum drm_i915_gem_memory_class */
	__u16 memory_class;

	/** @memory_instance: Which instance */
	__u16 memory_instance;
};

/**
 * struct drm_i915_memory_region_info - Describes one region as known to the
 * driver.
 *
 * Note that we reserve some stuff here for potential future work. As an example
 * we might want to expose the capabilities for a given region, which could
 * include things like whether the region is CPU mappable/accessible, what the
 * supported mapping types are, etc.
 *
 * Note that to extend struct drm_i915_memory_region_info and struct
 * drm_i915_query_memory_regions in the future the plan is to do the following:
 *
 * .. code-block:: C
 *
 *	struct drm_i915_memory_region_info {
 *		struct drm_i915_gem_memory_class_instance region;
 *		union {
 *			__u32 rsvd0;
 *			__u32 new_thing1;
 *		};
 *		...
 *		union {
 *			__u64 rsvd1[8];
 *			struct {
 *				__u64 new_thing2;
 *				__u64 new_thing3;
 *				...
 *			};
 *		};
 *	};
 *
 * With this things should remain source compatible between versions for
 * userspace, even as we add new fields.
 *
 * Note this is using both struct drm_i915_query_item and struct drm_i915_query.
 * For this new query we are adding the new query id DRM_I915_QUERY_MEMORY_REGIONS
 * at &drm_i915_query_item.query_id.
 */
struct drm_i915_memory_region_info {
	/** @region: The class:instance pair encoding */
	struct drm_i915_gem_memory_class_instance region;

	/** @rsvd0: MBZ */
	__u32 rsvd0;

	/** @probed_size: Memory probed by the driver (-1 = unknown) */
	__u64 probed_size;

	/** @unallocated_size: Estimate of memory remaining (-1 = unknown) */
	__u64 unallocated_size;

	/** @rsvd1: MBZ */
	__u64 rsvd1[8];
};

/**
 * struct drm_i915_query_memory_regions
 *
 * The region info query enumerates all regions known to the driver by filling
 * in an array of struct drm_i915_memory_region_info structures.
 *
 * Example for getting the list of supported regions:
 *
 * .. code-block:: C
 *
 *	struct drm_i915_query_memory_regions *info;
 *	struct drm_i915_query_item item = {
 *		.query_id = DRM_I915_QUERY_MEMORY_REGIONS,
 *	};
 *	struct drm_i915_query query = {
 *		.num_items = 1,
 *		.items_ptr = (uintptr_t)&item,
 *	};
 *	int err, i;
 *
 *	// First query the size of the blob we need, this needs to be large
 *	// enough to hold our array of regions. The kernel will fill out the
 *	// item.length for us, which is the number of bytes we need.
 *	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
 *	if (err) ...
 *
 *	info = calloc(1, item.length);
 *	// Now that we allocated the required number of bytes, we call the ioctl
 *	// again, this time with the data_ptr pointing to our newly allocated
 *	// blob, which the kernel can then populate with all the region info.
 *	item.data_ptr = (uintptr_t)info;
 *
 *	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
 *	if (err) ...
 *
 *	// We can now access each region in the array
 *	for (i = 0; i < info->num_regions; i++) {
 *		struct drm_i915_memory_region_info mr = info->regions[i];
 *		u16 class = mr.region.memory_class;
 *		u16 instance = mr.region.memory_instance;
 *
 *		....
 *	}
 *
 *	free(info);
 */
struct drm_i915_query_memory_regions {
	/** @num_regions: Number of supported regions */
	__u32 num_regions;

	/** @rsvd: MBZ */
	__u32 rsvd[3];

	/** @regions: Info about each supported region */
	struct drm_i915_memory_region_info regions[];
};

/**
 * struct drm_i915_gem_create_ext - Existing gem_create behaviour, with added
 * extension support using struct i915_user_extension.
 *
 * Note that in the future we want to have our buffer flags here, at least for
 * the stuff that is immutable. Previously we would have two ioctls, one to
 * create the object with gem_create, and another to apply various parameters.
 * However, this creates some ambiguity for the params which are considered
 * immutable. Also in general we're phasing out the various SET/GET ioctls.
 */
struct drm_i915_gem_create_ext {
	/**
	 * @size: Requested size for the object.
	 *
	 * The (page-aligned) allocated size for the object will be returned.
	 *
	 * Note that for some devices we might have further minimum page-size
	 * restrictions (larger than 4K), like for device local-memory.
	 * However in general the final size here should always reflect any
	 * rounding up, if for example using the I915_GEM_CREATE_EXT_MEMORY_REGIONS
	 * extension to place the object in device local-memory.
	 */
	__u64 size;
	/**
	 * @handle: Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;
	/** @flags: MBZ */
	__u32 flags;
	/**
	 * @extensions: The chain of extensions to apply to this object.
	 *
	 * This will be useful in the future when we need to support several
	 * different extensions, and we need to apply more than one when
	 * creating the object. See struct i915_user_extension.
	 *
	 * If we don't supply any extensions then we get the same old gem_create
	 * behaviour.
	 *
	 * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
	 * struct drm_i915_gem_create_ext_memory_regions.
	 */
#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
	__u64 extensions;
};

/**
 * struct drm_i915_gem_create_ext_memory_regions - The
 * I915_GEM_CREATE_EXT_MEMORY_REGIONS extension.
 *
 * Set the object with the desired set of placements/regions in priority
 * order. Each entry must be unique and supported by the device.
 *
 * This is provided as an array of struct drm_i915_gem_memory_class_instance, or
 * an equivalent layout of class:instance pair encodings. See struct
 * drm_i915_query_memory_regions and DRM_I915_QUERY_MEMORY_REGIONS for how to
 * query the supported regions for a device.
 *
 * As an example, on discrete devices, if we wish to set the placement as
 * device local-memory we can do something like:
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_memory_class_instance region_lmem = {
 *		.memory_class = I915_MEMORY_CLASS_DEVICE,
 *		.memory_instance = 0,
 *	};
 *	struct drm_i915_gem_create_ext_memory_regions regions = {
 *		.base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS },
 *		.regions = (uintptr_t)&region_lmem,
 *		.num_regions = 1,
 *	};
 *	struct drm_i915_gem_create_ext create_ext = {
 *		.size = 16 * PAGE_SIZE,
 *		.extensions = (uintptr_t)&regions,
 *	};
 *
 *	int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
 *	if (err) ...
 *
 * At which point we get the object handle in &drm_i915_gem_create_ext.handle,
 * along with the final object size in &drm_i915_gem_create_ext.size, which
 * should account for any rounding up, if required.
 */
struct drm_i915_gem_create_ext_memory_regions {
	/** @base: Extension link. See struct i915_user_extension. */
	struct i915_user_extension base;

	/** @pad: MBZ */
	__u32 pad;
	/** @num_regions: Number of elements in the @regions array. */
	__u32 num_regions;
	/**
	 * @regions: The regions/placements array.
	 *
	 * An array of struct drm_i915_gem_memory_class_instance.
	 */
	__u64 regions;
};

#if defined(__cplusplus)
}
#endif

#endif /* _I915_DRM_H_ */