122944501Smrg/*
222944501Smrg * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
322944501Smrg * All Rights Reserved.
422944501Smrg *
522944501Smrg * Permission is hereby granted, free of charge, to any person obtaining a
622944501Smrg * copy of this software and associated documentation files (the
722944501Smrg * "Software"), to deal in the Software without restriction, including
822944501Smrg * without limitation the rights to use, copy, modify, merge, publish,
922944501Smrg * distribute, sub license, and/or sell copies of the Software, and to
1022944501Smrg * permit persons to whom the Software is furnished to do so, subject to
1122944501Smrg * the following conditions:
1222944501Smrg *
1322944501Smrg * The above copyright notice and this permission notice (including the
1422944501Smrg * next paragraph) shall be included in all copies or substantial portions
1522944501Smrg * of the Software.
1622944501Smrg *
1722944501Smrg * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
1822944501Smrg * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
1922944501Smrg * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
2022944501Smrg * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
2122944501Smrg * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
2222944501Smrg * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
2322944501Smrg * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
2422944501Smrg *
2522944501Smrg */
2622944501Smrg
2722944501Smrg#ifndef _I915_DRM_H_
2822944501Smrg#define _I915_DRM_H_
2922944501Smrg
30fe517fc9Smrg#include "drm.h"
3122944501Smrg
322ee35494Smrg#if defined(__cplusplus)
332ee35494Smrgextern "C" {
342ee35494Smrg#endif
352ee35494Smrg
3622944501Smrg/* Please note that modifications to all structs defined here are
3722944501Smrg * subject to backwards-compatibility constraints.
3822944501Smrg */
3922944501Smrg
40e88f27b3Smrg/**
41e88f27b3Smrg * DOC: uevents generated by i915 on it's device node
42e88f27b3Smrg *
43e88f27b3Smrg * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
44e88f27b3Smrg *	event from the gpu l3 cache. Additional information supplied is ROW,
45e88f27b3Smrg *	BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
46e88f27b3Smrg *	track of these events and if a specific cache-line seems to have a
47e88f27b3Smrg *	persistent error remap it with the l3 remapping tool supplied in
48e88f27b3Smrg *	intel-gpu-tools.  The value supplied with the event is always 1.
49e88f27b3Smrg *
50e88f27b3Smrg * I915_ERROR_UEVENT - Generated upon error detection, currently only via
51e88f27b3Smrg *	hangcheck. The error detection event is a good indicator of when things
52e88f27b3Smrg *	began to go badly. The value supplied with the event is a 1 upon error
53e88f27b3Smrg *	detection, and a 0 upon reset completion, signifying no more error
54e88f27b3Smrg *	exists. NOTE: Disabling hangcheck or reset via module parameter will
55e88f27b3Smrg *	cause the related events to not be seen.
56e88f27b3Smrg *
57e88f27b3Smrg * I915_RESET_UEVENT - Event is generated just before an attempt to reset the
583b115362Smrg *	GPU. The value supplied with the event is always 1. NOTE: Disable
59e88f27b3Smrg *	reset via module parameter will cause this event to not be seen.
60e88f27b3Smrg */
61e88f27b3Smrg#define I915_L3_PARITY_UEVENT		"L3_PARITY_ERROR"
62e88f27b3Smrg#define I915_ERROR_UEVENT		"ERROR"
63e88f27b3Smrg#define I915_RESET_UEVENT		"RESET"
64e88f27b3Smrg
653b115362Smrg/**
663b115362Smrg * struct i915_user_extension - Base class for defining a chain of extensions
67bf6cc7dcSmrg *
68bf6cc7dcSmrg * Many interfaces need to grow over time. In most cases we can simply
69bf6cc7dcSmrg * extend the struct and have userspace pass in more data. Another option,
70bf6cc7dcSmrg * as demonstrated by Vulkan's approach to providing extensions for forward
71bf6cc7dcSmrg * and backward compatibility, is to use a list of optional structs to
72bf6cc7dcSmrg * provide those extra details.
73bf6cc7dcSmrg *
74bf6cc7dcSmrg * The key advantage to using an extension chain is that it allows us to
75bf6cc7dcSmrg * redefine the interface more easily than an ever growing struct of
76bf6cc7dcSmrg * increasing complexity, and for large parts of that interface to be
77bf6cc7dcSmrg * entirely optional. The downside is more pointer chasing; chasing across
78bf6cc7dcSmrg * the boundary with pointers encapsulated inside u64.
793b115362Smrg *
803b115362Smrg * Example chaining:
813b115362Smrg *
823b115362Smrg * .. code-block:: C
833b115362Smrg *
843b115362Smrg *	struct i915_user_extension ext3 {
853b115362Smrg *		.next_extension = 0, // end
863b115362Smrg *		.name = ...,
873b115362Smrg *	};
883b115362Smrg *	struct i915_user_extension ext2 {
893b115362Smrg *		.next_extension = (uintptr_t)&ext3,
903b115362Smrg *		.name = ...,
913b115362Smrg *	};
923b115362Smrg *	struct i915_user_extension ext1 {
933b115362Smrg *		.next_extension = (uintptr_t)&ext2,
943b115362Smrg *		.name = ...,
953b115362Smrg *	};
963b115362Smrg *
973b115362Smrg * Typically the struct i915_user_extension would be embedded in some uAPI
983b115362Smrg * struct, and in this case we would feed it the head of the chain(i.e ext1),
993b115362Smrg * which would then apply all of the above extensions.
1003b115362Smrg *
101bf6cc7dcSmrg */
102bf6cc7dcSmrgstruct i915_user_extension {
1033b115362Smrg	/**
1043b115362Smrg	 * @next_extension:
1053b115362Smrg	 *
1063b115362Smrg	 * Pointer to the next struct i915_user_extension, or zero if the end.
1073b115362Smrg	 */
108bf6cc7dcSmrg	__u64 next_extension;
1093b115362Smrg	/**
1103b115362Smrg	 * @name: Name of the extension.
1113b115362Smrg	 *
1123b115362Smrg	 * Note that the name here is just some integer.
1133b115362Smrg	 *
1143b115362Smrg	 * Also note that the name space for this is not global for the whole
1153b115362Smrg	 * driver, but rather its scope/meaning is limited to the specific piece
1163b115362Smrg	 * of uAPI which has embedded the struct i915_user_extension.
1173b115362Smrg	 */
118bf6cc7dcSmrg	__u32 name;
1193b115362Smrg	/**
1203b115362Smrg	 * @flags: MBZ
1213b115362Smrg	 *
1223b115362Smrg	 * All undefined bits must be zero.
1233b115362Smrg	 */
1243b115362Smrg	__u32 flags;
1253b115362Smrg	/**
1263b115362Smrg	 * @rsvd: MBZ
1273b115362Smrg	 *
1283b115362Smrg	 * Reserved for future use; must be zero.
1293b115362Smrg	 */
1303b115362Smrg	__u32 rsvd[4];
131bf6cc7dcSmrg};
132bf6cc7dcSmrg
1332ee35494Smrg/*
1342ee35494Smrg * MOCS indexes used for GPU surfaces, defining the cacheability of the
1352ee35494Smrg * surface data and the coherency for this data wrt. CPU vs. GPU accesses.
1362ee35494Smrg */
1372ee35494Smrgenum i915_mocs_table_index {
1382ee35494Smrg	/*
1392ee35494Smrg	 * Not cached anywhere, coherency between CPU and GPU accesses is
1402ee35494Smrg	 * guaranteed.
1412ee35494Smrg	 */
1422ee35494Smrg	I915_MOCS_UNCACHED,
1432ee35494Smrg	/*
1442ee35494Smrg	 * Cacheability and coherency controlled by the kernel automatically
1452ee35494Smrg	 * based on the DRM_I915_GEM_SET_CACHING IOCTL setting and the current
1462ee35494Smrg	 * usage of the surface (used for display scanout or not).
1472ee35494Smrg	 */
1482ee35494Smrg	I915_MOCS_PTE,
1492ee35494Smrg	/*
1502ee35494Smrg	 * Cached in all GPU caches available on the platform.
1512ee35494Smrg	 * Coherency between CPU and GPU accesses to the surface is not
1522ee35494Smrg	 * guaranteed without extra synchronization.
1532ee35494Smrg	 */
1542ee35494Smrg	I915_MOCS_CACHED,
1552ee35494Smrg};
1562ee35494Smrg
1573b115362Smrg/**
1583b115362Smrg * enum drm_i915_gem_engine_class - uapi engine type enumeration
1593b115362Smrg *
1606260e5d5Smrg * Different engines serve different roles, and there may be more than one
1613b115362Smrg * engine serving each role.  This enum provides a classification of the role
1623b115362Smrg * of the engine, which may be used when requesting operations to be performed
1633b115362Smrg * on a certain subset of engines, or for providing information about that
1643b115362Smrg * group.
1656260e5d5Smrg */
1666260e5d5Smrgenum drm_i915_gem_engine_class {
1673b115362Smrg	/**
1683b115362Smrg	 * @I915_ENGINE_CLASS_RENDER:
1693b115362Smrg	 *
1703b115362Smrg	 * Render engines support instructions used for 3D, Compute (GPGPU),
1713b115362Smrg	 * and programmable media workloads.  These instructions fetch data and
1723b115362Smrg	 * dispatch individual work items to threads that operate in parallel.
1733b115362Smrg	 * The threads run small programs (called "kernels" or "shaders") on
1743b115362Smrg	 * the GPU's execution units (EUs).
1753b115362Smrg	 */
1766260e5d5Smrg	I915_ENGINE_CLASS_RENDER	= 0,
1773b115362Smrg
1783b115362Smrg	/**
1793b115362Smrg	 * @I915_ENGINE_CLASS_COPY:
1803b115362Smrg	 *
1813b115362Smrg	 * Copy engines (also referred to as "blitters") support instructions
1823b115362Smrg	 * that move blocks of data from one location in memory to another,
1833b115362Smrg	 * or that fill a specified location of memory with fixed data.
1843b115362Smrg	 * Copy engines can perform pre-defined logical or bitwise operations
1853b115362Smrg	 * on the source, destination, or pattern data.
1863b115362Smrg	 */
1876260e5d5Smrg	I915_ENGINE_CLASS_COPY		= 1,
1883b115362Smrg
1893b115362Smrg	/**
1903b115362Smrg	 * @I915_ENGINE_CLASS_VIDEO:
1913b115362Smrg	 *
1923b115362Smrg	 * Video engines (also referred to as "bit stream decode" (BSD) or
1933b115362Smrg	 * "vdbox") support instructions that perform fixed-function media
1943b115362Smrg	 * decode and encode.
1953b115362Smrg	 */
1966260e5d5Smrg	I915_ENGINE_CLASS_VIDEO		= 2,
1973b115362Smrg
1983b115362Smrg	/**
1993b115362Smrg	 * @I915_ENGINE_CLASS_VIDEO_ENHANCE:
2003b115362Smrg	 *
2013b115362Smrg	 * Video enhancement engines (also referred to as "vebox") support
2023b115362Smrg	 * instructions related to image enhancement.
2033b115362Smrg	 */
2046260e5d5Smrg	I915_ENGINE_CLASS_VIDEO_ENHANCE	= 3,
2056260e5d5Smrg
2063b115362Smrg	/**
2073b115362Smrg	 * @I915_ENGINE_CLASS_COMPUTE:
2083b115362Smrg	 *
2093b115362Smrg	 * Compute engines support a subset of the instructions available
2103b115362Smrg	 * on render engines:  compute engines support Compute (GPGPU) and
2113b115362Smrg	 * programmable media workloads, but do not support the 3D pipeline.
2123b115362Smrg	 */
2133b115362Smrg	I915_ENGINE_CLASS_COMPUTE	= 4,
2143b115362Smrg
2153b115362Smrg	/* Values in this enum should be kept compact. */
216bf6cc7dcSmrg
2173b115362Smrg	/**
2183b115362Smrg	 * @I915_ENGINE_CLASS_INVALID:
2193b115362Smrg	 *
2203b115362Smrg	 * Placeholder value to represent an invalid engine class assignment.
2213b115362Smrg	 */
2226260e5d5Smrg	I915_ENGINE_CLASS_INVALID	= -1
2236260e5d5Smrg};
2246260e5d5Smrg
2253b115362Smrg/**
2263b115362Smrg * struct i915_engine_class_instance - Engine class/instance identifier
2273b115362Smrg *
2283b115362Smrg * There may be more than one engine fulfilling any role within the system.
2293b115362Smrg * Each engine of a class is given a unique instance number and therefore
2303b115362Smrg * any engine can be specified by its class:instance tuplet. APIs that allow
2313b115362Smrg * access to any engine in the system will use struct i915_engine_class_instance
2323b115362Smrg * for this identification.
2333b115362Smrg */
2343b115362Smrgstruct i915_engine_class_instance {
2353b115362Smrg	/**
2363b115362Smrg	 * @engine_class:
2373b115362Smrg	 *
2383b115362Smrg	 * Engine class from enum drm_i915_gem_engine_class
2393b115362Smrg	 */
2403b115362Smrg	__u16 engine_class;
2413b115362Smrg#define I915_ENGINE_CLASS_INVALID_NONE -1
2423b115362Smrg#define I915_ENGINE_CLASS_INVALID_VIRTUAL -2
2433b115362Smrg
2443b115362Smrg	/**
2453b115362Smrg	 * @engine_instance:
2463b115362Smrg	 *
2473b115362Smrg	 * Engine instance.
2483b115362Smrg	 */
2493b115362Smrg	__u16 engine_instance;
2503b115362Smrg};
2513b115362Smrg
2526260e5d5Smrg/**
2536260e5d5Smrg * DOC: perf_events exposed by i915 through /sys/bus/event_sources/drivers/i915
2546260e5d5Smrg *
2556260e5d5Smrg */
2566260e5d5Smrg
2576260e5d5Smrgenum drm_i915_pmu_engine_sample {
2586260e5d5Smrg	I915_SAMPLE_BUSY = 0,
2596260e5d5Smrg	I915_SAMPLE_WAIT = 1,
2606260e5d5Smrg	I915_SAMPLE_SEMA = 2
2616260e5d5Smrg};
2626260e5d5Smrg
2636260e5d5Smrg#define I915_PMU_SAMPLE_BITS (4)
2646260e5d5Smrg#define I915_PMU_SAMPLE_MASK (0xf)
2656260e5d5Smrg#define I915_PMU_SAMPLE_INSTANCE_BITS (8)
2666260e5d5Smrg#define I915_PMU_CLASS_SHIFT \
2676260e5d5Smrg	(I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS)
2686260e5d5Smrg
2696260e5d5Smrg#define __I915_PMU_ENGINE(class, instance, sample) \
2706260e5d5Smrg	((class) << I915_PMU_CLASS_SHIFT | \
2716260e5d5Smrg	(instance) << I915_PMU_SAMPLE_BITS | \
2726260e5d5Smrg	(sample))
2736260e5d5Smrg
2746260e5d5Smrg#define I915_PMU_ENGINE_BUSY(class, instance) \
2756260e5d5Smrg	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY)
2766260e5d5Smrg
2776260e5d5Smrg#define I915_PMU_ENGINE_WAIT(class, instance) \
2786260e5d5Smrg	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT)
2796260e5d5Smrg
2806260e5d5Smrg#define I915_PMU_ENGINE_SEMA(class, instance) \
2816260e5d5Smrg	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)
2826260e5d5Smrg
2836260e5d5Smrg#define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x))
2846260e5d5Smrg
2856260e5d5Smrg#define I915_PMU_ACTUAL_FREQUENCY	__I915_PMU_OTHER(0)
2866260e5d5Smrg#define I915_PMU_REQUESTED_FREQUENCY	__I915_PMU_OTHER(1)
2876260e5d5Smrg#define I915_PMU_INTERRUPTS		__I915_PMU_OTHER(2)
2886260e5d5Smrg#define I915_PMU_RC6_RESIDENCY		__I915_PMU_OTHER(3)
2893b115362Smrg#define I915_PMU_SOFTWARE_GT_AWAKE_TIME	__I915_PMU_OTHER(4)
2906260e5d5Smrg
2913b115362Smrg#define I915_PMU_LAST /* Deprecated - do not use */ I915_PMU_RC6_RESIDENCY
2926260e5d5Smrg
29322944501Smrg/* Each region is a minimum of 16k, and there are at most 255 of them.
29422944501Smrg */
29522944501Smrg#define I915_NR_TEX_REGIONS 255	/* table size 2k - maximum due to use
29622944501Smrg				 * of chars for next/prev indices */
29722944501Smrg#define I915_LOG_MIN_TEX_REGION_SIZE 14
29822944501Smrg
29922944501Smrgtypedef struct _drm_i915_init {
30022944501Smrg	enum {
30122944501Smrg		I915_INIT_DMA = 0x01,
30222944501Smrg		I915_CLEANUP_DMA = 0x02,
30322944501Smrg		I915_RESUME_DMA = 0x03
30422944501Smrg	} func;
30522944501Smrg	unsigned int mmio_offset;
30622944501Smrg	int sarea_priv_offset;
30722944501Smrg	unsigned int ring_start;
30822944501Smrg	unsigned int ring_end;
30922944501Smrg	unsigned int ring_size;
31022944501Smrg	unsigned int front_offset;
31122944501Smrg	unsigned int back_offset;
31222944501Smrg	unsigned int depth_offset;
31322944501Smrg	unsigned int w;
31422944501Smrg	unsigned int h;
31522944501Smrg	unsigned int pitch;
31622944501Smrg	unsigned int pitch_bits;
31722944501Smrg	unsigned int back_pitch;
31822944501Smrg	unsigned int depth_pitch;
31922944501Smrg	unsigned int cpp;
32022944501Smrg	unsigned int chipset;
32122944501Smrg} drm_i915_init_t;
32222944501Smrg
32322944501Smrgtypedef struct _drm_i915_sarea {
32422944501Smrg	struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
32522944501Smrg	int last_upload;	/* last time texture was uploaded */
32622944501Smrg	int last_enqueue;	/* last time a buffer was enqueued */
32722944501Smrg	int last_dispatch;	/* age of the most recently dispatched buffer */
32822944501Smrg	int ctxOwner;		/* last context to upload state */
32922944501Smrg	int texAge;
33022944501Smrg	int pf_enabled;		/* is pageflipping allowed? */
33122944501Smrg	int pf_active;
33222944501Smrg	int pf_current_page;	/* which buffer is being displayed? */
33322944501Smrg	int perf_boxes;		/* performance boxes to be displayed */
33422944501Smrg	int width, height;      /* screen size in pixels */
33522944501Smrg
33622944501Smrg	drm_handle_t front_handle;
33722944501Smrg	int front_offset;
33822944501Smrg	int front_size;
33922944501Smrg
34022944501Smrg	drm_handle_t back_handle;
34122944501Smrg	int back_offset;
34222944501Smrg	int back_size;
34322944501Smrg
34422944501Smrg	drm_handle_t depth_handle;
34522944501Smrg	int depth_offset;
34622944501Smrg	int depth_size;
34722944501Smrg
34822944501Smrg	drm_handle_t tex_handle;
34922944501Smrg	int tex_offset;
35022944501Smrg	int tex_size;
35122944501Smrg	int log_tex_granularity;
35222944501Smrg	int pitch;
35322944501Smrg	int rotation;           /* 0, 90, 180 or 270 */
35422944501Smrg	int rotated_offset;
35522944501Smrg	int rotated_size;
35622944501Smrg	int rotated_pitch;
35722944501Smrg	int virtualX, virtualY;
35822944501Smrg
35922944501Smrg	unsigned int front_tiled;
36022944501Smrg	unsigned int back_tiled;
36122944501Smrg	unsigned int depth_tiled;
36222944501Smrg	unsigned int rotated_tiled;
36322944501Smrg	unsigned int rotated2_tiled;
36422944501Smrg
36522944501Smrg	int pipeA_x;
36622944501Smrg	int pipeA_y;
36722944501Smrg	int pipeA_w;
36822944501Smrg	int pipeA_h;
36922944501Smrg	int pipeB_x;
37022944501Smrg	int pipeB_y;
37122944501Smrg	int pipeB_w;
37222944501Smrg	int pipeB_h;
37322944501Smrg
37422944501Smrg	/* fill out some space for old userspace triple buffer */
37522944501Smrg	drm_handle_t unused_handle;
37622944501Smrg	__u32 unused1, unused2, unused3;
37722944501Smrg
37822944501Smrg	/* buffer object handles for static buffers. May change
37922944501Smrg	 * over the lifetime of the client.
38022944501Smrg	 */
38122944501Smrg	__u32 front_bo_handle;
38222944501Smrg	__u32 back_bo_handle;
38322944501Smrg	__u32 unused_bo_handle;
38422944501Smrg	__u32 depth_bo_handle;
38522944501Smrg
38622944501Smrg} drm_i915_sarea_t;
38722944501Smrg
38822944501Smrg/* due to userspace building against these headers we need some compat here */
38922944501Smrg#define planeA_x pipeA_x
39022944501Smrg#define planeA_y pipeA_y
39122944501Smrg#define planeA_w pipeA_w
39222944501Smrg#define planeA_h pipeA_h
39322944501Smrg#define planeB_x pipeB_x
39422944501Smrg#define planeB_y pipeB_y
39522944501Smrg#define planeB_w pipeB_w
39622944501Smrg#define planeB_h pipeB_h
39722944501Smrg
39822944501Smrg/* Flags for perf_boxes
39922944501Smrg */
40022944501Smrg#define I915_BOX_RING_EMPTY    0x1
40122944501Smrg#define I915_BOX_FLIP          0x2
40222944501Smrg#define I915_BOX_WAIT          0x4
40322944501Smrg#define I915_BOX_TEXTURE_LOAD  0x8
40422944501Smrg#define I915_BOX_LOST_CONTEXT  0x10
40522944501Smrg
406fe517fc9Smrg/*
407fe517fc9Smrg * i915 specific ioctls.
408fe517fc9Smrg *
409fe517fc9Smrg * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END) ie
410fe517fc9Smrg * [0x40, 0xa0) (a0 is excluded). The numbers below are defined as offset
411fe517fc9Smrg * against DRM_COMMAND_BASE and should be between [0x0, 0x60).
41222944501Smrg */
41322944501Smrg#define DRM_I915_INIT		0x00
41422944501Smrg#define DRM_I915_FLUSH		0x01
41522944501Smrg#define DRM_I915_FLIP		0x02
41622944501Smrg#define DRM_I915_BATCHBUFFER	0x03
41722944501Smrg#define DRM_I915_IRQ_EMIT	0x04
41822944501Smrg#define DRM_I915_IRQ_WAIT	0x05
41922944501Smrg#define DRM_I915_GETPARAM	0x06
42022944501Smrg#define DRM_I915_SETPARAM	0x07
42122944501Smrg#define DRM_I915_ALLOC		0x08
42222944501Smrg#define DRM_I915_FREE		0x09
42322944501Smrg#define DRM_I915_INIT_HEAP	0x0a
42422944501Smrg#define DRM_I915_CMDBUFFER	0x0b
42522944501Smrg#define DRM_I915_DESTROY_HEAP	0x0c
42622944501Smrg#define DRM_I915_SET_VBLANK_PIPE	0x0d
42722944501Smrg#define DRM_I915_GET_VBLANK_PIPE	0x0e
42822944501Smrg#define DRM_I915_VBLANK_SWAP	0x0f
42922944501Smrg#define DRM_I915_HWS_ADDR	0x11
43022944501Smrg#define DRM_I915_GEM_INIT	0x13
43122944501Smrg#define DRM_I915_GEM_EXECBUFFER	0x14
43222944501Smrg#define DRM_I915_GEM_PIN	0x15
43322944501Smrg#define DRM_I915_GEM_UNPIN	0x16
43422944501Smrg#define DRM_I915_GEM_BUSY	0x17
43522944501Smrg#define DRM_I915_GEM_THROTTLE	0x18
43622944501Smrg#define DRM_I915_GEM_ENTERVT	0x19
43722944501Smrg#define DRM_I915_GEM_LEAVEVT	0x1a
43822944501Smrg#define DRM_I915_GEM_CREATE	0x1b
43922944501Smrg#define DRM_I915_GEM_PREAD	0x1c
44022944501Smrg#define DRM_I915_GEM_PWRITE	0x1d
44122944501Smrg#define DRM_I915_GEM_MMAP	0x1e
44222944501Smrg#define DRM_I915_GEM_SET_DOMAIN	0x1f
44322944501Smrg#define DRM_I915_GEM_SW_FINISH	0x20
44422944501Smrg#define DRM_I915_GEM_SET_TILING	0x21
44522944501Smrg#define DRM_I915_GEM_GET_TILING	0x22
44622944501Smrg#define DRM_I915_GEM_GET_APERTURE 0x23
44722944501Smrg#define DRM_I915_GEM_MMAP_GTT	0x24
44822944501Smrg#define DRM_I915_GET_PIPE_FROM_CRTC_ID	0x25
44922944501Smrg#define DRM_I915_GEM_MADVISE	0x26
45022944501Smrg#define DRM_I915_OVERLAY_PUT_IMAGE	0x27
45122944501Smrg#define DRM_I915_OVERLAY_ATTRS	0x28
45222944501Smrg#define DRM_I915_GEM_EXECBUFFER2	0x29
4532ee35494Smrg#define DRM_I915_GEM_EXECBUFFER2_WR	DRM_I915_GEM_EXECBUFFER2
454e88f27b3Smrg#define DRM_I915_GET_SPRITE_COLORKEY	0x2a
455e88f27b3Smrg#define DRM_I915_SET_SPRITE_COLORKEY	0x2b
456e88f27b3Smrg#define DRM_I915_GEM_WAIT	0x2c
457e88f27b3Smrg#define DRM_I915_GEM_CONTEXT_CREATE	0x2d
458e88f27b3Smrg#define DRM_I915_GEM_CONTEXT_DESTROY	0x2e
459e88f27b3Smrg#define DRM_I915_GEM_SET_CACHING	0x2f
460e88f27b3Smrg#define DRM_I915_GEM_GET_CACHING	0x30
461e88f27b3Smrg#define DRM_I915_REG_READ		0x31
462e88f27b3Smrg#define DRM_I915_GET_RESET_STATS	0x32
463baaff307Smrg#define DRM_I915_GEM_USERPTR		0x33
464424e9256Smrg#define DRM_I915_GEM_CONTEXT_GETPARAM	0x34
465424e9256Smrg#define DRM_I915_GEM_CONTEXT_SETPARAM	0x35
4662ee35494Smrg#define DRM_I915_PERF_OPEN		0x36
4676260e5d5Smrg#define DRM_I915_PERF_ADD_CONFIG	0x37
4686260e5d5Smrg#define DRM_I915_PERF_REMOVE_CONFIG	0x38
4696260e5d5Smrg#define DRM_I915_QUERY			0x39
4703b115362Smrg#define DRM_I915_GEM_VM_CREATE		0x3a
4713b115362Smrg#define DRM_I915_GEM_VM_DESTROY		0x3b
4723b115362Smrg#define DRM_I915_GEM_CREATE_EXT		0x3c
473bf6cc7dcSmrg/* Must be kept compact -- no holes */
47422944501Smrg
47522944501Smrg#define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
47622944501Smrg#define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
47722944501Smrg#define DRM_IOCTL_I915_FLIP		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
47822944501Smrg#define DRM_IOCTL_I915_BATCHBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
47922944501Smrg#define DRM_IOCTL_I915_IRQ_EMIT         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
48022944501Smrg#define DRM_IOCTL_I915_IRQ_WAIT         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
48122944501Smrg#define DRM_IOCTL_I915_GETPARAM         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
48222944501Smrg#define DRM_IOCTL_I915_SETPARAM         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
48322944501Smrg#define DRM_IOCTL_I915_ALLOC            DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
48422944501Smrg#define DRM_IOCTL_I915_FREE             DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
48522944501Smrg#define DRM_IOCTL_I915_INIT_HEAP        DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
48622944501Smrg#define DRM_IOCTL_I915_CMDBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
48722944501Smrg#define DRM_IOCTL_I915_DESTROY_HEAP	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
48822944501Smrg#define DRM_IOCTL_I915_SET_VBLANK_PIPE	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
48922944501Smrg#define DRM_IOCTL_I915_GET_VBLANK_PIPE	DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
49022944501Smrg#define DRM_IOCTL_I915_VBLANK_SWAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
49169dda199Smrg#define DRM_IOCTL_I915_HWS_ADDR		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
49222944501Smrg#define DRM_IOCTL_I915_GEM_INIT		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
49322944501Smrg#define DRM_IOCTL_I915_GEM_EXECBUFFER	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
49422944501Smrg#define DRM_IOCTL_I915_GEM_EXECBUFFER2	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
4952ee35494Smrg#define DRM_IOCTL_I915_GEM_EXECBUFFER2_WR	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2_WR, struct drm_i915_gem_execbuffer2)
49622944501Smrg#define DRM_IOCTL_I915_GEM_PIN		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
49722944501Smrg#define DRM_IOCTL_I915_GEM_UNPIN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
49822944501Smrg#define DRM_IOCTL_I915_GEM_BUSY		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
499e88f27b3Smrg#define DRM_IOCTL_I915_GEM_SET_CACHING		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
500e88f27b3Smrg#define DRM_IOCTL_I915_GEM_GET_CACHING		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
50122944501Smrg#define DRM_IOCTL_I915_GEM_THROTTLE	DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
50222944501Smrg#define DRM_IOCTL_I915_GEM_ENTERVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
50322944501Smrg#define DRM_IOCTL_I915_GEM_LEAVEVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
50422944501Smrg#define DRM_IOCTL_I915_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
5053b115362Smrg#define DRM_IOCTL_I915_GEM_CREATE_EXT	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE_EXT, struct drm_i915_gem_create_ext)
50622944501Smrg#define DRM_IOCTL_I915_GEM_PREAD	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
50722944501Smrg#define DRM_IOCTL_I915_GEM_PWRITE	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
50822944501Smrg#define DRM_IOCTL_I915_GEM_MMAP		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
50922944501Smrg#define DRM_IOCTL_I915_GEM_MMAP_GTT	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
5103b115362Smrg#define DRM_IOCTL_I915_GEM_MMAP_OFFSET	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_offset)
51122944501Smrg#define DRM_IOCTL_I915_GEM_SET_DOMAIN	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
51222944501Smrg#define DRM_IOCTL_I915_GEM_SW_FINISH	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
51322944501Smrg#define DRM_IOCTL_I915_GEM_SET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
51422944501Smrg#define DRM_IOCTL_I915_GEM_GET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
51522944501Smrg#define DRM_IOCTL_I915_GEM_GET_APERTURE	DRM_IOR  (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
51622944501Smrg#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
51722944501Smrg#define DRM_IOCTL_I915_GEM_MADVISE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
518e88f27b3Smrg#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
51922944501Smrg#define DRM_IOCTL_I915_OVERLAY_ATTRS	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
520e88f27b3Smrg#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
521b7926a35Schristos#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
522e88f27b3Smrg#define DRM_IOCTL_I915_GEM_WAIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
523e88f27b3Smrg#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
524bf6cc7dcSmrg#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create_ext)
525e88f27b3Smrg#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
526e88f27b3Smrg#define DRM_IOCTL_I915_REG_READ			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
527e88f27b3Smrg#define DRM_IOCTL_I915_GET_RESET_STATS		DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
528424e9256Smrg#define DRM_IOCTL_I915_GEM_USERPTR			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
529424e9256Smrg#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
530424e9256Smrg#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
5312ee35494Smrg#define DRM_IOCTL_I915_PERF_OPEN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)
5326260e5d5Smrg#define DRM_IOCTL_I915_PERF_ADD_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
5336260e5d5Smrg#define DRM_IOCTL_I915_PERF_REMOVE_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
5346260e5d5Smrg#define DRM_IOCTL_I915_QUERY			DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
5353b115362Smrg#define DRM_IOCTL_I915_GEM_VM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_CREATE, struct drm_i915_gem_vm_control)
5363b115362Smrg#define DRM_IOCTL_I915_GEM_VM_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control)
53722944501Smrg
53822944501Smrg/* Allow drivers to submit batchbuffers directly to hardware, relying
53922944501Smrg * on the security mechanisms provided by hardware.
54022944501Smrg */
54122944501Smrgtypedef struct drm_i915_batchbuffer {
54222944501Smrg	int start;		/* agp offset */
54322944501Smrg	int used;		/* nr bytes in use */
54422944501Smrg	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
54522944501Smrg	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
54622944501Smrg	int num_cliprects;	/* mulitpass with multiple cliprects? */
54722944501Smrg	struct drm_clip_rect *cliprects;	/* pointer to userspace cliprects */
54822944501Smrg} drm_i915_batchbuffer_t;
54922944501Smrg
55022944501Smrg/* As above, but pass a pointer to userspace buffer which can be
55122944501Smrg * validated by the kernel prior to sending to hardware.
55222944501Smrg */
55322944501Smrgtypedef struct _drm_i915_cmdbuffer {
55422944501Smrg	char *buf;	/* pointer to userspace command buffer */
55522944501Smrg	int sz;			/* nr bytes in buf */
55622944501Smrg	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
55722944501Smrg	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
55822944501Smrg	int num_cliprects;	/* mulitpass with multiple cliprects? */
55922944501Smrg	struct drm_clip_rect *cliprects;	/* pointer to userspace cliprects */
56022944501Smrg} drm_i915_cmdbuffer_t;
56122944501Smrg
56222944501Smrg/* Userspace can request & wait on irq's:
56322944501Smrg */
56422944501Smrgtypedef struct drm_i915_irq_emit {
56522944501Smrg	int *irq_seq;
56622944501Smrg} drm_i915_irq_emit_t;
56722944501Smrg
56822944501Smrgtypedef struct drm_i915_irq_wait {
56922944501Smrg	int irq_seq;
57022944501Smrg} drm_i915_irq_wait_t;
57122944501Smrg
572bf6cc7dcSmrg/*
573bf6cc7dcSmrg * Different modes of per-process Graphics Translation Table,
574bf6cc7dcSmrg * see I915_PARAM_HAS_ALIASING_PPGTT
575bf6cc7dcSmrg */
576bf6cc7dcSmrg#define I915_GEM_PPGTT_NONE	0
577bf6cc7dcSmrg#define I915_GEM_PPGTT_ALIASING	1
578bf6cc7dcSmrg#define I915_GEM_PPGTT_FULL	2
579bf6cc7dcSmrg
58022944501Smrg/* Ioctl to query kernel params:
58122944501Smrg */
58222944501Smrg#define I915_PARAM_IRQ_ACTIVE            1
58322944501Smrg#define I915_PARAM_ALLOW_BATCHBUFFER     2
58422944501Smrg#define I915_PARAM_LAST_DISPATCH         3
58522944501Smrg#define I915_PARAM_CHIPSET_ID            4
58622944501Smrg#define I915_PARAM_HAS_GEM               5
58722944501Smrg#define I915_PARAM_NUM_FENCES_AVAIL      6
58822944501Smrg#define I915_PARAM_HAS_OVERLAY           7
58922944501Smrg#define I915_PARAM_HAS_PAGEFLIPPING	 8
59022944501Smrg#define I915_PARAM_HAS_EXECBUF2          9
59113d1d17dSmrg#define I915_PARAM_HAS_BSD		 10
59269dda199Smrg#define I915_PARAM_HAS_BLT		 11
59369dda199Smrg#define I915_PARAM_HAS_RELAXED_FENCING	 12
594e88f27b3Smrg#define I915_PARAM_HAS_COHERENT_RINGS	 13
595e88f27b3Smrg#define I915_PARAM_HAS_EXEC_CONSTANTS	 14
596e88f27b3Smrg#define I915_PARAM_HAS_RELAXED_DELTA	 15
597e88f27b3Smrg#define I915_PARAM_HAS_GEN7_SOL_RESET	 16
598e88f27b3Smrg#define I915_PARAM_HAS_LLC     	 	 17
599e88f27b3Smrg#define I915_PARAM_HAS_ALIASING_PPGTT	 18
600e88f27b3Smrg#define I915_PARAM_HAS_WAIT_TIMEOUT	 19
601e88f27b3Smrg#define I915_PARAM_HAS_SEMAPHORES	 20
602e88f27b3Smrg#define I915_PARAM_HAS_PRIME_VMAP_FLUSH	 21
603e88f27b3Smrg#define I915_PARAM_HAS_VEBOX		 22
604e88f27b3Smrg#define I915_PARAM_HAS_SECURE_BATCHES	 23
605e88f27b3Smrg#define I915_PARAM_HAS_PINNED_BATCHES	 24
606e88f27b3Smrg#define I915_PARAM_HAS_EXEC_NO_RELOC	 25
607e88f27b3Smrg#define I915_PARAM_HAS_EXEC_HANDLE_LUT   26
608e88f27b3Smrg#define I915_PARAM_HAS_WT     	 	 27
609baaff307Smrg#define I915_PARAM_CMD_PARSER_VERSION	 28
610424e9256Smrg#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
611424e9256Smrg#define I915_PARAM_MMAP_VERSION          30
612424e9256Smrg#define I915_PARAM_HAS_BSD2		 31
613424e9256Smrg#define I915_PARAM_REVISION              32
614424e9256Smrg#define I915_PARAM_SUBSLICE_TOTAL	 33
615424e9256Smrg#define I915_PARAM_EU_TOTAL		 34
616fe517fc9Smrg#define I915_PARAM_HAS_GPU_RESET	 35
617fe517fc9Smrg#define I915_PARAM_HAS_RESOURCE_STREAMER 36
618fe517fc9Smrg#define I915_PARAM_HAS_EXEC_SOFTPIN	 37
6192ee35494Smrg#define I915_PARAM_HAS_POOLED_EU	 38
6202ee35494Smrg#define I915_PARAM_MIN_EU_IN_POOL	 39
6212ee35494Smrg#define I915_PARAM_MMAP_GTT_VERSION	 40
6222ee35494Smrg
6236260e5d5Smrg/*
6246260e5d5Smrg * Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
6252ee35494Smrg * priorities and the driver will attempt to execute batches in priority order.
6266260e5d5Smrg * The param returns a capability bitmask, nonzero implies that the scheduler
6276260e5d5Smrg * is enabled, with different features present according to the mask.
6286260e5d5Smrg *
6296260e5d5Smrg * The initial priority for each batch is supplied by the context and is
6306260e5d5Smrg * controlled via I915_CONTEXT_PARAM_PRIORITY.
6312ee35494Smrg */
6322ee35494Smrg#define I915_PARAM_HAS_SCHEDULER	 41
6336260e5d5Smrg#define   I915_SCHEDULER_CAP_ENABLED	(1ul << 0)
6346260e5d5Smrg#define   I915_SCHEDULER_CAP_PRIORITY	(1ul << 1)
6356260e5d5Smrg#define   I915_SCHEDULER_CAP_PREEMPTION	(1ul << 2)
636bf6cc7dcSmrg#define   I915_SCHEDULER_CAP_SEMAPHORES	(1ul << 3)
6373b115362Smrg#define   I915_SCHEDULER_CAP_ENGINE_BUSY_STATS	(1ul << 4)
6383b115362Smrg/*
6393b115362Smrg * Indicates the 2k user priority levels are statically mapped into 3 buckets as
6403b115362Smrg * follows:
6413b115362Smrg *
6423b115362Smrg * -1k to -1	Low priority
6433b115362Smrg * 0		Normal priority
6443b115362Smrg * 1 to 1k	Highest priority
6453b115362Smrg */
6463b115362Smrg#define   I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP	(1ul << 5)
6476260e5d5Smrg
6482ee35494Smrg#define I915_PARAM_HUC_STATUS		 42
6492ee35494Smrg
6502ee35494Smrg/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
6512ee35494Smrg * synchronisation with implicit fencing on individual objects.
6522ee35494Smrg * See EXEC_OBJECT_ASYNC.
6532ee35494Smrg */
6542ee35494Smrg#define I915_PARAM_HAS_EXEC_ASYNC	 43
6552ee35494Smrg
6562ee35494Smrg/* Query whether DRM_I915_GEM_EXECBUFFER2 supports explicit fence support -
6572ee35494Smrg * both being able to pass in a sync_file fd to wait upon before executing,
6582ee35494Smrg * and being able to return a new sync_file fd that is signaled when the
6592ee35494Smrg * current request is complete. See I915_EXEC_FENCE_IN and I915_EXEC_FENCE_OUT.
6602ee35494Smrg */
6612ee35494Smrg#define I915_PARAM_HAS_EXEC_FENCE	 44
66222944501Smrg
6636260e5d5Smrg/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture
6643b115362Smrg * user specified bufffers for post-mortem debugging of GPU hangs. See
6656260e5d5Smrg * EXEC_OBJECT_CAPTURE.
6666260e5d5Smrg */
6676260e5d5Smrg#define I915_PARAM_HAS_EXEC_CAPTURE	 45
6686260e5d5Smrg
6696260e5d5Smrg#define I915_PARAM_SLICE_MASK		 46
6706260e5d5Smrg
6716260e5d5Smrg/* Assuming it's uniform for each slice, this queries the mask of subslices
6726260e5d5Smrg * per-slice for this system.
6736260e5d5Smrg */
6746260e5d5Smrg#define I915_PARAM_SUBSLICE_MASK	 47
6756260e5d5Smrg
6766260e5d5Smrg/*
6776260e5d5Smrg * Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying the batch buffer
6786260e5d5Smrg * as the first execobject as opposed to the last. See I915_EXEC_BATCH_FIRST.
6796260e5d5Smrg */
6806260e5d5Smrg#define I915_PARAM_HAS_EXEC_BATCH_FIRST	 48
6816260e5d5Smrg
6826260e5d5Smrg/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
6836260e5d5Smrg * drm_i915_gem_exec_fence structures.  See I915_EXEC_FENCE_ARRAY.
6846260e5d5Smrg */
6856260e5d5Smrg#define I915_PARAM_HAS_EXEC_FENCE_ARRAY  49
6866260e5d5Smrg
6876260e5d5Smrg/*
6886260e5d5Smrg * Query whether every context (both per-file default and user created) is
6896260e5d5Smrg * isolated (insofar as HW supports). If this parameter is not true, then
6906260e5d5Smrg * freshly created contexts may inherit values from an existing context,
6916260e5d5Smrg * rather than default HW values. If true, it also ensures (insofar as HW
6926260e5d5Smrg * supports) that all state set by this context will not leak to any other
6936260e5d5Smrg * context.
6946260e5d5Smrg *
6956260e5d5Smrg * As not every engine across every gen support contexts, the returned
6966260e5d5Smrg * value reports the support of context isolation for individual engines by
6976260e5d5Smrg * returning a bitmask of each engine class set to true if that class supports
6986260e5d5Smrg * isolation.
6996260e5d5Smrg */
7006260e5d5Smrg#define I915_PARAM_HAS_CONTEXT_ISOLATION 50
7016260e5d5Smrg
7026260e5d5Smrg/* Frequency of the command streamer timestamps given by the *_TIMESTAMP
7036260e5d5Smrg * registers. This used to be fixed per platform but from CNL onwards, this
7046260e5d5Smrg * might vary depending on the parts.
7056260e5d5Smrg */
7066260e5d5Smrg#define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51
7076260e5d5Smrg
7086260e5d5Smrg/*
7096260e5d5Smrg * Once upon a time we supposed that writes through the GGTT would be
7106260e5d5Smrg * immediately in physical memory (once flushed out of the CPU path). However,
7116260e5d5Smrg * on a few different processors and chipsets, this is not necessarily the case
7126260e5d5Smrg * as the writes appear to be buffered internally. Thus a read of the backing
7136260e5d5Smrg * storage (physical memory) via a different path (with different physical tags
7146260e5d5Smrg * to the indirect write via the GGTT) will see stale values from before
7156260e5d5Smrg * the GGTT write. Inside the kernel, we can for the most part keep track of
7166260e5d5Smrg * the different read/write domains in use (e.g. set-domain), but the assumption
7176260e5d5Smrg * of coherency is baked into the ABI, hence reporting its true state in this
7186260e5d5Smrg * parameter.
7196260e5d5Smrg *
7206260e5d5Smrg * Reports true when writes via mmap_gtt are immediately visible following an
7216260e5d5Smrg * lfence to flush the WCB.
7226260e5d5Smrg *
7236260e5d5Smrg * Reports false when writes via mmap_gtt are indeterminately delayed in an in
7246260e5d5Smrg * internal buffer and are _not_ immediately visible to third parties accessing
7256260e5d5Smrg * directly via mmap_cpu/mmap_wc. Use of mmap_gtt as part of an IPC
7266260e5d5Smrg * communications channel when reporting false is strongly disadvised.
7276260e5d5Smrg */
7286260e5d5Smrg#define I915_PARAM_MMAP_GTT_COHERENT	52
7296260e5d5Smrg
7303b115362Smrg/*
7313b115362Smrg * Query whether DRM_I915_GEM_EXECBUFFER2 supports coordination of parallel
7323b115362Smrg * execution through use of explicit fence support.
7333b115362Smrg * See I915_EXEC_FENCE_OUT and I915_EXEC_FENCE_SUBMIT.
7343b115362Smrg */
7353b115362Smrg#define I915_PARAM_HAS_EXEC_SUBMIT_FENCE 53
7363b115362Smrg
7373b115362Smrg/*
7383b115362Smrg * Revision of the i915-perf uAPI. The value returned helps determine what
7393b115362Smrg * i915-perf features are available. See drm_i915_perf_property_id.
7403b115362Smrg */
7413b115362Smrg#define I915_PARAM_PERF_REVISION	54
7423b115362Smrg
7433b115362Smrg/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
7443b115362Smrg * timeline syncobj through drm_i915_gem_execbuffer_ext_timeline_fences. See
7453b115362Smrg * I915_EXEC_USE_EXTENSIONS.
7463b115362Smrg */
7473b115362Smrg#define I915_PARAM_HAS_EXEC_TIMELINE_FENCES 55
7483b115362Smrg
7493b115362Smrg/* Query if the kernel supports the I915_USERPTR_PROBE flag. */
7503b115362Smrg#define I915_PARAM_HAS_USERPTR_PROBE 56
7513b115362Smrg
752bf6cc7dcSmrg/* Must be kept compact -- no holes and well documented */
753bf6cc7dcSmrg
7543b115362Smrg/**
7553b115362Smrg * struct drm_i915_getparam - Driver parameter query structure.
7563b115362Smrg */
7573b115362Smrgstruct drm_i915_getparam {
7583b115362Smrg	/** @param: Driver parameter to query. */
759fe517fc9Smrg	__s32 param;
7603b115362Smrg
7613b115362Smrg	/**
7623b115362Smrg	 * @value: Address of memory where queried value should be put.
7633b115362Smrg	 *
764fe517fc9Smrg	 * WARNING: Using pointers instead of fixed-size u64 means we need to write
765fe517fc9Smrg	 * compat32 code. Don't repeat this mistake.
766fe517fc9Smrg	 */
76722944501Smrg	int *value;
7683b115362Smrg};
7693b115362Smrg
7703b115362Smrg/**
7713b115362Smrg * typedef drm_i915_getparam_t - Driver parameter query structure.
7723b115362Smrg * See struct drm_i915_getparam.
7733b115362Smrg */
7743b115362Smrgtypedef struct drm_i915_getparam drm_i915_getparam_t;
77522944501Smrg
77622944501Smrg/* Ioctl to set kernel params:
77722944501Smrg */
77822944501Smrg#define I915_SETPARAM_USE_MI_BATCHBUFFER_START            1
77922944501Smrg#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY             2
78022944501Smrg#define I915_SETPARAM_ALLOW_BATCHBUFFER                   3
78122944501Smrg#define I915_SETPARAM_NUM_USED_FENCES                     4
782bf6cc7dcSmrg/* Must be kept compact -- no holes */
78322944501Smrg
78422944501Smrgtypedef struct drm_i915_setparam {
78522944501Smrg	int param;
78622944501Smrg	int value;
78722944501Smrg} drm_i915_setparam_t;
78822944501Smrg
78922944501Smrg/* A memory manager for regions of shared memory:
79022944501Smrg */
79122944501Smrg#define I915_MEM_REGION_AGP 1
79222944501Smrg
79322944501Smrgtypedef struct drm_i915_mem_alloc {
79422944501Smrg	int region;
79522944501Smrg	int alignment;
79622944501Smrg	int size;
79722944501Smrg	int *region_offset;	/* offset from start of fb or agp */
79822944501Smrg} drm_i915_mem_alloc_t;
79922944501Smrg
80022944501Smrgtypedef struct drm_i915_mem_free {
80122944501Smrg	int region;
80222944501Smrg	int region_offset;
80322944501Smrg} drm_i915_mem_free_t;
80422944501Smrg
80522944501Smrgtypedef struct drm_i915_mem_init_heap {
80622944501Smrg	int region;
80722944501Smrg	int size;
80822944501Smrg	int start;
80922944501Smrg} drm_i915_mem_init_heap_t;
81022944501Smrg
81122944501Smrg/* Allow memory manager to be torn down and re-initialized (eg on
81222944501Smrg * rotate):
81322944501Smrg */
81422944501Smrgtypedef struct drm_i915_mem_destroy_heap {
81522944501Smrg	int region;
81622944501Smrg} drm_i915_mem_destroy_heap_t;
81722944501Smrg
81822944501Smrg/* Allow X server to configure which pipes to monitor for vblank signals
81922944501Smrg */
82022944501Smrg#define	DRM_I915_VBLANK_PIPE_A	1
82122944501Smrg#define	DRM_I915_VBLANK_PIPE_B	2
82222944501Smrg
82322944501Smrgtypedef struct drm_i915_vblank_pipe {
82422944501Smrg	int pipe;
82522944501Smrg} drm_i915_vblank_pipe_t;
82622944501Smrg
82722944501Smrg/* Schedule buffer swap at given vertical blank:
82822944501Smrg */
82922944501Smrgtypedef struct drm_i915_vblank_swap {
83022944501Smrg	drm_drawable_t drawable;
83122944501Smrg	enum drm_vblank_seq_type seqtype;
83222944501Smrg	unsigned int sequence;
83322944501Smrg} drm_i915_vblank_swap_t;
83422944501Smrg
83522944501Smrgtypedef struct drm_i915_hws_addr {
83622944501Smrg	__u64 addr;
83722944501Smrg} drm_i915_hws_addr_t;
83822944501Smrg
83922944501Smrgstruct drm_i915_gem_init {
84022944501Smrg	/**
84122944501Smrg	 * Beginning offset in the GTT to be managed by the DRM memory
84222944501Smrg	 * manager.
84322944501Smrg	 */
84422944501Smrg	__u64 gtt_start;
84522944501Smrg	/**
84622944501Smrg	 * Ending offset in the GTT to be managed by the DRM memory
84722944501Smrg	 * manager.
84822944501Smrg	 */
84922944501Smrg	__u64 gtt_end;
85022944501Smrg};
85122944501Smrg
85222944501Smrgstruct drm_i915_gem_create {
85322944501Smrg	/**
85422944501Smrg	 * Requested size for the object.
85522944501Smrg	 *
85622944501Smrg	 * The (page-aligned) allocated size for the object will be returned.
85722944501Smrg	 */
85822944501Smrg	__u64 size;
85922944501Smrg	/**
86022944501Smrg	 * Returned handle for the object.
86122944501Smrg	 *
86222944501Smrg	 * Object handles are nonzero.
86322944501Smrg	 */
86422944501Smrg	__u32 handle;
86522944501Smrg	__u32 pad;
86622944501Smrg};
86722944501Smrg
86822944501Smrgstruct drm_i915_gem_pread {
86922944501Smrg	/** Handle for the object being read. */
87022944501Smrg	__u32 handle;
87122944501Smrg	__u32 pad;
87222944501Smrg	/** Offset into the object to read from */
87322944501Smrg	__u64 offset;
87422944501Smrg	/** Length of data to read */
87522944501Smrg	__u64 size;
87622944501Smrg	/**
87722944501Smrg	 * Pointer to write the data into.
87822944501Smrg	 *
87922944501Smrg	 * This is a fixed-size type for 32/64 compatibility.
88022944501Smrg	 */
88122944501Smrg	__u64 data_ptr;
88222944501Smrg};
88322944501Smrg
88422944501Smrgstruct drm_i915_gem_pwrite {
88522944501Smrg	/** Handle for the object being written to. */
88622944501Smrg	__u32 handle;
88722944501Smrg	__u32 pad;
88822944501Smrg	/** Offset into the object to write to */
88922944501Smrg	__u64 offset;
89022944501Smrg	/** Length of data to write */
89122944501Smrg	__u64 size;
89222944501Smrg	/**
89322944501Smrg	 * Pointer to read the data from.
89422944501Smrg	 *
89522944501Smrg	 * This is a fixed-size type for 32/64 compatibility.
89622944501Smrg	 */
89722944501Smrg	__u64 data_ptr;
89822944501Smrg};
89922944501Smrg
90022944501Smrgstruct drm_i915_gem_mmap {
90122944501Smrg	/** Handle for the object being mapped. */
90222944501Smrg	__u32 handle;
90322944501Smrg	__u32 pad;
90422944501Smrg	/** Offset in the object to map. */
90522944501Smrg	__u64 offset;
90622944501Smrg	/**
90722944501Smrg	 * Length of data to map.
90822944501Smrg	 *
90922944501Smrg	 * The value will be page-aligned.
91022944501Smrg	 */
91122944501Smrg	__u64 size;
91222944501Smrg	/**
91322944501Smrg	 * Returned pointer the data was mapped at.
91422944501Smrg	 *
91522944501Smrg	 * This is a fixed-size type for 32/64 compatibility.
91622944501Smrg	 */
91722944501Smrg	__u64 addr_ptr;
918424e9256Smrg
919424e9256Smrg	/**
920424e9256Smrg	 * Flags for extended behaviour.
921424e9256Smrg	 *
922424e9256Smrg	 * Added in version 2.
923424e9256Smrg	 */
924424e9256Smrg	__u64 flags;
925424e9256Smrg#define I915_MMAP_WC 0x1
92622944501Smrg};
92722944501Smrg
92822944501Smrgstruct drm_i915_gem_mmap_gtt {
92922944501Smrg	/** Handle for the object being mapped. */
93022944501Smrg	__u32 handle;
93122944501Smrg	__u32 pad;
93222944501Smrg	/**
93322944501Smrg	 * Fake offset to use for subsequent mmap call
93422944501Smrg	 *
93522944501Smrg	 * This is a fixed-size type for 32/64 compatibility.
93622944501Smrg	 */
93722944501Smrg	__u64 offset;
93822944501Smrg};
93922944501Smrg
9403b115362Smrg/**
9413b115362Smrg * struct drm_i915_gem_mmap_offset - Retrieve an offset so we can mmap this buffer object.
9423b115362Smrg *
9433b115362Smrg * This struct is passed as argument to the `DRM_IOCTL_I915_GEM_MMAP_OFFSET` ioctl,
9443b115362Smrg * and is used to retrieve the fake offset to mmap an object specified by &handle.
9453b115362Smrg *
9463b115362Smrg * The legacy way of using `DRM_IOCTL_I915_GEM_MMAP` is removed on gen12+.
9473b115362Smrg * `DRM_IOCTL_I915_GEM_MMAP_GTT` is an older supported alias to this struct, but will behave
9483b115362Smrg * as setting the &extensions to 0, and &flags to `I915_MMAP_OFFSET_GTT`.
9493b115362Smrg */
9503b115362Smrgstruct drm_i915_gem_mmap_offset {
9513b115362Smrg	/** @handle: Handle for the object being mapped. */
9523b115362Smrg	__u32 handle;
9533b115362Smrg	/** @pad: Must be zero */
9543b115362Smrg	__u32 pad;
9553b115362Smrg	/**
9563b115362Smrg	 * @offset: The fake offset to use for subsequent mmap call
9573b115362Smrg	 *
9583b115362Smrg	 * This is a fixed-size type for 32/64 compatibility.
9593b115362Smrg	 */
9603b115362Smrg	__u64 offset;
9613b115362Smrg
9623b115362Smrg	/**
9633b115362Smrg	 * @flags: Flags for extended behaviour.
9643b115362Smrg	 *
9653b115362Smrg	 * It is mandatory that one of the `MMAP_OFFSET` types
9663b115362Smrg	 * should be included:
9673b115362Smrg	 *
9683b115362Smrg	 * - `I915_MMAP_OFFSET_GTT`: Use mmap with the object bound to GTT. (Write-Combined)
9693b115362Smrg	 * - `I915_MMAP_OFFSET_WC`: Use Write-Combined caching.
9703b115362Smrg	 * - `I915_MMAP_OFFSET_WB`: Use Write-Back caching.
9713b115362Smrg	 * - `I915_MMAP_OFFSET_FIXED`: Use object placement to determine caching.
9723b115362Smrg	 *
9733b115362Smrg	 * On devices with local memory `I915_MMAP_OFFSET_FIXED` is the only valid
9743b115362Smrg	 * type. On devices without local memory, this caching mode is invalid.
9753b115362Smrg	 *
9763b115362Smrg	 * As caching mode when specifying `I915_MMAP_OFFSET_FIXED`, WC or WB will
9773b115362Smrg	 * be used, depending on the object placement on creation. WB will be used
9783b115362Smrg	 * when the object can only exist in system memory, WC otherwise.
9793b115362Smrg	 */
9803b115362Smrg	__u64 flags;
9813b115362Smrg
9823b115362Smrg#define I915_MMAP_OFFSET_GTT	0
9833b115362Smrg#define I915_MMAP_OFFSET_WC	1
9843b115362Smrg#define I915_MMAP_OFFSET_WB	2
9853b115362Smrg#define I915_MMAP_OFFSET_UC	3
9863b115362Smrg#define I915_MMAP_OFFSET_FIXED	4
9873b115362Smrg
9883b115362Smrg	/**
9893b115362Smrg	 * @extensions: Zero-terminated chain of extensions.
9903b115362Smrg	 *
9913b115362Smrg	 * No current extensions defined; mbz.
9923b115362Smrg	 */
9933b115362Smrg	__u64 extensions;
9943b115362Smrg};
9953b115362Smrg
9963b115362Smrg/**
9973b115362Smrg * struct drm_i915_gem_set_domain - Adjust the objects write or read domain, in
9983b115362Smrg * preparation for accessing the pages via some CPU domain.
9993b115362Smrg *
10003b115362Smrg * Specifying a new write or read domain will flush the object out of the
10013b115362Smrg * previous domain(if required), before then updating the objects domain
10023b115362Smrg * tracking with the new domain.
10033b115362Smrg *
10043b115362Smrg * Note this might involve waiting for the object first if it is still active on
10053b115362Smrg * the GPU.
10063b115362Smrg *
10073b115362Smrg * Supported values for @read_domains and @write_domain:
10083b115362Smrg *
10093b115362Smrg *	- I915_GEM_DOMAIN_WC: Uncached write-combined domain
10103b115362Smrg *	- I915_GEM_DOMAIN_CPU: CPU cache domain
10113b115362Smrg *	- I915_GEM_DOMAIN_GTT: Mappable aperture domain
10123b115362Smrg *
10133b115362Smrg * All other domains are rejected.
10143b115362Smrg *
10153b115362Smrg * Note that for discrete, starting from DG1, this is no longer supported, and
10163b115362Smrg * is instead rejected. On such platforms the CPU domain is effectively static,
10173b115362Smrg * where we also only support a single &drm_i915_gem_mmap_offset cache mode,
10183b115362Smrg * which can't be set explicitly and instead depends on the object placements,
10193b115362Smrg * as per the below.
10203b115362Smrg *
10213b115362Smrg * Implicit caching rules, starting from DG1:
10223b115362Smrg *
10233b115362Smrg *	- If any of the object placements (see &drm_i915_gem_create_ext_memory_regions)
10243b115362Smrg *	  contain I915_MEMORY_CLASS_DEVICE then the object will be allocated and
10253b115362Smrg *	  mapped as write-combined only.
10263b115362Smrg *
10273b115362Smrg *	- Everything else is always allocated and mapped as write-back, with the
10283b115362Smrg *	  guarantee that everything is also coherent with the GPU.
10293b115362Smrg *
10303b115362Smrg * Note that this is likely to change in the future again, where we might need
10313b115362Smrg * more flexibility on future devices, so making this all explicit as part of a
10323b115362Smrg * new &drm_i915_gem_create_ext extension is probable.
10333b115362Smrg */
103422944501Smrgstruct drm_i915_gem_set_domain {
10353b115362Smrg	/** @handle: Handle for the object. */
103622944501Smrg	__u32 handle;
103722944501Smrg
10383b115362Smrg	/** @read_domains: New read domains. */
103922944501Smrg	__u32 read_domains;
104022944501Smrg
10413b115362Smrg	/**
10423b115362Smrg	 * @write_domain: New write domain.
10433b115362Smrg	 *
10443b115362Smrg	 * Note that having something in the write domain implies it's in the
10453b115362Smrg	 * read domain, and only that read domain.
10463b115362Smrg	 */
104722944501Smrg	__u32 write_domain;
104822944501Smrg};
104922944501Smrg
105022944501Smrgstruct drm_i915_gem_sw_finish {
105122944501Smrg	/** Handle for the object */
105222944501Smrg	__u32 handle;
105322944501Smrg};
105422944501Smrg
105522944501Smrgstruct drm_i915_gem_relocation_entry {
105622944501Smrg	/**
105722944501Smrg	 * Handle of the buffer being pointed to by this relocation entry.
105822944501Smrg	 *
105922944501Smrg	 * It's appealing to make this be an index into the mm_validate_entry
106022944501Smrg	 * list to refer to the buffer, but this allows the driver to create
106122944501Smrg	 * a relocation list for state buffers and not re-write it per
106222944501Smrg	 * exec using the buffer.
106322944501Smrg	 */
106422944501Smrg	__u32 target_handle;
106522944501Smrg
106622944501Smrg	/**
106722944501Smrg	 * Value to be added to the offset of the target buffer to make up
106822944501Smrg	 * the relocation entry.
106922944501Smrg	 */
107022944501Smrg	__u32 delta;
107122944501Smrg
107222944501Smrg	/** Offset in the buffer the relocation entry will be written into */
107322944501Smrg	__u64 offset;
107422944501Smrg
107522944501Smrg	/**
107622944501Smrg	 * Offset value of the target buffer that the relocation entry was last
107722944501Smrg	 * written as.
107822944501Smrg	 *
107922944501Smrg	 * If the buffer has the same offset as last time, we can skip syncing
108022944501Smrg	 * and writing the relocation.  This value is written back out by
108122944501Smrg	 * the execbuffer ioctl when the relocation is written.
108222944501Smrg	 */
108322944501Smrg	__u64 presumed_offset;
108422944501Smrg
108522944501Smrg	/**
108622944501Smrg	 * Target memory domains read by this operation.
108722944501Smrg	 */
108822944501Smrg	__u32 read_domains;
108922944501Smrg
109022944501Smrg	/**
109122944501Smrg	 * Target memory domains written by this operation.
109222944501Smrg	 *
109322944501Smrg	 * Note that only one domain may be written by the whole
109422944501Smrg	 * execbuffer operation, so that where there are conflicts,
109522944501Smrg	 * the application will get -EINVAL back.
109622944501Smrg	 */
109722944501Smrg	__u32 write_domain;
109822944501Smrg};
109922944501Smrg
110022944501Smrg/** @{
110122944501Smrg * Intel memory domains
110222944501Smrg *
110322944501Smrg * Most of these just align with the various caches in
110422944501Smrg * the system and are used to flush and invalidate as
110522944501Smrg * objects end up cached in different domains.
110622944501Smrg */
110722944501Smrg/** CPU cache */
110822944501Smrg#define I915_GEM_DOMAIN_CPU		0x00000001
110922944501Smrg/** Render cache, used by 2D and 3D drawing */
111022944501Smrg#define I915_GEM_DOMAIN_RENDER		0x00000002
111122944501Smrg/** Sampler cache, used by texture engine */
111222944501Smrg#define I915_GEM_DOMAIN_SAMPLER		0x00000004
111322944501Smrg/** Command queue, used to load batch buffers */
111422944501Smrg#define I915_GEM_DOMAIN_COMMAND		0x00000008
111522944501Smrg/** Instruction cache, used by shader programs */
111622944501Smrg#define I915_GEM_DOMAIN_INSTRUCTION	0x00000010
111722944501Smrg/** Vertex address cache */
111822944501Smrg#define I915_GEM_DOMAIN_VERTEX		0x00000020
111922944501Smrg/** GTT domain - aperture and scanout */
112022944501Smrg#define I915_GEM_DOMAIN_GTT		0x00000040
11216260e5d5Smrg/** WC domain - uncached access */
11226260e5d5Smrg#define I915_GEM_DOMAIN_WC		0x00000080
112322944501Smrg/** @} */
112422944501Smrg
112522944501Smrgstruct drm_i915_gem_exec_object {
112622944501Smrg	/**
112722944501Smrg	 * User's handle for a buffer to be bound into the GTT for this
112822944501Smrg	 * operation.
112922944501Smrg	 */
113022944501Smrg	__u32 handle;
113122944501Smrg
113222944501Smrg	/** Number of relocations to be performed on this buffer */
113322944501Smrg	__u32 relocation_count;
113422944501Smrg	/**
113522944501Smrg	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
113622944501Smrg	 * the relocations to be performed in this buffer.
113722944501Smrg	 */
113822944501Smrg	__u64 relocs_ptr;
113922944501Smrg
114022944501Smrg	/** Required alignment in graphics aperture */
114122944501Smrg	__u64 alignment;
114222944501Smrg
114322944501Smrg	/**
114422944501Smrg	 * Returned value of the updated offset of the object, for future
114522944501Smrg	 * presumed_offset writes.
114622944501Smrg	 */
114722944501Smrg	__u64 offset;
114822944501Smrg};
114922944501Smrg
11503b115362Smrg/* DRM_IOCTL_I915_GEM_EXECBUFFER was removed in Linux 5.13 */
115122944501Smrgstruct drm_i915_gem_execbuffer {
115222944501Smrg	/**
115322944501Smrg	 * List of buffers to be validated with their relocations to be
115422944501Smrg	 * performend on them.
115522944501Smrg	 *
115622944501Smrg	 * This is a pointer to an array of struct drm_i915_gem_validate_entry.
115722944501Smrg	 *
115822944501Smrg	 * These buffers must be listed in an order such that all relocations
115922944501Smrg	 * a buffer is performing refer to buffers that have already appeared
116022944501Smrg	 * in the validate list.
116122944501Smrg	 */
116222944501Smrg	__u64 buffers_ptr;
116322944501Smrg	__u32 buffer_count;
116422944501Smrg
116522944501Smrg	/** Offset in the batchbuffer to start execution from. */
116622944501Smrg	__u32 batch_start_offset;
116722944501Smrg	/** Bytes used in batchbuffer from batch_start_offset */
116822944501Smrg	__u32 batch_len;
116922944501Smrg	__u32 DR1;
117022944501Smrg	__u32 DR4;
117122944501Smrg	__u32 num_cliprects;
117222944501Smrg	/** This is a struct drm_clip_rect *cliprects */
117322944501Smrg	__u64 cliprects_ptr;
117422944501Smrg};
117522944501Smrg
117622944501Smrgstruct drm_i915_gem_exec_object2 {
117722944501Smrg	/**
117822944501Smrg	 * User's handle for a buffer to be bound into the GTT for this
117922944501Smrg	 * operation.
118022944501Smrg	 */
118122944501Smrg	__u32 handle;
118222944501Smrg
118322944501Smrg	/** Number of relocations to be performed on this buffer */
118422944501Smrg	__u32 relocation_count;
118522944501Smrg	/**
118622944501Smrg	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
118722944501Smrg	 * the relocations to be performed in this buffer.
118822944501Smrg	 */
118922944501Smrg	__u64 relocs_ptr;
119022944501Smrg
119122944501Smrg	/** Required alignment in graphics aperture */
119222944501Smrg	__u64 alignment;
119322944501Smrg
119422944501Smrg	/**
1195fe517fc9Smrg	 * When the EXEC_OBJECT_PINNED flag is specified this is populated by
1196fe517fc9Smrg	 * the user with the GTT offset at which this object will be pinned.
11973b115362Smrg	 *
1198fe517fc9Smrg	 * When the I915_EXEC_NO_RELOC flag is specified this must contain the
1199fe517fc9Smrg	 * presumed_offset of the object.
12003b115362Smrg	 *
1201fe517fc9Smrg	 * During execbuffer2 the kernel populates it with the value of the
1202fe517fc9Smrg	 * current GTT offset of the object, for future presumed_offset writes.
12033b115362Smrg	 *
12043b115362Smrg	 * See struct drm_i915_gem_create_ext for the rules when dealing with
12053b115362Smrg	 * alignment restrictions with I915_MEMORY_CLASS_DEVICE, on devices with
12063b115362Smrg	 * minimum page sizes, like DG2.
120722944501Smrg	 */
120822944501Smrg	__u64 offset;
120922944501Smrg
12102ee35494Smrg#define EXEC_OBJECT_NEEDS_FENCE		 (1<<0)
12112ee35494Smrg#define EXEC_OBJECT_NEEDS_GTT		 (1<<1)
12122ee35494Smrg#define EXEC_OBJECT_WRITE		 (1<<2)
1213fe517fc9Smrg#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
12142ee35494Smrg#define EXEC_OBJECT_PINNED		 (1<<4)
12152ee35494Smrg#define EXEC_OBJECT_PAD_TO_SIZE		 (1<<5)
12162ee35494Smrg/* The kernel implicitly tracks GPU activity on all GEM objects, and
12172ee35494Smrg * synchronises operations with outstanding rendering. This includes
12182ee35494Smrg * rendering on other devices if exported via dma-buf. However, sometimes
12192ee35494Smrg * this tracking is too coarse and the user knows better. For example,
12202ee35494Smrg * if the object is split into non-overlapping ranges shared between different
12212ee35494Smrg * clients or engines (i.e. suballocating objects), the implicit tracking
12222ee35494Smrg * by kernel assumes that each operation affects the whole object rather
12232ee35494Smrg * than an individual range, causing needless synchronisation between clients.
12242ee35494Smrg * The kernel will also forgo any CPU cache flushes prior to rendering from
12252ee35494Smrg * the object as the client is expected to be also handling such domain
12262ee35494Smrg * tracking.
12272ee35494Smrg *
12282ee35494Smrg * The kernel maintains the implicit tracking in order to manage resources
12292ee35494Smrg * used by the GPU - this flag only disables the synchronisation prior to
12302ee35494Smrg * rendering with this object in this execbuf.
12312ee35494Smrg *
12322ee35494Smrg * Opting out of implicit synhronisation requires the user to do its own
12332ee35494Smrg * explicit tracking to avoid rendering corruption. See, for example,
12342ee35494Smrg * I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously.
12352ee35494Smrg */
12362ee35494Smrg#define EXEC_OBJECT_ASYNC		(1<<6)
12376260e5d5Smrg/* Request that the contents of this execobject be copied into the error
12386260e5d5Smrg * state upon a GPU hang involving this batch for post-mortem debugging.
12396260e5d5Smrg * These buffers are recorded in no particular order as "user" in
12406260e5d5Smrg * /sys/class/drm/cardN/error. Query I915_PARAM_HAS_EXEC_CAPTURE to see
12416260e5d5Smrg * if the kernel supports this flag.
12426260e5d5Smrg */
12436260e5d5Smrg#define EXEC_OBJECT_CAPTURE		(1<<7)
12442ee35494Smrg/* All remaining bits are MBZ and RESERVED FOR FUTURE USE */
12456260e5d5Smrg#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_CAPTURE<<1)
124622944501Smrg	__u64 flags;
1247e88f27b3Smrg
12482ee35494Smrg	union {
12492ee35494Smrg		__u64 rsvd1;
12502ee35494Smrg		__u64 pad_to_size;
12512ee35494Smrg	};
125222944501Smrg	__u64 rsvd2;
125322944501Smrg};
125422944501Smrg
12553b115362Smrg/**
12563b115362Smrg * struct drm_i915_gem_exec_fence - An input or output fence for the execbuf
12573b115362Smrg * ioctl.
12583b115362Smrg *
12593b115362Smrg * The request will wait for input fence to signal before submission.
12603b115362Smrg *
12613b115362Smrg * The returned output fence will be signaled after the completion of the
12623b115362Smrg * request.
12633b115362Smrg */
12646260e5d5Smrgstruct drm_i915_gem_exec_fence {
12653b115362Smrg	/** @handle: User's handle for a drm_syncobj to wait on or signal. */
12666260e5d5Smrg	__u32 handle;
12676260e5d5Smrg
12683b115362Smrg	/**
12693b115362Smrg	 * @flags: Supported flags are:
12703b115362Smrg	 *
12713b115362Smrg	 * I915_EXEC_FENCE_WAIT:
12723b115362Smrg	 * Wait for the input fence before request submission.
12733b115362Smrg	 *
12743b115362Smrg	 * I915_EXEC_FENCE_SIGNAL:
12753b115362Smrg	 * Return request completion fence as output
12763b115362Smrg	 */
12773b115362Smrg	__u32 flags;
12786260e5d5Smrg#define I915_EXEC_FENCE_WAIT            (1<<0)
12796260e5d5Smrg#define I915_EXEC_FENCE_SIGNAL          (1<<1)
12806260e5d5Smrg#define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1))
12816260e5d5Smrg};
12826260e5d5Smrg
12833b115362Smrg/**
12843b115362Smrg * struct drm_i915_gem_execbuffer_ext_timeline_fences - Timeline fences
12853b115362Smrg * for execbuf ioctl.
12863b115362Smrg *
12873b115362Smrg * This structure describes an array of drm_syncobj and associated points for
12883b115362Smrg * timeline variants of drm_syncobj. It is invalid to append this structure to
12893b115362Smrg * the execbuf if I915_EXEC_FENCE_ARRAY is set.
12903b115362Smrg */
12913b115362Smrgstruct drm_i915_gem_execbuffer_ext_timeline_fences {
12923b115362Smrg#define DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES 0
12933b115362Smrg	/** @base: Extension link. See struct i915_user_extension. */
12943b115362Smrg	struct i915_user_extension base;
12953b115362Smrg
12963b115362Smrg	/**
12973b115362Smrg	 * @fence_count: Number of elements in the @handles_ptr & @value_ptr
12983b115362Smrg	 * arrays.
12993b115362Smrg	 */
13003b115362Smrg	__u64 fence_count;
13013b115362Smrg
13023b115362Smrg	/**
13033b115362Smrg	 * @handles_ptr: Pointer to an array of struct drm_i915_gem_exec_fence
13043b115362Smrg	 * of length @fence_count.
13053b115362Smrg	 */
13063b115362Smrg	__u64 handles_ptr;
13073b115362Smrg
130822944501Smrg	/**
13093b115362Smrg	 * @values_ptr: Pointer to an array of u64 values of length
13103b115362Smrg	 * @fence_count.
13113b115362Smrg	 * Values must be 0 for a binary drm_syncobj. A Value of 0 for a
13123b115362Smrg	 * timeline drm_syncobj is invalid as it turns a drm_syncobj into a
13133b115362Smrg	 * binary one.
131422944501Smrg	 */
13153b115362Smrg	__u64 values_ptr;
13163b115362Smrg};
13173b115362Smrg
13183b115362Smrg/**
13193b115362Smrg * struct drm_i915_gem_execbuffer2 - Structure for DRM_I915_GEM_EXECBUFFER2
13203b115362Smrg * ioctl.
13213b115362Smrg */
13223b115362Smrgstruct drm_i915_gem_execbuffer2 {
13233b115362Smrg	/** @buffers_ptr: Pointer to a list of gem_exec_object2 structs */
132422944501Smrg	__u64 buffers_ptr;
13253b115362Smrg
13263b115362Smrg	/** @buffer_count: Number of elements in @buffers_ptr array */
132722944501Smrg	__u32 buffer_count;
132822944501Smrg
13293b115362Smrg	/**
13303b115362Smrg	 * @batch_start_offset: Offset in the batchbuffer to start execution
13313b115362Smrg	 * from.
13323b115362Smrg	 */
133322944501Smrg	__u32 batch_start_offset;
13343b115362Smrg
13353b115362Smrg	/**
13363b115362Smrg	 * @batch_len: Length in bytes of the batch buffer, starting from the
13373b115362Smrg	 * @batch_start_offset. If 0, length is assumed to be the batch buffer
13383b115362Smrg	 * object size.
13393b115362Smrg	 */
134022944501Smrg	__u32 batch_len;
13413b115362Smrg
13423b115362Smrg	/** @DR1: deprecated */
134322944501Smrg	__u32 DR1;
13443b115362Smrg
13453b115362Smrg	/** @DR4: deprecated */
134622944501Smrg	__u32 DR4;
13473b115362Smrg
13483b115362Smrg	/** @num_cliprects: See @cliprects_ptr */
134922944501Smrg	__u32 num_cliprects;
13503b115362Smrg
13516260e5d5Smrg	/**
13523b115362Smrg	 * @cliprects_ptr: Kernel clipping was a DRI1 misfeature.
13533b115362Smrg	 *
13543b115362Smrg	 * It is invalid to use this field if I915_EXEC_FENCE_ARRAY or
13553b115362Smrg	 * I915_EXEC_USE_EXTENSIONS flags are not set.
13563b115362Smrg	 *
13573b115362Smrg	 * If I915_EXEC_FENCE_ARRAY is set, then this is a pointer to an array
13583b115362Smrg	 * of &drm_i915_gem_exec_fence and @num_cliprects is the length of the
13593b115362Smrg	 * array.
13603b115362Smrg	 *
13613b115362Smrg	 * If I915_EXEC_USE_EXTENSIONS is set, then this is a pointer to a
13623b115362Smrg	 * single &i915_user_extension and num_cliprects is 0.
13636260e5d5Smrg	 */
136422944501Smrg	__u64 cliprects_ptr;
13653b115362Smrg
13663b115362Smrg	/** @flags: Execbuf flags */
13673b115362Smrg	__u64 flags;
1368bf6cc7dcSmrg#define I915_EXEC_RING_MASK              (0x3f)
136969dda199Smrg#define I915_EXEC_DEFAULT                (0<<0)
1370d049871aSmrg#define I915_EXEC_RENDER                 (1<<0)
137169dda199Smrg#define I915_EXEC_BSD                    (2<<0)
137269dda199Smrg#define I915_EXEC_BLT                    (3<<0)
1373e88f27b3Smrg#define I915_EXEC_VEBOX                  (4<<0)
1374e88f27b3Smrg
1375e88f27b3Smrg/* Used for switching the constants addressing mode on gen4+ RENDER ring.
1376e88f27b3Smrg * Gen6+ only supports relative addressing to dynamic state (default) and
1377e88f27b3Smrg * absolute addressing.
1378e88f27b3Smrg *
1379e88f27b3Smrg * These flags are ignored for the BSD and BLT rings.
1380e88f27b3Smrg */
1381e88f27b3Smrg#define I915_EXEC_CONSTANTS_MASK 	(3<<6)
1382e88f27b3Smrg#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
1383e88f27b3Smrg#define I915_EXEC_CONSTANTS_ABSOLUTE 	(1<<6)
1384e88f27b3Smrg#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
138522944501Smrg
1386e88f27b3Smrg/** Resets the SO write offset registers for transform feedback on gen7. */
1387e88f27b3Smrg#define I915_EXEC_GEN7_SOL_RESET	(1<<8)
1388e88f27b3Smrg
1389e88f27b3Smrg/** Request a privileged ("secure") batch buffer. Note only available for
1390e88f27b3Smrg * DRM_ROOT_ONLY | DRM_MASTER processes.
1391e88f27b3Smrg */
1392e88f27b3Smrg#define I915_EXEC_SECURE		(1<<9)
1393e88f27b3Smrg
1394e88f27b3Smrg/** Inform the kernel that the batch is and will always be pinned. This
1395e88f27b3Smrg * negates the requirement for a workaround to be performed to avoid
1396e88f27b3Smrg * an incoherent CS (such as can be found on 830/845). If this flag is
1397e88f27b3Smrg * not passed, the kernel will endeavour to make sure the batch is
1398e88f27b3Smrg * coherent with the CS before execution. If this flag is passed,
1399e88f27b3Smrg * userspace assumes the responsibility for ensuring the same.
1400e88f27b3Smrg */
1401e88f27b3Smrg#define I915_EXEC_IS_PINNED		(1<<10)
1402e88f27b3Smrg
1403baaff307Smrg/** Provide a hint to the kernel that the command stream and auxiliary
1404e88f27b3Smrg * state buffers already holds the correct presumed addresses and so the
1405e88f27b3Smrg * relocation process may be skipped if no buffers need to be moved in
1406e88f27b3Smrg * preparation for the execbuffer.
1407e88f27b3Smrg */
1408e88f27b3Smrg#define I915_EXEC_NO_RELOC		(1<<11)
1409e88f27b3Smrg
1410e88f27b3Smrg/** Use the reloc.handle as an index into the exec object array rather
1411e88f27b3Smrg * than as the per-file handle.
1412e88f27b3Smrg */
1413e88f27b3Smrg#define I915_EXEC_HANDLE_LUT		(1<<12)
1414e88f27b3Smrg
1415424e9256Smrg/** Used for switching BSD rings on the platforms with two BSD rings */
1416fe517fc9Smrg#define I915_EXEC_BSD_SHIFT	 (13)
1417fe517fc9Smrg#define I915_EXEC_BSD_MASK	 (3 << I915_EXEC_BSD_SHIFT)
1418fe517fc9Smrg/* default ping-pong mode */
1419fe517fc9Smrg#define I915_EXEC_BSD_DEFAULT	 (0 << I915_EXEC_BSD_SHIFT)
1420fe517fc9Smrg#define I915_EXEC_BSD_RING1	 (1 << I915_EXEC_BSD_SHIFT)
1421fe517fc9Smrg#define I915_EXEC_BSD_RING2	 (2 << I915_EXEC_BSD_SHIFT)
1422fe517fc9Smrg
1423fe517fc9Smrg/** Tell the kernel that the batchbuffer is processed by
1424fe517fc9Smrg *  the resource streamer.
1425fe517fc9Smrg */
1426fe517fc9Smrg#define I915_EXEC_RESOURCE_STREAMER     (1<<15)
1427424e9256Smrg
14282ee35494Smrg/* Setting I915_EXEC_FENCE_IN implies that lower_32_bits(rsvd2) represent
14292ee35494Smrg * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
14302ee35494Smrg * the batch.
14312ee35494Smrg *
14322ee35494Smrg * Returns -EINVAL if the sync_file fd cannot be found.
14332ee35494Smrg */
14342ee35494Smrg#define I915_EXEC_FENCE_IN		(1<<16)
14352ee35494Smrg
14362ee35494Smrg/* Setting I915_EXEC_FENCE_OUT causes the ioctl to return a sync_file fd
14372ee35494Smrg * in the upper_32_bits(rsvd2) upon success. Ownership of the fd is given
14382ee35494Smrg * to the caller, and it should be close() after use. (The fd is a regular
14392ee35494Smrg * file descriptor and will be cleaned up on process termination. It holds
14402ee35494Smrg * a reference to the request, but nothing else.)
14412ee35494Smrg *
14422ee35494Smrg * The sync_file fd can be combined with other sync_file and passed either
14432ee35494Smrg * to execbuf using I915_EXEC_FENCE_IN, to atomic KMS ioctls (so that a flip
14442ee35494Smrg * will only occur after this request completes), or to other devices.
14452ee35494Smrg *
14462ee35494Smrg * Using I915_EXEC_FENCE_OUT requires use of
14472ee35494Smrg * DRM_IOCTL_I915_GEM_EXECBUFFER2_WR ioctl so that the result is written
14482ee35494Smrg * back to userspace. Failure to do so will cause the out-fence to always
14492ee35494Smrg * be reported as zero, and the real fence fd to be leaked.
14502ee35494Smrg */
14512ee35494Smrg#define I915_EXEC_FENCE_OUT		(1<<17)
14522ee35494Smrg
14536260e5d5Smrg/*
14546260e5d5Smrg * Traditionally the execbuf ioctl has only considered the final element in
14556260e5d5Smrg * the execobject[] to be the executable batch. Often though, the client
14566260e5d5Smrg * will known the batch object prior to construction and being able to place
14576260e5d5Smrg * it into the execobject[] array first can simplify the relocation tracking.
14586260e5d5Smrg * Setting I915_EXEC_BATCH_FIRST tells execbuf to use element 0 of the
14596260e5d5Smrg * execobject[] as the * batch instead (the default is to use the last
14606260e5d5Smrg * element).
14616260e5d5Smrg */
14626260e5d5Smrg#define I915_EXEC_BATCH_FIRST		(1<<18)
14636260e5d5Smrg
14646260e5d5Smrg/* Setting I915_FENCE_ARRAY implies that num_cliprects and cliprects_ptr
14656260e5d5Smrg * define an array of i915_gem_exec_fence structures which specify a set of
14666260e5d5Smrg * dma fences to wait upon or signal.
14676260e5d5Smrg */
14686260e5d5Smrg#define I915_EXEC_FENCE_ARRAY   (1<<19)
14696260e5d5Smrg
14703b115362Smrg/*
14713b115362Smrg * Setting I915_EXEC_FENCE_SUBMIT implies that lower_32_bits(rsvd2) represent
14723b115362Smrg * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
14733b115362Smrg * the batch.
14743b115362Smrg *
14753b115362Smrg * Returns -EINVAL if the sync_file fd cannot be found.
14763b115362Smrg */
14773b115362Smrg#define I915_EXEC_FENCE_SUBMIT		(1 << 20)
14783b115362Smrg
14793b115362Smrg/*
14803b115362Smrg * Setting I915_EXEC_USE_EXTENSIONS implies that
14813b115362Smrg * drm_i915_gem_execbuffer2.cliprects_ptr is treated as a pointer to an linked
14823b115362Smrg * list of i915_user_extension. Each i915_user_extension node is the base of a
14833b115362Smrg * larger structure. The list of supported structures are listed in the
14843b115362Smrg * drm_i915_gem_execbuffer_ext enum.
14853b115362Smrg */
14863b115362Smrg#define I915_EXEC_USE_EXTENSIONS	(1 << 21)
14873b115362Smrg#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_USE_EXTENSIONS << 1))
14883b115362Smrg
14893b115362Smrg	/** @rsvd1: Context id */
14903b115362Smrg	__u64 rsvd1;
14913b115362Smrg
14923b115362Smrg	/**
14933b115362Smrg	 * @rsvd2: in and out sync_file file descriptors.
14943b115362Smrg	 *
14953b115362Smrg	 * When I915_EXEC_FENCE_IN or I915_EXEC_FENCE_SUBMIT flag is set, the
14963b115362Smrg	 * lower 32 bits of this field will have the in sync_file fd (input).
14973b115362Smrg	 *
14983b115362Smrg	 * When I915_EXEC_FENCE_OUT flag is set, the upper 32 bits of this
14993b115362Smrg	 * field will have the out sync_file fd (output).
15003b115362Smrg	 */
15013b115362Smrg	__u64 rsvd2;
15023b115362Smrg};
1503e88f27b3Smrg
1504e88f27b3Smrg#define I915_EXEC_CONTEXT_ID_MASK	(0xffffffff)
1505e88f27b3Smrg#define i915_execbuffer2_set_context_id(eb2, context) \
1506e88f27b3Smrg	(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
1507e88f27b3Smrg#define i915_execbuffer2_get_context_id(eb2) \
1508e88f27b3Smrg	((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
1509e88f27b3Smrg
151022944501Smrgstruct drm_i915_gem_pin {
151122944501Smrg	/** Handle of the buffer to be pinned. */
151222944501Smrg	__u32 handle;
151322944501Smrg	__u32 pad;
151422944501Smrg
151522944501Smrg	/** alignment required within the aperture */
151622944501Smrg	__u64 alignment;
151722944501Smrg
151822944501Smrg	/** Returned GTT offset of the buffer. */
151922944501Smrg	__u64 offset;
152022944501Smrg};
152122944501Smrg
152222944501Smrgstruct drm_i915_gem_unpin {
152322944501Smrg	/** Handle of the buffer to be unpinned. */
152422944501Smrg	__u32 handle;
152522944501Smrg	__u32 pad;
152622944501Smrg};
152722944501Smrg
152822944501Smrgstruct drm_i915_gem_busy {
152922944501Smrg	/** Handle of the buffer to check for busy */
153022944501Smrg	__u32 handle;
153122944501Smrg
1532fe517fc9Smrg	/** Return busy status
1533fe517fc9Smrg	 *
1534fe517fc9Smrg	 * A return of 0 implies that the object is idle (after
1535fe517fc9Smrg	 * having flushed any pending activity), and a non-zero return that
1536fe517fc9Smrg	 * the object is still in-flight on the GPU. (The GPU has not yet
1537fe517fc9Smrg	 * signaled completion for all pending requests that reference the
15382ee35494Smrg	 * object.) An object is guaranteed to become idle eventually (so
15392ee35494Smrg	 * long as no new GPU commands are executed upon it). Due to the
15402ee35494Smrg	 * asynchronous nature of the hardware, an object reported
15412ee35494Smrg	 * as busy may become idle before the ioctl is completed.
15422ee35494Smrg	 *
15432ee35494Smrg	 * Furthermore, if the object is busy, which engine is busy is only
1544bf6cc7dcSmrg	 * provided as a guide and only indirectly by reporting its class
1545bf6cc7dcSmrg	 * (there may be more than one engine in each class). There are race
1546bf6cc7dcSmrg	 * conditions which prevent the report of which engines are busy from
1547bf6cc7dcSmrg	 * being always accurate.  However, the converse is not true. If the
1548bf6cc7dcSmrg	 * object is idle, the result of the ioctl, that all engines are idle,
1549bf6cc7dcSmrg	 * is accurate.
1550fe517fc9Smrg	 *
1551fe517fc9Smrg	 * The returned dword is split into two fields to indicate both
1552bf6cc7dcSmrg	 * the engine classess on which the object is being read, and the
1553bf6cc7dcSmrg	 * engine class on which it is currently being written (if any).
1554fe517fc9Smrg	 *
1555fe517fc9Smrg	 * The low word (bits 0:15) indicate if the object is being written
1556fe517fc9Smrg	 * to by any engine (there can only be one, as the GEM implicit
1557fe517fc9Smrg	 * synchronisation rules force writes to be serialised). Only the
1558bf6cc7dcSmrg	 * engine class (offset by 1, I915_ENGINE_CLASS_RENDER is reported as
1559bf6cc7dcSmrg	 * 1 not 0 etc) for the last write is reported.
1560fe517fc9Smrg	 *
1561bf6cc7dcSmrg	 * The high word (bits 16:31) are a bitmask of which engines classes
1562bf6cc7dcSmrg	 * are currently reading from the object. Multiple engines may be
1563fe517fc9Smrg	 * reading from the object simultaneously.
1564fe517fc9Smrg	 *
1565bf6cc7dcSmrg	 * The value of each engine class is the same as specified in the
15663b115362Smrg	 * I915_CONTEXT_PARAM_ENGINES context parameter and via perf, i.e.
1567bf6cc7dcSmrg	 * I915_ENGINE_CLASS_RENDER, I915_ENGINE_CLASS_COPY, etc.
15683b115362Smrg	 * Some hardware may have parallel execution engines, e.g. multiple
15693b115362Smrg	 * media engines, which are mapped to the same class identifier and so
15703b115362Smrg	 * are not separately reported for busyness.
15712ee35494Smrg	 *
15722ee35494Smrg	 * Caveat emptor:
15732ee35494Smrg	 * Only the boolean result of this query is reliable; that is whether
15742ee35494Smrg	 * the object is idle or busy. The report of which engines are busy
15752ee35494Smrg	 * should be only used as a heuristic.
1576e88f27b3Smrg	 */
157722944501Smrg	__u32 busy;
157822944501Smrg};
157922944501Smrg
1580e88f27b3Smrg/**
15813b115362Smrg * struct drm_i915_gem_caching - Set or get the caching for given object
15823b115362Smrg * handle.
1583e88f27b3Smrg *
15843b115362Smrg * Allow userspace to control the GTT caching bits for a given object when the
15853b115362Smrg * object is later mapped through the ppGTT(or GGTT on older platforms lacking
15863b115362Smrg * ppGTT support, or if the object is used for scanout). Note that this might
15873b115362Smrg * require unbinding the object from the GTT first, if its current caching value
15883b115362Smrg * doesn't match.
1589e88f27b3Smrg *
15903b115362Smrg * Note that this all changes on discrete platforms, starting from DG1, the
15913b115362Smrg * set/get caching is no longer supported, and is now rejected.  Instead the CPU
15923b115362Smrg * caching attributes(WB vs WC) will become an immutable creation time property
15933b115362Smrg * for the object, along with the GTT caching level. For now we don't expose any
15943b115362Smrg * new uAPI for this, instead on DG1 this is all implicit, although this largely
15953b115362Smrg * shouldn't matter since DG1 is coherent by default(without any way of
15963b115362Smrg * controlling it).
15973b115362Smrg *
15983b115362Smrg * Implicit caching rules, starting from DG1:
15993b115362Smrg *
16003b115362Smrg *     - If any of the object placements (see &drm_i915_gem_create_ext_memory_regions)
16013b115362Smrg *       contain I915_MEMORY_CLASS_DEVICE then the object will be allocated and
16023b115362Smrg *       mapped as write-combined only.
16033b115362Smrg *
16043b115362Smrg *     - Everything else is always allocated and mapped as write-back, with the
16053b115362Smrg *       guarantee that everything is also coherent with the GPU.
1606e88f27b3Smrg *
16073b115362Smrg * Note that this is likely to change in the future again, where we might need
16083b115362Smrg * more flexibility on future devices, so making this all explicit as part of a
16093b115362Smrg * new &drm_i915_gem_create_ext extension is probable.
16103b115362Smrg *
16113b115362Smrg * Side note: Part of the reason for this is that changing the at-allocation-time CPU
16123b115362Smrg * caching attributes for the pages might be required(and is expensive) if we
16133b115362Smrg * need to then CPU map the pages later with different caching attributes. This
16143b115362Smrg * inconsistent caching behaviour, while supported on x86, is not universally
16153b115362Smrg * supported on other architectures. So for simplicity we opt for setting
16163b115362Smrg * everything at creation time, whilst also making it immutable, on discrete
16173b115362Smrg * platforms.
1618e88f27b3Smrg */
1619e88f27b3Smrgstruct drm_i915_gem_caching {
1620e88f27b3Smrg	/**
16213b115362Smrg	 * @handle: Handle of the buffer to set/get the caching level.
16223b115362Smrg	 */
1623e88f27b3Smrg	__u32 handle;
1624e88f27b3Smrg
1625e88f27b3Smrg	/**
16263b115362Smrg	 * @caching: The GTT caching level to apply or possible return value.
1627e88f27b3Smrg	 *
16283b115362Smrg	 * The supported @caching values:
16293b115362Smrg	 *
16303b115362Smrg	 * I915_CACHING_NONE:
16313b115362Smrg	 *
16323b115362Smrg	 * GPU access is not coherent with CPU caches.  Default for machines
16333b115362Smrg	 * without an LLC. This means manual flushing might be needed, if we
16343b115362Smrg	 * want GPU access to be coherent.
16353b115362Smrg	 *
16363b115362Smrg	 * I915_CACHING_CACHED:
16373b115362Smrg	 *
16383b115362Smrg	 * GPU access is coherent with CPU caches and furthermore the data is
16393b115362Smrg	 * cached in last-level caches shared between CPU cores and the GPU GT.
16403b115362Smrg	 *
16413b115362Smrg	 * I915_CACHING_DISPLAY:
16423b115362Smrg	 *
16433b115362Smrg	 * Special GPU caching mode which is coherent with the scanout engines.
16443b115362Smrg	 * Transparently falls back to I915_CACHING_NONE on platforms where no
16453b115362Smrg	 * special cache mode (like write-through or gfdt flushing) is
16463b115362Smrg	 * available. The kernel automatically sets this mode when using a
16473b115362Smrg	 * buffer as a scanout target.  Userspace can manually set this mode to
16483b115362Smrg	 * avoid a costly stall and clflush in the hotpath of drawing the first
16493b115362Smrg	 * frame.
16503b115362Smrg	 */
16513b115362Smrg#define I915_CACHING_NONE		0
16523b115362Smrg#define I915_CACHING_CACHED		1
16533b115362Smrg#define I915_CACHING_DISPLAY		2
1654e88f27b3Smrg	__u32 caching;
1655e88f27b3Smrg};
1656e88f27b3Smrg
165722944501Smrg#define I915_TILING_NONE	0
165822944501Smrg#define I915_TILING_X		1
165922944501Smrg#define I915_TILING_Y		2
16603b115362Smrg/*
16613b115362Smrg * Do not add new tiling types here.  The I915_TILING_* values are for
16623b115362Smrg * de-tiling fence registers that no longer exist on modern platforms.  Although
16633b115362Smrg * the hardware may support new types of tiling in general (e.g., Tile4), we
16643b115362Smrg * do not need to add them to the uapi that is specific to now-defunct ioctls.
16653b115362Smrg */
16662ee35494Smrg#define I915_TILING_LAST	I915_TILING_Y
166722944501Smrg
166822944501Smrg#define I915_BIT_6_SWIZZLE_NONE		0
166922944501Smrg#define I915_BIT_6_SWIZZLE_9		1
167022944501Smrg#define I915_BIT_6_SWIZZLE_9_10		2
167122944501Smrg#define I915_BIT_6_SWIZZLE_9_11		3
167222944501Smrg#define I915_BIT_6_SWIZZLE_9_10_11	4
167322944501Smrg/* Not seen by userland */
167422944501Smrg#define I915_BIT_6_SWIZZLE_UNKNOWN	5
167522944501Smrg/* Seen by userland. */
167622944501Smrg#define I915_BIT_6_SWIZZLE_9_17		6
167722944501Smrg#define I915_BIT_6_SWIZZLE_9_10_17	7
167822944501Smrg
167922944501Smrgstruct drm_i915_gem_set_tiling {
168022944501Smrg	/** Handle of the buffer to have its tiling state updated */
168122944501Smrg	__u32 handle;
168222944501Smrg
168322944501Smrg	/**
168422944501Smrg	 * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
168522944501Smrg	 * I915_TILING_Y).
168622944501Smrg	 *
168722944501Smrg	 * This value is to be set on request, and will be updated by the
168822944501Smrg	 * kernel on successful return with the actual chosen tiling layout.
168922944501Smrg	 *
169022944501Smrg	 * The tiling mode may be demoted to I915_TILING_NONE when the system
169122944501Smrg	 * has bit 6 swizzling that can't be managed correctly by GEM.
169222944501Smrg	 *
169322944501Smrg	 * Buffer contents become undefined when changing tiling_mode.
169422944501Smrg	 */
169522944501Smrg	__u32 tiling_mode;
169622944501Smrg
169722944501Smrg	/**
169822944501Smrg	 * Stride in bytes for the object when in I915_TILING_X or
169922944501Smrg	 * I915_TILING_Y.
170022944501Smrg	 */
170122944501Smrg	__u32 stride;
170222944501Smrg
170322944501Smrg	/**
170422944501Smrg	 * Returned address bit 6 swizzling required for CPU access through
170522944501Smrg	 * mmap mapping.
170622944501Smrg	 */
170722944501Smrg	__u32 swizzle_mode;
170822944501Smrg};
170922944501Smrg
171022944501Smrgstruct drm_i915_gem_get_tiling {
171122944501Smrg	/** Handle of the buffer to get tiling state for. */
171222944501Smrg	__u32 handle;
171322944501Smrg
171422944501Smrg	/**
171522944501Smrg	 * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
171622944501Smrg	 * I915_TILING_Y).
171722944501Smrg	 */
171822944501Smrg	__u32 tiling_mode;
171922944501Smrg
172022944501Smrg	/**
172122944501Smrg	 * Returned address bit 6 swizzling required for CPU access through
172222944501Smrg	 * mmap mapping.
172322944501Smrg	 */
172422944501Smrg	__u32 swizzle_mode;
1725424e9256Smrg
1726424e9256Smrg	/**
1727424e9256Smrg	 * Returned address bit 6 swizzling required for CPU access through
1728424e9256Smrg	 * mmap mapping whilst bound.
1729424e9256Smrg	 */
1730424e9256Smrg	__u32 phys_swizzle_mode;
173122944501Smrg};
173222944501Smrg
173322944501Smrgstruct drm_i915_gem_get_aperture {
173422944501Smrg	/** Total size of the aperture used by i915_gem_execbuffer, in bytes */
173522944501Smrg	__u64 aper_size;
173622944501Smrg
173722944501Smrg	/**
173822944501Smrg	 * Available space in the aperture used by i915_gem_execbuffer, in
173922944501Smrg	 * bytes
174022944501Smrg	 */
174122944501Smrg	__u64 aper_available_size;
174222944501Smrg};
174322944501Smrg
174422944501Smrgstruct drm_i915_get_pipe_from_crtc_id {
174522944501Smrg	/** ID of CRTC being requested **/
174622944501Smrg	__u32 crtc_id;
174722944501Smrg
174822944501Smrg	/** pipe of requested CRTC **/
174922944501Smrg	__u32 pipe;
175022944501Smrg};
175122944501Smrg
175222944501Smrg#define I915_MADV_WILLNEED 0
175322944501Smrg#define I915_MADV_DONTNEED 1
175422944501Smrg#define __I915_MADV_PURGED 2 /* internal state */
175522944501Smrg
175622944501Smrgstruct drm_i915_gem_madvise {
175722944501Smrg	/** Handle of the buffer to change the backing store advice */
175822944501Smrg	__u32 handle;
175922944501Smrg
176022944501Smrg	/* Advice: either the buffer will be needed again in the near future,
176122944501Smrg	 *         or wont be and could be discarded under memory pressure.
176222944501Smrg	 */
176322944501Smrg	__u32 madv;
176422944501Smrg
176522944501Smrg	/** Whether the backing store still exists. */
176622944501Smrg	__u32 retained;
176722944501Smrg};
176822944501Smrg
176922944501Smrg/* flags */
177022944501Smrg#define I915_OVERLAY_TYPE_MASK 		0xff
177122944501Smrg#define I915_OVERLAY_YUV_PLANAR 	0x01
177222944501Smrg#define I915_OVERLAY_YUV_PACKED 	0x02
177322944501Smrg#define I915_OVERLAY_RGB		0x03
177422944501Smrg
177522944501Smrg#define I915_OVERLAY_DEPTH_MASK		0xff00
177622944501Smrg#define I915_OVERLAY_RGB24		0x1000
177722944501Smrg#define I915_OVERLAY_RGB16		0x2000
177822944501Smrg#define I915_OVERLAY_RGB15		0x3000
177922944501Smrg#define I915_OVERLAY_YUV422		0x0100
178022944501Smrg#define I915_OVERLAY_YUV411		0x0200
178122944501Smrg#define I915_OVERLAY_YUV420		0x0300
178222944501Smrg#define I915_OVERLAY_YUV410		0x0400
178322944501Smrg
178422944501Smrg#define I915_OVERLAY_SWAP_MASK		0xff0000
178522944501Smrg#define I915_OVERLAY_NO_SWAP		0x000000
178622944501Smrg#define I915_OVERLAY_UV_SWAP		0x010000
178722944501Smrg#define I915_OVERLAY_Y_SWAP		0x020000
178822944501Smrg#define I915_OVERLAY_Y_AND_UV_SWAP	0x030000
178922944501Smrg
179022944501Smrg#define I915_OVERLAY_FLAGS_MASK		0xff000000
179122944501Smrg#define I915_OVERLAY_ENABLE		0x01000000
179222944501Smrg
179322944501Smrgstruct drm_intel_overlay_put_image {
179422944501Smrg	/* various flags and src format description */
179522944501Smrg	__u32 flags;
179622944501Smrg	/* source picture description */
179722944501Smrg	__u32 bo_handle;
179822944501Smrg	/* stride values and offsets are in bytes, buffer relative */
179922944501Smrg	__u16 stride_Y; /* stride for packed formats */
180022944501Smrg	__u16 stride_UV;
180122944501Smrg	__u32 offset_Y; /* offset for packet formats */
180222944501Smrg	__u32 offset_U;
180322944501Smrg	__u32 offset_V;
180422944501Smrg	/* in pixels */
180522944501Smrg	__u16 src_width;
180622944501Smrg	__u16 src_height;
180722944501Smrg	/* to compensate the scaling factors for partially covered surfaces */
180822944501Smrg	__u16 src_scan_width;
180922944501Smrg	__u16 src_scan_height;
181022944501Smrg	/* output crtc description */
181122944501Smrg	__u32 crtc_id;
181222944501Smrg	__u16 dst_x;
181322944501Smrg	__u16 dst_y;
181422944501Smrg	__u16 dst_width;
181522944501Smrg	__u16 dst_height;
181622944501Smrg};
181722944501Smrg
181822944501Smrg/* flags */
181922944501Smrg#define I915_OVERLAY_UPDATE_ATTRS	(1<<0)
182022944501Smrg#define I915_OVERLAY_UPDATE_GAMMA	(1<<1)
1821fe517fc9Smrg#define I915_OVERLAY_DISABLE_DEST_COLORKEY	(1<<2)
182222944501Smrgstruct drm_intel_overlay_attrs {
182322944501Smrg	__u32 flags;
182422944501Smrg	__u32 color_key;
182522944501Smrg	__s32 brightness;
182622944501Smrg	__u32 contrast;
182722944501Smrg	__u32 saturation;
182822944501Smrg	__u32 gamma0;
182922944501Smrg	__u32 gamma1;
183022944501Smrg	__u32 gamma2;
183122944501Smrg	__u32 gamma3;
183222944501Smrg	__u32 gamma4;
183322944501Smrg	__u32 gamma5;
183422944501Smrg};
183522944501Smrg
1836e88f27b3Smrg/*
1837e88f27b3Smrg * Intel sprite handling
1838e88f27b3Smrg *
1839e88f27b3Smrg * Color keying works with a min/mask/max tuple.  Both source and destination
1840e88f27b3Smrg * color keying is allowed.
1841e88f27b3Smrg *
1842e88f27b3Smrg * Source keying:
1843e88f27b3Smrg * Sprite pixels within the min & max values, masked against the color channels
1844e88f27b3Smrg * specified in the mask field, will be transparent.  All other pixels will
1845e88f27b3Smrg * be displayed on top of the primary plane.  For RGB surfaces, only the min
1846e88f27b3Smrg * and mask fields will be used; ranged compares are not allowed.
1847e88f27b3Smrg *
1848e88f27b3Smrg * Destination keying:
1849e88f27b3Smrg * Primary plane pixels that match the min value, masked against the color
1850e88f27b3Smrg * channels specified in the mask field, will be replaced by corresponding
1851e88f27b3Smrg * pixels from the sprite plane.
1852e88f27b3Smrg *
1853e88f27b3Smrg * Note that source & destination keying are exclusive; only one can be
1854e88f27b3Smrg * active on a given plane.
1855e88f27b3Smrg */
1856e88f27b3Smrg
18576260e5d5Smrg#define I915_SET_COLORKEY_NONE		(1<<0) /* Deprecated. Instead set
18586260e5d5Smrg						* flags==0 to disable colorkeying.
18596260e5d5Smrg						*/
1860e88f27b3Smrg#define I915_SET_COLORKEY_DESTINATION	(1<<1)
1861e88f27b3Smrg#define I915_SET_COLORKEY_SOURCE	(1<<2)
1862e88f27b3Smrgstruct drm_intel_sprite_colorkey {
1863e88f27b3Smrg	__u32 plane_id;
1864e88f27b3Smrg	__u32 min_value;
1865e88f27b3Smrg	__u32 channel_mask;
1866e88f27b3Smrg	__u32 max_value;
1867e88f27b3Smrg	__u32 flags;
1868e88f27b3Smrg};
1869e88f27b3Smrg
1870e88f27b3Smrgstruct drm_i915_gem_wait {
1871e88f27b3Smrg	/** Handle of BO we shall wait on */
1872e88f27b3Smrg	__u32 bo_handle;
1873e88f27b3Smrg	__u32 flags;
1874e88f27b3Smrg	/** Number of nanoseconds to wait, Returns time remaining. */
1875e88f27b3Smrg	__s64 timeout_ns;
1876e88f27b3Smrg};
1877e88f27b3Smrg
1878e88f27b3Smrgstruct drm_i915_gem_context_create {
1879bf6cc7dcSmrg	__u32 ctx_id; /* output: id of new context*/
1880e88f27b3Smrg	__u32 pad;
1881e88f27b3Smrg};
1882e88f27b3Smrg
18833b115362Smrg/**
18843b115362Smrg * struct drm_i915_gem_context_create_ext - Structure for creating contexts.
18853b115362Smrg */
1886bf6cc7dcSmrgstruct drm_i915_gem_context_create_ext {
18873b115362Smrg	/** @ctx_id: Id of the created context (output) */
18883b115362Smrg	__u32 ctx_id;
18893b115362Smrg
18903b115362Smrg	/**
18913b115362Smrg	 * @flags: Supported flags are:
18923b115362Smrg	 *
18933b115362Smrg	 * I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS:
18943b115362Smrg	 *
18953b115362Smrg	 * Extensions may be appended to this structure and driver must check
18963b115362Smrg	 * for those. See @extensions.
18973b115362Smrg	 *
18983b115362Smrg	 * I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE
18993b115362Smrg	 *
19003b115362Smrg	 * Created context will have single timeline.
19013b115362Smrg	 */
1902bf6cc7dcSmrg	__u32 flags;
1903bf6cc7dcSmrg#define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS	(1u << 0)
19043b115362Smrg#define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE	(1u << 1)
1905bf6cc7dcSmrg#define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \
19063b115362Smrg	(-(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE << 1))
19073b115362Smrg
19083b115362Smrg	/**
19093b115362Smrg	 * @extensions: Zero-terminated chain of extensions.
19103b115362Smrg	 *
19113b115362Smrg	 * I915_CONTEXT_CREATE_EXT_SETPARAM:
19123b115362Smrg	 * Context parameter to set or query during context creation.
19133b115362Smrg	 * See struct drm_i915_gem_context_create_ext_setparam.
19143b115362Smrg	 *
19153b115362Smrg	 * I915_CONTEXT_CREATE_EXT_CLONE:
19163b115362Smrg	 * This extension has been removed. On the off chance someone somewhere
19173b115362Smrg	 * has attempted to use it, never re-use this extension number.
19183b115362Smrg	 */
1919bf6cc7dcSmrg	__u64 extensions;
19203b115362Smrg#define I915_CONTEXT_CREATE_EXT_SETPARAM 0
19213b115362Smrg#define I915_CONTEXT_CREATE_EXT_CLONE 1
1922bf6cc7dcSmrg};
1923bf6cc7dcSmrg
19243b115362Smrg/**
19253b115362Smrg * struct drm_i915_gem_context_param - Context parameter to set or query.
19263b115362Smrg */
1927bf6cc7dcSmrgstruct drm_i915_gem_context_param {
19283b115362Smrg	/** @ctx_id: Context id */
1929bf6cc7dcSmrg	__u32 ctx_id;
19303b115362Smrg
19313b115362Smrg	/** @size: Size of the parameter @value */
1932bf6cc7dcSmrg	__u32 size;
19333b115362Smrg
19343b115362Smrg	/** @param: Parameter to set or query */
1935bf6cc7dcSmrg	__u64 param;
1936bf6cc7dcSmrg#define I915_CONTEXT_PARAM_BAN_PERIOD	0x1
19373b115362Smrg/* I915_CONTEXT_PARAM_NO_ZEROMAP has been removed.  On the off chance
19383b115362Smrg * someone somewhere has attempted to use it, never re-use this context
19393b115362Smrg * param number.
19403b115362Smrg */
1941bf6cc7dcSmrg#define I915_CONTEXT_PARAM_NO_ZEROMAP	0x2
1942bf6cc7dcSmrg#define I915_CONTEXT_PARAM_GTT_SIZE	0x3
1943bf6cc7dcSmrg#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE	0x4
1944bf6cc7dcSmrg#define I915_CONTEXT_PARAM_BANNABLE	0x5
1945bf6cc7dcSmrg#define I915_CONTEXT_PARAM_PRIORITY	0x6
1946bf6cc7dcSmrg#define   I915_CONTEXT_MAX_USER_PRIORITY	1023 /* inclusive */
1947bf6cc7dcSmrg#define   I915_CONTEXT_DEFAULT_PRIORITY		0
1948bf6cc7dcSmrg#define   I915_CONTEXT_MIN_USER_PRIORITY	-1023 /* inclusive */
1949bf6cc7dcSmrg	/*
1950bf6cc7dcSmrg	 * When using the following param, value should be a pointer to
1951bf6cc7dcSmrg	 * drm_i915_gem_context_param_sseu.
1952bf6cc7dcSmrg	 */
1953bf6cc7dcSmrg#define I915_CONTEXT_PARAM_SSEU		0x7
1954bf6cc7dcSmrg
1955bf6cc7dcSmrg/*
1956bf6cc7dcSmrg * Not all clients may want to attempt automatic recover of a context after
1957bf6cc7dcSmrg * a hang (for example, some clients may only submit very small incremental
1958bf6cc7dcSmrg * batches relying on known logical state of previous batches which will never
1959bf6cc7dcSmrg * recover correctly and each attempt will hang), and so would prefer that
1960bf6cc7dcSmrg * the context is forever banned instead.
1961bf6cc7dcSmrg *
1962bf6cc7dcSmrg * If set to false (0), after a reset, subsequent (and in flight) rendering
1963bf6cc7dcSmrg * from this context is discarded, and the client will need to create a new
1964bf6cc7dcSmrg * context to use instead.
1965bf6cc7dcSmrg *
1966bf6cc7dcSmrg * If set to true (1), the kernel will automatically attempt to recover the
1967bf6cc7dcSmrg * context by skipping the hanging batch and executing the next batch starting
1968bf6cc7dcSmrg * from the default context state (discarding the incomplete logical context
1969bf6cc7dcSmrg * state lost due to the reset).
1970bf6cc7dcSmrg *
1971bf6cc7dcSmrg * On creation, all new contexts are marked as recoverable.
1972bf6cc7dcSmrg */
1973bf6cc7dcSmrg#define I915_CONTEXT_PARAM_RECOVERABLE	0x8
1974bf6cc7dcSmrg
19753b115362Smrg	/*
19763b115362Smrg	 * The id of the associated virtual memory address space (ppGTT) of
19773b115362Smrg	 * this context. Can be retrieved and passed to another context
19783b115362Smrg	 * (on the same fd) for both to use the same ppGTT and so share
19793b115362Smrg	 * address layouts, and avoid reloading the page tables on context
19803b115362Smrg	 * switches between themselves.
19813b115362Smrg	 *
19823b115362Smrg	 * See DRM_I915_GEM_VM_CREATE and DRM_I915_GEM_VM_DESTROY.
19833b115362Smrg	 */
19843b115362Smrg#define I915_CONTEXT_PARAM_VM		0x9
1985bf6cc7dcSmrg
19863b115362Smrg/*
19873b115362Smrg * I915_CONTEXT_PARAM_ENGINES:
1988bf6cc7dcSmrg *
19893b115362Smrg * Bind this context to operate on this subset of available engines. Henceforth,
19903b115362Smrg * the I915_EXEC_RING selector for DRM_IOCTL_I915_GEM_EXECBUFFER2 operates as
19913b115362Smrg * an index into this array of engines; I915_EXEC_DEFAULT selecting engine[0]
19923b115362Smrg * and upwards. Slots 0...N are filled in using the specified (class, instance).
19933b115362Smrg * Use
19943b115362Smrg *	engine_class: I915_ENGINE_CLASS_INVALID,
19953b115362Smrg *	engine_instance: I915_ENGINE_CLASS_INVALID_NONE
19963b115362Smrg * to specify a gap in the array that can be filled in later, e.g. by a
19973b115362Smrg * virtual engine used for load balancing.
1998bf6cc7dcSmrg *
19993b115362Smrg * Setting the number of engines bound to the context to 0, by passing a zero
20003b115362Smrg * sized argument, will revert back to default settings.
20013b115362Smrg *
20023b115362Smrg * See struct i915_context_param_engines.
20033b115362Smrg *
20043b115362Smrg * Extensions:
20053b115362Smrg *   i915_context_engines_load_balance (I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE)
20063b115362Smrg *   i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND)
20073b115362Smrg *   i915_context_engines_parallel_submit (I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT)
20083b115362Smrg */
20093b115362Smrg#define I915_CONTEXT_PARAM_ENGINES	0xa
20103b115362Smrg
20113b115362Smrg/*
20123b115362Smrg * I915_CONTEXT_PARAM_PERSISTENCE:
20133b115362Smrg *
20143b115362Smrg * Allow the context and active rendering to survive the process until
20153b115362Smrg * completion. Persistence allows fire-and-forget clients to queue up a
20163b115362Smrg * bunch of work, hand the output over to a display server and then quit.
20173b115362Smrg * If the context is marked as not persistent, upon closing (either via
20183b115362Smrg * an explicit DRM_I915_GEM_CONTEXT_DESTROY or implicitly from file closure
20193b115362Smrg * or process termination), the context and any outstanding requests will be
20203b115362Smrg * cancelled (and exported fences for cancelled requests marked as -EIO).
20213b115362Smrg *
20223b115362Smrg * By default, new contexts allow persistence.
20233b115362Smrg */
20243b115362Smrg#define I915_CONTEXT_PARAM_PERSISTENCE	0xb
20253b115362Smrg
20263b115362Smrg/* This API has been removed.  On the off chance someone somewhere has
20273b115362Smrg * attempted to use it, never re-use this context param number.
20283b115362Smrg */
20293b115362Smrg#define I915_CONTEXT_PARAM_RINGSIZE	0xc
20303b115362Smrg
20313b115362Smrg/*
20323b115362Smrg * I915_CONTEXT_PARAM_PROTECTED_CONTENT:
20333b115362Smrg *
20343b115362Smrg * Mark that the context makes use of protected content, which will result
20353b115362Smrg * in the context being invalidated when the protected content session is.
20363b115362Smrg * Given that the protected content session is killed on suspend, the device
20373b115362Smrg * is kept awake for the lifetime of a protected context, so the user should
20383b115362Smrg * make sure to dispose of them once done.
20393b115362Smrg * This flag can only be set at context creation time and, when set to true,
20403b115362Smrg * must be preceded by an explicit setting of I915_CONTEXT_PARAM_RECOVERABLE
20413b115362Smrg * to false. This flag can't be set to true in conjunction with setting the
20423b115362Smrg * I915_CONTEXT_PARAM_BANNABLE flag to false. Creation example:
20433b115362Smrg *
20443b115362Smrg * .. code-block:: C
20453b115362Smrg *
20463b115362Smrg *	struct drm_i915_gem_context_create_ext_setparam p_protected = {
20473b115362Smrg *		.base = {
20483b115362Smrg *			.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
20493b115362Smrg *		},
20503b115362Smrg *		.param = {
20513b115362Smrg *			.param = I915_CONTEXT_PARAM_PROTECTED_CONTENT,
20523b115362Smrg *			.value = 1,
20533b115362Smrg *		}
20543b115362Smrg *	};
20553b115362Smrg *	struct drm_i915_gem_context_create_ext_setparam p_norecover = {
20563b115362Smrg *		.base = {
20573b115362Smrg *			.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
20583b115362Smrg *			.next_extension = to_user_pointer(&p_protected),
20593b115362Smrg *		},
20603b115362Smrg *		.param = {
20613b115362Smrg *			.param = I915_CONTEXT_PARAM_RECOVERABLE,
20623b115362Smrg *			.value = 0,
20633b115362Smrg *		}
20643b115362Smrg *	};
20653b115362Smrg *	struct drm_i915_gem_context_create_ext create = {
20663b115362Smrg *		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
20673b115362Smrg *		.extensions = to_user_pointer(&p_norecover);
20683b115362Smrg *	};
20693b115362Smrg *
20703b115362Smrg *	ctx_id = gem_context_create_ext(drm_fd, &create);
20713b115362Smrg *
20723b115362Smrg * In addition to the normal failure cases, setting this flag during context
20733b115362Smrg * creation can result in the following errors:
20743b115362Smrg *
20753b115362Smrg * -ENODEV: feature not available
20763b115362Smrg * -EPERM: trying to mark a recoverable or not bannable context as protected
20773b115362Smrg */
20783b115362Smrg#define I915_CONTEXT_PARAM_PROTECTED_CONTENT    0xd
20793b115362Smrg/* Must be kept compact -- no holes and well documented */
20803b115362Smrg
20813b115362Smrg	/** @value: Context parameter value to be set or queried */
20823b115362Smrg	__u64 value;
20833b115362Smrg};
20843b115362Smrg
20853b115362Smrg/*
20863b115362Smrg * Context SSEU programming
20873b115362Smrg *
20883b115362Smrg * It may be necessary for either functional or performance reason to configure
20893b115362Smrg * a context to run with a reduced number of SSEU (where SSEU stands for Slice/
20903b115362Smrg * Sub-slice/EU).
20913b115362Smrg *
20923b115362Smrg * This is done by configuring SSEU configuration using the below
20933b115362Smrg * @struct drm_i915_gem_context_param_sseu for every supported engine which
2094bf6cc7dcSmrg * userspace intends to use.
2095bf6cc7dcSmrg *
2096bf6cc7dcSmrg * Not all GPUs or engines support this functionality in which case an error
2097bf6cc7dcSmrg * code -ENODEV will be returned.
2098bf6cc7dcSmrg *
2099bf6cc7dcSmrg * Also, flexibility of possible SSEU configuration permutations varies between
2100bf6cc7dcSmrg * GPU generations and software imposed limitations. Requesting such a
2101bf6cc7dcSmrg * combination will return an error code of -EINVAL.
2102bf6cc7dcSmrg *
2103bf6cc7dcSmrg * NOTE: When perf/OA is active the context's SSEU configuration is ignored in
2104bf6cc7dcSmrg * favour of a single global setting.
2105bf6cc7dcSmrg */
2106bf6cc7dcSmrgstruct drm_i915_gem_context_param_sseu {
2107bf6cc7dcSmrg	/*
2108bf6cc7dcSmrg	 * Engine class & instance to be configured or queried.
2109bf6cc7dcSmrg	 */
21103b115362Smrg	struct i915_engine_class_instance engine;
2111bf6cc7dcSmrg
2112bf6cc7dcSmrg	/*
21133b115362Smrg	 * Unknown flags must be cleared to zero.
2114bf6cc7dcSmrg	 */
2115bf6cc7dcSmrg	__u32 flags;
21163b115362Smrg#define I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX (1u << 0)
2117bf6cc7dcSmrg
2118bf6cc7dcSmrg	/*
2119bf6cc7dcSmrg	 * Mask of slices to enable for the context. Valid values are a subset
2120bf6cc7dcSmrg	 * of the bitmask value returned for I915_PARAM_SLICE_MASK.
2121bf6cc7dcSmrg	 */
2122bf6cc7dcSmrg	__u64 slice_mask;
2123bf6cc7dcSmrg
2124bf6cc7dcSmrg	/*
2125bf6cc7dcSmrg	 * Mask of subslices to enable for the context. Valid values are a
2126bf6cc7dcSmrg	 * subset of the bitmask value return by I915_PARAM_SUBSLICE_MASK.
2127bf6cc7dcSmrg	 */
2128bf6cc7dcSmrg	__u64 subslice_mask;
2129bf6cc7dcSmrg
2130bf6cc7dcSmrg	/*
2131bf6cc7dcSmrg	 * Minimum/Maximum number of EUs to enable per subslice for the
2132bf6cc7dcSmrg	 * context. min_eus_per_subslice must be inferior or equal to
2133bf6cc7dcSmrg	 * max_eus_per_subslice.
2134bf6cc7dcSmrg	 */
2135bf6cc7dcSmrg	__u16 min_eus_per_subslice;
2136bf6cc7dcSmrg	__u16 max_eus_per_subslice;
2137bf6cc7dcSmrg
2138bf6cc7dcSmrg	/*
2139bf6cc7dcSmrg	 * Unused for now. Must be cleared to zero.
2140bf6cc7dcSmrg	 */
2141bf6cc7dcSmrg	__u32 rsvd;
2142bf6cc7dcSmrg};
2143bf6cc7dcSmrg
21443b115362Smrg/**
21453b115362Smrg * DOC: Virtual Engine uAPI
21463b115362Smrg *
21473b115362Smrg * Virtual engine is a concept where userspace is able to configure a set of
21483b115362Smrg * physical engines, submit a batch buffer, and let the driver execute it on any
21493b115362Smrg * engine from the set as it sees fit.
21503b115362Smrg *
21513b115362Smrg * This is primarily useful on parts which have multiple instances of a same
21523b115362Smrg * class engine, like for example GT3+ Skylake parts with their two VCS engines.
21533b115362Smrg *
21543b115362Smrg * For instance userspace can enumerate all engines of a certain class using the
21553b115362Smrg * previously described `Engine Discovery uAPI`_. After that userspace can
21563b115362Smrg * create a GEM context with a placeholder slot for the virtual engine (using
21573b115362Smrg * `I915_ENGINE_CLASS_INVALID` and `I915_ENGINE_CLASS_INVALID_NONE` for class
21583b115362Smrg * and instance respectively) and finally using the
21593b115362Smrg * `I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE` extension place a virtual engine in
21603b115362Smrg * the same reserved slot.
21613b115362Smrg *
21623b115362Smrg * Example of creating a virtual engine and submitting a batch buffer to it:
21633b115362Smrg *
21643b115362Smrg * .. code-block:: C
21653b115362Smrg *
21663b115362Smrg * 	I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(virtual, 2) = {
21673b115362Smrg * 		.base.name = I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE,
21683b115362Smrg * 		.engine_index = 0, // Place this virtual engine into engine map slot 0
21693b115362Smrg * 		.num_siblings = 2,
21703b115362Smrg * 		.engines = { { I915_ENGINE_CLASS_VIDEO, 0 },
21713b115362Smrg * 			     { I915_ENGINE_CLASS_VIDEO, 1 }, },
21723b115362Smrg * 	};
21733b115362Smrg * 	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1) = {
21743b115362Smrg * 		.engines = { { I915_ENGINE_CLASS_INVALID,
21753b115362Smrg * 			       I915_ENGINE_CLASS_INVALID_NONE } },
21763b115362Smrg * 		.extensions = to_user_pointer(&virtual), // Chains after load_balance extension
21773b115362Smrg * 	};
21783b115362Smrg * 	struct drm_i915_gem_context_create_ext_setparam p_engines = {
21793b115362Smrg * 		.base = {
21803b115362Smrg * 			.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
21813b115362Smrg * 		},
21823b115362Smrg * 		.param = {
21833b115362Smrg * 			.param = I915_CONTEXT_PARAM_ENGINES,
21843b115362Smrg * 			.value = to_user_pointer(&engines),
21853b115362Smrg * 			.size = sizeof(engines),
21863b115362Smrg * 		},
21873b115362Smrg * 	};
21883b115362Smrg * 	struct drm_i915_gem_context_create_ext create = {
21893b115362Smrg * 		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
21903b115362Smrg * 		.extensions = to_user_pointer(&p_engines);
21913b115362Smrg * 	};
21923b115362Smrg *
21933b115362Smrg * 	ctx_id = gem_context_create_ext(drm_fd, &create);
21943b115362Smrg *
21953b115362Smrg * 	// Now we have created a GEM context with its engine map containing a
21963b115362Smrg * 	// single virtual engine. Submissions to this slot can go either to
21973b115362Smrg * 	// vcs0 or vcs1, depending on the load balancing algorithm used inside
21983b115362Smrg * 	// the driver. The load balancing is dynamic from one batch buffer to
21993b115362Smrg * 	// another and transparent to userspace.
22003b115362Smrg *
22013b115362Smrg * 	...
22023b115362Smrg * 	execbuf.rsvd1 = ctx_id;
22033b115362Smrg * 	execbuf.flags = 0; // Submits to index 0 which is the virtual engine
22043b115362Smrg * 	gem_execbuf(drm_fd, &execbuf);
22053b115362Smrg */
22063b115362Smrg
22073b115362Smrg/*
22083b115362Smrg * i915_context_engines_load_balance:
22093b115362Smrg *
22103b115362Smrg * Enable load balancing across this set of engines.
22113b115362Smrg *
22123b115362Smrg * Into the I915_EXEC_DEFAULT slot [0], a virtual engine is created that when
22133b115362Smrg * used will proxy the execbuffer request onto one of the set of engines
22143b115362Smrg * in such a way as to distribute the load evenly across the set.
22153b115362Smrg *
22163b115362Smrg * The set of engines must be compatible (e.g. the same HW class) as they
22173b115362Smrg * will share the same logical GPU context and ring.
22183b115362Smrg *
22193b115362Smrg * To intermix rendering with the virtual engine and direct rendering onto
22203b115362Smrg * the backing engines (bypassing the load balancing proxy), the context must
22213b115362Smrg * be defined to use a single timeline for all engines.
22223b115362Smrg */
22233b115362Smrgstruct i915_context_engines_load_balance {
22243b115362Smrg	struct i915_user_extension base;
22253b115362Smrg
22263b115362Smrg	__u16 engine_index;
22273b115362Smrg	__u16 num_siblings;
22283b115362Smrg	__u32 flags; /* all undefined flags must be zero */
22293b115362Smrg
22303b115362Smrg	__u64 mbz64; /* reserved for future use; must be zero */
22313b115362Smrg
22323b115362Smrg	struct i915_engine_class_instance engines[];
22333b115362Smrg} __attribute__((packed));
22343b115362Smrg
22353b115362Smrg#define I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(name__, N__) struct { \
22363b115362Smrg	struct i915_user_extension base; \
22373b115362Smrg	__u16 engine_index; \
22383b115362Smrg	__u16 num_siblings; \
22393b115362Smrg	__u32 flags; \
22403b115362Smrg	__u64 mbz64; \
22413b115362Smrg	struct i915_engine_class_instance engines[N__]; \
22423b115362Smrg} __attribute__((packed)) name__
22433b115362Smrg
22443b115362Smrg/*
22453b115362Smrg * i915_context_engines_bond:
22463b115362Smrg *
22473b115362Smrg * Constructed bonded pairs for execution within a virtual engine.
22483b115362Smrg *
22493b115362Smrg * All engines are equal, but some are more equal than others. Given
22503b115362Smrg * the distribution of resources in the HW, it may be preferable to run
22513b115362Smrg * a request on a given subset of engines in parallel to a request on a
22523b115362Smrg * specific engine. We enable this selection of engines within a virtual
22533b115362Smrg * engine by specifying bonding pairs, for any given master engine we will
22543b115362Smrg * only execute on one of the corresponding siblings within the virtual engine.
22553b115362Smrg *
22563b115362Smrg * To execute a request in parallel on the master engine and a sibling requires
22573b115362Smrg * coordination with a I915_EXEC_FENCE_SUBMIT.
22583b115362Smrg */
22593b115362Smrgstruct i915_context_engines_bond {
22603b115362Smrg	struct i915_user_extension base;
22613b115362Smrg
22623b115362Smrg	struct i915_engine_class_instance master;
22633b115362Smrg
22643b115362Smrg	__u16 virtual_index; /* index of virtual engine in ctx->engines[] */
22653b115362Smrg	__u16 num_bonds;
22663b115362Smrg
22673b115362Smrg	__u64 flags; /* all undefined flags must be zero */
22683b115362Smrg	__u64 mbz64[4]; /* reserved for future use; must be zero */
22693b115362Smrg
22703b115362Smrg	struct i915_engine_class_instance engines[];
22713b115362Smrg} __attribute__((packed));
22723b115362Smrg
22733b115362Smrg#define I915_DEFINE_CONTEXT_ENGINES_BOND(name__, N__) struct { \
22743b115362Smrg	struct i915_user_extension base; \
22753b115362Smrg	struct i915_engine_class_instance master; \
22763b115362Smrg	__u16 virtual_index; \
22773b115362Smrg	__u16 num_bonds; \
22783b115362Smrg	__u64 flags; \
22793b115362Smrg	__u64 mbz64[4]; \
22803b115362Smrg	struct i915_engine_class_instance engines[N__]; \
22813b115362Smrg} __attribute__((packed)) name__
22823b115362Smrg
22833b115362Smrg/**
22843b115362Smrg * struct i915_context_engines_parallel_submit - Configure engine for
22853b115362Smrg * parallel submission.
22863b115362Smrg *
22873b115362Smrg * Setup a slot in the context engine map to allow multiple BBs to be submitted
22883b115362Smrg * in a single execbuf IOCTL. Those BBs will then be scheduled to run on the GPU
22893b115362Smrg * in parallel. Multiple hardware contexts are created internally in the i915 to
22903b115362Smrg * run these BBs. Once a slot is configured for N BBs only N BBs can be
22913b115362Smrg * submitted in each execbuf IOCTL and this is implicit behavior e.g. The user
22923b115362Smrg * doesn't tell the execbuf IOCTL there are N BBs, the execbuf IOCTL knows how
22933b115362Smrg * many BBs there are based on the slot's configuration. The N BBs are the last
22943b115362Smrg * N buffer objects or first N if I915_EXEC_BATCH_FIRST is set.
22953b115362Smrg *
22963b115362Smrg * The default placement behavior is to create implicit bonds between each
22973b115362Smrg * context if each context maps to more than 1 physical engine (e.g. context is
22983b115362Smrg * a virtual engine). Also we only allow contexts of same engine class and these
22993b115362Smrg * contexts must be in logically contiguous order. Examples of the placement
23003b115362Smrg * behavior are described below. Lastly, the default is to not allow BBs to be
23013b115362Smrg * preempted mid-batch. Rather insert coordinated preemption points on all
23023b115362Smrg * hardware contexts between each set of BBs. Flags could be added in the future
23033b115362Smrg * to change both of these default behaviors.
23043b115362Smrg *
23053b115362Smrg * Returns -EINVAL if hardware context placement configuration is invalid or if
23063b115362Smrg * the placement configuration isn't supported on the platform / submission
23073b115362Smrg * interface.
23083b115362Smrg * Returns -ENODEV if extension isn't supported on the platform / submission
23093b115362Smrg * interface.
23103b115362Smrg *
23113b115362Smrg * .. code-block:: none
23123b115362Smrg *
23133b115362Smrg *	Examples syntax:
23143b115362Smrg *	CS[X] = generic engine of same class, logical instance X
23153b115362Smrg *	INVALID = I915_ENGINE_CLASS_INVALID, I915_ENGINE_CLASS_INVALID_NONE
23163b115362Smrg *
23173b115362Smrg *	Example 1 pseudo code:
23183b115362Smrg *	set_engines(INVALID)
23193b115362Smrg *	set_parallel(engine_index=0, width=2, num_siblings=1,
23203b115362Smrg *		     engines=CS[0],CS[1])
23213b115362Smrg *
23223b115362Smrg *	Results in the following valid placement:
23233b115362Smrg *	CS[0], CS[1]
23243b115362Smrg *
23253b115362Smrg *	Example 2 pseudo code:
23263b115362Smrg *	set_engines(INVALID)
23273b115362Smrg *	set_parallel(engine_index=0, width=2, num_siblings=2,
23283b115362Smrg *		     engines=CS[0],CS[2],CS[1],CS[3])
23293b115362Smrg *
23303b115362Smrg *	Results in the following valid placements:
23313b115362Smrg *	CS[0], CS[1]
23323b115362Smrg *	CS[2], CS[3]
23333b115362Smrg *
23343b115362Smrg *	This can be thought of as two virtual engines, each containing two
23353b115362Smrg *	engines thereby making a 2D array. However, there are bonds tying the
23363b115362Smrg *	entries together and placing restrictions on how they can be scheduled.
23373b115362Smrg *	Specifically, the scheduler can choose only vertical columns from the 2D
23383b115362Smrg *	array. That is, CS[0] is bonded to CS[1] and CS[2] to CS[3]. So if the
23393b115362Smrg *	scheduler wants to submit to CS[0], it must also choose CS[1] and vice
23403b115362Smrg *	versa. Same for CS[2] requires also using CS[3].
23413b115362Smrg *	VE[0] = CS[0], CS[2]
23423b115362Smrg *	VE[1] = CS[1], CS[3]
23433b115362Smrg *
23443b115362Smrg *	Example 3 pseudo code:
23453b115362Smrg *	set_engines(INVALID)
23463b115362Smrg *	set_parallel(engine_index=0, width=2, num_siblings=2,
23473b115362Smrg *		     engines=CS[0],CS[1],CS[1],CS[3])
23483b115362Smrg *
23493b115362Smrg *	Results in the following valid and invalid placements:
23503b115362Smrg *	CS[0], CS[1]
23513b115362Smrg *	CS[1], CS[3] - Not logically contiguous, return -EINVAL
23523b115362Smrg */
23533b115362Smrgstruct i915_context_engines_parallel_submit {
23543b115362Smrg	/**
23553b115362Smrg	 * @base: base user extension.
23563b115362Smrg	 */
23573b115362Smrg	struct i915_user_extension base;
23583b115362Smrg
23593b115362Smrg	/**
23603b115362Smrg	 * @engine_index: slot for parallel engine
23613b115362Smrg	 */
23623b115362Smrg	__u16 engine_index;
23633b115362Smrg
23643b115362Smrg	/**
23653b115362Smrg	 * @width: number of contexts per parallel engine or in other words the
23663b115362Smrg	 * number of batches in each submission
23673b115362Smrg	 */
23683b115362Smrg	__u16 width;
23693b115362Smrg
23703b115362Smrg	/**
23713b115362Smrg	 * @num_siblings: number of siblings per context or in other words the
23723b115362Smrg	 * number of possible placements for each submission
23733b115362Smrg	 */
23743b115362Smrg	__u16 num_siblings;
23753b115362Smrg
23763b115362Smrg	/**
23773b115362Smrg	 * @mbz16: reserved for future use; must be zero
23783b115362Smrg	 */
23793b115362Smrg	__u16 mbz16;
23803b115362Smrg
23813b115362Smrg	/**
23823b115362Smrg	 * @flags: all undefined flags must be zero, currently not defined flags
23833b115362Smrg	 */
23843b115362Smrg	__u64 flags;
23853b115362Smrg
23863b115362Smrg	/**
23873b115362Smrg	 * @mbz64: reserved for future use; must be zero
23883b115362Smrg	 */
23893b115362Smrg	__u64 mbz64[3];
23903b115362Smrg
23913b115362Smrg	/**
23923b115362Smrg	 * @engines: 2-d array of engine instances to configure parallel engine
23933b115362Smrg	 *
23943b115362Smrg	 * length = width (i) * num_siblings (j)
23953b115362Smrg	 * index = j + i * num_siblings
23963b115362Smrg	 */
23973b115362Smrg	struct i915_engine_class_instance engines[];
23983b115362Smrg
23993b115362Smrg} __attribute__((packed));
24003b115362Smrg
24013b115362Smrg#define I915_DEFINE_CONTEXT_ENGINES_PARALLEL_SUBMIT(name__, N__) struct { \
24023b115362Smrg	struct i915_user_extension base; \
24033b115362Smrg	__u16 engine_index; \
24043b115362Smrg	__u16 width; \
24053b115362Smrg	__u16 num_siblings; \
24063b115362Smrg	__u16 mbz16; \
24073b115362Smrg	__u64 flags; \
24083b115362Smrg	__u64 mbz64[3]; \
24093b115362Smrg	struct i915_engine_class_instance engines[N__]; \
24103b115362Smrg} __attribute__((packed)) name__
24113b115362Smrg
24123b115362Smrg/**
24133b115362Smrg * DOC: Context Engine Map uAPI
24143b115362Smrg *
24153b115362Smrg * Context engine map is a new way of addressing engines when submitting batch-
24163b115362Smrg * buffers, replacing the existing way of using identifiers like `I915_EXEC_BLT`
24173b115362Smrg * inside the flags field of `struct drm_i915_gem_execbuffer2`.
24183b115362Smrg *
24193b115362Smrg * To use it created GEM contexts need to be configured with a list of engines
24203b115362Smrg * the user is intending to submit to. This is accomplished using the
24213b115362Smrg * `I915_CONTEXT_PARAM_ENGINES` parameter and `struct
24223b115362Smrg * i915_context_param_engines`.
24233b115362Smrg *
24243b115362Smrg * For such contexts the `I915_EXEC_RING_MASK` field becomes an index into the
24253b115362Smrg * configured map.
24263b115362Smrg *
24273b115362Smrg * Example of creating such context and submitting against it:
24283b115362Smrg *
24293b115362Smrg * .. code-block:: C
24303b115362Smrg *
24313b115362Smrg * 	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = {
24323b115362Smrg * 		.engines = { { I915_ENGINE_CLASS_RENDER, 0 },
24333b115362Smrg * 			     { I915_ENGINE_CLASS_COPY, 0 } }
24343b115362Smrg * 	};
24353b115362Smrg * 	struct drm_i915_gem_context_create_ext_setparam p_engines = {
24363b115362Smrg * 		.base = {
24373b115362Smrg * 			.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
24383b115362Smrg * 		},
24393b115362Smrg * 		.param = {
24403b115362Smrg * 			.param = I915_CONTEXT_PARAM_ENGINES,
24413b115362Smrg * 			.value = to_user_pointer(&engines),
24423b115362Smrg * 			.size = sizeof(engines),
24433b115362Smrg * 		},
24443b115362Smrg * 	};
24453b115362Smrg * 	struct drm_i915_gem_context_create_ext create = {
24463b115362Smrg * 		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
24473b115362Smrg * 		.extensions = to_user_pointer(&p_engines);
24483b115362Smrg * 	};
24493b115362Smrg *
24503b115362Smrg * 	ctx_id = gem_context_create_ext(drm_fd, &create);
24513b115362Smrg *
24523b115362Smrg * 	// We have now created a GEM context with two engines in the map:
24533b115362Smrg * 	// Index 0 points to rcs0 while index 1 points to bcs0. Other engines
24543b115362Smrg * 	// will not be accessible from this context.
24553b115362Smrg *
24563b115362Smrg * 	...
24573b115362Smrg * 	execbuf.rsvd1 = ctx_id;
24583b115362Smrg * 	execbuf.flags = 0; // Submits to index 0, which is rcs0 for this context
24593b115362Smrg * 	gem_execbuf(drm_fd, &execbuf);
24603b115362Smrg *
24613b115362Smrg * 	...
24623b115362Smrg * 	execbuf.rsvd1 = ctx_id;
24633b115362Smrg * 	execbuf.flags = 1; // Submits to index 0, which is bcs0 for this context
24643b115362Smrg * 	gem_execbuf(drm_fd, &execbuf);
24653b115362Smrg */
24663b115362Smrg
24673b115362Smrgstruct i915_context_param_engines {
24683b115362Smrg	__u64 extensions; /* linked chain of extension blocks, 0 terminates */
24693b115362Smrg#define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */
24703b115362Smrg#define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */
24713b115362Smrg#define I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT 2 /* see i915_context_engines_parallel_submit */
24723b115362Smrg	struct i915_engine_class_instance engines[0];
24733b115362Smrg} __attribute__((packed));
24743b115362Smrg
24753b115362Smrg#define I915_DEFINE_CONTEXT_PARAM_ENGINES(name__, N__) struct { \
24763b115362Smrg	__u64 extensions; \
24773b115362Smrg	struct i915_engine_class_instance engines[N__]; \
24783b115362Smrg} __attribute__((packed)) name__
24793b115362Smrg
24803b115362Smrg/**
24813b115362Smrg * struct drm_i915_gem_context_create_ext_setparam - Context parameter
24823b115362Smrg * to set or query during context creation.
24833b115362Smrg */
2484bf6cc7dcSmrgstruct drm_i915_gem_context_create_ext_setparam {
24853b115362Smrg	/** @base: Extension link. See struct i915_user_extension. */
2486bf6cc7dcSmrg	struct i915_user_extension base;
24873b115362Smrg
24883b115362Smrg	/**
24893b115362Smrg	 * @param: Context parameter to set or query.
24903b115362Smrg	 * See struct drm_i915_gem_context_param.
24913b115362Smrg	 */
2492bf6cc7dcSmrg	struct drm_i915_gem_context_param param;
2493bf6cc7dcSmrg};
2494bf6cc7dcSmrg
2495e88f27b3Smrgstruct drm_i915_gem_context_destroy {
2496e88f27b3Smrg	__u32 ctx_id;
2497e88f27b3Smrg	__u32 pad;
2498e88f27b3Smrg};
2499e88f27b3Smrg
25003b115362Smrg/**
25013b115362Smrg * struct drm_i915_gem_vm_control - Structure to create or destroy VM.
25023b115362Smrg *
2503bf6cc7dcSmrg * DRM_I915_GEM_VM_CREATE -
2504bf6cc7dcSmrg *
2505bf6cc7dcSmrg * Create a new virtual memory address space (ppGTT) for use within a context
2506bf6cc7dcSmrg * on the same file. Extensions can be provided to configure exactly how the
2507bf6cc7dcSmrg * address space is setup upon creation.
2508bf6cc7dcSmrg *
2509bf6cc7dcSmrg * The id of new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM is
2510bf6cc7dcSmrg * returned in the outparam @id.
2511bf6cc7dcSmrg *
2512bf6cc7dcSmrg * An extension chain maybe provided, starting with @extensions, and terminated
2513bf6cc7dcSmrg * by the @next_extension being 0. Currently, no extensions are defined.
2514bf6cc7dcSmrg *
2515bf6cc7dcSmrg * DRM_I915_GEM_VM_DESTROY -
2516bf6cc7dcSmrg *
25173b115362Smrg * Destroys a previously created VM id, specified in @vm_id.
2518bf6cc7dcSmrg *
2519bf6cc7dcSmrg * No extensions or flags are allowed currently, and so must be zero.
2520bf6cc7dcSmrg */
2521bf6cc7dcSmrgstruct drm_i915_gem_vm_control {
25223b115362Smrg	/** @extensions: Zero-terminated chain of extensions. */
2523bf6cc7dcSmrg	__u64 extensions;
25243b115362Smrg
25253b115362Smrg	/** @flags: reserved for future usage, currently MBZ */
2526bf6cc7dcSmrg	__u32 flags;
25273b115362Smrg
25283b115362Smrg	/** @vm_id: Id of the VM created or to be destroyed */
2529bf6cc7dcSmrg	__u32 vm_id;
2530bf6cc7dcSmrg};
2531bf6cc7dcSmrg
2532e88f27b3Smrgstruct drm_i915_reg_read {
2533fe517fc9Smrg	/*
2534fe517fc9Smrg	 * Register offset.
2535fe517fc9Smrg	 * For 64bit wide registers where the upper 32bits don't immediately
2536fe517fc9Smrg	 * follow the lower 32bits, the offset of the lower 32bits must
2537fe517fc9Smrg	 * be specified
2538fe517fc9Smrg	 */
2539e88f27b3Smrg	__u64 offset;
25406260e5d5Smrg#define I915_REG_READ_8B_WA (1ul << 0)
25416260e5d5Smrg
2542e88f27b3Smrg	__u64 val; /* Return value */
2543e88f27b3Smrg};
2544bf6cc7dcSmrg
2545fe517fc9Smrg/* Known registers:
2546fe517fc9Smrg *
2547fe517fc9Smrg * Render engine timestamp - 0x2358 + 64bit - gen7+
2548fe517fc9Smrg * - Note this register returns an invalid value if using the default
25496260e5d5Smrg *   single instruction 8byte read, in order to workaround that pass
25506260e5d5Smrg *   flag I915_REG_READ_8B_WA in offset field.
2551fe517fc9Smrg *
2552fe517fc9Smrg */
2553e88f27b3Smrg
2554e88f27b3Smrgstruct drm_i915_reset_stats {
2555e88f27b3Smrg	__u32 ctx_id;
2556e88f27b3Smrg	__u32 flags;
2557e88f27b3Smrg
2558e88f27b3Smrg	/* All resets since boot/module reload, for all contexts */
2559e88f27b3Smrg	__u32 reset_count;
2560e88f27b3Smrg
2561e88f27b3Smrg	/* Number of batches lost when active in GPU, for this context */
2562e88f27b3Smrg	__u32 batch_active;
2563e88f27b3Smrg
2564e88f27b3Smrg	/* Number of batches lost pending for execution, for this context */
2565e88f27b3Smrg	__u32 batch_pending;
2566e88f27b3Smrg
2567e88f27b3Smrg	__u32 pad;
2568e88f27b3Smrg};
2569e88f27b3Smrg
25703b115362Smrg/**
25713b115362Smrg * struct drm_i915_gem_userptr - Create GEM object from user allocated memory.
25723b115362Smrg *
25733b115362Smrg * Userptr objects have several restrictions on what ioctls can be used with the
25743b115362Smrg * object handle.
25753b115362Smrg */
2576baaff307Smrgstruct drm_i915_gem_userptr {
25773b115362Smrg	/**
25783b115362Smrg	 * @user_ptr: The pointer to the allocated memory.
25793b115362Smrg	 *
25803b115362Smrg	 * Needs to be aligned to PAGE_SIZE.
25813b115362Smrg	 */
2582baaff307Smrg	__u64 user_ptr;
25833b115362Smrg
25843b115362Smrg	/**
25853b115362Smrg	 * @user_size:
25863b115362Smrg	 *
25873b115362Smrg	 * The size in bytes for the allocated memory. This will also become the
25883b115362Smrg	 * object size.
25893b115362Smrg	 *
25903b115362Smrg	 * Needs to be aligned to PAGE_SIZE, and should be at least PAGE_SIZE,
25913b115362Smrg	 * or larger.
25923b115362Smrg	 */
2593baaff307Smrg	__u64 user_size;
25943b115362Smrg
25953b115362Smrg	/**
25963b115362Smrg	 * @flags:
25973b115362Smrg	 *
25983b115362Smrg	 * Supported flags:
25993b115362Smrg	 *
26003b115362Smrg	 * I915_USERPTR_READ_ONLY:
26013b115362Smrg	 *
26023b115362Smrg	 * Mark the object as readonly, this also means GPU access can only be
26033b115362Smrg	 * readonly. This is only supported on HW which supports readonly access
26043b115362Smrg	 * through the GTT. If the HW can't support readonly access, an error is
26053b115362Smrg	 * returned.
26063b115362Smrg	 *
26073b115362Smrg	 * I915_USERPTR_PROBE:
26083b115362Smrg	 *
26093b115362Smrg	 * Probe the provided @user_ptr range and validate that the @user_ptr is
26103b115362Smrg	 * indeed pointing to normal memory and that the range is also valid.
26113b115362Smrg	 * For example if some garbage address is given to the kernel, then this
26123b115362Smrg	 * should complain.
26133b115362Smrg	 *
26143b115362Smrg	 * Returns -EFAULT if the probe failed.
26153b115362Smrg	 *
26163b115362Smrg	 * Note that this doesn't populate the backing pages, and also doesn't
26173b115362Smrg	 * guarantee that the object will remain valid when the object is
26183b115362Smrg	 * eventually used.
26193b115362Smrg	 *
26203b115362Smrg	 * The kernel supports this feature if I915_PARAM_HAS_USERPTR_PROBE
26213b115362Smrg	 * returns a non-zero value.
26223b115362Smrg	 *
26233b115362Smrg	 * I915_USERPTR_UNSYNCHRONIZED:
26243b115362Smrg	 *
26253b115362Smrg	 * NOT USED. Setting this flag will result in an error.
26263b115362Smrg	 */
2627baaff307Smrg	__u32 flags;
2628baaff307Smrg#define I915_USERPTR_READ_ONLY 0x1
26293b115362Smrg#define I915_USERPTR_PROBE 0x2
2630baaff307Smrg#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
2631baaff307Smrg	/**
26323b115362Smrg	 * @handle: Returned handle for the object.
2633424e9256Smrg	 *
2634424e9256Smrg	 * Object handles are nonzero.
2635424e9256Smrg	 */
2636baaff307Smrg	__u32 handle;
2637baaff307Smrg};
2638baaff307Smrg
26392ee35494Smrgenum drm_i915_oa_format {
26406260e5d5Smrg	I915_OA_FORMAT_A13 = 1,	    /* HSW only */
26416260e5d5Smrg	I915_OA_FORMAT_A29,	    /* HSW only */
26426260e5d5Smrg	I915_OA_FORMAT_A13_B8_C8,   /* HSW only */
26436260e5d5Smrg	I915_OA_FORMAT_B4_C8,	    /* HSW only */
26446260e5d5Smrg	I915_OA_FORMAT_A45_B8_C8,   /* HSW only */
26456260e5d5Smrg	I915_OA_FORMAT_B4_C8_A16,   /* HSW only */
26466260e5d5Smrg	I915_OA_FORMAT_C4_B8,	    /* HSW+ */
26476260e5d5Smrg
26486260e5d5Smrg	/* Gen8+ */
26496260e5d5Smrg	I915_OA_FORMAT_A12,
26506260e5d5Smrg	I915_OA_FORMAT_A12_B8_C8,
26516260e5d5Smrg	I915_OA_FORMAT_A32u40_A4u32_B8_C8,
26522ee35494Smrg
26532ee35494Smrg	I915_OA_FORMAT_MAX	    /* non-ABI */
26542ee35494Smrg};
26552ee35494Smrg
26562ee35494Smrgenum drm_i915_perf_property_id {
26572ee35494Smrg	/**
26582ee35494Smrg	 * Open the stream for a specific context handle (as used with
26592ee35494Smrg	 * execbuffer2). A stream opened for a specific context this way
26602ee35494Smrg	 * won't typically require root privileges.
26613b115362Smrg	 *
26623b115362Smrg	 * This property is available in perf revision 1.
26632ee35494Smrg	 */
26642ee35494Smrg	DRM_I915_PERF_PROP_CTX_HANDLE = 1,
26652ee35494Smrg
26662ee35494Smrg	/**
26672ee35494Smrg	 * A value of 1 requests the inclusion of raw OA unit reports as
26682ee35494Smrg	 * part of stream samples.
26693b115362Smrg	 *
26703b115362Smrg	 * This property is available in perf revision 1.
26712ee35494Smrg	 */
26722ee35494Smrg	DRM_I915_PERF_PROP_SAMPLE_OA,
26732ee35494Smrg
26742ee35494Smrg	/**
26752ee35494Smrg	 * The value specifies which set of OA unit metrics should be
26763b115362Smrg	 * configured, defining the contents of any OA unit reports.
26773b115362Smrg	 *
26783b115362Smrg	 * This property is available in perf revision 1.
26792ee35494Smrg	 */
26802ee35494Smrg	DRM_I915_PERF_PROP_OA_METRICS_SET,
26812ee35494Smrg
26822ee35494Smrg	/**
26832ee35494Smrg	 * The value specifies the size and layout of OA unit reports.
26843b115362Smrg	 *
26853b115362Smrg	 * This property is available in perf revision 1.
26862ee35494Smrg	 */
26872ee35494Smrg	DRM_I915_PERF_PROP_OA_FORMAT,
26882ee35494Smrg
26892ee35494Smrg	/**
26902ee35494Smrg	 * Specifying this property implicitly requests periodic OA unit
26912ee35494Smrg	 * sampling and (at least on Haswell) the sampling frequency is derived
26922ee35494Smrg	 * from this exponent as follows:
26932ee35494Smrg	 *
26942ee35494Smrg	 *   80ns * 2^(period_exponent + 1)
26953b115362Smrg	 *
26963b115362Smrg	 * This property is available in perf revision 1.
26972ee35494Smrg	 */
26982ee35494Smrg	DRM_I915_PERF_PROP_OA_EXPONENT,
26992ee35494Smrg
27003b115362Smrg	/**
27013b115362Smrg	 * Specifying this property is only valid when specify a context to
27023b115362Smrg	 * filter with DRM_I915_PERF_PROP_CTX_HANDLE. Specifying this property
27033b115362Smrg	 * will hold preemption of the particular context we want to gather
27043b115362Smrg	 * performance data about. The execbuf2 submissions must include a
27053b115362Smrg	 * drm_i915_gem_execbuffer_ext_perf parameter for this to apply.
27063b115362Smrg	 *
27073b115362Smrg	 * This property is available in perf revision 3.
27083b115362Smrg	 */
27093b115362Smrg	DRM_I915_PERF_PROP_HOLD_PREEMPTION,
27103b115362Smrg
27113b115362Smrg	/**
27123b115362Smrg	 * Specifying this pins all contexts to the specified SSEU power
27133b115362Smrg	 * configuration for the duration of the recording.
27143b115362Smrg	 *
27153b115362Smrg	 * This parameter's value is a pointer to a struct
27163b115362Smrg	 * drm_i915_gem_context_param_sseu.
27173b115362Smrg	 *
27183b115362Smrg	 * This property is available in perf revision 4.
27193b115362Smrg	 */
27203b115362Smrg	DRM_I915_PERF_PROP_GLOBAL_SSEU,
27213b115362Smrg
27223b115362Smrg	/**
27233b115362Smrg	 * This optional parameter specifies the timer interval in nanoseconds
27243b115362Smrg	 * at which the i915 driver will check the OA buffer for available data.
27253b115362Smrg	 * Minimum allowed value is 100 microseconds. A default value is used by
27263b115362Smrg	 * the driver if this parameter is not specified. Note that larger timer
27273b115362Smrg	 * values will reduce cpu consumption during OA perf captures. However,
27283b115362Smrg	 * excessively large values would potentially result in OA buffer
27293b115362Smrg	 * overwrites as captures reach end of the OA buffer.
27303b115362Smrg	 *
27313b115362Smrg	 * This property is available in perf revision 5.
27323b115362Smrg	 */
27333b115362Smrg	DRM_I915_PERF_PROP_POLL_OA_PERIOD,
27343b115362Smrg
27352ee35494Smrg	DRM_I915_PERF_PROP_MAX /* non-ABI */
27362ee35494Smrg};
27372ee35494Smrg
27382ee35494Smrgstruct drm_i915_perf_open_param {
27392ee35494Smrg	__u32 flags;
27402ee35494Smrg#define I915_PERF_FLAG_FD_CLOEXEC	(1<<0)
27412ee35494Smrg#define I915_PERF_FLAG_FD_NONBLOCK	(1<<1)
27422ee35494Smrg#define I915_PERF_FLAG_DISABLED		(1<<2)
27432ee35494Smrg
27442ee35494Smrg	/** The number of u64 (id, value) pairs */
27452ee35494Smrg	__u32 num_properties;
27462ee35494Smrg
27472ee35494Smrg	/**
27482ee35494Smrg	 * Pointer to array of u64 (id, value) pairs configuring the stream
27492ee35494Smrg	 * to open.
27502ee35494Smrg	 */
27512ee35494Smrg	__u64 properties_ptr;
27522ee35494Smrg};
27532ee35494Smrg
27543b115362Smrg/*
27552ee35494Smrg * Enable data capture for a stream that was either opened in a disabled state
27562ee35494Smrg * via I915_PERF_FLAG_DISABLED or was later disabled via
27572ee35494Smrg * I915_PERF_IOCTL_DISABLE.
27582ee35494Smrg *
27592ee35494Smrg * It is intended to be cheaper to disable and enable a stream than it may be
27602ee35494Smrg * to close and re-open a stream with the same configuration.
27612ee35494Smrg *
27622ee35494Smrg * It's undefined whether any pending data for the stream will be lost.
27633b115362Smrg *
27643b115362Smrg * This ioctl is available in perf revision 1.
27652ee35494Smrg */
27662ee35494Smrg#define I915_PERF_IOCTL_ENABLE	_IO('i', 0x0)
27672ee35494Smrg
27683b115362Smrg/*
27692ee35494Smrg * Disable data capture for a stream.
27702ee35494Smrg *
27712ee35494Smrg * It is an error to try and read a stream that is disabled.
27723b115362Smrg *
27733b115362Smrg * This ioctl is available in perf revision 1.
27742ee35494Smrg */
27752ee35494Smrg#define I915_PERF_IOCTL_DISABLE	_IO('i', 0x1)
27762ee35494Smrg
27773b115362Smrg/*
27783b115362Smrg * Change metrics_set captured by a stream.
27793b115362Smrg *
27803b115362Smrg * If the stream is bound to a specific context, the configuration change
27813b115362Smrg * will performed __inline__ with that context such that it takes effect before
27823b115362Smrg * the next execbuf submission.
27833b115362Smrg *
27843b115362Smrg * Returns the previously bound metrics set id, or a negative error code.
27853b115362Smrg *
27863b115362Smrg * This ioctl is available in perf revision 2.
27873b115362Smrg */
27883b115362Smrg#define I915_PERF_IOCTL_CONFIG	_IO('i', 0x2)
27893b115362Smrg
27903b115362Smrg/*
27912ee35494Smrg * Common to all i915 perf records
27922ee35494Smrg */
27932ee35494Smrgstruct drm_i915_perf_record_header {
27942ee35494Smrg	__u32 type;
27952ee35494Smrg	__u16 pad;
27962ee35494Smrg	__u16 size;
27972ee35494Smrg};
27982ee35494Smrg
27992ee35494Smrgenum drm_i915_perf_record_type {
28002ee35494Smrg
28012ee35494Smrg	/**
28022ee35494Smrg	 * Samples are the work horse record type whose contents are extensible
28032ee35494Smrg	 * and defined when opening an i915 perf stream based on the given
28042ee35494Smrg	 * properties.
28052ee35494Smrg	 *
28062ee35494Smrg	 * Boolean properties following the naming convention
28072ee35494Smrg	 * DRM_I915_PERF_SAMPLE_xyz_PROP request the inclusion of 'xyz' data in
28082ee35494Smrg	 * every sample.
28092ee35494Smrg	 *
28102ee35494Smrg	 * The order of these sample properties given by userspace has no
28112ee35494Smrg	 * affect on the ordering of data within a sample. The order is
28122ee35494Smrg	 * documented here.
28132ee35494Smrg	 *
28142ee35494Smrg	 * struct {
28152ee35494Smrg	 *     struct drm_i915_perf_record_header header;
28162ee35494Smrg	 *
28172ee35494Smrg	 *     { u32 oa_report[]; } && DRM_I915_PERF_PROP_SAMPLE_OA
28182ee35494Smrg	 * };
28192ee35494Smrg	 */
28202ee35494Smrg	DRM_I915_PERF_RECORD_SAMPLE = 1,
28212ee35494Smrg
28222ee35494Smrg	/*
28232ee35494Smrg	 * Indicates that one or more OA reports were not written by the
28242ee35494Smrg	 * hardware. This can happen for example if an MI_REPORT_PERF_COUNT
28252ee35494Smrg	 * command collides with periodic sampling - which would be more likely
28262ee35494Smrg	 * at higher sampling frequencies.
28272ee35494Smrg	 */
28282ee35494Smrg	DRM_I915_PERF_RECORD_OA_REPORT_LOST = 2,
28292ee35494Smrg
28302ee35494Smrg	/**
28312ee35494Smrg	 * An error occurred that resulted in all pending OA reports being lost.
28322ee35494Smrg	 */
28332ee35494Smrg	DRM_I915_PERF_RECORD_OA_BUFFER_LOST = 3,
28342ee35494Smrg
28352ee35494Smrg	DRM_I915_PERF_RECORD_MAX /* non-ABI */
28362ee35494Smrg};
28372ee35494Smrg
28386260e5d5Smrg/**
28393b115362Smrg * struct drm_i915_perf_oa_config
28403b115362Smrg *
28416260e5d5Smrg * Structure to upload perf dynamic configuration into the kernel.
28426260e5d5Smrg */
28436260e5d5Smrgstruct drm_i915_perf_oa_config {
28443b115362Smrg	/**
28453b115362Smrg	 * @uuid:
28463b115362Smrg	 *
28473b115362Smrg	 * String formatted like "%\08x-%\04x-%\04x-%\04x-%\012x"
28483b115362Smrg	 */
28496260e5d5Smrg	char uuid[36];
28506260e5d5Smrg
28513b115362Smrg	/**
28523b115362Smrg	 * @n_mux_regs:
28533b115362Smrg	 *
28543b115362Smrg	 * Number of mux regs in &mux_regs_ptr.
28553b115362Smrg	 */
28566260e5d5Smrg	__u32 n_mux_regs;
28573b115362Smrg
28583b115362Smrg	/**
28593b115362Smrg	 * @n_boolean_regs:
28603b115362Smrg	 *
28613b115362Smrg	 * Number of boolean regs in &boolean_regs_ptr.
28623b115362Smrg	 */
28636260e5d5Smrg	__u32 n_boolean_regs;
28643b115362Smrg
28653b115362Smrg	/**
28663b115362Smrg	 * @n_flex_regs:
28673b115362Smrg	 *
28683b115362Smrg	 * Number of flex regs in &flex_regs_ptr.
28693b115362Smrg	 */
28706260e5d5Smrg	__u32 n_flex_regs;
28716260e5d5Smrg
28723b115362Smrg	/**
28733b115362Smrg	 * @mux_regs_ptr:
28743b115362Smrg	 *
28753b115362Smrg	 * Pointer to tuples of u32 values (register address, value) for mux
28763b115362Smrg	 * registers.  Expected length of buffer is (2 * sizeof(u32) *
28773b115362Smrg	 * &n_mux_regs).
28786260e5d5Smrg	 */
28796260e5d5Smrg	__u64 mux_regs_ptr;
28803b115362Smrg
28813b115362Smrg	/**
28823b115362Smrg	 * @boolean_regs_ptr:
28833b115362Smrg	 *
28843b115362Smrg	 * Pointer to tuples of u32 values (register address, value) for mux
28853b115362Smrg	 * registers.  Expected length of buffer is (2 * sizeof(u32) *
28863b115362Smrg	 * &n_boolean_regs).
28873b115362Smrg	 */
28886260e5d5Smrg	__u64 boolean_regs_ptr;
28893b115362Smrg
28903b115362Smrg	/**
28913b115362Smrg	 * @flex_regs_ptr:
28923b115362Smrg	 *
28933b115362Smrg	 * Pointer to tuples of u32 values (register address, value) for mux
28943b115362Smrg	 * registers.  Expected length of buffer is (2 * sizeof(u32) *
28953b115362Smrg	 * &n_flex_regs).
28963b115362Smrg	 */
28976260e5d5Smrg	__u64 flex_regs_ptr;
28986260e5d5Smrg};
28996260e5d5Smrg
29003b115362Smrg/**
29013b115362Smrg * struct drm_i915_query_item - An individual query for the kernel to process.
29023b115362Smrg *
29033b115362Smrg * The behaviour is determined by the @query_id. Note that exactly what
29043b115362Smrg * @data_ptr is also depends on the specific @query_id.
29053b115362Smrg */
29066260e5d5Smrgstruct drm_i915_query_item {
29073b115362Smrg	/**
29083b115362Smrg	 * @query_id:
29093b115362Smrg	 *
29103b115362Smrg	 * The id for this query.  Currently accepted query IDs are:
29113b115362Smrg	 *  - %DRM_I915_QUERY_TOPOLOGY_INFO (see struct drm_i915_query_topology_info)
29123b115362Smrg	 *  - %DRM_I915_QUERY_ENGINE_INFO (see struct drm_i915_engine_info)
29133b115362Smrg	 *  - %DRM_I915_QUERY_PERF_CONFIG (see struct drm_i915_query_perf_config)
29143b115362Smrg	 *  - %DRM_I915_QUERY_MEMORY_REGIONS (see struct drm_i915_query_memory_regions)
29153b115362Smrg	 *  - %DRM_I915_QUERY_HWCONFIG_BLOB (see `GuC HWCONFIG blob uAPI`)
29163b115362Smrg	 *  - %DRM_I915_QUERY_GEOMETRY_SUBSLICES (see struct drm_i915_query_topology_info)
29173b115362Smrg	 */
29186260e5d5Smrg	__u64 query_id;
29193b115362Smrg#define DRM_I915_QUERY_TOPOLOGY_INFO		1
29203b115362Smrg#define DRM_I915_QUERY_ENGINE_INFO		2
29213b115362Smrg#define DRM_I915_QUERY_PERF_CONFIG		3
29223b115362Smrg#define DRM_I915_QUERY_MEMORY_REGIONS		4
29233b115362Smrg#define DRM_I915_QUERY_HWCONFIG_BLOB		5
29243b115362Smrg#define DRM_I915_QUERY_GEOMETRY_SUBSLICES	6
2925bf6cc7dcSmrg/* Must be kept compact -- no holes and well documented */
29266260e5d5Smrg
29273b115362Smrg	/**
29283b115362Smrg	 * @length:
29293b115362Smrg	 *
29306260e5d5Smrg	 * When set to zero by userspace, this is filled with the size of the
29313b115362Smrg	 * data to be written at the @data_ptr pointer. The kernel sets this
29326260e5d5Smrg	 * value to a negative value to signal an error on a particular query
29336260e5d5Smrg	 * item.
29346260e5d5Smrg	 */
29356260e5d5Smrg	__s32 length;
29366260e5d5Smrg
29373b115362Smrg	/**
29383b115362Smrg	 * @flags:
29393b115362Smrg	 *
29403b115362Smrg	 * When &query_id == %DRM_I915_QUERY_TOPOLOGY_INFO, must be 0.
29413b115362Smrg	 *
29423b115362Smrg	 * When &query_id == %DRM_I915_QUERY_PERF_CONFIG, must be one of the
29433b115362Smrg	 * following:
29443b115362Smrg	 *
29453b115362Smrg	 *	- %DRM_I915_QUERY_PERF_CONFIG_LIST
29463b115362Smrg	 *      - %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
29473b115362Smrg	 *      - %DRM_I915_QUERY_PERF_CONFIG_FOR_UUID
29483b115362Smrg	 *
29493b115362Smrg	 * When &query_id == %DRM_I915_QUERY_GEOMETRY_SUBSLICES must contain
29503b115362Smrg	 * a struct i915_engine_class_instance that references a render engine.
29516260e5d5Smrg	 */
29526260e5d5Smrg	__u32 flags;
29533b115362Smrg#define DRM_I915_QUERY_PERF_CONFIG_LIST          1
29543b115362Smrg#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID 2
29553b115362Smrg#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID   3
29566260e5d5Smrg
29573b115362Smrg	/**
29583b115362Smrg	 * @data_ptr:
29593b115362Smrg	 *
29603b115362Smrg	 * Data will be written at the location pointed by @data_ptr when the
29613b115362Smrg	 * value of @length matches the length of the data to be written by the
29626260e5d5Smrg	 * kernel.
29636260e5d5Smrg	 */
29646260e5d5Smrg	__u64 data_ptr;
29656260e5d5Smrg};
29666260e5d5Smrg
29673b115362Smrg/**
29683b115362Smrg * struct drm_i915_query - Supply an array of struct drm_i915_query_item for the
29693b115362Smrg * kernel to fill out.
29703b115362Smrg *
29713b115362Smrg * Note that this is generally a two step process for each struct
29723b115362Smrg * drm_i915_query_item in the array:
29733b115362Smrg *
29743b115362Smrg * 1. Call the DRM_IOCTL_I915_QUERY, giving it our array of struct
29753b115362Smrg *    drm_i915_query_item, with &drm_i915_query_item.length set to zero. The
29763b115362Smrg *    kernel will then fill in the size, in bytes, which tells userspace how
29773b115362Smrg *    memory it needs to allocate for the blob(say for an array of properties).
29783b115362Smrg *
29793b115362Smrg * 2. Next we call DRM_IOCTL_I915_QUERY again, this time with the
29803b115362Smrg *    &drm_i915_query_item.data_ptr equal to our newly allocated blob. Note that
29813b115362Smrg *    the &drm_i915_query_item.length should still be the same as what the
29823b115362Smrg *    kernel previously set. At this point the kernel can fill in the blob.
29833b115362Smrg *
29843b115362Smrg * Note that for some query items it can make sense for userspace to just pass
29853b115362Smrg * in a buffer/blob equal to or larger than the required size. In this case only
29863b115362Smrg * a single ioctl call is needed. For some smaller query items this can work
29873b115362Smrg * quite well.
29883b115362Smrg *
29893b115362Smrg */
29906260e5d5Smrgstruct drm_i915_query {
29913b115362Smrg	/** @num_items: The number of elements in the @items_ptr array */
29926260e5d5Smrg	__u32 num_items;
29936260e5d5Smrg
29943b115362Smrg	/**
29953b115362Smrg	 * @flags: Unused for now. Must be cleared to zero.
29966260e5d5Smrg	 */
29976260e5d5Smrg	__u32 flags;
29986260e5d5Smrg
29993b115362Smrg	/**
30003b115362Smrg	 * @items_ptr:
30013b115362Smrg	 *
30023b115362Smrg	 * Pointer to an array of struct drm_i915_query_item. The number of
30033b115362Smrg	 * array elements is @num_items.
30046260e5d5Smrg	 */
30056260e5d5Smrg	__u64 items_ptr;
30066260e5d5Smrg};
30076260e5d5Smrg
30083b115362Smrg/**
30093b115362Smrg * struct drm_i915_query_topology_info
30106260e5d5Smrg *
30113b115362Smrg * Describes slice/subslice/EU information queried by
30123b115362Smrg * %DRM_I915_QUERY_TOPOLOGY_INFO
30136260e5d5Smrg */
30146260e5d5Smrgstruct drm_i915_query_topology_info {
30153b115362Smrg	/**
30163b115362Smrg	 * @flags:
30173b115362Smrg	 *
30186260e5d5Smrg	 * Unused for now. Must be cleared to zero.
30196260e5d5Smrg	 */
30206260e5d5Smrg	__u16 flags;
30216260e5d5Smrg
30223b115362Smrg	/**
30233b115362Smrg	 * @max_slices:
30243b115362Smrg	 *
30253b115362Smrg	 * The number of bits used to express the slice mask.
30263b115362Smrg	 */
30276260e5d5Smrg	__u16 max_slices;
30283b115362Smrg
30293b115362Smrg	/**
30303b115362Smrg	 * @max_subslices:
30313b115362Smrg	 *
30323b115362Smrg	 * The number of bits used to express the subslice mask.
30333b115362Smrg	 */
30346260e5d5Smrg	__u16 max_subslices;
30353b115362Smrg
30363b115362Smrg	/**
30373b115362Smrg	 * @max_eus_per_subslice:
30383b115362Smrg	 *
30393b115362Smrg	 * The number of bits in the EU mask that correspond to a single
30403b115362Smrg	 * subslice's EUs.
30413b115362Smrg	 */
30426260e5d5Smrg	__u16 max_eus_per_subslice;
30436260e5d5Smrg
30443b115362Smrg	/**
30453b115362Smrg	 * @subslice_offset:
30463b115362Smrg	 *
30476260e5d5Smrg	 * Offset in data[] at which the subslice masks are stored.
30486260e5d5Smrg	 */
30496260e5d5Smrg	__u16 subslice_offset;
30506260e5d5Smrg
30513b115362Smrg	/**
30523b115362Smrg	 * @subslice_stride:
30533b115362Smrg	 *
30546260e5d5Smrg	 * Stride at which each of the subslice masks for each slice are
30556260e5d5Smrg	 * stored.
30566260e5d5Smrg	 */
30576260e5d5Smrg	__u16 subslice_stride;
30586260e5d5Smrg
30593b115362Smrg	/**
30603b115362Smrg	 * @eu_offset:
30613b115362Smrg	 *
30626260e5d5Smrg	 * Offset in data[] at which the EU masks are stored.
30636260e5d5Smrg	 */
30646260e5d5Smrg	__u16 eu_offset;
30656260e5d5Smrg
30663b115362Smrg	/**
30673b115362Smrg	 * @eu_stride:
30683b115362Smrg	 *
30696260e5d5Smrg	 * Stride at which each of the EU masks for each subslice are stored.
30706260e5d5Smrg	 */
30716260e5d5Smrg	__u16 eu_stride;
30726260e5d5Smrg
30733b115362Smrg	/**
30743b115362Smrg	 * @data:
30753b115362Smrg	 *
30763b115362Smrg	 * Contains 3 pieces of information :
30773b115362Smrg	 *
30783b115362Smrg	 * - The slice mask with one bit per slice telling whether a slice is
30793b115362Smrg	 *   available. The availability of slice X can be queried with the
30803b115362Smrg	 *   following formula :
30813b115362Smrg	 *
30823b115362Smrg	 *   .. code:: c
30833b115362Smrg	 *
30843b115362Smrg	 *      (data[X / 8] >> (X % 8)) & 1
30853b115362Smrg	 *
30863b115362Smrg	 *   Starting with Xe_HP platforms, Intel hardware no longer has
30873b115362Smrg	 *   traditional slices so i915 will always report a single slice
30883b115362Smrg	 *   (hardcoded slicemask = 0x1) which contains all of the platform's
30893b115362Smrg	 *   subslices.  I.e., the mask here does not reflect any of the newer
30903b115362Smrg	 *   hardware concepts such as "gslices" or "cslices" since userspace
30913b115362Smrg	 *   is capable of inferring those from the subslice mask.
30923b115362Smrg	 *
30933b115362Smrg	 * - The subslice mask for each slice with one bit per subslice telling
30943b115362Smrg	 *   whether a subslice is available.  Starting with Gen12 we use the
30953b115362Smrg	 *   term "subslice" to refer to what the hardware documentation
30963b115362Smrg	 *   describes as a "dual-subslices."  The availability of subslice Y
30973b115362Smrg	 *   in slice X can be queried with the following formula :
30983b115362Smrg	 *
30993b115362Smrg	 *   .. code:: c
31003b115362Smrg	 *
31013b115362Smrg	 *      (data[subslice_offset + X * subslice_stride + Y / 8] >> (Y % 8)) & 1
31023b115362Smrg	 *
31033b115362Smrg	 * - The EU mask for each subslice in each slice, with one bit per EU
31043b115362Smrg	 *   telling whether an EU is available. The availability of EU Z in
31053b115362Smrg	 *   subslice Y in slice X can be queried with the following formula :
31063b115362Smrg	 *
31073b115362Smrg	 *   .. code:: c
31083b115362Smrg	 *
31093b115362Smrg	 *      (data[eu_offset +
31103b115362Smrg	 *            (X * max_subslices + Y) * eu_stride +
31113b115362Smrg	 *            Z / 8
31123b115362Smrg	 *       ] >> (Z % 8)) & 1
31133b115362Smrg	 */
31143b115362Smrg	__u8 data[];
31153b115362Smrg};
31163b115362Smrg
31173b115362Smrg/**
31183b115362Smrg * DOC: Engine Discovery uAPI
31193b115362Smrg *
31203b115362Smrg * Engine discovery uAPI is a way of enumerating physical engines present in a
31213b115362Smrg * GPU associated with an open i915 DRM file descriptor. This supersedes the old
31223b115362Smrg * way of using `DRM_IOCTL_I915_GETPARAM` and engine identifiers like
31233b115362Smrg * `I915_PARAM_HAS_BLT`.
31243b115362Smrg *
31253b115362Smrg * The need for this interface came starting with Icelake and newer GPUs, which
31263b115362Smrg * started to establish a pattern of having multiple engines of a same class,
31273b115362Smrg * where not all instances were always completely functionally equivalent.
31283b115362Smrg *
31293b115362Smrg * Entry point for this uapi is `DRM_IOCTL_I915_QUERY` with the
31303b115362Smrg * `DRM_I915_QUERY_ENGINE_INFO` as the queried item id.
31313b115362Smrg *
31323b115362Smrg * Example for getting the list of engines:
31333b115362Smrg *
31343b115362Smrg * .. code-block:: C
31353b115362Smrg *
31363b115362Smrg * 	struct drm_i915_query_engine_info *info;
31373b115362Smrg * 	struct drm_i915_query_item item = {
31383b115362Smrg * 		.query_id = DRM_I915_QUERY_ENGINE_INFO;
31393b115362Smrg * 	};
31403b115362Smrg * 	struct drm_i915_query query = {
31413b115362Smrg * 		.num_items = 1,
31423b115362Smrg * 		.items_ptr = (uintptr_t)&item,
31433b115362Smrg * 	};
31443b115362Smrg * 	int err, i;
31453b115362Smrg *
31463b115362Smrg * 	// First query the size of the blob we need, this needs to be large
31473b115362Smrg * 	// enough to hold our array of engines. The kernel will fill out the
31483b115362Smrg * 	// item.length for us, which is the number of bytes we need.
31493b115362Smrg * 	//
31503b115362Smrg * 	// Alternatively a large buffer can be allocated straight away enabling
31513b115362Smrg * 	// querying in one pass, in which case item.length should contain the
31523b115362Smrg * 	// length of the provided buffer.
31533b115362Smrg * 	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
31543b115362Smrg * 	if (err) ...
31553b115362Smrg *
31563b115362Smrg * 	info = calloc(1, item.length);
31573b115362Smrg * 	// Now that we allocated the required number of bytes, we call the ioctl
31583b115362Smrg * 	// again, this time with the data_ptr pointing to our newly allocated
31593b115362Smrg * 	// blob, which the kernel can then populate with info on all engines.
31603b115362Smrg * 	item.data_ptr = (uintptr_t)&info,
31613b115362Smrg *
31623b115362Smrg * 	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
31633b115362Smrg * 	if (err) ...
31643b115362Smrg *
31653b115362Smrg * 	// We can now access each engine in the array
31663b115362Smrg * 	for (i = 0; i < info->num_engines; i++) {
31673b115362Smrg * 		struct drm_i915_engine_info einfo = info->engines[i];
31683b115362Smrg * 		u16 class = einfo.engine.class;
31693b115362Smrg * 		u16 instance = einfo.engine.instance;
31703b115362Smrg * 		....
31713b115362Smrg * 	}
31723b115362Smrg *
31733b115362Smrg * 	free(info);
31743b115362Smrg *
31753b115362Smrg * Each of the enumerated engines, apart from being defined by its class and
31763b115362Smrg * instance (see `struct i915_engine_class_instance`), also can have flags and
31773b115362Smrg * capabilities defined as documented in i915_drm.h.
31783b115362Smrg *
31793b115362Smrg * For instance video engines which support HEVC encoding will have the
31803b115362Smrg * `I915_VIDEO_CLASS_CAPABILITY_HEVC` capability bit set.
31813b115362Smrg *
31823b115362Smrg * Engine discovery only fully comes to its own when combined with the new way
31833b115362Smrg * of addressing engines when submitting batch buffers using contexts with
31843b115362Smrg * engine maps configured.
31853b115362Smrg */
31863b115362Smrg
31873b115362Smrg/**
31883b115362Smrg * struct drm_i915_engine_info
31893b115362Smrg *
31903b115362Smrg * Describes one engine and it's capabilities as known to the driver.
31913b115362Smrg */
31923b115362Smrgstruct drm_i915_engine_info {
31933b115362Smrg	/** @engine: Engine class and instance. */
31943b115362Smrg	struct i915_engine_class_instance engine;
31953b115362Smrg
31963b115362Smrg	/** @rsvd0: Reserved field. */
31973b115362Smrg	__u32 rsvd0;
31983b115362Smrg
31993b115362Smrg	/** @flags: Engine flags. */
32003b115362Smrg	__u64 flags;
32013b115362Smrg#define I915_ENGINE_INFO_HAS_LOGICAL_INSTANCE		(1 << 0)
32023b115362Smrg
32033b115362Smrg	/** @capabilities: Capabilities of this engine. */
32043b115362Smrg	__u64 capabilities;
32053b115362Smrg#define I915_VIDEO_CLASS_CAPABILITY_HEVC		(1 << 0)
32063b115362Smrg#define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC	(1 << 1)
32073b115362Smrg
32083b115362Smrg	/** @logical_instance: Logical instance of engine */
32093b115362Smrg	__u16 logical_instance;
32103b115362Smrg
32113b115362Smrg	/** @rsvd1: Reserved fields. */
32123b115362Smrg	__u16 rsvd1[3];
32133b115362Smrg	/** @rsvd2: Reserved fields. */
32143b115362Smrg	__u64 rsvd2[3];
32153b115362Smrg};
32163b115362Smrg
32173b115362Smrg/**
32183b115362Smrg * struct drm_i915_query_engine_info
32193b115362Smrg *
32203b115362Smrg * Engine info query enumerates all engines known to the driver by filling in
32213b115362Smrg * an array of struct drm_i915_engine_info structures.
32223b115362Smrg */
32233b115362Smrgstruct drm_i915_query_engine_info {
32243b115362Smrg	/** @num_engines: Number of struct drm_i915_engine_info structs following. */
32253b115362Smrg	__u32 num_engines;
32263b115362Smrg
32273b115362Smrg	/** @rsvd: MBZ */
32283b115362Smrg	__u32 rsvd[3];
32293b115362Smrg
32303b115362Smrg	/** @engines: Marker for drm_i915_engine_info structures. */
32313b115362Smrg	struct drm_i915_engine_info engines[];
32323b115362Smrg};
32333b115362Smrg
32343b115362Smrg/**
32353b115362Smrg * struct drm_i915_query_perf_config
32363b115362Smrg *
32373b115362Smrg * Data written by the kernel with query %DRM_I915_QUERY_PERF_CONFIG and
32383b115362Smrg * %DRM_I915_QUERY_GEOMETRY_SUBSLICES.
32393b115362Smrg */
32403b115362Smrgstruct drm_i915_query_perf_config {
32413b115362Smrg	union {
32423b115362Smrg		/**
32433b115362Smrg		 * @n_configs:
32443b115362Smrg		 *
32453b115362Smrg		 * When &drm_i915_query_item.flags ==
32463b115362Smrg		 * %DRM_I915_QUERY_PERF_CONFIG_LIST, i915 sets this fields to
32473b115362Smrg		 * the number of configurations available.
32483b115362Smrg		 */
32493b115362Smrg		__u64 n_configs;
32503b115362Smrg
32513b115362Smrg		/**
32523b115362Smrg		 * @config:
32533b115362Smrg		 *
32543b115362Smrg		 * When &drm_i915_query_item.flags ==
32553b115362Smrg		 * %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID, i915 will use the
32563b115362Smrg		 * value in this field as configuration identifier to decide
32573b115362Smrg		 * what data to write into config_ptr.
32583b115362Smrg		 */
32593b115362Smrg		__u64 config;
32603b115362Smrg
32613b115362Smrg		/**
32623b115362Smrg		 * @uuid:
32633b115362Smrg		 *
32643b115362Smrg		 * When &drm_i915_query_item.flags ==
32653b115362Smrg		 * %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID, i915 will use the
32663b115362Smrg		 * value in this field as configuration identifier to decide
32673b115362Smrg		 * what data to write into config_ptr.
32683b115362Smrg		 *
32693b115362Smrg		 * String formatted like "%08x-%04x-%04x-%04x-%012x"
32703b115362Smrg		 */
32713b115362Smrg		char uuid[36];
32723b115362Smrg	};
32733b115362Smrg
32743b115362Smrg	/**
32753b115362Smrg	 * @flags:
32763b115362Smrg	 *
32773b115362Smrg	 * Unused for now. Must be cleared to zero.
32783b115362Smrg	 */
32793b115362Smrg	__u32 flags;
32803b115362Smrg
32813b115362Smrg	/**
32823b115362Smrg	 * @data:
32833b115362Smrg	 *
32843b115362Smrg	 * When &drm_i915_query_item.flags == %DRM_I915_QUERY_PERF_CONFIG_LIST,
32853b115362Smrg	 * i915 will write an array of __u64 of configuration identifiers.
32863b115362Smrg	 *
32873b115362Smrg	 * When &drm_i915_query_item.flags == %DRM_I915_QUERY_PERF_CONFIG_DATA,
32883b115362Smrg	 * i915 will write a struct drm_i915_perf_oa_config. If the following
32893b115362Smrg	 * fields of struct drm_i915_perf_oa_config are not set to 0, i915 will
32903b115362Smrg	 * write into the associated pointers the values of submitted when the
32913b115362Smrg	 * configuration was created :
32923b115362Smrg	 *
32933b115362Smrg	 *  - &drm_i915_perf_oa_config.n_mux_regs
32943b115362Smrg	 *  - &drm_i915_perf_oa_config.n_boolean_regs
32953b115362Smrg	 *  - &drm_i915_perf_oa_config.n_flex_regs
32963b115362Smrg	 */
32976260e5d5Smrg	__u8 data[];
32986260e5d5Smrg};
32996260e5d5Smrg
33003b115362Smrg/**
33013b115362Smrg * enum drm_i915_gem_memory_class - Supported memory classes
33023b115362Smrg */
33033b115362Smrgenum drm_i915_gem_memory_class {
33043b115362Smrg	/** @I915_MEMORY_CLASS_SYSTEM: System memory */
33053b115362Smrg	I915_MEMORY_CLASS_SYSTEM = 0,
33063b115362Smrg	/** @I915_MEMORY_CLASS_DEVICE: Device local-memory */
33073b115362Smrg	I915_MEMORY_CLASS_DEVICE,
33083b115362Smrg};
33093b115362Smrg
33103b115362Smrg/**
33113b115362Smrg * struct drm_i915_gem_memory_class_instance - Identify particular memory region
33123b115362Smrg */
33133b115362Smrgstruct drm_i915_gem_memory_class_instance {
33143b115362Smrg	/** @memory_class: See enum drm_i915_gem_memory_class */
33153b115362Smrg	__u16 memory_class;
33163b115362Smrg
33173b115362Smrg	/** @memory_instance: Which instance */
33183b115362Smrg	__u16 memory_instance;
33193b115362Smrg};
33203b115362Smrg
33213b115362Smrg/**
33223b115362Smrg * struct drm_i915_memory_region_info - Describes one region as known to the
33233b115362Smrg * driver.
33243b115362Smrg *
33253b115362Smrg * Note this is using both struct drm_i915_query_item and struct drm_i915_query.
33263b115362Smrg * For this new query we are adding the new query id DRM_I915_QUERY_MEMORY_REGIONS
33273b115362Smrg * at &drm_i915_query_item.query_id.
33283b115362Smrg */
33293b115362Smrgstruct drm_i915_memory_region_info {
33303b115362Smrg	/** @region: The class:instance pair encoding */
33313b115362Smrg	struct drm_i915_gem_memory_class_instance region;
33323b115362Smrg
33333b115362Smrg	/** @rsvd0: MBZ */
33343b115362Smrg	__u32 rsvd0;
33353b115362Smrg
33363b115362Smrg	/**
33373b115362Smrg	 * @probed_size: Memory probed by the driver
33383b115362Smrg	 *
33393b115362Smrg	 * Note that it should not be possible to ever encounter a zero value
33403b115362Smrg	 * here, also note that no current region type will ever return -1 here.
33413b115362Smrg	 * Although for future region types, this might be a possibility. The
33423b115362Smrg	 * same applies to the other size fields.
33433b115362Smrg	 */
33443b115362Smrg	__u64 probed_size;
33453b115362Smrg
33463b115362Smrg	/**
33473b115362Smrg	 * @unallocated_size: Estimate of memory remaining
33483b115362Smrg	 *
33493b115362Smrg	 * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable accounting.
33503b115362Smrg	 * Without this (or if this is an older kernel) the value here will
33513b115362Smrg	 * always equal the @probed_size. Note this is only currently tracked
33523b115362Smrg	 * for I915_MEMORY_CLASS_DEVICE regions (for other types the value here
33533b115362Smrg	 * will always equal the @probed_size).
33543b115362Smrg	 */
33553b115362Smrg	__u64 unallocated_size;
33563b115362Smrg
33573b115362Smrg	union {
33583b115362Smrg		/** @rsvd1: MBZ */
33593b115362Smrg		__u64 rsvd1[8];
33603b115362Smrg		struct {
33613b115362Smrg			/**
33623b115362Smrg			 * @probed_cpu_visible_size: Memory probed by the driver
33633b115362Smrg			 * that is CPU accessible.
33643b115362Smrg			 *
33653b115362Smrg			 * This will be always be <= @probed_size, and the
33663b115362Smrg			 * remainder (if there is any) will not be CPU
33673b115362Smrg			 * accessible.
33683b115362Smrg			 *
33693b115362Smrg			 * On systems without small BAR, the @probed_size will
33703b115362Smrg			 * always equal the @probed_cpu_visible_size, since all
33713b115362Smrg			 * of it will be CPU accessible.
33723b115362Smrg			 *
33733b115362Smrg			 * Note this is only tracked for
33743b115362Smrg			 * I915_MEMORY_CLASS_DEVICE regions (for other types the
33753b115362Smrg			 * value here will always equal the @probed_size).
33763b115362Smrg			 *
33773b115362Smrg			 * Note that if the value returned here is zero, then
33783b115362Smrg			 * this must be an old kernel which lacks the relevant
33793b115362Smrg			 * small-bar uAPI support (including
33803b115362Smrg			 * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS), but on
33813b115362Smrg			 * such systems we should never actually end up with a
33823b115362Smrg			 * small BAR configuration, assuming we are able to load
33833b115362Smrg			 * the kernel module. Hence it should be safe to treat
33843b115362Smrg			 * this the same as when @probed_cpu_visible_size ==
33853b115362Smrg			 * @probed_size.
33863b115362Smrg			 */
33873b115362Smrg			__u64 probed_cpu_visible_size;
33883b115362Smrg
33893b115362Smrg			/**
33903b115362Smrg			 * @unallocated_cpu_visible_size: Estimate of CPU
33913b115362Smrg			 * visible memory remaining.
33923b115362Smrg			 *
33933b115362Smrg			 * Note this is only tracked for
33943b115362Smrg			 * I915_MEMORY_CLASS_DEVICE regions (for other types the
33953b115362Smrg			 * value here will always equal the
33963b115362Smrg			 * @probed_cpu_visible_size).
33973b115362Smrg			 *
33983b115362Smrg			 * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
33993b115362Smrg			 * accounting.  Without this the value here will always
34003b115362Smrg			 * equal the @probed_cpu_visible_size. Note this is only
34013b115362Smrg			 * currently tracked for I915_MEMORY_CLASS_DEVICE
34023b115362Smrg			 * regions (for other types the value here will also
34033b115362Smrg			 * always equal the @probed_cpu_visible_size).
34043b115362Smrg			 *
34053b115362Smrg			 * If this is an older kernel the value here will be
34063b115362Smrg			 * zero, see also @probed_cpu_visible_size.
34073b115362Smrg			 */
34083b115362Smrg			__u64 unallocated_cpu_visible_size;
34093b115362Smrg		};
34103b115362Smrg	};
34113b115362Smrg};
34123b115362Smrg
34133b115362Smrg/**
34143b115362Smrg * struct drm_i915_query_memory_regions
34153b115362Smrg *
34163b115362Smrg * The region info query enumerates all regions known to the driver by filling
34173b115362Smrg * in an array of struct drm_i915_memory_region_info structures.
34183b115362Smrg *
34193b115362Smrg * Example for getting the list of supported regions:
34203b115362Smrg *
34213b115362Smrg * .. code-block:: C
34223b115362Smrg *
34233b115362Smrg *	struct drm_i915_query_memory_regions *info;
34243b115362Smrg *	struct drm_i915_query_item item = {
34253b115362Smrg *		.query_id = DRM_I915_QUERY_MEMORY_REGIONS;
34263b115362Smrg *	};
34273b115362Smrg *	struct drm_i915_query query = {
34283b115362Smrg *		.num_items = 1,
34293b115362Smrg *		.items_ptr = (uintptr_t)&item,
34303b115362Smrg *	};
34313b115362Smrg *	int err, i;
34323b115362Smrg *
34333b115362Smrg *	// First query the size of the blob we need, this needs to be large
34343b115362Smrg *	// enough to hold our array of regions. The kernel will fill out the
34353b115362Smrg *	// item.length for us, which is the number of bytes we need.
34363b115362Smrg *	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
34373b115362Smrg *	if (err) ...
34383b115362Smrg *
34393b115362Smrg *	info = calloc(1, item.length);
34403b115362Smrg *	// Now that we allocated the required number of bytes, we call the ioctl
34413b115362Smrg *	// again, this time with the data_ptr pointing to our newly allocated
34423b115362Smrg *	// blob, which the kernel can then populate with the all the region info.
34433b115362Smrg *	item.data_ptr = (uintptr_t)&info,
34443b115362Smrg *
34453b115362Smrg *	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
34463b115362Smrg *	if (err) ...
34473b115362Smrg *
34483b115362Smrg *	// We can now access each region in the array
34493b115362Smrg *	for (i = 0; i < info->num_regions; i++) {
34503b115362Smrg *		struct drm_i915_memory_region_info mr = info->regions[i];
34513b115362Smrg *		u16 class = mr.region.class;
34523b115362Smrg *		u16 instance = mr.region.instance;
34533b115362Smrg *
34543b115362Smrg *		....
34553b115362Smrg *	}
34563b115362Smrg *
34573b115362Smrg *	free(info);
34583b115362Smrg */
34593b115362Smrgstruct drm_i915_query_memory_regions {
34603b115362Smrg	/** @num_regions: Number of supported regions */
34613b115362Smrg	__u32 num_regions;
34623b115362Smrg
34633b115362Smrg	/** @rsvd: MBZ */
34643b115362Smrg	__u32 rsvd[3];
34653b115362Smrg
34663b115362Smrg	/** @regions: Info about each supported region */
34673b115362Smrg	struct drm_i915_memory_region_info regions[];
34683b115362Smrg};
34693b115362Smrg
34703b115362Smrg/**
34713b115362Smrg * DOC: GuC HWCONFIG blob uAPI
34723b115362Smrg *
34733b115362Smrg * The GuC produces a blob with information about the current device.
34743b115362Smrg * i915 reads this blob from GuC and makes it available via this uAPI.
34753b115362Smrg *
34763b115362Smrg * The format and meaning of the blob content are documented in the
34773b115362Smrg * Programmer's Reference Manual.
34783b115362Smrg */
34793b115362Smrg
34803b115362Smrg/**
34813b115362Smrg * struct drm_i915_gem_create_ext - Existing gem_create behaviour, with added
34823b115362Smrg * extension support using struct i915_user_extension.
34833b115362Smrg *
34843b115362Smrg * Note that new buffer flags should be added here, at least for the stuff that
34853b115362Smrg * is immutable. Previously we would have two ioctls, one to create the object
34863b115362Smrg * with gem_create, and another to apply various parameters, however this
34873b115362Smrg * creates some ambiguity for the params which are considered immutable. Also in
34883b115362Smrg * general we're phasing out the various SET/GET ioctls.
34893b115362Smrg */
34903b115362Smrgstruct drm_i915_gem_create_ext {
34913b115362Smrg	/**
34923b115362Smrg	 * @size: Requested size for the object.
34933b115362Smrg	 *
34943b115362Smrg	 * The (page-aligned) allocated size for the object will be returned.
34953b115362Smrg	 *
34963b115362Smrg	 * DG2 64K min page size implications:
34973b115362Smrg	 *
34983b115362Smrg	 * On discrete platforms, starting from DG2, we have to contend with GTT
34993b115362Smrg	 * page size restrictions when dealing with I915_MEMORY_CLASS_DEVICE
35003b115362Smrg	 * objects.  Specifically the hardware only supports 64K or larger GTT
35013b115362Smrg	 * page sizes for such memory. The kernel will already ensure that all
35023b115362Smrg	 * I915_MEMORY_CLASS_DEVICE memory is allocated using 64K or larger page
35033b115362Smrg	 * sizes underneath.
35043b115362Smrg	 *
35053b115362Smrg	 * Note that the returned size here will always reflect any required
35063b115362Smrg	 * rounding up done by the kernel, i.e 4K will now become 64K on devices
35073b115362Smrg	 * such as DG2. The kernel will always select the largest minimum
35083b115362Smrg	 * page-size for the set of possible placements as the value to use when
35093b115362Smrg	 * rounding up the @size.
35103b115362Smrg	 *
35113b115362Smrg	 * Special DG2 GTT address alignment requirement:
35123b115362Smrg	 *
35133b115362Smrg	 * The GTT alignment will also need to be at least 2M for such objects.
35143b115362Smrg	 *
35153b115362Smrg	 * Note that due to how the hardware implements 64K GTT page support, we
35163b115362Smrg	 * have some further complications:
35173b115362Smrg	 *
35183b115362Smrg	 *   1) The entire PDE (which covers a 2MB virtual address range), must
35193b115362Smrg	 *   contain only 64K PTEs, i.e mixing 4K and 64K PTEs in the same
35203b115362Smrg	 *   PDE is forbidden by the hardware.
35213b115362Smrg	 *
35223b115362Smrg	 *   2) We still need to support 4K PTEs for I915_MEMORY_CLASS_SYSTEM
35233b115362Smrg	 *   objects.
35243b115362Smrg	 *
35253b115362Smrg	 * To keep things simple for userland, we mandate that any GTT mappings
35263b115362Smrg	 * must be aligned to and rounded up to 2MB. The kernel will internally
35273b115362Smrg	 * pad them out to the next 2MB boundary. As this only wastes virtual
35283b115362Smrg	 * address space and avoids userland having to copy any needlessly
35293b115362Smrg	 * complicated PDE sharing scheme (coloring) and only affects DG2, this
35303b115362Smrg	 * is deemed to be a good compromise.
35313b115362Smrg	 */
35323b115362Smrg	__u64 size;
35333b115362Smrg
35343b115362Smrg	/**
35353b115362Smrg	 * @handle: Returned handle for the object.
35363b115362Smrg	 *
35373b115362Smrg	 * Object handles are nonzero.
35383b115362Smrg	 */
35393b115362Smrg	__u32 handle;
35403b115362Smrg
35413b115362Smrg	/**
35423b115362Smrg	 * @flags: Optional flags.
35433b115362Smrg	 *
35443b115362Smrg	 * Supported values:
35453b115362Smrg	 *
35463b115362Smrg	 * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the kernel that
35473b115362Smrg	 * the object will need to be accessed via the CPU.
35483b115362Smrg	 *
35493b115362Smrg	 * Only valid when placing objects in I915_MEMORY_CLASS_DEVICE, and only
35503b115362Smrg	 * strictly required on configurations where some subset of the device
35513b115362Smrg	 * memory is directly visible/mappable through the CPU (which we also
35523b115362Smrg	 * call small BAR), like on some DG2+ systems. Note that this is quite
35533b115362Smrg	 * undesirable, but due to various factors like the client CPU, BIOS etc
35543b115362Smrg	 * it's something we can expect to see in the wild. See
35553b115362Smrg	 * &drm_i915_memory_region_info.probed_cpu_visible_size for how to
35563b115362Smrg	 * determine if this system applies.
35573b115362Smrg	 *
35583b115362Smrg	 * Note that one of the placements MUST be I915_MEMORY_CLASS_SYSTEM, to
35593b115362Smrg	 * ensure the kernel can always spill the allocation to system memory,
35603b115362Smrg	 * if the object can't be allocated in the mappable part of
35613b115362Smrg	 * I915_MEMORY_CLASS_DEVICE.
35623b115362Smrg	 *
35633b115362Smrg	 * Also note that since the kernel only supports flat-CCS on objects
35643b115362Smrg	 * that can *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore
35653b115362Smrg	 * don't support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS together with
35663b115362Smrg	 * flat-CCS.
35673b115362Smrg	 *
35683b115362Smrg	 * Without this hint, the kernel will assume that non-mappable
35693b115362Smrg	 * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note that the
35703b115362Smrg	 * kernel can still migrate the object to the mappable part, as a last
35713b115362Smrg	 * resort, if userspace ever CPU faults this object, but this might be
35723b115362Smrg	 * expensive, and so ideally should be avoided.
35733b115362Smrg	 *
35743b115362Smrg	 * On older kernels which lack the relevant small-bar uAPI support (see
35753b115362Smrg	 * also &drm_i915_memory_region_info.probed_cpu_visible_size),
35763b115362Smrg	 * usage of the flag will result in an error, but it should NEVER be
35773b115362Smrg	 * possible to end up with a small BAR configuration, assuming we can
35783b115362Smrg	 * also successfully load the i915 kernel module. In such cases the
35793b115362Smrg	 * entire I915_MEMORY_CLASS_DEVICE region will be CPU accessible, and as
35803b115362Smrg	 * such there are zero restrictions on where the object can be placed.
35813b115362Smrg	 */
35823b115362Smrg#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
35833b115362Smrg	__u32 flags;
35843b115362Smrg
35853b115362Smrg	/**
35863b115362Smrg	 * @extensions: The chain of extensions to apply to this object.
35873b115362Smrg	 *
35883b115362Smrg	 * This will be useful in the future when we need to support several
35893b115362Smrg	 * different extensions, and we need to apply more than one when
35903b115362Smrg	 * creating the object. See struct i915_user_extension.
35913b115362Smrg	 *
35923b115362Smrg	 * If we don't supply any extensions then we get the same old gem_create
35933b115362Smrg	 * behaviour.
35943b115362Smrg	 *
35953b115362Smrg	 * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
35963b115362Smrg	 * struct drm_i915_gem_create_ext_memory_regions.
35973b115362Smrg	 *
35983b115362Smrg	 * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
35993b115362Smrg	 * struct drm_i915_gem_create_ext_protected_content.
36003b115362Smrg	 */
36013b115362Smrg#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
36023b115362Smrg#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
36033b115362Smrg	__u64 extensions;
36043b115362Smrg};
36053b115362Smrg
36063b115362Smrg/**
36073b115362Smrg * struct drm_i915_gem_create_ext_memory_regions - The
36083b115362Smrg * I915_GEM_CREATE_EXT_MEMORY_REGIONS extension.
36093b115362Smrg *
36103b115362Smrg * Set the object with the desired set of placements/regions in priority
36113b115362Smrg * order. Each entry must be unique and supported by the device.
36123b115362Smrg *
36133b115362Smrg * This is provided as an array of struct drm_i915_gem_memory_class_instance, or
36143b115362Smrg * an equivalent layout of class:instance pair encodings. See struct
36153b115362Smrg * drm_i915_query_memory_regions and DRM_I915_QUERY_MEMORY_REGIONS for how to
36163b115362Smrg * query the supported regions for a device.
36173b115362Smrg *
36183b115362Smrg * As an example, on discrete devices, if we wish to set the placement as
36193b115362Smrg * device local-memory we can do something like:
36203b115362Smrg *
36213b115362Smrg * .. code-block:: C
36223b115362Smrg *
36233b115362Smrg *	struct drm_i915_gem_memory_class_instance region_lmem = {
36243b115362Smrg *              .memory_class = I915_MEMORY_CLASS_DEVICE,
36253b115362Smrg *              .memory_instance = 0,
36263b115362Smrg *      };
36273b115362Smrg *      struct drm_i915_gem_create_ext_memory_regions regions = {
36283b115362Smrg *              .base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS },
36293b115362Smrg *              .regions = (uintptr_t)&region_lmem,
36303b115362Smrg *              .num_regions = 1,
36313b115362Smrg *      };
36323b115362Smrg *      struct drm_i915_gem_create_ext create_ext = {
36333b115362Smrg *              .size = 16 * PAGE_SIZE,
36343b115362Smrg *              .extensions = (uintptr_t)&regions,
36353b115362Smrg *      };
36363b115362Smrg *
36373b115362Smrg *      int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
36383b115362Smrg *      if (err) ...
36393b115362Smrg *
36403b115362Smrg * At which point we get the object handle in &drm_i915_gem_create_ext.handle,
36413b115362Smrg * along with the final object size in &drm_i915_gem_create_ext.size, which
36423b115362Smrg * should account for any rounding up, if required.
36433b115362Smrg *
36443b115362Smrg * Note that userspace has no means of knowing the current backing region
36453b115362Smrg * for objects where @num_regions is larger than one. The kernel will only
36463b115362Smrg * ensure that the priority order of the @regions array is honoured, either
36473b115362Smrg * when initially placing the object, or when moving memory around due to
36483b115362Smrg * memory pressure
36493b115362Smrg *
36503b115362Smrg * On Flat-CCS capable HW, compression is supported for the objects residing
36513b115362Smrg * in I915_MEMORY_CLASS_DEVICE. When such objects (compressed) have other
36523b115362Smrg * memory class in @regions and migrated (by i915, due to memory
36533b115362Smrg * constraints) to the non I915_MEMORY_CLASS_DEVICE region, then i915 needs to
36543b115362Smrg * decompress the content. But i915 doesn't have the required information to
36553b115362Smrg * decompress the userspace compressed objects.
36563b115362Smrg *
36573b115362Smrg * So i915 supports Flat-CCS, on the objects which can reside only on
36583b115362Smrg * I915_MEMORY_CLASS_DEVICE regions.
36593b115362Smrg */
36603b115362Smrgstruct drm_i915_gem_create_ext_memory_regions {
36613b115362Smrg	/** @base: Extension link. See struct i915_user_extension. */
36623b115362Smrg	struct i915_user_extension base;
36633b115362Smrg
36643b115362Smrg	/** @pad: MBZ */
36653b115362Smrg	__u32 pad;
36663b115362Smrg	/** @num_regions: Number of elements in the @regions array. */
36673b115362Smrg	__u32 num_regions;
36683b115362Smrg	/**
36693b115362Smrg	 * @regions: The regions/placements array.
36703b115362Smrg	 *
36713b115362Smrg	 * An array of struct drm_i915_gem_memory_class_instance.
36723b115362Smrg	 */
36733b115362Smrg	__u64 regions;
36743b115362Smrg};
36753b115362Smrg
36763b115362Smrg/**
36773b115362Smrg * struct drm_i915_gem_create_ext_protected_content - The
36783b115362Smrg * I915_OBJECT_PARAM_PROTECTED_CONTENT extension.
36793b115362Smrg *
36803b115362Smrg * If this extension is provided, buffer contents are expected to be protected
36813b115362Smrg * by PXP encryption and require decryption for scan out and processing. This
36823b115362Smrg * is only possible on platforms that have PXP enabled, on all other scenarios
36833b115362Smrg * using this extension will cause the ioctl to fail and return -ENODEV. The
36843b115362Smrg * flags parameter is reserved for future expansion and must currently be set
36853b115362Smrg * to zero.
36863b115362Smrg *
36873b115362Smrg * The buffer contents are considered invalid after a PXP session teardown.
36883b115362Smrg *
36893b115362Smrg * The encryption is guaranteed to be processed correctly only if the object
36903b115362Smrg * is submitted with a context created using the
36913b115362Smrg * I915_CONTEXT_PARAM_PROTECTED_CONTENT flag. This will also enable extra checks
36923b115362Smrg * at submission time on the validity of the objects involved.
36933b115362Smrg *
36943b115362Smrg * Below is an example on how to create a protected object:
36953b115362Smrg *
36963b115362Smrg * .. code-block:: C
36973b115362Smrg *
36983b115362Smrg *      struct drm_i915_gem_create_ext_protected_content protected_ext = {
36993b115362Smrg *              .base = { .name = I915_GEM_CREATE_EXT_PROTECTED_CONTENT },
37003b115362Smrg *              .flags = 0,
37013b115362Smrg *      };
37023b115362Smrg *      struct drm_i915_gem_create_ext create_ext = {
37033b115362Smrg *              .size = PAGE_SIZE,
37043b115362Smrg *              .extensions = (uintptr_t)&protected_ext,
37053b115362Smrg *      };
37063b115362Smrg *
37073b115362Smrg *      int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
37083b115362Smrg *      if (err) ...
37093b115362Smrg */
37103b115362Smrgstruct drm_i915_gem_create_ext_protected_content {
37113b115362Smrg	/** @base: Extension link. See struct i915_user_extension. */
37123b115362Smrg	struct i915_user_extension base;
37133b115362Smrg	/** @flags: reserved for future usage, currently MBZ */
37143b115362Smrg	__u32 flags;
37153b115362Smrg};
37163b115362Smrg
37173b115362Smrg/* ID of the protected content session managed by i915 when PXP is active */
37183b115362Smrg#define I915_PROTECTED_CONTENT_DEFAULT_SESSION 0xf
37193b115362Smrg
37202ee35494Smrg#if defined(__cplusplus)
37212ee35494Smrg}
37222ee35494Smrg#endif
37232ee35494Smrg
3724e88f27b3Smrg#endif /* _I915_DRM_H_ */
3725