i915_drm.h revision bf6cc7dc
1/*
2 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
20 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
22 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
23 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#ifndef _I915_DRM_H_
28#define _I915_DRM_H_
29
30#include "drm.h"
31
32#if defined(__cplusplus)
33extern "C" {
34#endif
35
36/* Please note that modifications to all structs defined here are
37 * subject to backwards-compatibility constraints.
38 */
39
40/**
41 * DOC: uevents generated by i915 on its device node
42 *
43 * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
44 *	event from the gpu l3 cache. Additional information supplied is ROW,
45 *	BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
46 *	track of these events, and if a specific cache-line seems to have a
47 *	persistent error, remap it with the l3 remapping tool supplied in
48 *	intel-gpu-tools.  The value supplied with the event is always 1.
49 *
50 * I915_ERROR_UEVENT - Generated upon error detection, currently only via
51 *	hangcheck. The error detection event is a good indicator of when things
52 *	began to go badly. The value supplied with the event is a 1 upon error
53 *	detection, and a 0 upon reset completion, signifying no more error
54 *	exists. NOTE: Disabling hangcheck or reset via module parameter will
55 *	cause the related events to not be seen.
56 *
57 * I915_RESET_UEVENT - Event is generated just before an attempt to reset
58 *	the GPU. The value supplied with the event is always 1. NOTE: Disabling
59 *	reset via module parameter will cause this event to not be seen.
60 */
61#define I915_L3_PARITY_UEVENT		"L3_PARITY_ERROR"
62#define I915_ERROR_UEVENT		"ERROR"
63#define I915_RESET_UEVENT		"RESET"
64
65/*
66 * i915_user_extension: Base class for defining a chain of extensions
67 *
68 * Many interfaces need to grow over time. In most cases we can simply
69 * extend the struct and have userspace pass in more data. Another option,
70 * as demonstrated by Vulkan's approach to providing extensions for forward
71 * and backward compatibility, is to use a list of optional structs to
72 * provide those extra details.
73 *
74 * The key advantage to using an extension chain is that it allows us to
75 * redefine the interface more easily than an ever growing struct of
76 * increasing complexity, and for large parts of that interface to be
77 * entirely optional. The downside is more pointer chasing; chasing across
78 * the boundary with pointers encapsulated inside u64.
79 */
80struct i915_user_extension {
81	__u64 next_extension;
82	__u32 name;
83	__u32 flags; /* All undefined bits must be zero. */
84	__u32 rsvd[4]; /* Reserved for future use; must be zero. */
85};
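
/*
 * Example (illustrative sketch, not part of the uAPI): building a two element
 * extension chain. The structs ext_foo/ext_bar and the name values 0 and 1 are
 * hypothetical; a real extension embeds struct i915_user_extension as its first
 * member and links to the next element through next_extension, leaving the
 * last element's next_extension at 0.
 *
 *	struct ext_foo { struct i915_user_extension base; __u32 foo, pad; };
 *	struct ext_bar { struct i915_user_extension base; __u64 bar; };
 *
 *	struct ext_bar bar = { .base = { .name = 1 }, .bar = 42 };
 *	struct ext_foo foo = {
 *		.base = {
 *			.next_extension = (__u64)(uintptr_t)&bar,
 *			.name = 0,
 *		},
 *		.foo = 7,
 *	};
 *
 * The ioctl argument then carries (__u64)(uintptr_t)&foo in its extensions
 * field, and the kernel walks the chain until it finds next_extension == 0.
 */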
86
87/*
88 * MOCS indexes used for GPU surfaces, defining the cacheability of the
89 * surface data and the coherency for this data wrt. CPU vs. GPU accesses.
90 */
91enum i915_mocs_table_index {
92	/*
93	 * Not cached anywhere, coherency between CPU and GPU accesses is
94	 * guaranteed.
95	 */
96	I915_MOCS_UNCACHED,
97	/*
98	 * Cacheability and coherency controlled by the kernel automatically
99	 * based on the DRM_I915_GEM_SET_CACHING IOCTL setting and the current
100	 * usage of the surface (used for display scanout or not).
101	 */
102	I915_MOCS_PTE,
103	/*
104	 * Cached in all GPU caches available on the platform.
105	 * Coherency between CPU and GPU accesses to the surface is not
106	 * guaranteed without extra synchronization.
107	 */
108	I915_MOCS_CACHED,
109};
110
111/*
112 * Different engines serve different roles, and there may be more than one
113 * engine serving each role. enum drm_i915_gem_engine_class provides a
114 * classification of the role of the engine, which may be used when requesting
115 * operations to be performed on a certain subset of engines, or for providing
116 * information about that group.
117 */
118enum drm_i915_gem_engine_class {
119	I915_ENGINE_CLASS_RENDER	= 0,
120	I915_ENGINE_CLASS_COPY		= 1,
121	I915_ENGINE_CLASS_VIDEO		= 2,
122	I915_ENGINE_CLASS_VIDEO_ENHANCE	= 3,
123
124	/* should be kept compact */
125
126	I915_ENGINE_CLASS_INVALID	= -1
127};
128
129/**
130 * DOC: perf_events exposed by i915 through /sys/bus/event_sources/drivers/i915
131 *
132 */
133
134enum drm_i915_pmu_engine_sample {
135	I915_SAMPLE_BUSY = 0,
136	I915_SAMPLE_WAIT = 1,
137	I915_SAMPLE_SEMA = 2
138};
139
140#define I915_PMU_SAMPLE_BITS (4)
141#define I915_PMU_SAMPLE_MASK (0xf)
142#define I915_PMU_SAMPLE_INSTANCE_BITS (8)
143#define I915_PMU_CLASS_SHIFT \
144	(I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS)
145
146#define __I915_PMU_ENGINE(class, instance, sample) \
147	((class) << I915_PMU_CLASS_SHIFT | \
148	(instance) << I915_PMU_SAMPLE_BITS | \
149	(sample))
150
151#define I915_PMU_ENGINE_BUSY(class, instance) \
152	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY)
153
154#define I915_PMU_ENGINE_WAIT(class, instance) \
155	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT)
156
157#define I915_PMU_ENGINE_SEMA(class, instance) \
158	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)
159
160#define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x))
161
162#define I915_PMU_ACTUAL_FREQUENCY	__I915_PMU_OTHER(0)
163#define I915_PMU_REQUESTED_FREQUENCY	__I915_PMU_OTHER(1)
164#define I915_PMU_INTERRUPTS		__I915_PMU_OTHER(2)
165#define I915_PMU_RC6_RESIDENCY		__I915_PMU_OTHER(3)
166
167#define I915_PMU_LAST I915_PMU_RC6_RESIDENCY
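
/*
 * Example (illustrative sketch): opening a perf counter for render engine
 * busyness. The variable i915_pmu_type is assumed to hold the dynamic PMU type
 * number read from the i915 event source directory in sysfs, and error
 * handling is omitted.
 *
 *	#include <linux/perf_event.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	struct perf_event_attr attr = {
 *		.type = i915_pmu_type,
 *		.size = sizeof(attr),
 *		.config = I915_PMU_ENGINE_BUSY(I915_ENGINE_CLASS_RENDER, 0),
 *	};
 *	int pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
 *
 * A read() on pfd then returns the accumulated busy time for instance 0 of the
 * render class; the global events such as I915_PMU_ACTUAL_FREQUENCY are used
 * the same way as the config value.
 */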
168
169/* Each region is a minimum of 16k, and there are at most 255 of them.
170 */
171#define I915_NR_TEX_REGIONS 255	/* table size 2k - maximum due to use
172				 * of chars for next/prev indices */
173#define I915_LOG_MIN_TEX_REGION_SIZE 14
174
175typedef struct _drm_i915_init {
176	enum {
177		I915_INIT_DMA = 0x01,
178		I915_CLEANUP_DMA = 0x02,
179		I915_RESUME_DMA = 0x03
180	} func;
181	unsigned int mmio_offset;
182	int sarea_priv_offset;
183	unsigned int ring_start;
184	unsigned int ring_end;
185	unsigned int ring_size;
186	unsigned int front_offset;
187	unsigned int back_offset;
188	unsigned int depth_offset;
189	unsigned int w;
190	unsigned int h;
191	unsigned int pitch;
192	unsigned int pitch_bits;
193	unsigned int back_pitch;
194	unsigned int depth_pitch;
195	unsigned int cpp;
196	unsigned int chipset;
197} drm_i915_init_t;
198
199typedef struct _drm_i915_sarea {
200	struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
201	int last_upload;	/* last time texture was uploaded */
202	int last_enqueue;	/* last time a buffer was enqueued */
203	int last_dispatch;	/* age of the most recently dispatched buffer */
204	int ctxOwner;		/* last context to upload state */
205	int texAge;
206	int pf_enabled;		/* is pageflipping allowed? */
207	int pf_active;
208	int pf_current_page;	/* which buffer is being displayed? */
209	int perf_boxes;		/* performance boxes to be displayed */
210	int width, height;      /* screen size in pixels */
211
212	drm_handle_t front_handle;
213	int front_offset;
214	int front_size;
215
216	drm_handle_t back_handle;
217	int back_offset;
218	int back_size;
219
220	drm_handle_t depth_handle;
221	int depth_offset;
222	int depth_size;
223
224	drm_handle_t tex_handle;
225	int tex_offset;
226	int tex_size;
227	int log_tex_granularity;
228	int pitch;
229	int rotation;           /* 0, 90, 180 or 270 */
230	int rotated_offset;
231	int rotated_size;
232	int rotated_pitch;
233	int virtualX, virtualY;
234
235	unsigned int front_tiled;
236	unsigned int back_tiled;
237	unsigned int depth_tiled;
238	unsigned int rotated_tiled;
239	unsigned int rotated2_tiled;
240
241	int pipeA_x;
242	int pipeA_y;
243	int pipeA_w;
244	int pipeA_h;
245	int pipeB_x;
246	int pipeB_y;
247	int pipeB_w;
248	int pipeB_h;
249
250	/* fill out some space for old userspace triple buffer */
251	drm_handle_t unused_handle;
252	__u32 unused1, unused2, unused3;
253
254	/* buffer object handles for static buffers. May change
255	 * over the lifetime of the client.
256	 */
257	__u32 front_bo_handle;
258	__u32 back_bo_handle;
259	__u32 unused_bo_handle;
260	__u32 depth_bo_handle;
261
262} drm_i915_sarea_t;
263
264/* due to userspace building against these headers we need some compat here */
265#define planeA_x pipeA_x
266#define planeA_y pipeA_y
267#define planeA_w pipeA_w
268#define planeA_h pipeA_h
269#define planeB_x pipeB_x
270#define planeB_y pipeB_y
271#define planeB_w pipeB_w
272#define planeB_h pipeB_h
273
274/* Flags for perf_boxes
275 */
276#define I915_BOX_RING_EMPTY    0x1
277#define I915_BOX_FLIP          0x2
278#define I915_BOX_WAIT          0x4
279#define I915_BOX_TEXTURE_LOAD  0x8
280#define I915_BOX_LOST_CONTEXT  0x10
281
282/*
283 * i915 specific ioctls.
284 *
285 * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END), i.e.
286 * [0x40, 0xa0) (0xa0 is excluded). The numbers below are defined as offsets
287 * against DRM_COMMAND_BASE and should be in [0x0, 0x60).
288 */
289#define DRM_I915_INIT		0x00
290#define DRM_I915_FLUSH		0x01
291#define DRM_I915_FLIP		0x02
292#define DRM_I915_BATCHBUFFER	0x03
293#define DRM_I915_IRQ_EMIT	0x04
294#define DRM_I915_IRQ_WAIT	0x05
295#define DRM_I915_GETPARAM	0x06
296#define DRM_I915_SETPARAM	0x07
297#define DRM_I915_ALLOC		0x08
298#define DRM_I915_FREE		0x09
299#define DRM_I915_INIT_HEAP	0x0a
300#define DRM_I915_CMDBUFFER	0x0b
301#define DRM_I915_DESTROY_HEAP	0x0c
302#define DRM_I915_SET_VBLANK_PIPE	0x0d
303#define DRM_I915_GET_VBLANK_PIPE	0x0e
304#define DRM_I915_VBLANK_SWAP	0x0f
305#define DRM_I915_HWS_ADDR	0x11
306#define DRM_I915_GEM_INIT	0x13
307#define DRM_I915_GEM_EXECBUFFER	0x14
308#define DRM_I915_GEM_PIN	0x15
309#define DRM_I915_GEM_UNPIN	0x16
310#define DRM_I915_GEM_BUSY	0x17
311#define DRM_I915_GEM_THROTTLE	0x18
312#define DRM_I915_GEM_ENTERVT	0x19
313#define DRM_I915_GEM_LEAVEVT	0x1a
314#define DRM_I915_GEM_CREATE	0x1b
315#define DRM_I915_GEM_PREAD	0x1c
316#define DRM_I915_GEM_PWRITE	0x1d
317#define DRM_I915_GEM_MMAP	0x1e
318#define DRM_I915_GEM_SET_DOMAIN	0x1f
319#define DRM_I915_GEM_SW_FINISH	0x20
320#define DRM_I915_GEM_SET_TILING	0x21
321#define DRM_I915_GEM_GET_TILING	0x22
322#define DRM_I915_GEM_GET_APERTURE 0x23
323#define DRM_I915_GEM_MMAP_GTT	0x24
324#define DRM_I915_GET_PIPE_FROM_CRTC_ID	0x25
325#define DRM_I915_GEM_MADVISE	0x26
326#define DRM_I915_OVERLAY_PUT_IMAGE	0x27
327#define DRM_I915_OVERLAY_ATTRS	0x28
328#define DRM_I915_GEM_EXECBUFFER2	0x29
329#define DRM_I915_GEM_EXECBUFFER2_WR	DRM_I915_GEM_EXECBUFFER2
330#define DRM_I915_GET_SPRITE_COLORKEY	0x2a
331#define DRM_I915_SET_SPRITE_COLORKEY	0x2b
332#define DRM_I915_GEM_WAIT	0x2c
333#define DRM_I915_GEM_CONTEXT_CREATE	0x2d
334#define DRM_I915_GEM_CONTEXT_DESTROY	0x2e
335#define DRM_I915_GEM_SET_CACHING	0x2f
336#define DRM_I915_GEM_GET_CACHING	0x30
337#define DRM_I915_REG_READ		0x31
338#define DRM_I915_GET_RESET_STATS	0x32
339#define DRM_I915_GEM_USERPTR		0x33
340#define DRM_I915_GEM_CONTEXT_GETPARAM	0x34
341#define DRM_I915_GEM_CONTEXT_SETPARAM	0x35
342#define DRM_I915_PERF_OPEN		0x36
343#define DRM_I915_PERF_ADD_CONFIG	0x37
344#define DRM_I915_PERF_REMOVE_CONFIG	0x38
345#define DRM_I915_QUERY			0x39
346/* Must be kept compact -- no holes */
347
348#define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
349#define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
350#define DRM_IOCTL_I915_FLIP		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
351#define DRM_IOCTL_I915_BATCHBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
352#define DRM_IOCTL_I915_IRQ_EMIT         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
353#define DRM_IOCTL_I915_IRQ_WAIT         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
354#define DRM_IOCTL_I915_GETPARAM         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
355#define DRM_IOCTL_I915_SETPARAM         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
356#define DRM_IOCTL_I915_ALLOC            DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
357#define DRM_IOCTL_I915_FREE             DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
358#define DRM_IOCTL_I915_INIT_HEAP        DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
359#define DRM_IOCTL_I915_CMDBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
360#define DRM_IOCTL_I915_DESTROY_HEAP	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
361#define DRM_IOCTL_I915_SET_VBLANK_PIPE	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
362#define DRM_IOCTL_I915_GET_VBLANK_PIPE	DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
363#define DRM_IOCTL_I915_VBLANK_SWAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
364#define DRM_IOCTL_I915_HWS_ADDR		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
365#define DRM_IOCTL_I915_GEM_INIT		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
366#define DRM_IOCTL_I915_GEM_EXECBUFFER	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
367#define DRM_IOCTL_I915_GEM_EXECBUFFER2	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
368#define DRM_IOCTL_I915_GEM_EXECBUFFER2_WR	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2_WR, struct drm_i915_gem_execbuffer2)
369#define DRM_IOCTL_I915_GEM_PIN		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
370#define DRM_IOCTL_I915_GEM_UNPIN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
371#define DRM_IOCTL_I915_GEM_BUSY		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
372#define DRM_IOCTL_I915_GEM_SET_CACHING		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
373#define DRM_IOCTL_I915_GEM_GET_CACHING		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
374#define DRM_IOCTL_I915_GEM_THROTTLE	DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
375#define DRM_IOCTL_I915_GEM_ENTERVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
376#define DRM_IOCTL_I915_GEM_LEAVEVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
377#define DRM_IOCTL_I915_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
378#define DRM_IOCTL_I915_GEM_PREAD	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
379#define DRM_IOCTL_I915_GEM_PWRITE	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
380#define DRM_IOCTL_I915_GEM_MMAP		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
381#define DRM_IOCTL_I915_GEM_MMAP_GTT	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
382#define DRM_IOCTL_I915_GEM_SET_DOMAIN	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
383#define DRM_IOCTL_I915_GEM_SW_FINISH	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
384#define DRM_IOCTL_I915_GEM_SET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
385#define DRM_IOCTL_I915_GEM_GET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
386#define DRM_IOCTL_I915_GEM_GET_APERTURE	DRM_IOR  (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
387#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
388#define DRM_IOCTL_I915_GEM_MADVISE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
389#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
390#define DRM_IOCTL_I915_OVERLAY_ATTRS	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
391#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
392#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
393#define DRM_IOCTL_I915_GEM_WAIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
394#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
395#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create_ext)
396#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
397#define DRM_IOCTL_I915_REG_READ			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
398#define DRM_IOCTL_I915_GET_RESET_STATS		DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
399#define DRM_IOCTL_I915_GEM_USERPTR			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
400#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
401#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
402#define DRM_IOCTL_I915_PERF_OPEN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)
403#define DRM_IOCTL_I915_PERF_ADD_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
404#define DRM_IOCTL_I915_PERF_REMOVE_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
405#define DRM_IOCTL_I915_QUERY			DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
406
407/* Allow drivers to submit batchbuffers directly to hardware, relying
408 * on the security mechanisms provided by hardware.
409 */
410typedef struct drm_i915_batchbuffer {
411	int start;		/* agp offset */
412	int used;		/* nr bytes in use */
413	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
414	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
415	int num_cliprects;	/* multipass with multiple cliprects? */
416	struct drm_clip_rect *cliprects;	/* pointer to userspace cliprects */
417} drm_i915_batchbuffer_t;
418
419/* As above, but pass a pointer to a userspace buffer which can be
420 * validated by the kernel prior to sending to hardware.
421 */
422typedef struct _drm_i915_cmdbuffer {
423	char *buf;	/* pointer to userspace command buffer */
424	int sz;			/* nr bytes in buf */
425	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
426	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
427	int num_cliprects;	/* multipass with multiple cliprects? */
428	struct drm_clip_rect *cliprects;	/* pointer to userspace cliprects */
429} drm_i915_cmdbuffer_t;
430
431/* Userspace can request & wait on IRQs:
432 */
433typedef struct drm_i915_irq_emit {
434	int *irq_seq;
435} drm_i915_irq_emit_t;
436
437typedef struct drm_i915_irq_wait {
438	int irq_seq;
439} drm_i915_irq_wait_t;
440
441/*
442 * Different modes of per-process Graphics Translation Table,
443 * see I915_PARAM_HAS_ALIASING_PPGTT
444 */
445#define I915_GEM_PPGTT_NONE	0
446#define I915_GEM_PPGTT_ALIASING	1
447#define I915_GEM_PPGTT_FULL	2
448
449/* Ioctl to query kernel params:
450 */
451#define I915_PARAM_IRQ_ACTIVE            1
452#define I915_PARAM_ALLOW_BATCHBUFFER     2
453#define I915_PARAM_LAST_DISPATCH         3
454#define I915_PARAM_CHIPSET_ID            4
455#define I915_PARAM_HAS_GEM               5
456#define I915_PARAM_NUM_FENCES_AVAIL      6
457#define I915_PARAM_HAS_OVERLAY           7
458#define I915_PARAM_HAS_PAGEFLIPPING	 8
459#define I915_PARAM_HAS_EXECBUF2          9
460#define I915_PARAM_HAS_BSD		 10
461#define I915_PARAM_HAS_BLT		 11
462#define I915_PARAM_HAS_RELAXED_FENCING	 12
463#define I915_PARAM_HAS_COHERENT_RINGS	 13
464#define I915_PARAM_HAS_EXEC_CONSTANTS	 14
465#define I915_PARAM_HAS_RELAXED_DELTA	 15
466#define I915_PARAM_HAS_GEN7_SOL_RESET	 16
467#define I915_PARAM_HAS_LLC     	 	 17
468#define I915_PARAM_HAS_ALIASING_PPGTT	 18
469#define I915_PARAM_HAS_WAIT_TIMEOUT	 19
470#define I915_PARAM_HAS_SEMAPHORES	 20
471#define I915_PARAM_HAS_PRIME_VMAP_FLUSH	 21
472#define I915_PARAM_HAS_VEBOX		 22
473#define I915_PARAM_HAS_SECURE_BATCHES	 23
474#define I915_PARAM_HAS_PINNED_BATCHES	 24
475#define I915_PARAM_HAS_EXEC_NO_RELOC	 25
476#define I915_PARAM_HAS_EXEC_HANDLE_LUT   26
477#define I915_PARAM_HAS_WT     	 	 27
478#define I915_PARAM_CMD_PARSER_VERSION	 28
479#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
480#define I915_PARAM_MMAP_VERSION          30
481#define I915_PARAM_HAS_BSD2		 31
482#define I915_PARAM_REVISION              32
483#define I915_PARAM_SUBSLICE_TOTAL	 33
484#define I915_PARAM_EU_TOTAL		 34
485#define I915_PARAM_HAS_GPU_RESET	 35
486#define I915_PARAM_HAS_RESOURCE_STREAMER 36
487#define I915_PARAM_HAS_EXEC_SOFTPIN	 37
488#define I915_PARAM_HAS_POOLED_EU	 38
489#define I915_PARAM_MIN_EU_IN_POOL	 39
490#define I915_PARAM_MMAP_GTT_VERSION	 40
491
492/*
493 * Query whether DRM_I915_GEM_EXECBUFFER2 supports user-defined execution
494 * priorities, in which case the driver will attempt to execute batches in
495 * priority order. The param returns a capability bitmask; nonzero implies the
496 * scheduler is enabled, with different features present according to the mask.
497 *
498 * The initial priority for each batch is supplied by the context and is
499 * controlled via I915_CONTEXT_PARAM_PRIORITY.
500 */
501#define I915_PARAM_HAS_SCHEDULER	 41
502#define   I915_SCHEDULER_CAP_ENABLED	(1ul << 0)
503#define   I915_SCHEDULER_CAP_PRIORITY	(1ul << 1)
504#define   I915_SCHEDULER_CAP_PREEMPTION	(1ul << 2)
505#define   I915_SCHEDULER_CAP_SEMAPHORES	(1ul << 3)
506
507#define I915_PARAM_HUC_STATUS		 42
508
509/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
510 * synchronisation with implicit fencing on individual objects.
511 * See EXEC_OBJECT_ASYNC.
512 */
513#define I915_PARAM_HAS_EXEC_ASYNC	 43
514
515/* Query whether DRM_I915_GEM_EXECBUFFER2 supports explicit fence support -
516 * both being able to pass in a sync_file fd to wait upon before executing,
517 * and being able to return a new sync_file fd that is signaled when the
518 * current request is complete. See I915_EXEC_FENCE_IN and I915_EXEC_FENCE_OUT.
519 */
520#define I915_PARAM_HAS_EXEC_FENCE	 44
521
522/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture
523 * user specified buffers for post-mortem debugging of GPU hangs. See
524 * EXEC_OBJECT_CAPTURE.
525 */
526#define I915_PARAM_HAS_EXEC_CAPTURE	 45
527
528#define I915_PARAM_SLICE_MASK		 46
529
530/* Assuming it's uniform for each slice, this queries the mask of subslices
531 * per-slice for this system.
532 */
533#define I915_PARAM_SUBSLICE_MASK	 47
534
535/*
536 * Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying the batch buffer
537 * as the first execobject as opposed to the last. See I915_EXEC_BATCH_FIRST.
538 */
539#define I915_PARAM_HAS_EXEC_BATCH_FIRST	 48
540
541/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
542 * drm_i915_gem_exec_fence structures.  See I915_EXEC_FENCE_ARRAY.
543 */
544#define I915_PARAM_HAS_EXEC_FENCE_ARRAY  49
545
546/*
547 * Query whether every context (both per-file default and user created) is
548 * isolated (insofar as HW supports). If this parameter is not true, then
549 * freshly created contexts may inherit values from an existing context,
550 * rather than default HW values. If true, it also ensures (insofar as HW
551 * supports) that all state set by this context will not leak to any other
552 * context.
553 *
554 * As not every engine across every gen supports contexts, the returned
555 * value reports the support of context isolation for individual engines by
556 * returning a bitmask of each engine class set to true if that class supports
557 * isolation.
558 */
559#define I915_PARAM_HAS_CONTEXT_ISOLATION 50
560
561/* Frequency of the command streamer timestamps given by the *_TIMESTAMP
562 * registers. This used to be fixed per platform, but from CNL onwards it
563 * may vary depending on the part.
564 */
565#define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51
566
567/*
568 * Once upon a time we supposed that writes through the GGTT would be
569 * immediately in physical memory (once flushed out of the CPU path). However,
570 * on a few different processors and chipsets, this is not necessarily the case
571 * as the writes appear to be buffered internally. Thus a read of the backing
572 * storage (physical memory) via a different path (with different physical tags
573 * to the indirect write via the GGTT) will see stale values from before
574 * the GGTT write. Inside the kernel, we can for the most part keep track of
575 * the different read/write domains in use (e.g. set-domain), but the assumption
576 * of coherency is baked into the ABI, hence reporting its true state in this
577 * parameter.
578 *
579 * Reports true when writes via mmap_gtt are immediately visible following an
580 * lfence to flush the WCB.
581 *
582 * Reports false when writes via mmap_gtt are indeterminately delayed in an
583 * internal buffer and are _not_ immediately visible to third parties accessing
584 * directly via mmap_cpu/mmap_wc. Use of mmap_gtt as part of an IPC
585 * communications channel when this reports false is strongly discouraged.
586 */
587#define I915_PARAM_MMAP_GTT_COHERENT	52
588
589/* Must be kept compact -- no holes and well documented */
590
591typedef struct drm_i915_getparam {
592	__s32 param;
593	/*
594	 * WARNING: Using pointers instead of fixed-size u64 means we need to write
595	 * compat32 code. Don't repeat this mistake.
596	 */
597	int *value;
598} drm_i915_getparam_t;
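
/*
 * Example (illustrative sketch): querying a single parameter on an already
 * open i915 DRM file descriptor fd; error handling is omitted.
 *
 *	#include <sys/ioctl.h>
 *
 *	int value = 0;
 *	drm_i915_getparam_t gp = {
 *		.param = I915_PARAM_HAS_EXEC_ASYNC,
 *		.value = &value,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 *
 * On success, value is nonzero if EXEC_OBJECT_ASYNC is supported; unknown
 * parameters fail with -EINVAL.
 */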
599
600/* Ioctl to set kernel params:
601 */
602#define I915_SETPARAM_USE_MI_BATCHBUFFER_START            1
603#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY             2
604#define I915_SETPARAM_ALLOW_BATCHBUFFER                   3
605#define I915_SETPARAM_NUM_USED_FENCES                     4
606/* Must be kept compact -- no holes */
607
608typedef struct drm_i915_setparam {
609	int param;
610	int value;
611} drm_i915_setparam_t;
612
613/* A memory manager for regions of shared memory:
614 */
615#define I915_MEM_REGION_AGP 1
616
617typedef struct drm_i915_mem_alloc {
618	int region;
619	int alignment;
620	int size;
621	int *region_offset;	/* offset from start of fb or agp */
622} drm_i915_mem_alloc_t;
623
624typedef struct drm_i915_mem_free {
625	int region;
626	int region_offset;
627} drm_i915_mem_free_t;
628
629typedef struct drm_i915_mem_init_heap {
630	int region;
631	int size;
632	int start;
633} drm_i915_mem_init_heap_t;
634
635/* Allow memory manager to be torn down and re-initialized (e.g. on
636 * rotate):
637 */
638typedef struct drm_i915_mem_destroy_heap {
639	int region;
640} drm_i915_mem_destroy_heap_t;
641
642/* Allow X server to configure which pipes to monitor for vblank signals
643 */
644#define	DRM_I915_VBLANK_PIPE_A	1
645#define	DRM_I915_VBLANK_PIPE_B	2
646
647typedef struct drm_i915_vblank_pipe {
648	int pipe;
649} drm_i915_vblank_pipe_t;
650
651/* Schedule buffer swap at given vertical blank:
652 */
653typedef struct drm_i915_vblank_swap {
654	drm_drawable_t drawable;
655	enum drm_vblank_seq_type seqtype;
656	unsigned int sequence;
657} drm_i915_vblank_swap_t;
658
659typedef struct drm_i915_hws_addr {
660	__u64 addr;
661} drm_i915_hws_addr_t;
662
663struct drm_i915_gem_init {
664	/**
665	 * Beginning offset in the GTT to be managed by the DRM memory
666	 * manager.
667	 */
668	__u64 gtt_start;
669	/**
670	 * Ending offset in the GTT to be managed by the DRM memory
671	 * manager.
672	 */
673	__u64 gtt_end;
674};
675
676struct drm_i915_gem_create {
677	/**
678	 * Requested size for the object.
679	 *
680	 * The (page-aligned) allocated size for the object will be returned.
681	 */
682	__u64 size;
683	/**
684	 * Returned handle for the object.
685	 *
686	 * Object handles are nonzero.
687	 */
688	__u32 handle;
689	__u32 pad;
690};
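
/*
 * Example (illustrative sketch): creating a 4096 byte object on an open i915
 * DRM file descriptor fd; error handling is omitted.
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
 *
 * On return, create.handle names the new object and create.size holds the
 * page-aligned size that was actually allocated.
 */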
691
692struct drm_i915_gem_pread {
693	/** Handle for the object being read. */
694	__u32 handle;
695	__u32 pad;
696	/** Offset into the object to read from */
697	__u64 offset;
698	/** Length of data to read */
699	__u64 size;
700	/**
701	 * Pointer to write the data into.
702	 *
703	 * This is a fixed-size type for 32/64 compatibility.
704	 */
705	__u64 data_ptr;
706};
707
708struct drm_i915_gem_pwrite {
709	/** Handle for the object being written to. */
710	__u32 handle;
711	__u32 pad;
712	/** Offset into the object to write to */
713	__u64 offset;
714	/** Length of data to write */
715	__u64 size;
716	/**
717	 * Pointer to read the data from.
718	 *
719	 * This is a fixed-size type for 32/64 compatibility.
720	 */
721	__u64 data_ptr;
722};
723
724struct drm_i915_gem_mmap {
725	/** Handle for the object being mapped. */
726	__u32 handle;
727	__u32 pad;
728	/** Offset in the object to map. */
729	__u64 offset;
730	/**
731	 * Length of data to map.
732	 *
733	 * The value will be page-aligned.
734	 */
735	__u64 size;
736	/**
737	 * Returned pointer the data was mapped at.
738	 *
739	 * This is a fixed-size type for 32/64 compatibility.
740	 */
741	__u64 addr_ptr;
742
743	/**
744	 * Flags for extended behaviour.
745	 *
746	 * Added in version 2.
747	 */
748	__u64 flags;
749#define I915_MMAP_WC 0x1
750};
751
752struct drm_i915_gem_mmap_gtt {
753	/** Handle for the object being mapped. */
754	__u32 handle;
755	__u32 pad;
756	/**
757	 * Fake offset to use for subsequent mmap call
758	 *
759	 * This is a fixed-size type for 32/64 compatibility.
760	 */
761	__u64 offset;
762};
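
/*
 * Example (illustrative sketch): mapping an existing object handle through the
 * GTT aperture. The fake offset is only meaningful when passed back to mmap()
 * on the same i915 DRM file descriptor fd; obj_size is the object's size and
 * error handling is omitted.
 *
 *	#include <sys/mman.h>
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *	void *ptr = mmap(NULL, obj_size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, arg.offset);
 */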
763
764struct drm_i915_gem_set_domain {
765	/** Handle for the object */
766	__u32 handle;
767
768	/** New read domains */
769	__u32 read_domains;
770
771	/** New write domain */
772	__u32 write_domain;
773};
774
775struct drm_i915_gem_sw_finish {
776	/** Handle for the object */
777	__u32 handle;
778};
779
780struct drm_i915_gem_relocation_entry {
781	/**
782	 * Handle of the buffer being pointed to by this relocation entry.
783	 *
784	 * It's appealing to make this be an index into the mm_validate_entry
785	 * list to refer to the buffer, but this allows the driver to create
786	 * a relocation list for state buffers and not re-write it per
787	 * exec using the buffer.
788	 */
789	__u32 target_handle;
790
791	/**
792	 * Value to be added to the offset of the target buffer to make up
793	 * the relocation entry.
794	 */
795	__u32 delta;
796
797	/** Offset in the buffer the relocation entry will be written into */
798	__u64 offset;
799
800	/**
801	 * Offset value of the target buffer that the relocation entry was last
802	 * written as.
803	 *
804	 * If the buffer has the same offset as last time, we can skip syncing
805	 * and writing the relocation.  This value is written back out by
806	 * the execbuffer ioctl when the relocation is written.
807	 */
808	__u64 presumed_offset;
809
810	/**
811	 * Target memory domains read by this operation.
812	 */
813	__u32 read_domains;
814
815	/**
816	 * Target memory domains written by this operation.
817	 *
818	 * Note that only one domain may be written by the whole
819	 * execbuffer operation, so that where there are conflicts,
820	 * the application will get -EINVAL back.
821	 */
822	__u32 write_domain;
823};
824
825/** @{
826 * Intel memory domains
827 *
828 * Most of these just align with the various caches in
829 * the system and are used to flush and invalidate as
830 * objects end up cached in different domains.
831 */
832/** CPU cache */
833#define I915_GEM_DOMAIN_CPU		0x00000001
834/** Render cache, used by 2D and 3D drawing */
835#define I915_GEM_DOMAIN_RENDER		0x00000002
836/** Sampler cache, used by texture engine */
837#define I915_GEM_DOMAIN_SAMPLER		0x00000004
838/** Command queue, used to load batch buffers */
839#define I915_GEM_DOMAIN_COMMAND		0x00000008
840/** Instruction cache, used by shader programs */
841#define I915_GEM_DOMAIN_INSTRUCTION	0x00000010
842/** Vertex address cache */
843#define I915_GEM_DOMAIN_VERTEX		0x00000020
844/** GTT domain - aperture and scanout */
845#define I915_GEM_DOMAIN_GTT		0x00000040
846/** WC domain - uncached access */
847#define I915_GEM_DOMAIN_WC		0x00000080
848/** @} */
849
850struct drm_i915_gem_exec_object {
851	/**
852	 * User's handle for a buffer to be bound into the GTT for this
853	 * operation.
854	 */
855	__u32 handle;
856
857	/** Number of relocations to be performed on this buffer */
858	__u32 relocation_count;
859	/**
860	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
861	 * the relocations to be performed in this buffer.
862	 */
863	__u64 relocs_ptr;
864
865	/** Required alignment in graphics aperture */
866	__u64 alignment;
867
868	/**
869	 * Returned value of the updated offset of the object, for future
870	 * presumed_offset writes.
871	 */
872	__u64 offset;
873};
874
875struct drm_i915_gem_execbuffer {
876	/**
877	 * List of buffers to be validated with their relocations to be
878	 * performed on them.
879	 *
880	 * This is a pointer to an array of struct drm_i915_gem_validate_entry.
881	 *
882	 * These buffers must be listed in an order such that all relocations
883	 * a buffer is performing refer to buffers that have already appeared
884	 * in the validate list.
885	 */
886	__u64 buffers_ptr;
887	__u32 buffer_count;
888
889	/** Offset in the batchbuffer to start execution from. */
890	__u32 batch_start_offset;
891	/** Bytes used in batchbuffer from batch_start_offset */
892	__u32 batch_len;
893	__u32 DR1;
894	__u32 DR4;
895	__u32 num_cliprects;
896	/** This is a struct drm_clip_rect *cliprects */
897	__u64 cliprects_ptr;
898};
899
900struct drm_i915_gem_exec_object2 {
901	/**
902	 * User's handle for a buffer to be bound into the GTT for this
903	 * operation.
904	 */
905	__u32 handle;
906
907	/** Number of relocations to be performed on this buffer */
908	__u32 relocation_count;
909	/**
910	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
911	 * the relocations to be performed in this buffer.
912	 */
913	__u64 relocs_ptr;
914
915	/** Required alignment in graphics aperture */
916	__u64 alignment;
917
918	/**
919	 * When the EXEC_OBJECT_PINNED flag is specified this is populated by
920	 * the user with the GTT offset at which this object will be pinned.
921	 * When the I915_EXEC_NO_RELOC flag is specified this must contain the
922	 * presumed_offset of the object.
923	 * During execbuffer2 the kernel populates it with the value of the
924	 * current GTT offset of the object, for future presumed_offset writes.
925	 */
926	__u64 offset;
927
928#define EXEC_OBJECT_NEEDS_FENCE		 (1<<0)
929#define EXEC_OBJECT_NEEDS_GTT		 (1<<1)
930#define EXEC_OBJECT_WRITE		 (1<<2)
931#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
932#define EXEC_OBJECT_PINNED		 (1<<4)
933#define EXEC_OBJECT_PAD_TO_SIZE		 (1<<5)
934/* The kernel implicitly tracks GPU activity on all GEM objects, and
935 * synchronises operations with outstanding rendering. This includes
936 * rendering on other devices if exported via dma-buf. However, sometimes
937 * this tracking is too coarse and the user knows better. For example,
938 * if the object is split into non-overlapping ranges shared between different
939 * clients or engines (i.e. suballocating objects), the implicit tracking
940 * by kernel assumes that each operation affects the whole object rather
941 * than an individual range, causing needless synchronisation between clients.
942 * The kernel will also forgo any CPU cache flushes prior to rendering from
943 * the object as the client is expected to be also handling such domain
944 * tracking.
945 *
946 * The kernel maintains the implicit tracking in order to manage resources
947 * used by the GPU - this flag only disables the synchronisation prior to
948 * rendering with this object in this execbuf.
949 *
950 * Opting out of implicit synchronisation requires the user to do its own
951 * explicit tracking to avoid rendering corruption. See, for example,
952 * I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously.
953 */
954#define EXEC_OBJECT_ASYNC		(1<<6)
955/* Request that the contents of this execobject be copied into the error
956 * state upon a GPU hang involving this batch for post-mortem debugging.
957 * These buffers are recorded in no particular order as "user" in
958 * /sys/class/drm/cardN/error. Query I915_PARAM_HAS_EXEC_CAPTURE to see
959 * if the kernel supports this flag.
960 */
961#define EXEC_OBJECT_CAPTURE		(1<<7)
962/* All remaining bits are MBZ and RESERVED FOR FUTURE USE */
963#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_CAPTURE<<1)
964	__u64 flags;
965
966	union {
967		__u64 rsvd1;
968		__u64 pad_to_size;
969	};
970	__u64 rsvd2;
971};
972
973struct drm_i915_gem_exec_fence {
974	/**
975	 * User's handle for a drm_syncobj to wait on or signal.
976	 */
977	__u32 handle;
978
979#define I915_EXEC_FENCE_WAIT            (1<<0)
980#define I915_EXEC_FENCE_SIGNAL          (1<<1)
981#define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1))
982	__u32 flags;
983};
984
985struct drm_i915_gem_execbuffer2 {
986	/**
987	 * List of gem_exec_object2 structs
988	 */
989	__u64 buffers_ptr;
990	__u32 buffer_count;
991
992	/** Offset in the batchbuffer to start execution from. */
993	__u32 batch_start_offset;
994	/** Bytes used in batchbuffer from batch_start_offset */
995	__u32 batch_len;
996	__u32 DR1;
997	__u32 DR4;
998	__u32 num_cliprects;
999	/**
1000	 * This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY
1001	 * is not set.  If I915_EXEC_FENCE_ARRAY is set, then this is a
1002	 * struct drm_i915_gem_exec_fence *fences.
1003	 */
1004	__u64 cliprects_ptr;
1005#define I915_EXEC_RING_MASK              (0x3f)
1006#define I915_EXEC_DEFAULT                (0<<0)
1007#define I915_EXEC_RENDER                 (1<<0)
1008#define I915_EXEC_BSD                    (2<<0)
1009#define I915_EXEC_BLT                    (3<<0)
1010#define I915_EXEC_VEBOX                  (4<<0)
1011
1012/* Used for switching the constants addressing mode on gen4+ RENDER ring.
1013 * Gen6+ only supports relative addressing to dynamic state (default) and
1014 * absolute addressing.
1015 *
1016 * These flags are ignored for the BSD and BLT rings.
1017 */
1018#define I915_EXEC_CONSTANTS_MASK 	(3<<6)
1019#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
1020#define I915_EXEC_CONSTANTS_ABSOLUTE 	(1<<6)
1021#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
1022	__u64 flags;
1023	__u64 rsvd1; /* now used for context info */
1024	__u64 rsvd2;
1025};
1026
1027/** Resets the SO write offset registers for transform feedback on gen7. */
1028#define I915_EXEC_GEN7_SOL_RESET	(1<<8)
1029
1030/** Request a privileged ("secure") batch buffer. Note only available for
1031 * DRM_ROOT_ONLY | DRM_MASTER processes.
1032 */
1033#define I915_EXEC_SECURE		(1<<9)
1034
1035/** Inform the kernel that the batch is and will always be pinned. This
1036 * negates the requirement for a workaround to be performed to avoid
1037 * an incoherent CS (such as can be found on 830/845). If this flag is
1038 * not passed, the kernel will endeavour to make sure the batch is
1039 * coherent with the CS before execution. If this flag is passed,
1040 * userspace assumes the responsibility for ensuring the same.
1041 */
1042#define I915_EXEC_IS_PINNED		(1<<10)
1043
1044/** Provide a hint to the kernel that the command stream and auxiliary
1045 * state buffers already hold the correct presumed addresses and so the
1046 * relocation process may be skipped if no buffers need to be moved in
1047 * preparation for the execbuffer.
1048 */
1049#define I915_EXEC_NO_RELOC		(1<<11)
1050
1051/** Use the reloc.handle as an index into the exec object array rather
1052 * than as the per-file handle.
1053 */
1054#define I915_EXEC_HANDLE_LUT		(1<<12)
1055
1056/** Used for switching BSD rings on the platforms with two BSD rings */
1057#define I915_EXEC_BSD_SHIFT	 (13)
1058#define I915_EXEC_BSD_MASK	 (3 << I915_EXEC_BSD_SHIFT)
1059/* default ping-pong mode */
1060#define I915_EXEC_BSD_DEFAULT	 (0 << I915_EXEC_BSD_SHIFT)
1061#define I915_EXEC_BSD_RING1	 (1 << I915_EXEC_BSD_SHIFT)
1062#define I915_EXEC_BSD_RING2	 (2 << I915_EXEC_BSD_SHIFT)
1063
1064/** Tell the kernel that the batchbuffer is processed by
1065 *  the resource streamer.
1066 */
1067#define I915_EXEC_RESOURCE_STREAMER     (1<<15)
1068
1069/* Setting I915_EXEC_FENCE_IN implies that lower_32_bits(rsvd2) represent
1070 * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
1071 * the batch.
1072 *
1073 * Returns -EINVAL if the sync_file fd cannot be found.
1074 */
1075#define I915_EXEC_FENCE_IN		(1<<16)
1076
1077/* Setting I915_EXEC_FENCE_OUT causes the ioctl to return a sync_file fd
1078 * in the upper_32_bits(rsvd2) upon success. Ownership of the fd is given
1079 * to the caller, and it should be closed after use. (The fd is a regular
1080 * file descriptor and will be cleaned up on process termination. It holds
1081 * a reference to the request, but nothing else.)
1082 *
1083 * The sync_file fd can be combined with other sync_file fds and passed either
1084 * to execbuf using I915_EXEC_FENCE_IN, to atomic KMS ioctls (so that a flip
1085 * will only occur after this request completes), or to other devices.
1086 *
1087 * Using I915_EXEC_FENCE_OUT requires use of
1088 * DRM_IOCTL_I915_GEM_EXECBUFFER2_WR ioctl so that the result is written
1089 * back to userspace. Failure to do so will cause the out-fence to always
1090 * be reported as zero, and the real fence fd to be leaked.
1091 */
1092#define I915_EXEC_FENCE_OUT		(1<<17)
1093
1094/*
1095 * Traditionally the execbuf ioctl has only considered the final element in
1096 * the execobject[] to be the executable batch. Often though, the client
1097 * will know the batch object prior to construction and being able to place
1098 * it into the execobject[] array first can simplify the relocation tracking.
1099 * Setting I915_EXEC_BATCH_FIRST tells execbuf to use element 0 of the
1100 * execobject[] as the batch instead (the default is to use the last
1101 * element).
1102 */
1103#define I915_EXEC_BATCH_FIRST		(1<<18)
1104
1105/* Setting I915_EXEC_FENCE_ARRAY implies that num_cliprects and cliprects_ptr
1106 * define an array of i915_gem_exec_fence structures which specify a set of
1107 * dma fences to wait upon or signal.
1108 */
1109#define I915_EXEC_FENCE_ARRAY   (1<<19)
1110
1111#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_ARRAY<<1))
1112
1113#define I915_EXEC_CONTEXT_ID_MASK	(0xffffffff)
1114#define i915_execbuffer2_set_context_id(eb2, context) \
1115	(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
1116#define i915_execbuffer2_get_context_id(eb2) \
1117	((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
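
/*
 * Example (illustrative sketch): submitting a single batch buffer on the
 * render engine. fd, ctx_id, batch_handle and batch_len are assumed to exist
 * already; the batch is the only (and therefore last) execobject, it contains
 * no relocations, and error handling is omitted.
 *
 *	struct drm_i915_gem_exec_object2 obj = { .handle = batch_handle };
 *	struct drm_i915_gem_execbuffer2 execbuf = {
 *		.buffers_ptr = (__u64)(uintptr_t)&obj,
 *		.buffer_count = 1,
 *		.batch_len = batch_len,
 *		.flags = I915_EXEC_RENDER,
 *	};
 *	i915_execbuffer2_set_context_id(execbuf, ctx_id);
 *	ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 *
 * With I915_EXEC_FENCE_OUT the DRM_IOCTL_I915_GEM_EXECBUFFER2_WR variant must
 * be used instead so that the returned sync_file fd in rsvd2 reaches userspace.
 */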
1118
1119struct drm_i915_gem_pin {
1120	/** Handle of the buffer to be pinned. */
1121	__u32 handle;
1122	__u32 pad;
1123
1124	/** alignment required within the aperture */
1125	__u64 alignment;
1126
1127	/** Returned GTT offset of the buffer. */
1128	__u64 offset;
1129};
1130
1131struct drm_i915_gem_unpin {
1132	/** Handle of the buffer to be unpinned. */
1133	__u32 handle;
1134	__u32 pad;
1135};
1136
1137struct drm_i915_gem_busy {
1138	/** Handle of the buffer to check for busy */
1139	__u32 handle;
1140
1141	/** Return busy status
1142	 *
1143	 * A return of 0 implies that the object is idle (after
1144	 * having flushed any pending activity), and a non-zero return that
1145	 * the object is still in-flight on the GPU. (The GPU has not yet
1146	 * signaled completion for all pending requests that reference the
1147	 * object.) An object is guaranteed to become idle eventually (so
1148	 * long as no new GPU commands are executed upon it). Due to the
1149	 * asynchronous nature of the hardware, an object reported
1150	 * as busy may become idle before the ioctl is completed.
1151	 *
1152	 * Furthermore, if the object is busy, which engine is busy is only
1153	 * provided as a guide and only indirectly by reporting its class
1154	 * (there may be more than one engine in each class). There are race
1155	 * conditions which prevent the report of which engines are busy from
1156	 * being always accurate.  However, the converse is not true. If the
1157	 * object is idle, the result of the ioctl, that all engines are idle,
1158	 * is accurate.
1159	 *
1160	 * The returned dword is split into two fields to indicate both
1161	 * the engine classes on which the object is being read, and the
1162	 * engine class on which it is currently being written (if any).
1163	 *
1164	 * The low word (bits 0:15) indicates if the object is being written
1165	 * to by any engine (there can only be one, as the GEM implicit
1166	 * synchronisation rules force writes to be serialised). Only the
1167	 * engine class (offset by 1, I915_ENGINE_CLASS_RENDER is reported as
1168	 * 1 not 0 etc) for the last write is reported.
1169	 *
1170	 * The high word (bits 16:31) is a bitmask of which engine classes
1171	 * are currently reading from the object. Multiple engines may be
1172	 * reading from the object simultaneously.
1173	 *
1174	 * The value of each engine class is the same as specified in the
1175	 * I915_CONTEXT_SET_ENGINES parameter and via perf, i.e.
1176	 * I915_ENGINE_CLASS_RENDER, I915_ENGINE_CLASS_COPY, etc.
1177	 * Some hardware may have parallel
1178	 * execution engines, e.g. multiple media engines, which are
1179	 * mapped to the same class identifier and so are not separately
1180	 * reported for busyness.
1181	 *
1182	 * Caveat emptor:
1183	 * Only the boolean result of this query is reliable; that is whether
1184	 * the object is idle or busy. The report of which engines are busy
1185	 * should be only used as a heuristic.
1186	 */
1187	__u32 busy;
1188};
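
/*
 * Example (illustrative sketch): decoding the busy field for an existing
 * object handle on an open i915 DRM file descriptor fd; error handling is
 * omitted.
 *
 *	struct drm_i915_gem_busy busy = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
 *	__u32 write_class = busy.busy & 0xffff;
 *	__u32 read_classes = busy.busy >> 16;
 *
 * A zero write_class means no engine is writing; otherwise it is the engine
 * class of the writer plus one. Each bit set in read_classes corresponds to an
 * engine class that is currently reading from the object.
 */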
1189
1190/**
1191 * I915_CACHING_NONE
1192 *
1193 * GPU access is not coherent with cpu caches. Default for machines without an
1194 * LLC.
1195 */
1196#define I915_CACHING_NONE		0
1197/**
1198 * I915_CACHING_CACHED
1199 *
1200 * GPU access is coherent with cpu caches and furthermore the data is cached in
1201 * last-level caches shared between cpu cores and the gpu GT. Default on
1202 * machines with HAS_LLC.
1203 */
1204#define I915_CACHING_CACHED		1
1205/**
1206 * I915_CACHING_DISPLAY
1207 *
1208 * Special GPU caching mode which is coherent with the scanout engines.
1209 * Transparently falls back to I915_CACHING_NONE on platforms where no special
1210 * cache mode (like write-through or gfdt flushing) is available. The kernel
1211 * automatically sets this mode when using a buffer as a scanout target.
1212 * Userspace can manually set this mode to avoid a costly stall and clflush in
1213 * the hotpath of drawing the first frame.
1214 */
1215#define I915_CACHING_DISPLAY		2
1216
1217struct drm_i915_gem_caching {
1218	/**
1219	 * Handle of the buffer to set/get the caching level of. */
1220	__u32 handle;
1221
1222	/**
1223	 * Caching level to apply or return value
1224	 *
1225	 * bits0-15 are for generic caching control (i.e. the above defined
1226	 * values). bits16-31 are reserved for platform-specific variations
1227	 * (e.g. l3$ caching on gen7). */
1228	__u32 caching;
1229};
1230
1231#define I915_TILING_NONE	0
1232#define I915_TILING_X		1
1233#define I915_TILING_Y		2
1234#define I915_TILING_LAST	I915_TILING_Y
1235
1236#define I915_BIT_6_SWIZZLE_NONE		0
1237#define I915_BIT_6_SWIZZLE_9		1
1238#define I915_BIT_6_SWIZZLE_9_10		2
1239#define I915_BIT_6_SWIZZLE_9_11		3
1240#define I915_BIT_6_SWIZZLE_9_10_11	4
1241/* Not seen by userland */
1242#define I915_BIT_6_SWIZZLE_UNKNOWN	5
1243/* Seen by userland. */
1244#define I915_BIT_6_SWIZZLE_9_17		6
1245#define I915_BIT_6_SWIZZLE_9_10_17	7
1246
1247struct drm_i915_gem_set_tiling {
1248	/** Handle of the buffer to have its tiling state updated */
1249	__u32 handle;
1250
1251	/**
1252	 * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
1253	 * I915_TILING_Y).
1254	 *
1255	 * This value is to be set on request, and will be updated by the
1256	 * kernel on successful return with the actual chosen tiling layout.
1257	 *
1258	 * The tiling mode may be demoted to I915_TILING_NONE when the system
1259	 * has bit 6 swizzling that can't be managed correctly by GEM.
1260	 *
1261	 * Buffer contents become undefined when changing tiling_mode.
1262	 */
1263	__u32 tiling_mode;
1264
1265	/**
1266	 * Stride in bytes for the object when in I915_TILING_X or
1267	 * I915_TILING_Y.
1268	 */
1269	__u32 stride;
1270
1271	/**
1272	 * Returned address bit 6 swizzling required for CPU access through
1273	 * mmap mapping.
1274	 */
1275	__u32 swizzle_mode;
1276};
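
/*
 * Example (illustrative sketch): requesting X tiling with a 512 byte stride
 * for an existing object handle; error handling is omitted.
 *
 *	struct drm_i915_gem_set_tiling tiling = {
 *		.handle = handle,
 *		.tiling_mode = I915_TILING_X,
 *		.stride = 512,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &tiling);
 *
 * Because the kernel may demote the request, tiling_mode and swizzle_mode
 * should be re-read from the struct after the call rather than assumed.
 */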
1277
1278struct drm_i915_gem_get_tiling {
1279	/** Handle of the buffer to get tiling state for. */
1280	__u32 handle;
1281
1282	/**
1283	 * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
1284	 * I915_TILING_Y).
1285	 */
1286	__u32 tiling_mode;
1287
1288	/**
1289	 * Returned address bit 6 swizzling required for CPU access through
1290	 * mmap mapping.
1291	 */
1292	__u32 swizzle_mode;
1293
1294	/**
1295	 * Returned address bit 6 swizzling required for CPU access through
1296	 * mmap mapping whilst bound.
1297	 */
1298	__u32 phys_swizzle_mode;
1299};
1300
1301struct drm_i915_gem_get_aperture {
1302	/** Total size of the aperture used by i915_gem_execbuffer, in bytes */
1303	__u64 aper_size;
1304
1305	/**
1306	 * Available space in the aperture used by i915_gem_execbuffer, in
1307	 * bytes
1308	 */
1309	__u64 aper_available_size;
1310};
1311
1312struct drm_i915_get_pipe_from_crtc_id {
1313	/** ID of CRTC being requested **/
1314	__u32 crtc_id;
1315
1316	/** pipe of requested CRTC **/
1317	__u32 pipe;
1318};
1319
1320#define I915_MADV_WILLNEED 0
1321#define I915_MADV_DONTNEED 1
1322#define __I915_MADV_PURGED 2 /* internal state */
1323
1324struct drm_i915_gem_madvise {
1325	/** Handle of the buffer to change the backing store advice */
1326	__u32 handle;
1327
1328	/* Advice: either the buffer will be needed again in the near future,
1329	 *         or won't be and could be discarded under memory pressure.
1330	 */
1331	__u32 madv;
1332
1333	/** Whether the backing store still exists. */
1334	__u32 retained;
1335};
1336
1337/* flags */
1338#define I915_OVERLAY_TYPE_MASK 		0xff
1339#define I915_OVERLAY_YUV_PLANAR 	0x01
1340#define I915_OVERLAY_YUV_PACKED 	0x02
1341#define I915_OVERLAY_RGB		0x03
1342
1343#define I915_OVERLAY_DEPTH_MASK		0xff00
1344#define I915_OVERLAY_RGB24		0x1000
1345#define I915_OVERLAY_RGB16		0x2000
1346#define I915_OVERLAY_RGB15		0x3000
1347#define I915_OVERLAY_YUV422		0x0100
1348#define I915_OVERLAY_YUV411		0x0200
1349#define I915_OVERLAY_YUV420		0x0300
1350#define I915_OVERLAY_YUV410		0x0400
1351
1352#define I915_OVERLAY_SWAP_MASK		0xff0000
1353#define I915_OVERLAY_NO_SWAP		0x000000
1354#define I915_OVERLAY_UV_SWAP		0x010000
1355#define I915_OVERLAY_Y_SWAP		0x020000
1356#define I915_OVERLAY_Y_AND_UV_SWAP	0x030000
1357
1358#define I915_OVERLAY_FLAGS_MASK		0xff000000
1359#define I915_OVERLAY_ENABLE		0x01000000
1360
1361struct drm_intel_overlay_put_image {
1362	/* various flags and src format description */
1363	__u32 flags;
1364	/* source picture description */
1365	__u32 bo_handle;
1366	/* stride values and offsets are in bytes, buffer relative */
1367	__u16 stride_Y; /* stride for packed formats */
1368	__u16 stride_UV;
1369	__u32 offset_Y; /* offset for packed formats */
1370	__u32 offset_U;
1371	__u32 offset_V;
1372	/* in pixels */
1373	__u16 src_width;
1374	__u16 src_height;
1375	/* to compensate the scaling factors for partially covered surfaces */
1376	__u16 src_scan_width;
1377	__u16 src_scan_height;
1378	/* output crtc description */
1379	__u32 crtc_id;
1380	__u16 dst_x;
1381	__u16 dst_y;
1382	__u16 dst_width;
1383	__u16 dst_height;
1384};
1385
1386/* flags */
1387#define I915_OVERLAY_UPDATE_ATTRS	(1<<0)
1388#define I915_OVERLAY_UPDATE_GAMMA	(1<<1)
1389#define I915_OVERLAY_DISABLE_DEST_COLORKEY	(1<<2)
1390struct drm_intel_overlay_attrs {
1391	__u32 flags;
1392	__u32 color_key;
1393	__s32 brightness;
1394	__u32 contrast;
1395	__u32 saturation;
1396	__u32 gamma0;
1397	__u32 gamma1;
1398	__u32 gamma2;
1399	__u32 gamma3;
1400	__u32 gamma4;
1401	__u32 gamma5;
1402};
1403
1404/*
1405 * Intel sprite handling
1406 *
1407 * Color keying works with a min/mask/max tuple.  Both source and destination
1408 * color keying is allowed.
1409 *
1410 * Source keying:
1411 * Sprite pixels within the min & max values, masked against the color channels
1412 * specified in the mask field, will be transparent.  All other pixels will
1413 * be displayed on top of the primary plane.  For RGB surfaces, only the min
1414 * and mask fields will be used; ranged compares are not allowed.
1415 *
1416 * Destination keying:
1417 * Primary plane pixels that match the min value, masked against the color
1418 * channels specified in the mask field, will be replaced by corresponding
1419 * pixels from the sprite plane.
1420 *
1421 * Note that source & destination keying are exclusive; only one can be
1422 * active on a given plane.
1423 */
1424
1425#define I915_SET_COLORKEY_NONE		(1<<0) /* Deprecated. Instead set
1426						* flags==0 to disable colorkeying.
1427						*/
1428#define I915_SET_COLORKEY_DESTINATION	(1<<1)
1429#define I915_SET_COLORKEY_SOURCE	(1<<2)
1430struct drm_intel_sprite_colorkey {
1431	__u32 plane_id;
1432	__u32 min_value;
1433	__u32 channel_mask;
1434	__u32 max_value;
1435	__u32 flags;
1436};
1437
1438struct drm_i915_gem_wait {
1439	/** Handle of BO we shall wait on */
1440	__u32 bo_handle;
1441	__u32 flags;
1442	/** Number of nanoseconds to wait. Returns the time remaining. */
1443	__s64 timeout_ns;
1444};
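
/*
 * Example (illustrative sketch): waiting up to one second for an object to
 * become idle; error handling is omitted.
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 1000000000,
 *	};
 *	int ret = ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 *
 * A zero return means the object went idle within the timeout and timeout_ns
 * has been updated with the time remaining; a negative timeout_ns requests an
 * indefinite wait.
 */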
1445
1446struct drm_i915_gem_context_create {
1447	__u32 ctx_id; /* output: id of new context */
1448	__u32 pad;
1449};
1450
1451struct drm_i915_gem_context_create_ext {
1452	__u32 ctx_id; /* output: id of new context */
1453	__u32 flags;
1454#define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS	(1u << 0)
1455#define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \
1456	(-(I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS << 1))
1457	__u64 extensions;
1458};
1459
1460struct drm_i915_gem_context_param {
1461	__u32 ctx_id;
1462	__u32 size;
1463	__u64 param;
1464#define I915_CONTEXT_PARAM_BAN_PERIOD	0x1
1465#define I915_CONTEXT_PARAM_NO_ZEROMAP	0x2
1466#define I915_CONTEXT_PARAM_GTT_SIZE	0x3
1467#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE	0x4
1468#define I915_CONTEXT_PARAM_BANNABLE	0x5
1469#define I915_CONTEXT_PARAM_PRIORITY	0x6
1470#define   I915_CONTEXT_MAX_USER_PRIORITY	1023 /* inclusive */
1471#define   I915_CONTEXT_DEFAULT_PRIORITY		0
1472#define   I915_CONTEXT_MIN_USER_PRIORITY	-1023 /* inclusive */
1473	/*
1474	 * When using the following param, value should be a pointer to
1475	 * drm_i915_gem_context_param_sseu.
1476	 */
1477#define I915_CONTEXT_PARAM_SSEU		0x7
1478
1479/*
1480 * Not all clients may want to attempt automatic recovery of a context after
1481 * a hang (for example, some clients may only submit very small incremental
1482 * batches relying on known logical state of previous batches which will never
1483 * recover correctly and each attempt will hang), and so would prefer that
1484 * the context is forever banned instead.
1485 *
1486 * If set to false (0), after a reset, subsequent (and in flight) rendering
1487 * from this context is discarded, and the client will need to create a new
1488 * context to use instead.
1489 *
1490 * If set to true (1), the kernel will automatically attempt to recover the
1491 * context by skipping the hanging batch and executing the next batch starting
1492 * from the default context state (discarding the incomplete logical context
1493 * state lost due to the reset).
1494 *
1495 * On creation, all new contexts are marked as recoverable.
1496 */
1497#define I915_CONTEXT_PARAM_RECOVERABLE	0x8
1498/* Must be kept compact -- no holes and well documented */
1499
1500	__u64 value;
1501};
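
/*
 * Illustrative sketch, not part of the uAPI itself: raising the scheduling
 * priority of an existing context, assuming the
 * DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM wrapper defined earlier in this
 * header. ctx_id is a placeholder from a prior context create; size stays 0
 * because the value is a plain integer rather than a pointed-to struct.
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_PRIORITY,
 *		.value = 512,
 *	};
 *	int ret = ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */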
1502
1503/**
1504 * Context SSEU programming
1505 *
1506 * It may be necessary for either functional or performance reasons to configure
1507 * a context to run with a reduced number of SSEU (where SSEU stands for Slice/
1508 * Sub-slice/EU).
1509 *
1510 * This is done by filling in the below
1511 * struct drm_i915_gem_context_param_sseu for every supported engine which
1512 * userspace intends to use.
1513 *
1514 * Not all GPUs or engines support this functionality, in which case an error
1515 * code of -ENODEV will be returned.
1516 *
1517 * Also, the flexibility of possible SSEU configuration permutations varies
1518 * between GPU generations and software imposed limitations. Requesting an
1519 * unsupported combination will return an error code of -EINVAL.
1520 *
1521 * NOTE: When perf/OA is active the context's SSEU configuration is ignored in
1522 * favour of a single global setting.
1523 */
1524struct drm_i915_gem_context_param_sseu {
1525	/*
1526	 * Engine class & instance to be configured or queried.
1527	 */
1528	__u16 engine_class;
1529	__u16 engine_instance;
1530
1531	/*
1532	 * Unused for now. Must be cleared to zero.
1533	 */
1534	__u32 flags;
1535
1536	/*
1537	 * Mask of slices to enable for the context. Valid values are a subset
1538	 * of the bitmask value returned for I915_PARAM_SLICE_MASK.
1539	 */
1540	__u64 slice_mask;
1541
1542	/*
1543	 * Mask of subslices to enable for the context. Valid values are a
1544	 * subset of the bitmask value returned by I915_PARAM_SUBSLICE_MASK.
1545	 */
1546	__u64 subslice_mask;
1547
1548	/*
1549	 * Minimum/Maximum number of EUs to enable per subslice for the
1550	 * context. min_eus_per_subslice must be less than or equal to
1551	 * max_eus_per_subslice.
1552	 */
1553	__u16 min_eus_per_subslice;
1554	__u16 max_eus_per_subslice;
1555
1556	/*
1557	 * Unused for now. Must be cleared to zero.
1558	 */
1559	__u32 rsvd;
1560};
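
/*
 * Illustrative sketch, not part of the uAPI itself: reading back the current
 * SSEU configuration of a context's render engine, assuming the
 * DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM wrapper and the engine class defines
 * from earlier in this header. value carries a pointer to the struct above
 * and size its length.
 *
 *	struct drm_i915_gem_context_param_sseu sseu = {
 *		.engine_class = I915_ENGINE_CLASS_RENDER,
 *		.engine_instance = 0,
 *	};
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.size = sizeof(sseu),
 *		.param = I915_CONTEXT_PARAM_SSEU,
 *		.value = (__u64)(uintptr_t)&sseu,
 *	};
 *	int ret = ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
 */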
1561
1562struct drm_i915_gem_context_create_ext_setparam {
1563#define I915_CONTEXT_CREATE_EXT_SETPARAM 0
1564	struct i915_user_extension base;
1565	struct drm_i915_gem_context_param param;
1566};
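
/*
 * Illustrative sketch, not part of the uAPI itself: creating a context and
 * applying a parameter in a single call by chaining a SETPARAM extension
 * onto context creation, assuming the DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT
 * wrapper defined earlier in this header. The chain ends because
 * base.next_extension is left at 0.
 *
 *	struct drm_i915_gem_context_create_ext_setparam ext = {
 *		.base = { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
 *		.param = {
 *			.param = I915_CONTEXT_PARAM_RECOVERABLE,
 *			.value = 0,
 *		},
 *	};
 *	struct drm_i915_gem_context_create_ext create = {
 *		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
 *		.extensions = (__u64)(uintptr_t)&ext,
 *	};
 *	int ret = ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
 */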
1567
1568struct drm_i915_gem_context_destroy {
1569	__u32 ctx_id;
1570	__u32 pad;
1571};
1572
1573/*
1574 * DRM_I915_GEM_VM_CREATE -
1575 *
1576 * Create a new virtual memory address space (ppGTT) for use within a context
1577 * on the same file. Extensions can be provided to configure exactly how the
1578 * address space is set up upon creation.
1579 *
1580 * The id of the new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM is
1581 * returned in the outparam @vm_id.
1582 *
1583 * No flags are currently defined; all bits are reserved and must be zero.
1584 *
1585 * An extension chain may be provided, starting with @extensions, and terminated
1586 * by the @next_extension being 0. Currently, no extensions are defined.
1587 *
1588 * DRM_I915_GEM_VM_DESTROY -
1589 *
1590 * Destroys a previously created VM id, specified in @id.
1591 *
1592 * No extensions or flags are allowed currently, and so must be zero.
1593 */
1594struct drm_i915_gem_vm_control {
1595	__u64 extensions;
1596	__u32 flags;
1597	__u32 vm_id;
1598};
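
/*
 * Illustrative sketch, not part of the uAPI itself: creating and then
 * destroying a ppGTT, assuming the DRM_IOCTL_I915_GEM_VM_CREATE and
 * DRM_IOCTL_I915_GEM_VM_DESTROY wrappers defined earlier in this header.
 * Between the two calls the returned vm.vm_id is what a context would be
 * pointed at via I915_CONTEXT_PARAM_VM, as described above.
 *
 *	struct drm_i915_gem_vm_control vm = {};
 *	int ret = ioctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &vm);
 *	if (ret == 0)
 *		ret = ioctl(fd, DRM_IOCTL_I915_GEM_VM_DESTROY, &vm);
 */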
1599
1600struct drm_i915_reg_read {
1601	/*
1602	 * Register offset.
1603	 * For 64bit wide registers where the upper 32bits don't immediately
1604	 * follow the lower 32bits, the offset of the lower 32bits must
1605	 * be specified.
1606	 */
1607	__u64 offset;
1608#define I915_REG_READ_8B_WA (1ul << 0)
1609
1610	__u64 val; /* Return value */
1611};
1612
1613/* Known registers:
1614 *
1615 * Render engine timestamp - 0x2358 + 64bit - gen7+
1616 * - Note this register returns an invalid value if read using the default
1617 *   single instruction 8 byte read; to work around that, pass the
1618 *   I915_REG_READ_8B_WA flag in the offset field.
1619 *
1620 */
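
/*
 * Illustrative sketch, not part of the uAPI itself: sampling the render
 * engine timestamp register listed above, assuming the
 * DRM_IOCTL_I915_REG_READ wrapper defined earlier in this header. The
 * workaround flag is simply ORed into the offset.
 *
 *	struct drm_i915_reg_read rr = {
 *		.offset = 0x2358 | I915_REG_READ_8B_WA,
 *	};
 *	int ret = ioctl(fd, DRM_IOCTL_I915_REG_READ, &rr);
 *	unsigned long long timestamp = rr.val;
 */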
1621
1622struct drm_i915_reset_stats {
1623	__u32 ctx_id;
1624	__u32 flags;
1625
1626	/* All resets since boot/module reload, for all contexts */
1627	__u32 reset_count;
1628
1629	/* Number of batches lost when active in GPU, for this context */
1630	__u32 batch_active;
1631
1632	/* Number of batches lost pending for execution, for this context */
1633	__u32 batch_pending;
1634
1635	__u32 pad;
1636};
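
/*
 * Illustrative sketch, not part of the uAPI itself: checking whether a
 * context has lost work to GPU resets, assuming the
 * DRM_IOCTL_I915_GET_RESET_STATS wrapper defined earlier in this header.
 * flags and pad must remain zero.
 *
 *	struct drm_i915_reset_stats stats = { .ctx_id = ctx_id };
 *	int ret = ioctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats);
 *	int lost_work = !ret && (stats.batch_active || stats.batch_pending);
 */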
1637
1638struct drm_i915_gem_userptr {
1639	__u64 user_ptr;
1640	__u64 user_size;
1641	__u32 flags;
1642#define I915_USERPTR_READ_ONLY 0x1
1643#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
1644	/**
1645	 * Returned handle for the object.
1646	 *
1647	 * Object handles are nonzero.
1648	 */
1649	__u32 handle;
1650};
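
/*
 * Illustrative sketch, not part of the uAPI itself: wrapping an existing
 * page aligned allocation in a GEM object, assuming the
 * DRM_IOCTL_I915_GEM_USERPTR wrapper defined earlier in this header. Both
 * the pointer and the size are expected to be page aligned.
 *
 *	size_t sz = 16 * 4096;
 *	void *mem = aligned_alloc(4096, sz);
 *	struct drm_i915_gem_userptr up = {
 *		.user_ptr = (__u64)(uintptr_t)mem,
 *		.user_size = sz,
 *		.flags = 0,
 *	};
 *	int ret = ioctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &up);
 */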
1651
1652enum drm_i915_oa_format {
1653	I915_OA_FORMAT_A13 = 1,	    /* HSW only */
1654	I915_OA_FORMAT_A29,	    /* HSW only */
1655	I915_OA_FORMAT_A13_B8_C8,   /* HSW only */
1656	I915_OA_FORMAT_B4_C8,	    /* HSW only */
1657	I915_OA_FORMAT_A45_B8_C8,   /* HSW only */
1658	I915_OA_FORMAT_B4_C8_A16,   /* HSW only */
1659	I915_OA_FORMAT_C4_B8,	    /* HSW+ */
1660
1661	/* Gen8+ */
1662	I915_OA_FORMAT_A12,
1663	I915_OA_FORMAT_A12_B8_C8,
1664	I915_OA_FORMAT_A32u40_A4u32_B8_C8,
1665
1666	I915_OA_FORMAT_MAX	    /* non-ABI */
1667};
1668
1669enum drm_i915_perf_property_id {
1670	/**
1671	 * Open the stream for a specific context handle (as used with
1672	 * execbuffer2). A stream opened for a specific context this way
1673	 * won't typically require root privileges.
1674	 */
1675	DRM_I915_PERF_PROP_CTX_HANDLE = 1,
1676
1677	/**
1678	 * A value of 1 requests the inclusion of raw OA unit reports as
1679	 * part of stream samples.
1680	 */
1681	DRM_I915_PERF_PROP_SAMPLE_OA,
1682
1683	/**
1684	 * The value specifies which set of OA unit metrics should be
1685	 * configured, defining the contents of any OA unit reports.
1686	 */
1687	DRM_I915_PERF_PROP_OA_METRICS_SET,
1688
1689	/**
1690	 * The value specifies the size and layout of OA unit reports.
1691	 */
1692	DRM_I915_PERF_PROP_OA_FORMAT,
1693
1694	/**
1695	 * Specifying this property implicitly requests periodic OA unit
1696	 * sampling and (at least on Haswell) the sampling period is derived
1697	 * from this exponent as follows:
1698	 *
1699	 *   80ns * 2^(period_exponent + 1)
1700	 */
1701	DRM_I915_PERF_PROP_OA_EXPONENT,
1702
1703	DRM_I915_PERF_PROP_MAX /* non-ABI */
1704};
1705
1706struct drm_i915_perf_open_param {
1707	__u32 flags;
1708#define I915_PERF_FLAG_FD_CLOEXEC	(1<<0)
1709#define I915_PERF_FLAG_FD_NONBLOCK	(1<<1)
1710#define I915_PERF_FLAG_DISABLED		(1<<2)
1711
1712	/** The number of u64 (id, value) pairs */
1713	__u32 num_properties;
1714
1715	/**
1716	 * Pointer to array of u64 (id, value) pairs configuring the stream
1717	 * to open.
1718	 */
1719	__u64 properties_ptr;
1720};
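
/*
 * Illustrative sketch, not part of the uAPI itself: opening a periodically
 * sampled OA stream, assuming the DRM_IOCTL_I915_PERF_OPEN wrapper defined
 * earlier in this header. The metrics set id of 1 is a placeholder; a real
 * client would look up the id of the configuration it wants to use. On
 * success the ioctl return value is the new stream file descriptor.
 *
 *	__u64 props[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, 1,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = sizeof(props) / (2 * sizeof(__u64)),
 *		.properties_ptr = (__u64)(uintptr_t)props,
 *	};
 *	int stream_fd = ioctl(fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 */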
1721
1722/**
1723 * Enable data capture for a stream that was either opened in a disabled state
1724 * via I915_PERF_FLAG_DISABLED or was later disabled via
1725 * I915_PERF_IOCTL_DISABLE.
1726 *
1727 * It is intended to be cheaper to disable and enable a stream than it may be
1728 * to close and re-open a stream with the same configuration.
1729 *
1730 * It's undefined whether any pending data for the stream will be lost.
1731 */
1732#define I915_PERF_IOCTL_ENABLE	_IO('i', 0x0)
1733
1734/**
1735 * Disable data capture for a stream.
1736 *
1737 * It is an error to try to read a stream that is disabled.
1738 */
1739#define I915_PERF_IOCTL_DISABLE	_IO('i', 0x1)
1740
1741/**
1742 * Common to all i915 perf records
1743 */
1744struct drm_i915_perf_record_header {
1745	__u32 type;
1746	__u16 pad;
1747	__u16 size;
1748};
1749
1750enum drm_i915_perf_record_type {
1751
1752	/**
1753	 * Samples are the workhorse record type whose contents are extensible
1754	 * and defined when opening an i915 perf stream based on the given
1755	 * properties.
1756	 *
1757	 * Boolean properties following the naming convention
1758	 * DRM_I915_PERF_SAMPLE_xyz_PROP request the inclusion of 'xyz' data in
1759	 * every sample.
1760	 *
1761	 * The order of these sample properties given by userspace has no
1762	 * effect on the ordering of data within a sample. The order is
1763	 * documented here.
1764	 *
1765	 * struct {
1766	 *     struct drm_i915_perf_record_header header;
1767	 *
1768	 *     { u32 oa_report[]; } && DRM_I915_PERF_PROP_SAMPLE_OA
1769	 * };
1770	 */
1771	DRM_I915_PERF_RECORD_SAMPLE = 1,
1772
1773	/*
1774	 * Indicates that one or more OA reports were not written by the
1775	 * hardware. This can happen for example if an MI_REPORT_PERF_COUNT
1776	 * command collides with periodic sampling - which would be more likely
1777	 * at higher sampling frequencies.
1778	 */
1779	DRM_I915_PERF_RECORD_OA_REPORT_LOST = 2,
1780
1781	/**
1782	 * An error occurred that resulted in all pending OA reports being lost.
1783	 */
1784	DRM_I915_PERF_RECORD_OA_BUFFER_LOST = 3,
1785
1786	DRM_I915_PERF_RECORD_MAX /* non-ABI */
1787};
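
/*
 * Illustrative sketch, not part of the uAPI itself: walking the records
 * returned by a read() on a perf stream fd (stream_fd below). Every record
 * starts with the common header above and header.size gives the distance to
 * the next one; handle_sample() is a hypothetical consumer of the OA report
 * payload.
 *
 *	char buf[16 * 4096];
 *	ssize_t len = read(stream_fd, buf, sizeof(buf));
 *	ssize_t off = 0;
 *	while (off < len) {
 *		const struct drm_i915_perf_record_header *hdr =
 *			(const void *)(buf + off);
 *		if (hdr->type == DRM_I915_PERF_RECORD_SAMPLE)
 *			handle_sample(hdr + 1, hdr->size - sizeof(*hdr));
 *		off += hdr->size;
 *	}
 */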
1788
1789/**
1790 * Structure to upload perf dynamic configuration into the kernel.
1791 */
1792struct drm_i915_perf_oa_config {
1793	/** String formatted like "%08x-%04x-%04x-%04x-%012x" */
1794	char uuid[36];
1795
1796	__u32 n_mux_regs;
1797	__u32 n_boolean_regs;
1798	__u32 n_flex_regs;
1799
1800	/*
1801	 * These fields are pointers to tuples of u32 values (register address,
1802	 * value). For example, the expected length of the buffer pointed to by
1803	 * mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
1804	 */
1805	__u64 mux_regs_ptr;
1806	__u64 boolean_regs_ptr;
1807	__u64 flex_regs_ptr;
1808};
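
/*
 * Illustrative sketch, not part of the uAPI itself: registering a dynamic OA
 * configuration, assuming the DRM_IOCTL_I915_PERF_ADD_CONFIG wrapper defined
 * earlier in this header. The uuid and the single mux register below are
 * placeholders rather than a usable configuration; on success the ioctl
 * returns the id of the newly created metrics set.
 *
 *	__u32 mux[] = { 0x9888, 0x10800000 };
 *	struct drm_i915_perf_oa_config cfg = {
 *		.uuid = "01234567-0123-0123-0123-0123456789ab",
 *		.n_mux_regs = 1,
 *		.mux_regs_ptr = (__u64)(uintptr_t)mux,
 *	};
 *	int metrics_set = ioctl(fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &cfg);
 */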
1809
1810struct drm_i915_query_item {
1811	__u64 query_id;
1812#define DRM_I915_QUERY_TOPOLOGY_INFO    1
1813/* Must be kept compact -- no holes and well documented */
1814
1815	/*
1816	 * When set to zero by userspace, this is filled with the size of the
1817	 * data to be written at the data_ptr pointer. The kernel sets this
1818	 * value to a negative value to signal an error on a particular query
1819	 * item.
1820	 */
1821	__s32 length;
1822
1823	/*
1824	 * Unused for now. Must be cleared to zero.
1825	 */
1826	__u32 flags;
1827
1828	/*
1829	 * Data will be written at the location pointed to by data_ptr when the
1830	 * value of length matches the length of the data to be written by the
1831	 * kernel.
1832	 */
1833	__u64 data_ptr;
1834};
1835
1836struct drm_i915_query {
1837	__u32 num_items;
1838
1839	/*
1840	 * Unused for now. Must be cleared to zero.
1841	 */
1842	__u32 flags;
1843
1844	/*
1845	 * This points to an array of num_items drm_i915_query_item structures.
1846	 */
1847	__u64 items_ptr;
1848};
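
/*
 * Illustrative sketch, not part of the uAPI itself: the usual two-pass
 * pattern for DRM_I915_QUERY_TOPOLOGY_INFO, assuming the
 * DRM_IOCTL_I915_QUERY wrapper defined earlier in this header. The first
 * call leaves length at 0 so the kernel only reports the required size; the
 * second call, after allocating a buffer and setting data_ptr, fills it in.
 * A real caller would also check that length is positive before allocating.
 *
 *	struct drm_i915_query_item item = {
 *		.query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
 *	};
 *	struct drm_i915_query q = {
 *		.num_items = 1,
 *		.items_ptr = (__u64)(uintptr_t)&item,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_QUERY, &q);
 *	struct drm_i915_query_topology_info *info = malloc(item.length);
 *	item.data_ptr = (__u64)(uintptr_t)info;
 *	ioctl(fd, DRM_IOCTL_I915_QUERY, &q);
 */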
1849
1850/*
1851 * Data written by the kernel with query DRM_I915_QUERY_TOPOLOGY_INFO:
1852 *
1853 * data: contains the 3 pieces of information:
1854 *
1855 * - the slice mask with one bit per slice telling whether a slice is
1856 *   available. The availability of slice X can be queried with the following
1857 *   formula:
1858 *
1859 *           (data[X / 8] >> (X % 8)) & 1
1860 *
1861 * - the subslice mask for each slice with one bit per subslice telling
1862 *   whether a subslice is available. The availability of subslice Y in slice
1863 *   X can be queried with the following formula:
1864 *
1865 *           (data[subslice_offset +
1866 *                 X * subslice_stride +
1867 *                 Y / 8] >> (Y % 8)) & 1
1868 *
1869 * - the EU mask for each subslice in each slice with one bit per EU telling
1870 *   whether an EU is available. The availability of EU Z in subslice Y in
1871 *   slice X can be queried with the following formula:
1872 *
1873 *           (data[eu_offset +
1874 *                 (X * max_subslices + Y) * eu_stride +
1875 *                 Z / 8] >> (Z % 8)) & 1
1876 */
1877struct drm_i915_query_topology_info {
1878	/*
1879	 * Unused for now. Must be cleared to zero.
1880	 */
1881	__u16 flags;
1882
1883	__u16 max_slices;
1884	__u16 max_subslices;
1885	__u16 max_eus_per_subslice;
1886
1887	/*
1888	 * Offset in data[] at which the subslice masks are stored.
1889	 */
1890	__u16 subslice_offset;
1891
1892	/*
1893	 * Stride at which each of the subslice masks for each slice are
1894	 * stored.
1895	 */
1896	__u16 subslice_stride;
1897
1898	/*
1899	 * Offset in data[] at which the EU masks are stored.
1900	 */
1901	__u16 eu_offset;
1902
1903	/*
1904	 * Stride at which each of the EU masks for each subslice are stored.
1905	 */
1906	__u16 eu_stride;
1907
1908	__u8 data[];
1909};
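
/*
 * Illustrative sketch, not part of the uAPI itself: helpers that decode the
 * masks in data[] exactly as described by the formulas above, given a
 * drm_i915_query_topology_info filled in by the query ioctl.
 *
 *	static inline int slice_available(const struct drm_i915_query_topology_info *ti,
 *					  int x)
 *	{
 *		return (ti->data[x / 8] >> (x % 8)) & 1;
 *	}
 *
 *	static inline int subslice_available(const struct drm_i915_query_topology_info *ti,
 *					     int x, int y)
 *	{
 *		return (ti->data[ti->subslice_offset + x * ti->subslice_stride +
 *				 y / 8] >> (y % 8)) & 1;
 *	}
 *
 *	static inline int eu_available(const struct drm_i915_query_topology_info *ti,
 *				       int x, int y, int z)
 *	{
 *		return (ti->data[ti->eu_offset +
 *				 (x * ti->max_subslices + y) * ti->eu_stride +
 *				 z / 8] >> (z % 8)) & 1;
 *	}
 */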
1910
1911#if defined(__cplusplus)
1912}
1913#endif
1914
1915#endif /* _I915_DRM_H_ */
1916