/* i915_drm.h, revision 2ee35494 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRM_H_
#define _I915_DRM_H_

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

/* Please note that modifications to all structs defined here are
 * subject to backwards-compatibility constraints.
 */

/**
 * DOC: uevents generated by i915 on its device node
 *
 * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
 *	event from the GPU L3 cache. Additional information supplied is ROW,
 *	BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
 *	track of these events and if a specific cache-line seems to have a
 *	persistent error remap it with the L3 remapping tool supplied in
 *	intel-gpu-tools.  The value supplied with the event is always 1.
 *
 * I915_ERROR_UEVENT - Generated upon error detection, currently only via
 *	hangcheck. The error detection event is a good indicator of when things
 *	began to go badly. The value supplied with the event is a 1 upon error
 *	detection, and a 0 upon reset completion, signifying that no more
 *	errors exist. NOTE: Disabling hangcheck or reset via module parameter
 *	will cause the related events to not be seen.
 *
 * I915_RESET_UEVENT - Generated just before an attempt to reset the GPU.
 *	The value supplied with the event is always 1. NOTE: Disabling reset
 *	via module parameter will cause this event to not be seen.
 */
#define I915_L3_PARITY_UEVENT		"L3_PARITY_ERROR"
#define I915_ERROR_UEVENT		"ERROR"
#define I915_RESET_UEVENT		"RESET"

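/*
 * Illustrative sketch (not part of this header): one way userspace might
 * consume the uevents above is via libudev, filtering on the "drm"
 * subsystem and then looking for the property names defined by the macros.
 * The handler name below is hypothetical; this is an assumption about
 * typical libudev usage, not something mandated by i915.
 *
 *	#include <libudev.h>
 *	#include <poll.h>
 *
 *	struct udev *udev = udev_new();
 *	struct udev_monitor *mon =
 *		udev_monitor_new_from_netlink(udev, "kernel");
 *	udev_monitor_filter_add_match_subsystem_devtype(mon, "drm", NULL);
 *	udev_monitor_enable_receiving(mon);
 *
 *	struct pollfd pfd = {
 *		.fd = udev_monitor_get_fd(mon),
 *		.events = POLLIN,
 *	};
 *	while (poll(&pfd, 1, -1) > 0) {
 *		struct udev_device *dev = udev_monitor_receive_device(mon);
 *		if (!dev)
 *			continue;
 *		if (udev_device_get_property_value(dev, I915_ERROR_UEVENT))
 *			handle_gpu_error();	// hypothetical handler
 *		udev_device_unref(dev);
 *	}
 */
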
/*
 * MOCS indexes used for GPU surfaces, defining the cacheability of the
 * surface data and the coherency for this data wrt. CPU vs. GPU accesses.
 */
enum i915_mocs_table_index {
	/*
	 * Not cached anywhere, coherency between CPU and GPU accesses is
	 * guaranteed.
	 */
	I915_MOCS_UNCACHED,
	/*
	 * Cacheability and coherency controlled by the kernel automatically
	 * based on the DRM_I915_GEM_SET_CACHING IOCTL setting and the current
	 * usage of the surface (used for display scanout or not).
	 */
	I915_MOCS_PTE,
	/*
	 * Cached in all GPU caches available on the platform.
	 * Coherency between CPU and GPU accesses to the surface is not
	 * guaranteed without extra synchronization.
	 */
	I915_MOCS_CACHED,
};

/* Each region is a minimum of 16k, and there are at most 255 of them.
 */
#define I915_NR_TEX_REGIONS 255	/* table size 2k - maximum due to use
				 * of chars for next/prev indices */
#define I915_LOG_MIN_TEX_REGION_SIZE 14

typedef struct _drm_i915_init {
	enum {
		I915_INIT_DMA = 0x01,
		I915_CLEANUP_DMA = 0x02,
		I915_RESUME_DMA = 0x03
	} func;
	unsigned int mmio_offset;
	int sarea_priv_offset;
	unsigned int ring_start;
	unsigned int ring_end;
	unsigned int ring_size;
	unsigned int front_offset;
	unsigned int back_offset;
	unsigned int depth_offset;
	unsigned int w;
	unsigned int h;
	unsigned int pitch;
	unsigned int pitch_bits;
	unsigned int back_pitch;
	unsigned int depth_pitch;
	unsigned int cpp;
	unsigned int chipset;
} drm_i915_init_t;

typedef struct _drm_i915_sarea {
	struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
	int last_upload;	/* last time texture was uploaded */
	int last_enqueue;	/* last time a buffer was enqueued */
	int last_dispatch;	/* age of the most recently dispatched buffer */
	int ctxOwner;		/* last context to upload state */
	int texAge;
	int pf_enabled;		/* is pageflipping allowed? */
	int pf_active;
	int pf_current_page;	/* which buffer is being displayed? */
	int perf_boxes;		/* performance boxes to be displayed */
	int width, height;      /* screen size in pixels */

	drm_handle_t front_handle;
	int front_offset;
	int front_size;

	drm_handle_t back_handle;
	int back_offset;
	int back_size;

	drm_handle_t depth_handle;
	int depth_offset;
	int depth_size;

	drm_handle_t tex_handle;
	int tex_offset;
	int tex_size;
	int log_tex_granularity;
	int pitch;
	int rotation;           /* 0, 90, 180 or 270 */
	int rotated_offset;
	int rotated_size;
	int rotated_pitch;
	int virtualX, virtualY;

	unsigned int front_tiled;
	unsigned int back_tiled;
	unsigned int depth_tiled;
	unsigned int rotated_tiled;
	unsigned int rotated2_tiled;

	int pipeA_x;
	int pipeA_y;
	int pipeA_w;
	int pipeA_h;
	int pipeB_x;
	int pipeB_y;
	int pipeB_w;
	int pipeB_h;

	/* fill out some space for old userspace triple buffer */
	drm_handle_t unused_handle;
	__u32 unused1, unused2, unused3;

	/* buffer object handles for static buffers. May change
	 * over the lifetime of the client.
	 */
	__u32 front_bo_handle;
	__u32 back_bo_handle;
	__u32 unused_bo_handle;
	__u32 depth_bo_handle;

} drm_i915_sarea_t;

/* due to userspace building against these headers we need some compat here */
#define planeA_x pipeA_x
#define planeA_y pipeA_y
#define planeA_w pipeA_w
#define planeA_h pipeA_h
#define planeB_x pipeB_x
#define planeB_y pipeB_y
#define planeB_w pipeB_w
#define planeB_h pipeB_h

/* Flags for perf_boxes
 */
#define I915_BOX_RING_EMPTY    0x1
#define I915_BOX_FLIP          0x2
#define I915_BOX_WAIT          0x4
#define I915_BOX_TEXTURE_LOAD  0x8
#define I915_BOX_LOST_CONTEXT  0x10

/*
 * i915 specific ioctls.
 *
 * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END),
 * i.e. [0x40, 0xa0) (0xa0 is excluded). The numbers below are defined as
 * offsets against DRM_COMMAND_BASE and should lie in [0x0, 0x60).
 */
#define DRM_I915_INIT		0x00
#define DRM_I915_FLUSH		0x01
#define DRM_I915_FLIP		0x02
#define DRM_I915_BATCHBUFFER	0x03
#define DRM_I915_IRQ_EMIT	0x04
#define DRM_I915_IRQ_WAIT	0x05
#define DRM_I915_GETPARAM	0x06
#define DRM_I915_SETPARAM	0x07
#define DRM_I915_ALLOC		0x08
#define DRM_I915_FREE		0x09
#define DRM_I915_INIT_HEAP	0x0a
#define DRM_I915_CMDBUFFER	0x0b
#define DRM_I915_DESTROY_HEAP	0x0c
#define DRM_I915_SET_VBLANK_PIPE	0x0d
#define DRM_I915_GET_VBLANK_PIPE	0x0e
#define DRM_I915_VBLANK_SWAP	0x0f
#define DRM_I915_HWS_ADDR	0x11
#define DRM_I915_GEM_INIT	0x13
#define DRM_I915_GEM_EXECBUFFER	0x14
#define DRM_I915_GEM_PIN	0x15
#define DRM_I915_GEM_UNPIN	0x16
#define DRM_I915_GEM_BUSY	0x17
#define DRM_I915_GEM_THROTTLE	0x18
#define DRM_I915_GEM_ENTERVT	0x19
#define DRM_I915_GEM_LEAVEVT	0x1a
#define DRM_I915_GEM_CREATE	0x1b
#define DRM_I915_GEM_PREAD	0x1c
#define DRM_I915_GEM_PWRITE	0x1d
#define DRM_I915_GEM_MMAP	0x1e
#define DRM_I915_GEM_SET_DOMAIN	0x1f
#define DRM_I915_GEM_SW_FINISH	0x20
#define DRM_I915_GEM_SET_TILING	0x21
#define DRM_I915_GEM_GET_TILING	0x22
#define DRM_I915_GEM_GET_APERTURE 0x23
#define DRM_I915_GEM_MMAP_GTT	0x24
#define DRM_I915_GET_PIPE_FROM_CRTC_ID	0x25
#define DRM_I915_GEM_MADVISE	0x26
#define DRM_I915_OVERLAY_PUT_IMAGE	0x27
#define DRM_I915_OVERLAY_ATTRS	0x28
#define DRM_I915_GEM_EXECBUFFER2	0x29
#define DRM_I915_GEM_EXECBUFFER2_WR	DRM_I915_GEM_EXECBUFFER2
#define DRM_I915_GET_SPRITE_COLORKEY	0x2a
#define DRM_I915_SET_SPRITE_COLORKEY	0x2b
#define DRM_I915_GEM_WAIT	0x2c
#define DRM_I915_GEM_CONTEXT_CREATE	0x2d
#define DRM_I915_GEM_CONTEXT_DESTROY	0x2e
#define DRM_I915_GEM_SET_CACHING	0x2f
#define DRM_I915_GEM_GET_CACHING	0x30
#define DRM_I915_REG_READ		0x31
#define DRM_I915_GET_RESET_STATS	0x32
#define DRM_I915_GEM_USERPTR		0x33
#define DRM_I915_GEM_CONTEXT_GETPARAM	0x34
#define DRM_I915_GEM_CONTEXT_SETPARAM	0x35
#define DRM_I915_PERF_OPEN		0x36

#define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
#define DRM_IOCTL_I915_FLIP		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
#define DRM_IOCTL_I915_BATCHBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
#define DRM_IOCTL_I915_IRQ_EMIT         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
#define DRM_IOCTL_I915_IRQ_WAIT         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
#define DRM_IOCTL_I915_GETPARAM         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
#define DRM_IOCTL_I915_SETPARAM         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
#define DRM_IOCTL_I915_ALLOC            DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
#define DRM_IOCTL_I915_FREE             DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
#define DRM_IOCTL_I915_INIT_HEAP        DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
#define DRM_IOCTL_I915_CMDBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
#define DRM_IOCTL_I915_DESTROY_HEAP	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
#define DRM_IOCTL_I915_SET_VBLANK_PIPE	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_GET_VBLANK_PIPE	DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_VBLANK_SWAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
#define DRM_IOCTL_I915_HWS_ADDR		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_INIT		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_EXECBUFFER	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2_WR	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2_WR, struct drm_i915_gem_execbuffer2)
#define DRM_IOCTL_I915_GEM_PIN		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
#define DRM_IOCTL_I915_GEM_UNPIN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
#define DRM_IOCTL_I915_GEM_BUSY		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
#define DRM_IOCTL_I915_GEM_SET_CACHING		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_GET_CACHING		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_THROTTLE	DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
#define DRM_IOCTL_I915_GEM_ENTERVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
#define DRM_IOCTL_I915_GEM_LEAVEVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
#define DRM_IOCTL_I915_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
#define DRM_IOCTL_I915_GEM_PREAD	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
#define DRM_IOCTL_I915_GEM_PWRITE	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
#define DRM_IOCTL_I915_GEM_MMAP		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
#define DRM_IOCTL_I915_GEM_MMAP_GTT	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
#define DRM_IOCTL_I915_GEM_SET_DOMAIN	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
#define DRM_IOCTL_I915_GEM_SW_FINISH	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
#define DRM_IOCTL_I915_GEM_SET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
#define DRM_IOCTL_I915_GEM_GET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
#define DRM_IOCTL_I915_GEM_GET_APERTURE	DRM_IOR  (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
#define DRM_IOCTL_I915_GEM_MADVISE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
#define DRM_IOCTL_I915_OVERLAY_ATTRS	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GEM_WAIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
#define DRM_IOCTL_I915_REG_READ			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
#define DRM_IOCTL_I915_GET_RESET_STATS		DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
#define DRM_IOCTL_I915_GEM_USERPTR			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
#define DRM_IOCTL_I915_PERF_OPEN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)

/* Allow drivers to submit batchbuffers directly to hardware, relying
 * on the security mechanisms provided by hardware.
 */
typedef struct drm_i915_batchbuffer {
	int start;		/* agp offset */
	int used;		/* nr bytes in use */
	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects;	/* multipass with multiple cliprects? */
	struct drm_clip_rect *cliprects;	/* pointer to userspace cliprects */
} drm_i915_batchbuffer_t;

/* As above, but pass a pointer to userspace buffer which can be
 * validated by the kernel prior to sending to hardware.
 */
typedef struct _drm_i915_cmdbuffer {
	char *buf;	/* pointer to userspace command buffer */
	int sz;			/* nr bytes in buf */
	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects;	/* multipass with multiple cliprects? */
	struct drm_clip_rect *cliprects;	/* pointer to userspace cliprects */
} drm_i915_cmdbuffer_t;

/* Userspace can request & wait on irq's:
 */
typedef struct drm_i915_irq_emit {
	int *irq_seq;
} drm_i915_irq_emit_t;

typedef struct drm_i915_irq_wait {
	int irq_seq;
} drm_i915_irq_wait_t;

/* Ioctl to query kernel params:
 */
#define I915_PARAM_IRQ_ACTIVE            1
#define I915_PARAM_ALLOW_BATCHBUFFER     2
#define I915_PARAM_LAST_DISPATCH         3
#define I915_PARAM_CHIPSET_ID            4
#define I915_PARAM_HAS_GEM               5
#define I915_PARAM_NUM_FENCES_AVAIL      6
#define I915_PARAM_HAS_OVERLAY           7
#define I915_PARAM_HAS_PAGEFLIPPING	 8
#define I915_PARAM_HAS_EXECBUF2          9
#define I915_PARAM_HAS_BSD		 10
#define I915_PARAM_HAS_BLT		 11
#define I915_PARAM_HAS_RELAXED_FENCING	 12
#define I915_PARAM_HAS_COHERENT_RINGS	 13
#define I915_PARAM_HAS_EXEC_CONSTANTS	 14
#define I915_PARAM_HAS_RELAXED_DELTA	 15
#define I915_PARAM_HAS_GEN7_SOL_RESET	 16
#define I915_PARAM_HAS_LLC     	 	 17
#define I915_PARAM_HAS_ALIASING_PPGTT	 18
#define I915_PARAM_HAS_WAIT_TIMEOUT	 19
#define I915_PARAM_HAS_SEMAPHORES	 20
#define I915_PARAM_HAS_PRIME_VMAP_FLUSH	 21
#define I915_PARAM_HAS_VEBOX		 22
#define I915_PARAM_HAS_SECURE_BATCHES	 23
#define I915_PARAM_HAS_PINNED_BATCHES	 24
#define I915_PARAM_HAS_EXEC_NO_RELOC	 25
#define I915_PARAM_HAS_EXEC_HANDLE_LUT   26
#define I915_PARAM_HAS_WT     	 	 27
#define I915_PARAM_CMD_PARSER_VERSION	 28
#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
#define I915_PARAM_MMAP_VERSION          30
#define I915_PARAM_HAS_BSD2		 31
#define I915_PARAM_REVISION              32
#define I915_PARAM_SUBSLICE_TOTAL	 33
#define I915_PARAM_EU_TOTAL		 34
#define I915_PARAM_HAS_GPU_RESET	 35
#define I915_PARAM_HAS_RESOURCE_STREAMER 36
#define I915_PARAM_HAS_EXEC_SOFTPIN	 37
#define I915_PARAM_HAS_POOLED_EU	 38
#define I915_PARAM_MIN_EU_IN_POOL	 39
#define I915_PARAM_MMAP_GTT_VERSION	 40

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports user-defined execution
 * priorities, in which case the driver will attempt to execute batches in
 * priority order.
 */
#define I915_PARAM_HAS_SCHEDULER	 41
#define I915_PARAM_HUC_STATUS		 42

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
 * synchronisation with implicit fencing on individual objects.
 * See EXEC_OBJECT_ASYNC.
 */
#define I915_PARAM_HAS_EXEC_ASYNC	 43

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports explicit fencing - both
 * being able to pass in a sync_file fd to wait upon before executing,
 * and being able to return a new sync_file fd that is signaled when the
 * current request is complete. See I915_EXEC_FENCE_IN and I915_EXEC_FENCE_OUT.
 */
#define I915_PARAM_HAS_EXEC_FENCE	 44

typedef struct drm_i915_getparam {
	__s32 param;
	/*
	 * WARNING: Using pointers instead of fixed-size u64 means we need to write
	 * compat32 code. Don't repeat this mistake.
	 */
	int *value;
} drm_i915_getparam_t;

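/*
 * Illustrative sketch (not part of the ABI): querying a parameter with the
 * GETPARAM ioctl. "fd" is assumed to be an open DRM device file descriptor
 * (e.g. /dev/dri/card0 or a render node); real callers typically use
 * drmIoctl() from libdrm rather than raw ioctl(2) so that EINTR is retried.
 *
 *	#include <sys/ioctl.h>
 *	#include <errno.h>
 *
 *	static int i915_getparam(int fd, __s32 param, int *value)
 *	{
 *		drm_i915_getparam_t gp = { .param = param, .value = value };
 *
 *		if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
 *			return -errno;
 *		return 0;
 *	}
 *
 *	// e.g. i915_getparam(fd, I915_PARAM_CHIPSET_ID, &devid);
 */
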
/* Ioctl to set kernel params:
 */
#define I915_SETPARAM_USE_MI_BATCHBUFFER_START            1
#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY             2
#define I915_SETPARAM_ALLOW_BATCHBUFFER                   3
#define I915_SETPARAM_NUM_USED_FENCES                     4

typedef struct drm_i915_setparam {
	int param;
	int value;
} drm_i915_setparam_t;

/* A memory manager for regions of shared memory:
 */
#define I915_MEM_REGION_AGP 1

typedef struct drm_i915_mem_alloc {
	int region;
	int alignment;
	int size;
	int *region_offset;	/* offset from start of fb or agp */
} drm_i915_mem_alloc_t;

typedef struct drm_i915_mem_free {
	int region;
	int region_offset;
} drm_i915_mem_free_t;

typedef struct drm_i915_mem_init_heap {
	int region;
	int size;
	int start;
} drm_i915_mem_init_heap_t;

/* Allow memory manager to be torn down and re-initialized (eg on
 * rotate):
 */
typedef struct drm_i915_mem_destroy_heap {
	int region;
} drm_i915_mem_destroy_heap_t;

/* Allow X server to configure which pipes to monitor for vblank signals
 */
#define	DRM_I915_VBLANK_PIPE_A	1
#define	DRM_I915_VBLANK_PIPE_B	2

typedef struct drm_i915_vblank_pipe {
	int pipe;
} drm_i915_vblank_pipe_t;

/* Schedule buffer swap at given vertical blank:
 */
typedef struct drm_i915_vblank_swap {
	drm_drawable_t drawable;
	enum drm_vblank_seq_type seqtype;
	unsigned int sequence;
} drm_i915_vblank_swap_t;

typedef struct drm_i915_hws_addr {
	__u64 addr;
} drm_i915_hws_addr_t;

struct drm_i915_gem_init {
	/**
	 * Beginning offset in the GTT to be managed by the DRM memory
	 * manager.
	 */
	__u64 gtt_start;
	/**
	 * Ending offset in the GTT to be managed by the DRM memory
	 * manager.
	 */
	__u64 gtt_end;
};

struct drm_i915_gem_create {
	/**
	 * Requested size for the object.
	 *
	 * The (page-aligned) allocated size for the object will be returned.
	 */
	__u64 size;
	/**
	 * Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;
	__u32 pad;
};

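/*
 * Illustrative sketch (not part of the ABI): allocating a GEM object. The
 * kernel rounds the requested size up to a whole number of pages and
 * returns a per-file handle; "fd" is assumed to be an open DRM file
 * descriptor.
 *
 *	static __u32 gem_create(int fd, __u64 size)
 *	{
 *		struct drm_i915_gem_create create = { .size = size };
 *
 *		if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create))
 *			return 0;	// handles are nonzero, so 0 = failure
 *		return create.handle;
 *	}
 */
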
struct drm_i915_gem_pread {
	/** Handle for the object being read. */
	__u32 handle;
	__u32 pad;
	/** Offset into the object to read from */
	__u64 offset;
	/** Length of data to read */
	__u64 size;
	/**
	 * Pointer to write the data into.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 data_ptr;
};

struct drm_i915_gem_pwrite {
	/** Handle for the object being written to. */
	__u32 handle;
	__u32 pad;
	/** Offset into the object to write to */
	__u64 offset;
	/** Length of data to write */
	__u64 size;
	/**
	 * Pointer to read the data from.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 data_ptr;
};

struct drm_i915_gem_mmap {
	/** Handle for the object being mapped. */
	__u32 handle;
	__u32 pad;
	/** Offset in the object to map. */
	__u64 offset;
	/**
	 * Length of data to map.
	 *
	 * The value will be page-aligned.
	 */
	__u64 size;
	/**
	 * Returned pointer the data was mapped at.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 addr_ptr;

	/**
	 * Flags for extended behaviour.
	 *
	 * Added in version 2.
	 */
	__u64 flags;
#define I915_MMAP_WC 0x1
};

struct drm_i915_gem_mmap_gtt {
	/** Handle for the object being mapped. */
	__u32 handle;
	__u32 pad;
	/**
	 * Fake offset to use for subsequent mmap call
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 offset;
};

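/*
 * Illustrative sketch (not part of the ABI): mapping an object through the
 * GTT aperture is a two-step dance - the ioctl returns a fake offset, and
 * that offset is then passed to mmap(2) on the same DRM file descriptor.
 *
 *	#include <sys/mman.h>
 *
 *	static void *gem_mmap_gtt(int fd, __u32 handle, size_t size, int prot)
 *	{
 *		struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *		void *ptr;
 *
 *		if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
 *			return NULL;
 *		ptr = mmap(NULL, size, prot, MAP_SHARED, fd, arg.offset);
 *		return ptr == MAP_FAILED ? NULL : ptr;
 *	}
 */
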
struct drm_i915_gem_set_domain {
	/** Handle for the object */
	__u32 handle;

	/** New read domains */
	__u32 read_domains;

	/** New write domain */
	__u32 write_domain;
};

struct drm_i915_gem_sw_finish {
	/** Handle for the object */
	__u32 handle;
};

struct drm_i915_gem_relocation_entry {
	/**
	 * Handle of the buffer being pointed to by this relocation entry.
	 *
	 * It's appealing to make this an index into the mm_validate_entry
	 * list to refer to the buffer, but this allows the driver to create
	 * a relocation list for state buffers and not re-write it per
	 * exec using the buffer.
	 */
	__u32 target_handle;

	/**
	 * Value to be added to the offset of the target buffer to make up
	 * the relocation entry.
	 */
	__u32 delta;

	/** Offset in the buffer the relocation entry will be written into */
	__u64 offset;

	/**
	 * Offset value of the target buffer that the relocation entry was last
	 * written as.
	 *
	 * If the buffer has the same offset as last time, we can skip syncing
	 * and writing the relocation.  This value is written back out by
	 * the execbuffer ioctl when the relocation is written.
	 */
	__u64 presumed_offset;

	/**
	 * Target memory domains read by this operation.
	 */
	__u32 read_domains;

	/**
	 * Target memory domains written by this operation.
	 *
	 * Note that only one domain may be written by the whole
	 * execbuffer operation, so that where there are conflicts,
	 * the application will get -EINVAL back.
	 */
	__u32 write_domain;
};

/** @{
 * Intel memory domains
 *
 * Most of these just align with the various caches in
 * the system and are used to flush and invalidate as
 * objects end up cached in different domains.
 */
/** CPU cache */
#define I915_GEM_DOMAIN_CPU		0x00000001
/** Render cache, used by 2D and 3D drawing */
#define I915_GEM_DOMAIN_RENDER		0x00000002
/** Sampler cache, used by texture engine */
#define I915_GEM_DOMAIN_SAMPLER		0x00000004
/** Command queue, used to load batch buffers */
#define I915_GEM_DOMAIN_COMMAND		0x00000008
/** Instruction cache, used by shader programs */
#define I915_GEM_DOMAIN_INSTRUCTION	0x00000010
/** Vertex address cache */
#define I915_GEM_DOMAIN_VERTEX		0x00000020
/** GTT domain - aperture and scanout */
#define I915_GEM_DOMAIN_GTT		0x00000040
/** @} */

struct drm_i915_gem_exec_object {
	/**
	 * User's handle for a buffer to be bound into the GTT for this
	 * operation.
	 */
	__u32 handle;

	/** Number of relocations to be performed on this buffer */
	__u32 relocation_count;
	/**
	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
	 * the relocations to be performed in this buffer.
	 */
	__u64 relocs_ptr;

	/** Required alignment in graphics aperture */
	__u64 alignment;

	/**
	 * Returned value of the updated offset of the object, for future
	 * presumed_offset writes.
	 */
	__u64 offset;
};

struct drm_i915_gem_execbuffer {
	/**
	 * List of buffers to be validated with their relocations to be
	 * performed on them.
	 *
	 * This is a pointer to an array of struct drm_i915_gem_validate_entry.
	 *
	 * These buffers must be listed in an order such that all relocations
	 * a buffer is performing refer to buffers that have already appeared
	 * in the validate list.
	 */
	__u64 buffers_ptr;
	__u32 buffer_count;

	/** Offset in the batchbuffer to start execution from. */
	__u32 batch_start_offset;
	/** Bytes used in batchbuffer from batch_start_offset */
	__u32 batch_len;
	__u32 DR1;
	__u32 DR4;
	__u32 num_cliprects;
	/** This is a struct drm_clip_rect *cliprects */
	__u64 cliprects_ptr;
};

struct drm_i915_gem_exec_object2 {
	/**
	 * User's handle for a buffer to be bound into the GTT for this
	 * operation.
	 */
	__u32 handle;

	/** Number of relocations to be performed on this buffer */
	__u32 relocation_count;
	/**
	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
	 * the relocations to be performed in this buffer.
	 */
	__u64 relocs_ptr;

	/** Required alignment in graphics aperture */
	__u64 alignment;

	/**
	 * When the EXEC_OBJECT_PINNED flag is specified this is populated by
	 * the user with the GTT offset at which this object will be pinned.
	 * When the I915_EXEC_NO_RELOC flag is specified this must contain the
	 * presumed_offset of the object.
	 * During execbuffer2 the kernel populates it with the value of the
	 * current GTT offset of the object, for future presumed_offset writes.
	 */
	__u64 offset;

#define EXEC_OBJECT_NEEDS_FENCE		 (1<<0)
#define EXEC_OBJECT_NEEDS_GTT		 (1<<1)
#define EXEC_OBJECT_WRITE		 (1<<2)
#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
#define EXEC_OBJECT_PINNED		 (1<<4)
#define EXEC_OBJECT_PAD_TO_SIZE		 (1<<5)
/* The kernel implicitly tracks GPU activity on all GEM objects, and
 * synchronises operations with outstanding rendering. This includes
 * rendering on other devices if exported via dma-buf. However, sometimes
 * this tracking is too coarse and the user knows better. For example,
 * if the object is split into non-overlapping ranges shared between different
 * clients or engines (i.e. suballocating objects), the implicit tracking
 * by the kernel assumes that each operation affects the whole object rather
 * than an individual range, causing needless synchronisation between clients.
 * The kernel will also forgo any CPU cache flushes prior to rendering from
 * the object as the client is expected to be also handling such domain
 * tracking.
 *
 * The kernel maintains the implicit tracking in order to manage resources
 * used by the GPU - this flag only disables the synchronisation prior to
 * rendering with this object in this execbuf.
 *
 * Opting out of implicit synchronisation requires the user to do its own
 * explicit tracking to avoid rendering corruption. See, for example,
 * I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously.
 */
#define EXEC_OBJECT_ASYNC		(1<<6)
/* All remaining bits are MBZ and RESERVED FOR FUTURE USE */
#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_ASYNC<<1)
	__u64 flags;

	union {
		__u64 rsvd1;
		__u64 pad_to_size;
	};
	__u64 rsvd2;
};

struct drm_i915_gem_execbuffer2 {
	/**
	 * List of gem_exec_object2 structs
	 */
	__u64 buffers_ptr;
	__u32 buffer_count;

	/** Offset in the batchbuffer to start execution from. */
	__u32 batch_start_offset;
	/** Bytes used in batchbuffer from batch_start_offset */
	__u32 batch_len;
	__u32 DR1;
	__u32 DR4;
	__u32 num_cliprects;
	/** This is a struct drm_clip_rect *cliprects */
	__u64 cliprects_ptr;
#define I915_EXEC_RING_MASK              (7<<0)
#define I915_EXEC_DEFAULT                (0<<0)
#define I915_EXEC_RENDER                 (1<<0)
#define I915_EXEC_BSD                    (2<<0)
#define I915_EXEC_BLT                    (3<<0)
#define I915_EXEC_VEBOX                  (4<<0)

/* Used for switching the constants addressing mode on gen4+ RENDER ring.
 * Gen6+ only supports relative addressing to dynamic state (default) and
 * absolute addressing.
 *
 * These flags are ignored for the BSD and BLT rings.
 */
#define I915_EXEC_CONSTANTS_MASK 	(3<<6)
#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
#define I915_EXEC_CONSTANTS_ABSOLUTE 	(1<<6)
#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
	__u64 flags;
	__u64 rsvd1; /* now used for context info */
	__u64 rsvd2;
};

/** Resets the SO write offset registers for transform feedback on gen7. */
#define I915_EXEC_GEN7_SOL_RESET	(1<<8)

/** Request a privileged ("secure") batch buffer. Note only available for
 * DRM_ROOT_ONLY | DRM_MASTER processes.
 */
#define I915_EXEC_SECURE		(1<<9)

/** Inform the kernel that the batch is and will always be pinned. This
 * negates the requirement for a workaround to be performed to avoid
 * an incoherent CS (such as can be found on 830/845). If this flag is
 * not passed, the kernel will endeavour to make sure the batch is
 * coherent with the CS before execution. If this flag is passed,
 * userspace assumes the responsibility for ensuring the same.
 */
#define I915_EXEC_IS_PINNED		(1<<10)

/** Provide a hint to the kernel that the command stream and auxiliary
 * state buffers already hold the correct presumed addresses and so the
 * relocation process may be skipped if no buffers need to be moved in
 * preparation for the execbuffer.
 */
#define I915_EXEC_NO_RELOC		(1<<11)

/** Use the reloc.handle as an index into the exec object array rather
 * than as the per-file handle.
 */
#define I915_EXEC_HANDLE_LUT		(1<<12)

/** Used for switching BSD rings on the platforms with two BSD rings */
#define I915_EXEC_BSD_SHIFT	 (13)
#define I915_EXEC_BSD_MASK	 (3 << I915_EXEC_BSD_SHIFT)
/* default ping-pong mode */
#define I915_EXEC_BSD_DEFAULT	 (0 << I915_EXEC_BSD_SHIFT)
#define I915_EXEC_BSD_RING1	 (1 << I915_EXEC_BSD_SHIFT)
#define I915_EXEC_BSD_RING2	 (2 << I915_EXEC_BSD_SHIFT)

/** Tell the kernel that the batchbuffer is processed by
 *  the resource streamer.
 */
#define I915_EXEC_RESOURCE_STREAMER     (1<<15)

/* Setting I915_EXEC_FENCE_IN implies that lower_32_bits(rsvd2) represent
 * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
 * the batch.
 *
 * Returns -EINVAL if the sync_file fd cannot be found.
 */
#define I915_EXEC_FENCE_IN		(1<<16)

/* Setting I915_EXEC_FENCE_OUT causes the ioctl to return a sync_file fd
 * in the upper_32_bits(rsvd2) upon success. Ownership of the fd is given
 * to the caller, and it should be closed (via close()) after use. (The fd
 * is a regular file descriptor and will be cleaned up on process
 * termination. It holds a reference to the request, but nothing else.)
 *
 * The sync_file fd can be combined with other sync_files and passed either
 * to execbuf using I915_EXEC_FENCE_IN, to atomic KMS ioctls (so that a flip
 * will only occur after this request completes), or to other devices.
 *
 * Using I915_EXEC_FENCE_OUT requires use of the
 * DRM_IOCTL_I915_GEM_EXECBUFFER2_WR ioctl so that the result is written
 * back to userspace. Failure to do so will cause the out-fence to always
 * be reported as zero, and the real fence fd to be leaked.
 */
#define I915_EXEC_FENCE_OUT		(1<<17)

#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_OUT<<1))

#define I915_EXEC_CONTEXT_ID_MASK	(0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
	(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
#define i915_execbuffer2_get_context_id(eb2) \
	((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)

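/*
 * Illustrative sketch (not part of the ABI): a minimal submission of a
 * single batch object through EXECBUFFER2, on the default (render) engine
 * with the default context. "batch_handle" is assumed to name a GEM object
 * that already contains a valid batch ending in MI_BATCH_BUFFER_END, and
 * "batch_len" its used size in bytes.
 *
 *	#include <stdint.h>
 *
 *	static int gem_execbuf(int fd, __u32 batch_handle, __u32 batch_len)
 *	{
 *		struct drm_i915_gem_exec_object2 obj = {
 *			.handle = batch_handle,
 *		};
 *		struct drm_i915_gem_execbuffer2 execbuf = {
 *			.buffers_ptr = (__u64)(uintptr_t)&obj,
 *			.buffer_count = 1,
 *			.batch_len = batch_len,
 *			.flags = I915_EXEC_DEFAULT,
 *		};
 *
 *		return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf) ?
 *			-errno : 0;
 *	}
 */
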
struct drm_i915_gem_pin {
	/** Handle of the buffer to be pinned. */
	__u32 handle;
	__u32 pad;

	/** alignment required within the aperture */
	__u64 alignment;

	/** Returned GTT offset of the buffer. */
	__u64 offset;
};

struct drm_i915_gem_unpin {
	/** Handle of the buffer to be unpinned. */
	__u32 handle;
	__u32 pad;
};

struct drm_i915_gem_busy {
	/** Handle of the buffer to check for busy */
	__u32 handle;

	/** Return busy status
	 *
	 * A return of 0 implies that the object is idle (after
	 * having flushed any pending activity), and a non-zero return that
	 * the object is still in-flight on the GPU. (The GPU has not yet
	 * signaled completion for all pending requests that reference the
	 * object.) An object is guaranteed to become idle eventually (so
	 * long as no new GPU commands are executed upon it). Due to the
	 * asynchronous nature of the hardware, an object reported
	 * as busy may become idle before the ioctl is completed.
	 *
	 * Furthermore, if the object is busy, which engine is busy is only
	 * provided as a guide. There are race conditions which prevent the
	 * report of which engines are busy from being always accurate.
	 * However, the converse is not true. If the object is idle, the
	 * result of the ioctl, that all engines are idle, is accurate.
	 *
	 * The returned dword is split into two fields to indicate both
	 * the engines on which the object is being read, and the
	 * engine on which it is currently being written (if any).
	 *
	 * The low word (bits 0:15) indicates if the object is being written
	 * to by any engine (there can only be one, as the GEM implicit
	 * synchronisation rules force writes to be serialised). Only the
	 * engine for the last write is reported.
	 *
	 * The high word (bits 16:31) is a bitmask of which engines are
	 * currently reading from the object. Multiple engines may be
	 * reading from the object simultaneously.
	 *
	 * The value of each engine is the same as specified in the
	 * EXECBUFFER2 ioctl, i.e. I915_EXEC_RENDER, I915_EXEC_BSD etc.
	 * Note I915_EXEC_DEFAULT is a symbolic value and is mapped to
	 * the I915_EXEC_RENDER engine for execution, and so it is never
	 * reported as active itself. Some hardware may have parallel
	 * execution engines, e.g. multiple media engines, which are
	 * mapped to the same identifier in the EXECBUFFER2 ioctl and
	 * so are not separately reported for busyness.
	 *
	 * Caveat emptor:
	 * Only the boolean result of this query is reliable; that is whether
	 * the object is idle or busy. The report of which engines are busy
	 * should be only used as a heuristic.
	 */
	__u32 busy;
};

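/*
 * Illustrative sketch (not part of the ABI): decoding the busy field as
 * described above. The helper name and out-parameters are hypothetical.
 *
 *	static int gem_is_busy(int fd, __u32 handle, __u32 *write_engine,
 *			       __u32 *read_engines)
 *	{
 *		struct drm_i915_gem_busy busy = { .handle = handle };
 *
 *		if (ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy))
 *			return -errno;
 *		*write_engine = busy.busy & 0xffff;	// last writer, if any
 *		*read_engines = busy.busy >> 16;	// bitmask of readers
 *		return busy.busy != 0;	// only this boolean is reliable
 *	}
 */
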
/**
 * I915_CACHING_NONE
 *
 * GPU access is not coherent with cpu caches. Default for machines without an
 * LLC.
 */
#define I915_CACHING_NONE		0
/**
 * I915_CACHING_CACHED
 *
 * GPU access is coherent with cpu caches and furthermore the data is cached in
 * last-level caches shared between cpu cores and the gpu GT. Default on
 * machines with HAS_LLC.
 */
#define I915_CACHING_CACHED		1
/**
 * I915_CACHING_DISPLAY
 *
 * Special GPU caching mode which is coherent with the scanout engines.
 * Transparently falls back to I915_CACHING_NONE on platforms where no special
 * cache mode (like write-through or gfdt flushing) is available. The kernel
 * automatically sets this mode when using a buffer as a scanout target.
 * Userspace can manually set this mode to avoid a costly stall and clflush in
 * the hotpath of drawing the first frame.
 */
#define I915_CACHING_DISPLAY		2

struct drm_i915_gem_caching {
	/**
	 * Handle of the buffer to set/get the caching level of. */
	__u32 handle;

	/**
	 * Caching level to apply or return value
	 *
	 * bits0-15 are for generic caching control (i.e. the above defined
	 * values). bits16-31 are reserved for platform-specific variations
	 * (e.g. l3$ caching on gen7). */
	__u32 caching;
};

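/*
 * Illustrative sketch (not part of the ABI): requesting LLC caching for an
 * object, for instance before streaming data into it with the CPU.
 *
 *	static int gem_set_caching(int fd, __u32 handle, __u32 level)
 *	{
 *		struct drm_i915_gem_caching arg = {
 *			.handle = handle,
 *			.caching = level,	// e.g. I915_CACHING_CACHED
 *		};
 *
 *		return ioctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg) ?
 *			-errno : 0;
 *	}
 */
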
#define I915_TILING_NONE	0
#define I915_TILING_X		1
#define I915_TILING_Y		2
#define I915_TILING_LAST	I915_TILING_Y

#define I915_BIT_6_SWIZZLE_NONE		0
#define I915_BIT_6_SWIZZLE_9		1
#define I915_BIT_6_SWIZZLE_9_10		2
#define I915_BIT_6_SWIZZLE_9_11		3
#define I915_BIT_6_SWIZZLE_9_10_11	4
/* Not seen by userland */
#define I915_BIT_6_SWIZZLE_UNKNOWN	5
/* Seen by userland. */
#define I915_BIT_6_SWIZZLE_9_17		6
#define I915_BIT_6_SWIZZLE_9_10_17	7

struct drm_i915_gem_set_tiling {
	/** Handle of the buffer to have its tiling state updated */
	__u32 handle;

	/**
	 * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
	 * I915_TILING_Y).
	 *
	 * This value is to be set on request, and will be updated by the
	 * kernel on successful return with the actual chosen tiling layout.
	 *
	 * The tiling mode may be demoted to I915_TILING_NONE when the system
	 * has bit 6 swizzling that can't be managed correctly by GEM.
	 *
	 * Buffer contents become undefined when changing tiling_mode.
	 */
	__u32 tiling_mode;

	/**
	 * Stride in bytes for the object when in I915_TILING_X or
	 * I915_TILING_Y.
	 */
	__u32 stride;

	/**
	 * Returned address bit 6 swizzling required for CPU access through
	 * mmap mapping.
	 */
	__u32 swizzle_mode;
};

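/*
 * Illustrative sketch (not part of the ABI): asking for X-tiling with a
 * given stride. Because the kernel may demote the request, the caller
 * should check the tiling_mode written back (and, in robust code, retry
 * the ioctl on EINTR, e.g. via libdrm's drmIoctl()).
 *
 *	static int gem_set_tiling_x(int fd, __u32 handle, __u32 stride)
 *	{
 *		struct drm_i915_gem_set_tiling arg = {
 *			.handle = handle,
 *			.tiling_mode = I915_TILING_X,
 *			.stride = stride,
 *		};
 *
 *		if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &arg))
 *			return -errno;
 *		return arg.tiling_mode == I915_TILING_X ? 0 : -1;
 *	}
 */
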
struct drm_i915_gem_get_tiling {
	/** Handle of the buffer to get tiling state for. */
	__u32 handle;

	/**
	 * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
	 * I915_TILING_Y).
	 */
	__u32 tiling_mode;

	/**
	 * Returned address bit 6 swizzling required for CPU access through
	 * mmap mapping.
	 */
	__u32 swizzle_mode;

	/**
	 * Returned address bit 6 swizzling required for CPU access through
	 * mmap mapping whilst bound.
	 */
	__u32 phys_swizzle_mode;
};

struct drm_i915_gem_get_aperture {
	/** Total size of the aperture used by i915_gem_execbuffer, in bytes */
	__u64 aper_size;

	/**
	 * Available space in the aperture used by i915_gem_execbuffer, in
	 * bytes
	 */
	__u64 aper_available_size;
};

struct drm_i915_get_pipe_from_crtc_id {
	/** ID of CRTC being requested **/
	__u32 crtc_id;

	/** pipe of requested CRTC **/
	__u32 pipe;
};

#define I915_MADV_WILLNEED 0
#define I915_MADV_DONTNEED 1
#define __I915_MADV_PURGED 2 /* internal state */

struct drm_i915_gem_madvise {
	/** Handle of the buffer to change the backing store advice */
	__u32 handle;

	/* Advice: either the buffer will be needed again in the near future,
	 *         or won't be and could be discarded under memory pressure.
	 */
	__u32 madv;

	/** Whether the backing store still exists. */
	__u32 retained;
};

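/*
 * Illustrative sketch (not part of the ABI): marking an object as purgeable
 * when it goes onto a userspace BO cache, and checking whether it survived
 * when taking it back out. If "retained" comes back 0 the backing storage
 * was purged and the old contents are gone.
 *
 *	static int gem_madvise(int fd, __u32 handle, __u32 advice)
 *	{
 *		struct drm_i915_gem_madvise arg = {
 *			.handle = handle,
 *			.madv = advice,	// I915_MADV_DONTNEED or _WILLNEED
 *		};
 *
 *		if (ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg))
 *			return -errno;
 *		return arg.retained;
 *	}
 */
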
111622944501Smrg/* flags */
111722944501Smrg#define I915_OVERLAY_TYPE_MASK 		0xff
111822944501Smrg#define I915_OVERLAY_YUV_PLANAR 	0x01
111922944501Smrg#define I915_OVERLAY_YUV_PACKED 	0x02
112022944501Smrg#define I915_OVERLAY_RGB		0x03
112122944501Smrg
112222944501Smrg#define I915_OVERLAY_DEPTH_MASK		0xff00
112322944501Smrg#define I915_OVERLAY_RGB24		0x1000
112422944501Smrg#define I915_OVERLAY_RGB16		0x2000
112522944501Smrg#define I915_OVERLAY_RGB15		0x3000
112622944501Smrg#define I915_OVERLAY_YUV422		0x0100
112722944501Smrg#define I915_OVERLAY_YUV411		0x0200
112822944501Smrg#define I915_OVERLAY_YUV420		0x0300
112922944501Smrg#define I915_OVERLAY_YUV410		0x0400
113022944501Smrg
113122944501Smrg#define I915_OVERLAY_SWAP_MASK		0xff0000
113222944501Smrg#define I915_OVERLAY_NO_SWAP		0x000000
113322944501Smrg#define I915_OVERLAY_UV_SWAP		0x010000
113422944501Smrg#define I915_OVERLAY_Y_SWAP		0x020000
113522944501Smrg#define I915_OVERLAY_Y_AND_UV_SWAP	0x030000
113622944501Smrg
113722944501Smrg#define I915_OVERLAY_FLAGS_MASK		0xff000000
113822944501Smrg#define I915_OVERLAY_ENABLE		0x01000000
113922944501Smrg
114022944501Smrgstruct drm_intel_overlay_put_image {
114122944501Smrg	/* various flags and src format description */
114222944501Smrg	__u32 flags;
114322944501Smrg	/* source picture description */
114422944501Smrg	__u32 bo_handle;
114522944501Smrg	/* stride values and offsets are in bytes, buffer relative */
114622944501Smrg	__u16 stride_Y; /* stride for packed formats */
114722944501Smrg	__u16 stride_UV;
114822944501Smrg	__u32 offset_Y; /* offset for packed formats */
114922944501Smrg	__u32 offset_U;
115022944501Smrg	__u32 offset_V;
115122944501Smrg	/* in pixels */
115222944501Smrg	__u16 src_width;
115322944501Smrg	__u16 src_height;
115422944501Smrg	/* to compensate the scaling factors for partially covered surfaces */
115522944501Smrg	__u16 src_scan_width;
115622944501Smrg	__u16 src_scan_height;
115722944501Smrg	/* output crtc description */
115822944501Smrg	__u32 crtc_id;
115922944501Smrg	__u16 dst_x;
116022944501Smrg	__u16 dst_y;
116122944501Smrg	__u16 dst_width;
116222944501Smrg	__u16 dst_height;
116322944501Smrg};
116422944501Smrg
116522944501Smrg/* flags */
116622944501Smrg#define I915_OVERLAY_UPDATE_ATTRS	(1<<0)
116722944501Smrg#define I915_OVERLAY_UPDATE_GAMMA	(1<<1)
1168fe517fc9Smrg#define I915_OVERLAY_DISABLE_DEST_COLORKEY	(1<<2)
116922944501Smrgstruct drm_intel_overlay_attrs {
117022944501Smrg	__u32 flags;
117122944501Smrg	__u32 color_key;
117222944501Smrg	__s32 brightness;
117322944501Smrg	__u32 contrast;
117422944501Smrg	__u32 saturation;
117522944501Smrg	__u32 gamma0;
117622944501Smrg	__u32 gamma1;
117722944501Smrg	__u32 gamma2;
117822944501Smrg	__u32 gamma3;
117922944501Smrg	__u32 gamma4;
118022944501Smrg	__u32 gamma5;
118122944501Smrg};
118222944501Smrg
1183e88f27b3Smrg/*
1184e88f27b3Smrg * Intel sprite handling
1185e88f27b3Smrg *
1186e88f27b3Smrg * Color keying works with a min/mask/max tuple.  Both source and destination
1187e88f27b3Smrg * color keying are supported.
1188e88f27b3Smrg *
1189e88f27b3Smrg * Source keying:
1190e88f27b3Smrg * Sprite pixels within the min & max values, masked against the color channels
1191e88f27b3Smrg * specified in the mask field, will be transparent.  All other pixels will
1192e88f27b3Smrg * be displayed on top of the primary plane.  For RGB surfaces, only the min
1193e88f27b3Smrg * and mask fields will be used; ranged compares are not allowed.
1194e88f27b3Smrg *
1195e88f27b3Smrg * Destination keying:
1196e88f27b3Smrg * Primary plane pixels that match the min value, masked against the color
1197e88f27b3Smrg * channels specified in the mask field, will be replaced by corresponding
1198e88f27b3Smrg * pixels from the sprite plane.
1199e88f27b3Smrg *
1200e88f27b3Smrg * Note that source & destination keying are exclusive; only one can be
1201e88f27b3Smrg * active on a given plane.
1202e88f27b3Smrg */
1203e88f27b3Smrg
1204e88f27b3Smrg#define I915_SET_COLORKEY_NONE		(1<<0) /* disable color key matching */
1205e88f27b3Smrg#define I915_SET_COLORKEY_DESTINATION	(1<<1)
1206e88f27b3Smrg#define I915_SET_COLORKEY_SOURCE	(1<<2)
1207e88f27b3Smrgstruct drm_intel_sprite_colorkey {
1208e88f27b3Smrg	__u32 plane_id;
1209e88f27b3Smrg	__u32 min_value;
1210e88f27b3Smrg	__u32 channel_mask;
1211e88f27b3Smrg	__u32 max_value;
1212e88f27b3Smrg	__u32 flags;
1213e88f27b3Smrg};
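
/*
 * Illustrative sketch, not part of the uapi: enabling destination color
 * keying on a sprite plane, assuming "fd" is an open DRM fd, "plane_id" a
 * valid plane id and 0x00ff00ff a hypothetical key value in the primary
 * plane's pixel format:
 *
 *	struct drm_intel_sprite_colorkey ckey = {
 *		.plane_id = plane_id,
 *		.min_value = 0x00ff00ff,
 *		.channel_mask = 0x00ffffff,
 *		.flags = I915_SET_COLORKEY_DESTINATION,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_SET_SPRITE_COLORKEY, &ckey);
 */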
1214e88f27b3Smrg
1215e88f27b3Smrgstruct drm_i915_gem_wait {
1216e88f27b3Smrg	/** Handle of BO we shall wait on */
1217e88f27b3Smrg	__u32 bo_handle;
1218e88f27b3Smrg	__u32 flags;
1219e88f27b3Smrg	/** Number of nanoseconds to wait; updated on return with the time remaining. */
1220e88f27b3Smrg	__s64 timeout_ns;
1221e88f27b3Smrg};
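
/*
 * Illustrative sketch, not part of the uapi: waiting up to one second for
 * the GPU to finish with a buffer, assuming "fd" and "handle" as above:
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 1000000000ll,	// 1s; a negative value waits forever
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) == 0) {
 *		// the object is idle; wait.timeout_ns holds the time remaining
 *	} else if (errno == ETIME) {
 *		// the timeout expired before the object became idle
 *	}
 */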
1222e88f27b3Smrg
1223e88f27b3Smrgstruct drm_i915_gem_context_create {
1224e88f27b3Smrg	/* Output: ID of the new context */
1225e88f27b3Smrg	__u32 ctx_id;
1226e88f27b3Smrg	__u32 pad;
1227e88f27b3Smrg};
1228e88f27b3Smrg
1229e88f27b3Smrgstruct drm_i915_gem_context_destroy {
1230e88f27b3Smrg	__u32 ctx_id;
1231e88f27b3Smrg	__u32 pad;
1232e88f27b3Smrg};
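
/*
 * Illustrative sketch, not part of the uapi: creating a hardware context
 * and destroying it again, assuming "fd" is an open DRM fd:
 *
 *	struct drm_i915_gem_context_create create = { 0 };
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create) == 0) {
 *		// create.ctx_id can now be used with execbuffer2 (rsvd1)
 *		struct drm_i915_gem_context_destroy destroy = {
 *			.ctx_id = create.ctx_id,
 *		};
 *		ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
 *	}
 */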
1233e88f27b3Smrg
1234e88f27b3Smrgstruct drm_i915_reg_read {
1235fe517fc9Smrg	/*
1236fe517fc9Smrg	 * Register offset.
1237fe517fc9Smrg	 * For 64-bit wide registers where the upper 32 bits don't immediately
1238fe517fc9Smrg	 * follow the lower 32 bits, the offset of the lower 32 bits must
1239fe517fc9Smrg	 * be specified.
1240fe517fc9Smrg	 */
1241e88f27b3Smrg	__u64 offset;
1242e88f27b3Smrg	__u64 val; /* Return value */
1243e88f27b3Smrg};
1244fe517fc9Smrg/* Known registers:
1245fe517fc9Smrg *
1246fe517fc9Smrg * Render engine timestamp - 0x2358 + 64-bit - gen7+
1247fe517fc9Smrg * - Note this register returns an invalid value if read with the default
1248fe517fc9Smrg *   single-instruction 8-byte read; to work around that, use
1249fe517fc9Smrg *   offset (0x2358 | 1) instead.
1250fe517fc9Smrg *
1251fe517fc9Smrg */
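
/*
 * Illustrative sketch, not part of the uapi: sampling the render engine
 * timestamp via the workaround offset described above, assuming "fd" is an
 * open DRM fd on a gen7+ device:
 *
 *	struct drm_i915_reg_read reg = {
 *		.offset = 0x2358 | 1,	// low dword offset with the flag bit set
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_REG_READ, &reg) == 0) {
 *		// reg.val now holds the full 64-bit timestamp
 *	}
 */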
1252e88f27b3Smrg
1253e88f27b3Smrgstruct drm_i915_reset_stats {
1254e88f27b3Smrg	__u32 ctx_id;
1255e88f27b3Smrg	__u32 flags;
1256e88f27b3Smrg
1257e88f27b3Smrg	/* All resets since boot/module reload, for all contexts */
1258e88f27b3Smrg	__u32 reset_count;
1259e88f27b3Smrg
1260e88f27b3Smrg	/* Number of batches lost when active in GPU, for this context */
1261e88f27b3Smrg	__u32 batch_active;
1262e88f27b3Smrg
1263e88f27b3Smrg	/* Number of batches lost pending for execution, for this context */
1264e88f27b3Smrg	__u32 batch_pending;
1265e88f27b3Smrg
1266e88f27b3Smrg	__u32 pad;
1267e88f27b3Smrg};
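
/*
 * Illustrative sketch, not part of the uapi: checking whether a context has
 * been affected by GPU resets, assuming "fd" and a context id "ctx":
 *
 *	struct drm_i915_reset_stats stats = { .ctx_id = ctx };
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats) == 0 &&
 *	    (stats.batch_active || stats.batch_pending)) {
 *		// this context lost work; the API above it may need to
 *		// report a device-lost condition to the application
 *	}
 */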
1268e88f27b3Smrg
1269baaff307Smrgstruct drm_i915_gem_userptr {
1270baaff307Smrg	__u64 user_ptr;
1271baaff307Smrg	__u64 user_size;
1272baaff307Smrg	__u32 flags;
1273baaff307Smrg#define I915_USERPTR_READ_ONLY 0x1
1274baaff307Smrg#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
1275baaff307Smrg	/**
1276424e9256Smrg	 * Returned handle for the object.
1277424e9256Smrg	 *
1278424e9256Smrg	 * Object handles are nonzero.
1279424e9256Smrg	 */
1280baaff307Smrg	__u32 handle;
1281baaff307Smrg};
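
/*
 * Illustrative sketch, not part of the uapi: wrapping an existing
 * page-aligned allocation in a GEM object, assuming "fd" as above:
 *
 *	size_t sz = 2 * 1024 * 1024;
 *	void *ptr = aligned_alloc(4096, sz);	// both address and size
 *						// must be page aligned
 *	struct drm_i915_gem_userptr up = {
 *		.user_ptr = (__u64)(uintptr_t)ptr,
 *		.user_size = sz,
 *		.flags = 0,	// or I915_USERPTR_READ_ONLY
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &up) == 0) {
 *		// up.handle behaves like any other GEM handle
 *	}
 */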
1282baaff307Smrg
1283424e9256Smrgstruct drm_i915_gem_context_param {
1284424e9256Smrg	__u32 ctx_id;
1285424e9256Smrg	__u32 size;
1286424e9256Smrg	__u64 param;
1287fe517fc9Smrg#define I915_CONTEXT_PARAM_BAN_PERIOD	0x1
1288fe517fc9Smrg#define I915_CONTEXT_PARAM_NO_ZEROMAP	0x2
1289fe517fc9Smrg#define I915_CONTEXT_PARAM_GTT_SIZE	0x3
12902ee35494Smrg#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE	0x4
12912ee35494Smrg#define I915_CONTEXT_PARAM_BANNABLE	0x5
1292424e9256Smrg	__u64 value;
1293424e9256Smrg};
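
/*
 * Illustrative sketch, not part of the uapi: querying the GTT size and
 * making a context non-bannable (which may require CAP_SYS_ADMIN),
 * assuming "fd" and a context id "ctx":
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx,
 *		.param = I915_CONTEXT_PARAM_GTT_SIZE,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p) == 0) {
 *		// p.value holds the total GTT size in bytes
 *	}
 *
 *	p.param = I915_CONTEXT_PARAM_BANNABLE;
 *	p.value = 0;
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */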
1294424e9256Smrg
12952ee35494Smrgenum drm_i915_oa_format {
12962ee35494Smrg	I915_OA_FORMAT_A13 = 1,
12972ee35494Smrg	I915_OA_FORMAT_A29,
12982ee35494Smrg	I915_OA_FORMAT_A13_B8_C8,
12992ee35494Smrg	I915_OA_FORMAT_B4_C8,
13002ee35494Smrg	I915_OA_FORMAT_A45_B8_C8,
13012ee35494Smrg	I915_OA_FORMAT_B4_C8_A16,
13022ee35494Smrg	I915_OA_FORMAT_C4_B8,
13032ee35494Smrg
13042ee35494Smrg	I915_OA_FORMAT_MAX	    /* non-ABI */
13052ee35494Smrg};
13062ee35494Smrg
13072ee35494Smrgenum drm_i915_perf_property_id {
13082ee35494Smrg	/**
13092ee35494Smrg	 * Open the stream for a specific context handle (as used with
13102ee35494Smrg	 * execbuffer2). A stream opened for a specific context this way
13112ee35494Smrg	 * won't typically require root privileges.
13122ee35494Smrg	 */
13132ee35494Smrg	DRM_I915_PERF_PROP_CTX_HANDLE = 1,
13142ee35494Smrg
13152ee35494Smrg	/**
13162ee35494Smrg	 * A value of 1 requests the inclusion of raw OA unit reports as
13172ee35494Smrg	 * part of stream samples.
13182ee35494Smrg	 */
13192ee35494Smrg	DRM_I915_PERF_PROP_SAMPLE_OA,
13202ee35494Smrg
13212ee35494Smrg	/**
13222ee35494Smrg	 * The value specifies which set of OA unit metrics should be
13232ee35494Smrg	 * configured, defining the contents of any OA unit reports.
13242ee35494Smrg	 */
13252ee35494Smrg	DRM_I915_PERF_PROP_OA_METRICS_SET,
13262ee35494Smrg
13272ee35494Smrg	/**
13282ee35494Smrg	 * The value specifies the size and layout of OA unit reports.
13292ee35494Smrg	 */
13302ee35494Smrg	DRM_I915_PERF_PROP_OA_FORMAT,
13312ee35494Smrg
13322ee35494Smrg	/**
13332ee35494Smrg	 * Specifying this property implicitly requests periodic OA unit
13342ee35494Smrg	 * sampling and (at least on Haswell) the sampling period is derived
13352ee35494Smrg	 * from this exponent as follows:
13362ee35494Smrg	 *
13372ee35494Smrg	 *   80ns * 2^(period_exponent + 1)
13382ee35494Smrg	 */
13392ee35494Smrg	DRM_I915_PERF_PROP_OA_EXPONENT,
13402ee35494Smrg
13412ee35494Smrg	DRM_I915_PERF_PROP_MAX /* non-ABI */
13422ee35494Smrg};
13432ee35494Smrg
13442ee35494Smrgstruct drm_i915_perf_open_param {
13452ee35494Smrg	__u32 flags;
13462ee35494Smrg#define I915_PERF_FLAG_FD_CLOEXEC	(1<<0)
13472ee35494Smrg#define I915_PERF_FLAG_FD_NONBLOCK	(1<<1)
13482ee35494Smrg#define I915_PERF_FLAG_DISABLED		(1<<2)
13492ee35494Smrg
13502ee35494Smrg	/** The number of u64 (id, value) pairs */
13512ee35494Smrg	__u32 num_properties;
13522ee35494Smrg
13532ee35494Smrg	/**
13542ee35494Smrg	 * Pointer to array of u64 (id, value) pairs configuring the stream
13552ee35494Smrg	 * to open.
13562ee35494Smrg	 */
13572ee35494Smrg	__u64 properties_ptr;
13582ee35494Smrg};
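
/*
 * Illustrative sketch, not part of the uapi: opening an OA metrics stream
 * bound to a context, assuming "fd", a context id "ctx" and a metrics set
 * id "metrics_set" previously discovered from the kernel (both hypothetical
 * values here). With an exponent of 16 the periodic sampling interval is
 * 80ns * 2^(16 + 1), roughly 10.5ms:
 *
 *	uint64_t properties[] = {
 *		DRM_I915_PERF_PROP_CTX_HANDLE, ctx,
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A45_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC | I915_PERF_FLAG_DISABLED,
 *		.num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
 *		.properties_ptr = (__u64)(uintptr_t)properties,
 *	};
 *	int stream_fd = ioctl(fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *	// on success the return value is a new stream file descriptor
 */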
13592ee35494Smrg
13602ee35494Smrg/**
13612ee35494Smrg * Enable data capture for a stream that was either opened in a disabled state
13622ee35494Smrg * via I915_PERF_FLAG_DISABLED or was later disabled via
13632ee35494Smrg * I915_PERF_IOCTL_DISABLE.
13642ee35494Smrg *
13652ee35494Smrg * It is intended to be cheaper to disable and enable a stream than it may be
13662ee35494Smrg * to close and re-open a stream with the same configuration.
13672ee35494Smrg *
13682ee35494Smrg * It's undefined whether any pending data for the stream will be lost.
13692ee35494Smrg */
13702ee35494Smrg#define I915_PERF_IOCTL_ENABLE	_IO('i', 0x0)
13712ee35494Smrg
13722ee35494Smrg/**
13732ee35494Smrg * Disable data capture for a stream.
13742ee35494Smrg *
13752ee35494Smrg * It is an error to try to read a stream that is disabled.
13762ee35494Smrg */
13772ee35494Smrg#define I915_PERF_IOCTL_DISABLE	_IO('i', 0x1)
13782ee35494Smrg
13792ee35494Smrg/**
13802ee35494Smrg * Common to all i915 perf records
13812ee35494Smrg */
13822ee35494Smrgstruct drm_i915_perf_record_header {
13832ee35494Smrg	__u32 type;
13842ee35494Smrg	__u16 pad;
13852ee35494Smrg	__u16 size;
13862ee35494Smrg};
13872ee35494Smrg
13882ee35494Smrgenum drm_i915_perf_record_type {
13892ee35494Smrg
13902ee35494Smrg	/**
13912ee35494Smrg	 * Samples are the workhorse record type whose contents are extensible
13922ee35494Smrg	 * and defined when opening an i915 perf stream based on the given
13932ee35494Smrg	 * properties.
13942ee35494Smrg	 *
13952ee35494Smrg	 * Boolean properties following the naming convention
13962ee35494Smrg	 * DRM_I915_PERF_SAMPLE_xyz_PROP request the inclusion of 'xyz' data in
13972ee35494Smrg	 * every sample.
13982ee35494Smrg	 *
13992ee35494Smrg	 * The order of these sample properties given by userspace has no
14002ee35494Smrg	 * effect on the ordering of data within a sample. The order is
14012ee35494Smrg	 * documented here.
14022ee35494Smrg	 *
14032ee35494Smrg	 * struct {
14042ee35494Smrg	 *     struct drm_i915_perf_record_header header;
14052ee35494Smrg	 *
14062ee35494Smrg	 *     { u32 oa_report[]; } && DRM_I915_PERF_PROP_SAMPLE_OA
14072ee35494Smrg	 * };
14082ee35494Smrg	 */
14092ee35494Smrg	DRM_I915_PERF_RECORD_SAMPLE = 1,
14102ee35494Smrg
14112ee35494Smrg	/*
14122ee35494Smrg	 * Indicates that one or more OA reports were not written by the
14132ee35494Smrg	 * hardware. This can happen, for example, if an MI_REPORT_PERF_COUNT
14142ee35494Smrg	 * command collides with periodic sampling, which is more likely
14152ee35494Smrg	 * at higher sampling frequencies.
14162ee35494Smrg	 */
14172ee35494Smrg	DRM_I915_PERF_RECORD_OA_REPORT_LOST = 2,
14182ee35494Smrg
14192ee35494Smrg	/**
14202ee35494Smrg	 * An error occurred that resulted in all pending OA reports being lost.
14212ee35494Smrg	 */
14222ee35494Smrg	DRM_I915_PERF_RECORD_OA_BUFFER_LOST = 3,
14232ee35494Smrg
14242ee35494Smrg	DRM_I915_PERF_RECORD_MAX /* non-ABI */
14252ee35494Smrg};
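
/*
 * Illustrative sketch, not part of the uapi: enabling a stream that was
 * opened with I915_PERF_FLAG_DISABLED, reading a chunk of records and
 * walking them by header, assuming "stream_fd" was returned by
 * DRM_IOCTL_I915_PERF_OPEN:
 *
 *	uint8_t buf[4096];
 *	const struct drm_i915_perf_record_header *hdr;
 *
 *	ioctl(stream_fd, I915_PERF_IOCTL_ENABLE, 0);
 *	ssize_t len = read(stream_fd, buf, sizeof(buf));
 *
 *	for (ssize_t off = 0; off < len; off += hdr->size) {
 *		hdr = (const void *)(buf + off);
 *
 *		if (hdr->type == DRM_I915_PERF_RECORD_SAMPLE) {
 *			// the sample payload (e.g. the raw OA report)
 *			// immediately follows the header; hdr->size covers
 *			// both the header and the payload
 *		}
 *	}
 *	ioctl(stream_fd, I915_PERF_IOCTL_DISABLE, 0);
 */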
14262ee35494Smrg
14272ee35494Smrg#if defined(__cplusplus)
14282ee35494Smrg}
14292ee35494Smrg#endif
14302ee35494Smrg
1431e88f27b3Smrg#endif /* _I915_DRM_H_ */