1#ifndef _UAPI_MSM_KGSL_H
2#define _UAPI_MSM_KGSL_H
3
4#include <linux/types.h>
5#include <linux/ioctl.h>
6
7/*
8 * The KGSL version has proven not to be very useful in userspace if features
9 * are cherry picked into other trees out of order so it is frozen as of 3.14.
 * It is left here for backwards compatibility and as a reminder that
11 * software releases are never linear. Also, I like pie.
12 */
13
14#define KGSL_VERSION_MAJOR        3
15#define KGSL_VERSION_MINOR        14
16
17/*
18 * We have traditionally mixed context and issueibcmds / command batch flags
19 * together into a big flag stew. This worked fine until we started adding a
20 * lot more command batch flags and we started running out of bits. Turns out
21 * we have a bit of room in the context type / priority mask that we could use
22 * for command batches, but that means we need to split out the flags into two
23 * coherent sets.
24 *
25 * If any future definitions are for both context and cmdbatch add both defines
26 * and link the cmdbatch to the context define as we do below. Otherwise feel
27 * free to add exclusive bits to either set.
28 */
29
30/* --- context flags --- */
31#define KGSL_CONTEXT_SAVE_GMEM		0x00000001
32#define KGSL_CONTEXT_NO_GMEM_ALLOC	0x00000002
33/* This is a cmdbatch exclusive flag - use the CMDBATCH equivalent instead */
34#define KGSL_CONTEXT_SUBMIT_IB_LIST	0x00000004
35#define KGSL_CONTEXT_CTX_SWITCH		0x00000008
36#define KGSL_CONTEXT_PREAMBLE		0x00000010
37#define KGSL_CONTEXT_TRASH_STATE	0x00000020
38#define KGSL_CONTEXT_PER_CONTEXT_TS	0x00000040
39#define KGSL_CONTEXT_USER_GENERATED_TS	0x00000080
40/* This is a cmdbatch exclusive flag - use the CMDBATCH equivalent instead */
41#define KGSL_CONTEXT_END_OF_FRAME	0x00000100
42#define KGSL_CONTEXT_NO_FAULT_TOLERANCE 0x00000200
43/* This is a cmdbatch exclusive flag - use the CMDBATCH equivalent instead */
44#define KGSL_CONTEXT_SYNC               0x00000400
45#define KGSL_CONTEXT_PWR_CONSTRAINT     0x00000800
46#define KGSL_CONTEXT_PRIORITY_MASK      0x0000F000
47#define KGSL_CONTEXT_PRIORITY_SHIFT     12
48#define KGSL_CONTEXT_PRIORITY_UNDEF     0
49
50#define KGSL_CONTEXT_IFH_NOP            0x00010000
51#define KGSL_CONTEXT_SECURE             0x00020000
52#define KGSL_CONTEXT_NO_SNAPSHOT        0x00040000
53#define KGSL_CONTEXT_SPARSE             0x00080000
54
55#define KGSL_CONTEXT_PREEMPT_STYLE_MASK       0x0E000000
56#define KGSL_CONTEXT_PREEMPT_STYLE_SHIFT      25
57#define KGSL_CONTEXT_PREEMPT_STYLE_DEFAULT    0x0
58#define KGSL_CONTEXT_PREEMPT_STYLE_RINGBUFFER 0x1
59#define KGSL_CONTEXT_PREEMPT_STYLE_FINEGRAIN  0x2
60
61#define KGSL_CONTEXT_TYPE_MASK          0x01F00000
62#define KGSL_CONTEXT_TYPE_SHIFT         20
63#define KGSL_CONTEXT_TYPE_ANY		0
64#define KGSL_CONTEXT_TYPE_GL		1
65#define KGSL_CONTEXT_TYPE_CL		2
66#define KGSL_CONTEXT_TYPE_C2D		3
67#define KGSL_CONTEXT_TYPE_RS		4
68#define KGSL_CONTEXT_TYPE_VK		5
69#define KGSL_CONTEXT_TYPE_UNKNOWN	0x1E
70
71#define KGSL_CONTEXT_INVALIDATE_ON_FAULT 0x10000000
72
73#define KGSL_CONTEXT_INVALID 0xffffffff
74
75/*
76 * --- command batch flags ---
77 * The bits that are linked to a KGSL_CONTEXT equivalent are either legacy
78 * definitions or bits that are valid for both contexts and cmdbatches.  To be
79 * safe the other 8 bits that are still available in the context field should be
80 * omitted here in case we need to share - the other bits are available for
81 * cmdbatch only flags as needed
82 */
83#define KGSL_CMDBATCH_MEMLIST		0x00000001
84#define KGSL_CMDBATCH_MARKER		0x00000002
85#define KGSL_CMDBATCH_SUBMIT_IB_LIST	KGSL_CONTEXT_SUBMIT_IB_LIST /* 0x004 */
86#define KGSL_CMDBATCH_CTX_SWITCH	KGSL_CONTEXT_CTX_SWITCH     /* 0x008 */
87#define KGSL_CMDBATCH_PROFILING		0x00000010
88/*
89 * KGSL_CMDBATCH_PROFILING must also be set for KGSL_CMDBATCH_PROFILING_KTIME
90 * to take effect, as the latter only affects the time data returned.
91 */
92#define KGSL_CMDBATCH_PROFILING_KTIME	0x00000020
93#define KGSL_CMDBATCH_END_OF_FRAME	KGSL_CONTEXT_END_OF_FRAME   /* 0x100 */
94#define KGSL_CMDBATCH_SYNC		KGSL_CONTEXT_SYNC           /* 0x400 */
95#define KGSL_CMDBATCH_PWR_CONSTRAINT	KGSL_CONTEXT_PWR_CONSTRAINT /* 0x800 */
96#define KGSL_CMDBATCH_SPARSE	    0x1000 /* 0x1000 */
97
98/*
99 * Reserve bits [16:19] and bits [28:31] for possible bits shared between
100 * contexts and command batches.  Update this comment as new flags are added.
101 */
102
103/*
104 * gpu_command_object flags - these flags communicate the type of command or
105 * memory object being submitted for a GPU command
106 */
107
108/* Flags for GPU command objects */
109#define KGSL_CMDLIST_IB                  0x00000001U
110#define KGSL_CMDLIST_CTXTSWITCH_PREAMBLE 0x00000002U
111#define KGSL_CMDLIST_IB_PREAMBLE         0x00000004U
112
113/* Flags for GPU command memory objects */
114#define KGSL_OBJLIST_MEMOBJ  0x00000008U
115#define KGSL_OBJLIST_PROFILE 0x00000010U
116
117/* Flags for GPU command sync points */
118#define KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP 0
119#define KGSL_CMD_SYNCPOINT_TYPE_FENCE 1
120
121/* --- Memory allocation flags --- */
122
123/* General allocation hints */
124#define KGSL_MEMFLAGS_SECURE      0x00000008ULL
125#define KGSL_MEMFLAGS_GPUREADONLY 0x01000000U
126#define KGSL_MEMFLAGS_GPUWRITEONLY 0x02000000U
127#define KGSL_MEMFLAGS_FORCE_32BIT 0x100000000ULL
128
129/* Flag for binding all the virt range to single phys data */
130#define KGSL_SPARSE_BIND_MULTIPLE_TO_PHYS 0x400000000ULL
131#define KGSL_SPARSE_BIND 0x1ULL
132#define KGSL_SPARSE_UNBIND 0x2ULL
133
134/* Memory caching hints */
135#define KGSL_CACHEMODE_MASK       0x0C000000U
136#define KGSL_CACHEMODE_SHIFT 26
137
138#define KGSL_CACHEMODE_WRITECOMBINE 0
139#define KGSL_CACHEMODE_UNCACHED 1
140#define KGSL_CACHEMODE_WRITETHROUGH 2
141#define KGSL_CACHEMODE_WRITEBACK 3
142
143#define KGSL_MEMFLAGS_USE_CPU_MAP 0x10000000ULL
144#define KGSL_MEMFLAGS_SPARSE_PHYS 0x20000000ULL
145#define KGSL_MEMFLAGS_SPARSE_VIRT 0x40000000ULL
146#define KGSL_MEMFLAGS_IOCOHERENT  0x80000000ULL
147
148/* Memory types for which allocations are made */
149#define KGSL_MEMTYPE_MASK		0x0000FF00
150#define KGSL_MEMTYPE_SHIFT		8
151
152#define KGSL_MEMTYPE_OBJECTANY			0
153#define KGSL_MEMTYPE_FRAMEBUFFER		1
154#define KGSL_MEMTYPE_RENDERBUFFER		2
155#define KGSL_MEMTYPE_ARRAYBUFFER		3
156#define KGSL_MEMTYPE_ELEMENTARRAYBUFFER		4
157#define KGSL_MEMTYPE_VERTEXARRAYBUFFER		5
158#define KGSL_MEMTYPE_TEXTURE			6
159#define KGSL_MEMTYPE_SURFACE			7
160#define KGSL_MEMTYPE_EGL_SURFACE		8
161#define KGSL_MEMTYPE_GL				9
162#define KGSL_MEMTYPE_CL				10
163#define KGSL_MEMTYPE_CL_BUFFER_MAP		11
164#define KGSL_MEMTYPE_CL_BUFFER_NOMAP		12
165#define KGSL_MEMTYPE_CL_IMAGE_MAP		13
166#define KGSL_MEMTYPE_CL_IMAGE_NOMAP		14
167#define KGSL_MEMTYPE_CL_KERNEL_STACK		15
168#define KGSL_MEMTYPE_COMMAND			16
169#define KGSL_MEMTYPE_2D				17
170#define KGSL_MEMTYPE_EGL_IMAGE			18
171#define KGSL_MEMTYPE_EGL_SHADOW			19
172#define KGSL_MEMTYPE_MULTISAMPLE		20
173#define KGSL_MEMTYPE_KERNEL			255
174
/*
 * Alignment hint, passed as the power of 2 exponent.
 * i.e. 4k (2^12) would be 12, 64k (2^16) would be 16.
 */
179#define KGSL_MEMALIGN_MASK		0x00FF0000
180#define KGSL_MEMALIGN_SHIFT		16
181
/*
 * enum kgsl_user_mem_type - origin of a userspace buffer handed to the GPU
 * via IOCTL_KGSL_MAP_USER_MEM (see struct kgsl_map_user_mem below).
 * Values are also encoded into memory flags via KGSL_USERMEM_FLAG().
 */
enum kgsl_user_mem_type {
	KGSL_USER_MEM_TYPE_PMEM		= 0x00000000, /* legacy pmem buffer */
	KGSL_USER_MEM_TYPE_ASHMEM	= 0x00000001, /* Android shared memory fd */
	KGSL_USER_MEM_TYPE_ADDR		= 0x00000002, /* raw user virtual address */
	KGSL_USER_MEM_TYPE_ION		= 0x00000003,
	/*
	 * ION type is retained for backwards compatibility but Ion buffers are
	 * dma-bufs so try to use that naming if we can
	 */
	KGSL_USER_MEM_TYPE_DMABUF       = 0x00000003,
	KGSL_USER_MEM_TYPE_MAX		= 0x00000007,
};
194#define KGSL_MEMFLAGS_USERMEM_MASK 0x000000e0
195#define KGSL_MEMFLAGS_USERMEM_SHIFT 5
196
197/*
198 * Unfortunately, enum kgsl_user_mem_type starts at 0 which does not
199 * leave a good value for allocated memory. In the flags we use
200 * 0 to indicate allocated memory and thus need to add 1 to the enum
201 * values.
202 */
203#define KGSL_USERMEM_FLAG(x) (((x) + 1) << KGSL_MEMFLAGS_USERMEM_SHIFT)
204
205#define KGSL_MEMFLAGS_NOT_USERMEM 0
206#define KGSL_MEMFLAGS_USERMEM_PMEM KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_PMEM)
207#define KGSL_MEMFLAGS_USERMEM_ASHMEM \
208		KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_ASHMEM)
209#define KGSL_MEMFLAGS_USERMEM_ADDR KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_ADDR)
210#define KGSL_MEMFLAGS_USERMEM_ION KGSL_USERMEM_FLAG(KGSL_USER_MEM_TYPE_ION)
211
212/* --- generic KGSL flag values --- */
213
214#define KGSL_FLAGS_NORMALMODE  0x00000000
215#define KGSL_FLAGS_SAFEMODE    0x00000001
216#define KGSL_FLAGS_INITIALIZED0 0x00000002
217#define KGSL_FLAGS_INITIALIZED 0x00000004
218#define KGSL_FLAGS_STARTED     0x00000008
219#define KGSL_FLAGS_ACTIVE      0x00000010
220#define KGSL_FLAGS_RESERVED0   0x00000020
221#define KGSL_FLAGS_RESERVED1   0x00000040
222#define KGSL_FLAGS_RESERVED2   0x00000080
223#define KGSL_FLAGS_SOFT_RESET  0x00000100
224#define KGSL_FLAGS_PER_CONTEXT_TIMESTAMPS 0x00000200
225
226/* Server Side Sync Timeout in milliseconds */
227#define KGSL_SYNCOBJ_SERVER_TIMEOUT 2000
228
229/* UBWC Modes */
230#define KGSL_UBWC_NONE	0
231#define KGSL_UBWC_1_0	1
232#define KGSL_UBWC_2_0	2
233#define KGSL_UBWC_3_0	3
234
235/*
236 * Reset status values for context
237 */
238enum kgsl_ctx_reset_stat {
239	KGSL_CTX_STAT_NO_ERROR				= 0x00000000,
240	KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT		= 0x00000001,
241	KGSL_CTX_STAT_INNOCENT_CONTEXT_RESET_EXT	= 0x00000002,
242	KGSL_CTX_STAT_UNKNOWN_CONTEXT_RESET_EXT		= 0x00000003
243};
244
/*
 * Convert a bandwidth value expressed in MB/s into bytes per second.
 * The argument is parenthesized so that expression arguments
 * (e.g. "a + b") are scaled as a whole rather than having the
 * multiplication bind only to the last operand.
 */
#define KGSL_CONVERT_TO_MBPS(val) \
	((val)*1000*1000U)
247
/*
 * enum kgsl_deviceid - logical KGSL device identifiers.
 * Only the 3D device is exposed; KGSL_DEVICE_MAX bounds the valid range.
 */
enum kgsl_deviceid {
	KGSL_DEVICE_3D0		= 0x00000000,
	KGSL_DEVICE_MAX
};
253
/* Device information returned for KGSL_PROP_DEVICE_INFO */
struct kgsl_devinfo {

	/* one of enum kgsl_deviceid */
	unsigned int device_id;
	/*
	 * chip revision id
	 * coreid:8 majorrev:8 minorrev:8 patch:8
	 */
	unsigned int chip_id;
	/* non-zero if the GPU MMU is enabled */
	unsigned int mmu_enabled;
	/* GPU base address of the on-chip GMEM region */
	unsigned long gmem_gpubaseaddr;
	/*
	 * This field contains the adreno revision
	 * number 200, 205, 220, etc...
	 */
	unsigned int gpu_id;
	/* size of on-chip GMEM in bytes */
	size_t gmem_sizebytes;
};
271
272/*
273 * struct kgsl_devmemstore - this structure defines the region of memory
274 * that can be mmap()ed from this driver. The timestamp fields are volatile
275 * because they are written by the GPU
276 * @soptimestamp: Start of pipeline timestamp written by GPU before the
277 * commands in concern are processed
278 * @sbz: Unused, kept for 8 byte alignment
279 * @eoptimestamp: End of pipeline timestamp written by GPU after the
280 * commands in concern are processed
281 * @sbz2: Unused, kept for 8 byte alignment
282 * @preempted: Indicates if the context was preempted
283 * @sbz3: Unused, kept for 8 byte alignment
284 * @ref_wait_ts: Timestamp on which to generate interrupt, unused now.
285 * @sbz4: Unused, kept for 8 byte alignment
286 * @current_context: The current context the GPU is working on
287 * @sbz5: Unused, kept for 8 byte alignment
288 */
289struct kgsl_devmemstore {
290	volatile unsigned int soptimestamp;
291	unsigned int sbz;
292	volatile unsigned int eoptimestamp;
293	unsigned int sbz2;
294	volatile unsigned int preempted;
295	unsigned int sbz3;
296	volatile unsigned int ref_wait_ts;
297	unsigned int sbz4;
298	unsigned int current_context;
299	unsigned int sbz5;
300};
301
302#define KGSL_MEMSTORE_OFFSET(ctxt_id, field) \
303	((ctxt_id)*sizeof(struct kgsl_devmemstore) + \
304	 offsetof(struct kgsl_devmemstore, field))
305
306/* timestamp id*/
307enum kgsl_timestamp_type {
308	KGSL_TIMESTAMP_CONSUMED = 0x00000001, /* start-of-pipeline timestamp */
309	KGSL_TIMESTAMP_RETIRED  = 0x00000002, /* end-of-pipeline timestamp*/
310	KGSL_TIMESTAMP_QUEUED   = 0x00000003,
311};
312
313/* property types - used with kgsl_device_getproperty */
314#define KGSL_PROP_DEVICE_INFO		0x1
315#define KGSL_PROP_DEVICE_SHADOW		0x2
316#define KGSL_PROP_DEVICE_POWER		0x3
317#define KGSL_PROP_SHMEM			0x4
318#define KGSL_PROP_SHMEM_APERTURES	0x5
319#define KGSL_PROP_MMU_ENABLE		0x6
320#define KGSL_PROP_INTERRUPT_WAITS	0x7
321#define KGSL_PROP_VERSION		0x8
322#define KGSL_PROP_GPU_RESET_STAT	0x9
323#define KGSL_PROP_PWRCTRL		0xE
324#define KGSL_PROP_PWR_CONSTRAINT	0x12
325#define KGSL_PROP_UCHE_GMEM_VADDR	0x13
326#define KGSL_PROP_SP_GENERIC_MEM	0x14
327#define KGSL_PROP_UCODE_VERSION		0x15
328#define KGSL_PROP_GPMU_VERSION		0x16
329#define KGSL_PROP_HIGHEST_BANK_BIT	0x17
330#define KGSL_PROP_DEVICE_BITNESS	0x18
331#define KGSL_PROP_DEVICE_QDSS_STM	0x19
332#define KGSL_PROP_MIN_ACCESS_LENGTH	0x1A
333#define KGSL_PROP_UBWC_MODE		0x1B
334#define KGSL_PROP_DEVICE_QTIMER		0x20
335#define KGSL_PROP_L3_PWR_CONSTRAINT     0x22
336#define KGSL_PROP_SECURE_BUFFER_ALIGNMENT 0x23
337#define KGSL_PROP_SECURE_CTXT_SUPPORT 0x24
338#define KGSL_PROP_SPEED_BIN		0x25
339#define KGSL_PROP_GAMING_BIN		0x26
340#define KGSL_PROP_CONTEXT_PROPERTY	0x28
341
342
/*
 * Returned for KGSL_PROP_DEVICE_SHADOW: describes the memstore region that
 * userspace can mmap() to read GPU timestamps (see the getproperty comment
 * further below for usage).
 */
struct kgsl_shadowprop {
	unsigned long gpuaddr;
	size_t size;
	unsigned int flags; /* contains KGSL_FLAGS_ values */
};
348
/* Returned for KGSL_PROP_DEVICE_QDSS_STM: GPU mapping of the QDSS STM region */
struct kgsl_qdss_stm_prop {
	uint64_t gpuaddr;
	uint64_t size;
};

/* Returned for KGSL_PROP_DEVICE_QTIMER: GPU mapping of the QTimer region */
struct kgsl_qtimer_prop {
	uint64_t gpuaddr;
	uint64_t size;
};
358
/*
 * Driver and device version numbers returned for KGSL_PROP_VERSION
 * (see KGSL_VERSION_MAJOR/MINOR at the top of this file).
 */
struct kgsl_version {
	unsigned int drv_major;
	unsigned int drv_minor;
	unsigned int dev_major;
	unsigned int dev_minor;
};
365
366struct kgsl_sp_generic_mem {
367	uint64_t local;
368	uint64_t pvt;
369};
370
371struct kgsl_ucode_version {
372	unsigned int pfp;
373	unsigned int pm4;
374};
375
376struct kgsl_gpmu_version {
377	unsigned int major;
378	unsigned int minor;
379	unsigned int features;
380};
381
382struct kgsl_context_property {
383	__u64 data;
384	__u32 size;
385	__u32 type;
386	__u32 contextid;
387};
388
389struct kgsl_context_property_fault {
390	__s32 faults;
391	__u32 timestamp;
392};
393
394/* Context property sub types */
395#define KGSL_CONTEXT_PROP_FAULTS 1
396
397/* Performance counter groups */
398
399#define KGSL_PERFCOUNTER_GROUP_CP 0x0
400#define KGSL_PERFCOUNTER_GROUP_RBBM 0x1
401#define KGSL_PERFCOUNTER_GROUP_PC 0x2
402#define KGSL_PERFCOUNTER_GROUP_VFD 0x3
403#define KGSL_PERFCOUNTER_GROUP_HLSQ 0x4
404#define KGSL_PERFCOUNTER_GROUP_VPC 0x5
405#define KGSL_PERFCOUNTER_GROUP_TSE 0x6
406#define KGSL_PERFCOUNTER_GROUP_RAS 0x7
407#define KGSL_PERFCOUNTER_GROUP_UCHE 0x8
408#define KGSL_PERFCOUNTER_GROUP_TP 0x9
409#define KGSL_PERFCOUNTER_GROUP_SP 0xA
410#define KGSL_PERFCOUNTER_GROUP_RB 0xB
411#define KGSL_PERFCOUNTER_GROUP_PWR 0xC
412#define KGSL_PERFCOUNTER_GROUP_VBIF 0xD
413#define KGSL_PERFCOUNTER_GROUP_VBIF_PWR 0xE
414#define KGSL_PERFCOUNTER_GROUP_MH 0xF
415#define KGSL_PERFCOUNTER_GROUP_PA_SU 0x10
416#define KGSL_PERFCOUNTER_GROUP_SQ 0x11
417#define KGSL_PERFCOUNTER_GROUP_SX 0x12
418#define KGSL_PERFCOUNTER_GROUP_TCF 0x13
419#define KGSL_PERFCOUNTER_GROUP_TCM 0x14
420#define KGSL_PERFCOUNTER_GROUP_TCR 0x15
421#define KGSL_PERFCOUNTER_GROUP_L2 0x16
422#define KGSL_PERFCOUNTER_GROUP_VSC 0x17
423#define KGSL_PERFCOUNTER_GROUP_CCU 0x18
424#define KGSL_PERFCOUNTER_GROUP_LRZ 0x19
425#define KGSL_PERFCOUNTER_GROUP_CMP 0x1A
426#define KGSL_PERFCOUNTER_GROUP_ALWAYSON 0x1B
427#define KGSL_PERFCOUNTER_GROUP_SP_PWR 0x1C
428#define KGSL_PERFCOUNTER_GROUP_TP_PWR 0x1D
429#define KGSL_PERFCOUNTER_GROUP_RB_PWR 0x1E
430#define KGSL_PERFCOUNTER_GROUP_CCU_PWR 0x1F
431#define KGSL_PERFCOUNTER_GROUP_UCHE_PWR 0x20
432#define KGSL_PERFCOUNTER_GROUP_CP_PWR 0x21
433#define KGSL_PERFCOUNTER_GROUP_GPMU_PWR 0x22
434#define KGSL_PERFCOUNTER_GROUP_ALWAYSON_PWR 0x23
435#define KGSL_PERFCOUNTER_GROUP_MAX 0x24
436
437#define KGSL_PERFCOUNTER_NOT_USED 0xFFFFFFFF
438#define KGSL_PERFCOUNTER_BROKEN 0xFFFFFFFE
439
440/* structure holds list of ibs */
441struct kgsl_ibdesc {
442	unsigned long gpuaddr;
443	unsigned long __pad;
444	size_t sizedwords;
445	unsigned int ctrl;
446};
447
448/**
449 * struct kgsl_cmdbatch_profiling_buffer
450 * @wall_clock_s: Ringbuffer submission time (seconds).
451 *                If KGSL_CMDBATCH_PROFILING_KTIME is set, time is provided
452 *                in kernel clocks, otherwise wall clock time is used.
453 * @wall_clock_ns: Ringbuffer submission time (nanoseconds).
454 *                 If KGSL_CMDBATCH_PROFILING_KTIME is set time is provided
455 *                 in kernel clocks, otherwise wall clock time is used.
456 * @gpu_ticks_queued: GPU ticks at ringbuffer submission
457 * @gpu_ticks_submitted: GPU ticks when starting cmdbatch execution
458 * @gpu_ticks_retired: GPU ticks when finishing cmdbatch execution
459 *
460 * This structure defines the profiling buffer used to measure cmdbatch
461 * execution time
462 */
463struct kgsl_cmdbatch_profiling_buffer {
464	uint64_t wall_clock_s;
465	uint64_t wall_clock_ns;
466	uint64_t gpu_ticks_queued;
467	uint64_t gpu_ticks_submitted;
468	uint64_t gpu_ticks_retired;
469};
470
471/* ioctls */
472#define KGSL_IOC_TYPE 0x09
473
474/*
475 * get misc info about the GPU
476 * type should be a value from enum kgsl_property_type
477 * value points to a structure that varies based on type
478 * sizebytes is sizeof() that structure
479 * for KGSL_PROP_DEVICE_INFO, use struct kgsl_devinfo
 * this structure contains hardware versioning info.
481 * for KGSL_PROP_DEVICE_SHADOW, use struct kgsl_shadowprop
482 * this is used to find mmap() offset and sizes for mapping
483 * struct kgsl_memstore into userspace.
484 */
485struct kgsl_device_getproperty {
486	unsigned int type;
487	void __user *value;
488	size_t sizebytes;
489};
490
491#define IOCTL_KGSL_DEVICE_GETPROPERTY \
492	_IOWR(KGSL_IOC_TYPE, 0x2, struct kgsl_device_getproperty)
493
494/* IOCTL_KGSL_DEVICE_READ (0x3) - removed 03/2012
495 */
496
497/* block until the GPU has executed past a given timestamp
498 * timeout is in milliseconds.
499 */
500struct kgsl_device_waittimestamp {
501	unsigned int timestamp;
502	unsigned int timeout;
503};
504
505#define IOCTL_KGSL_DEVICE_WAITTIMESTAMP \
506	_IOW(KGSL_IOC_TYPE, 0x6, struct kgsl_device_waittimestamp)
507
508struct kgsl_device_waittimestamp_ctxtid {
509	unsigned int context_id;
510	unsigned int timestamp;
511	unsigned int timeout;
512};
513
514#define IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID \
515	_IOW(KGSL_IOC_TYPE, 0x7, struct kgsl_device_waittimestamp_ctxtid)
516
517/* DEPRECATED: issue indirect commands to the GPU.
518 * drawctxt_id must have been created with IOCTL_KGSL_DRAWCTXT_CREATE
519 * ibaddr and sizedwords must specify a subset of a buffer created
520 * with IOCTL_KGSL_SHAREDMEM_FROM_PMEM
521 * flags may be a mask of KGSL_CONTEXT_ values
522 * timestamp is a returned counter value which can be passed to
523 * other ioctls to determine when the commands have been executed by
524 * the GPU.
525 *
526 * This function is deprecated - consider using IOCTL_KGSL_SUBMIT_COMMANDS
527 * instead
528 */
529struct kgsl_ringbuffer_issueibcmds {
530	unsigned int drawctxt_id;
531	unsigned long ibdesc_addr;
532	unsigned int numibs;
533	unsigned int timestamp; /*output param */
534	unsigned int flags;
535};
536
537#define IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS \
538	_IOWR(KGSL_IOC_TYPE, 0x10, struct kgsl_ringbuffer_issueibcmds)
539
540/* read the most recently executed timestamp value
541 * type should be a value from enum kgsl_timestamp_type
542 */
543struct kgsl_cmdstream_readtimestamp {
544	unsigned int type;
545	unsigned int timestamp; /*output param */
546};
547
548#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_OLD \
549	_IOR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp)
550
551#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP \
552	_IOWR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp)
553
554/* free memory when the GPU reaches a given timestamp.
555 * gpuaddr specify a memory region created by a
556 * IOCTL_KGSL_SHAREDMEM_FROM_PMEM call
557 * type should be a value from enum kgsl_timestamp_type
558 */
559struct kgsl_cmdstream_freememontimestamp {
560	unsigned long gpuaddr;
561	unsigned int type;
562	unsigned int timestamp;
563};
564
565#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP \
566	_IOW(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp)
567
568/*
569 * Previous versions of this header had incorrectly defined
570 * IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP as a read-only ioctl instead
571 * of a write only ioctl.  To ensure binary compatibility, the following
572 * #define will be used to intercept the incorrect ioctl
573 */
574
575#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_OLD \
576	_IOR(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp)
577
578/* create a draw context, which is used to preserve GPU state.
579 * The flags field may contain a mask KGSL_CONTEXT_*  values
580 */
581struct kgsl_drawctxt_create {
582	unsigned int flags;
583	unsigned int drawctxt_id; /*output param */
584};
585
586#define IOCTL_KGSL_DRAWCTXT_CREATE \
587	_IOWR(KGSL_IOC_TYPE, 0x13, struct kgsl_drawctxt_create)
588
589/* destroy a draw context */
590struct kgsl_drawctxt_destroy {
591	unsigned int drawctxt_id;
592};
593
594#define IOCTL_KGSL_DRAWCTXT_DESTROY \
595	_IOW(KGSL_IOC_TYPE, 0x14, struct kgsl_drawctxt_destroy)
596
597/*
598 * add a block of pmem, fb, ashmem or user allocated address
599 * into the GPU address space
600 */
struct kgsl_map_user_mem {
	int fd;                  /* fd backing the buffer (pmem/ashmem/dma-buf) */
	unsigned long gpuaddr;   /*output param */
	size_t len;              /* length of the region to map, in bytes */
	size_t offset;           /* offset into the backing object */
	unsigned long hostptr;   /*input param */
	enum kgsl_user_mem_type memtype; /* origin of the buffer, see enum above */
	unsigned int flags;      /* KGSL_MEM* flags (cache mode, memtype hints) */
};
610
611#define IOCTL_KGSL_MAP_USER_MEM \
612	_IOWR(KGSL_IOC_TYPE, 0x15, struct kgsl_map_user_mem)
613
614struct kgsl_cmdstream_readtimestamp_ctxtid {
615	unsigned int context_id;
616	unsigned int type;
617	unsigned int timestamp; /*output param */
618};
619
620#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_CTXTID \
621	_IOWR(KGSL_IOC_TYPE, 0x16, struct kgsl_cmdstream_readtimestamp_ctxtid)
622
623struct kgsl_cmdstream_freememontimestamp_ctxtid {
624	unsigned int context_id;
625	unsigned long gpuaddr;
626	unsigned int type;
627	unsigned int timestamp;
628};
629
630#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_CTXTID \
631	_IOW(KGSL_IOC_TYPE, 0x17, \
632	struct kgsl_cmdstream_freememontimestamp_ctxtid)
633
634/* add a block of pmem or fb into the GPU address space */
635struct kgsl_sharedmem_from_pmem {
636	int pmem_fd;
637	unsigned long gpuaddr;  /*output param */
638	unsigned int len;
639	unsigned int offset;
640};
641
642#define IOCTL_KGSL_SHAREDMEM_FROM_PMEM \
643	_IOWR(KGSL_IOC_TYPE, 0x20, struct kgsl_sharedmem_from_pmem)
644
645/* remove memory from the GPU's address space */
646struct kgsl_sharedmem_free {
647	unsigned long gpuaddr;
648};
649
650#define IOCTL_KGSL_SHAREDMEM_FREE \
651	_IOW(KGSL_IOC_TYPE, 0x21, struct kgsl_sharedmem_free)
652
/*
 * struct kgsl_cff_user_event - argument to IOCTL_KGSL_CFF_USER_EVENT
 * @cff_opcode: opcode to record in the CFF capture stream
 * @op1: first operand for the opcode
 * @op2: second operand for the opcode
 * @op3: third operand for the opcode
 * @op4: fourth operand for the opcode
 * @op5: fifth operand for the opcode
 * @__pad: unused, reserved for future binary compatibility
 */
struct kgsl_cff_user_event {
	unsigned char cff_opcode;
	unsigned int op1;
	unsigned int op2;
	unsigned int op3;
	unsigned int op4;
	unsigned int op5;
	unsigned int __pad[2];
};
662
663#define IOCTL_KGSL_CFF_USER_EVENT \
664	_IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_cff_user_event)
665
/* 2D tile geometry used by the legacy GMEM shadow binding interface */
struct kgsl_gmem_desc {
	unsigned int x;
	unsigned int y;
	unsigned int width;
	unsigned int height;
	unsigned int pitch;
};

/* Shadow buffer description for the legacy GMEM shadow binding interface */
struct kgsl_buffer_desc {
	void		*hostptr;
	unsigned long	gpuaddr;
	int		size;
	unsigned int	format;
	unsigned int	pitch;
	unsigned int	enabled;
};

/* Argument to IOCTL_KGSL_DRAWCTXT_BIND_GMEM_SHADOW (legacy) */
struct kgsl_bind_gmem_shadow {
	unsigned int drawctxt_id;
	struct kgsl_gmem_desc gmem_desc;
	unsigned int shadow_x;
	unsigned int shadow_y;
	struct kgsl_buffer_desc shadow_buffer;
	unsigned int buffer_id;
};
691
692#define IOCTL_KGSL_DRAWCTXT_BIND_GMEM_SHADOW \
693	_IOW(KGSL_IOC_TYPE, 0x22, struct kgsl_bind_gmem_shadow)
694
695/* add a block of memory into the GPU address space */
696
697/*
698 * IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC deprecated 09/2012
699 * use IOCTL_KGSL_GPUMEM_ALLOC instead
700 */
701
struct kgsl_sharedmem_from_vmalloc {
	unsigned long gpuaddr;	/*output param */
	/*
	 * NOTE: hostptr is a 32-bit field in this deprecated ABI (it predates
	 * 64-bit userspace); do not widen it — use IOCTL_KGSL_GPUMEM_ALLOC.
	 */
	unsigned int hostptr;
	unsigned int flags;
};
707
708#define IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC \
709	_IOWR(KGSL_IOC_TYPE, 0x23, struct kgsl_sharedmem_from_vmalloc)
710
711/*
712 * This is being deprecated in favor of IOCTL_KGSL_GPUMEM_CACHE_SYNC which
713 * supports both directions (flush and invalidate). This code will still
714 * work, but by definition it will do a flush of the cache which might not be
715 * what you want to have happen on a buffer following a GPU operation.  It is
716 * safer to go with IOCTL_KGSL_GPUMEM_CACHE_SYNC
717 */
718
719#define IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE \
720	_IOW(KGSL_IOC_TYPE, 0x24, struct kgsl_sharedmem_free)
721
722struct kgsl_drawctxt_set_bin_base_offset {
723	unsigned int drawctxt_id;
724	unsigned int offset;
725};
726
727#define IOCTL_KGSL_DRAWCTXT_SET_BIN_BASE_OFFSET \
728	_IOW(KGSL_IOC_TYPE, 0x25, struct kgsl_drawctxt_set_bin_base_offset)
729
/*
 * enum kgsl_cmdwindow_type - target selector for the legacy
 * IOCTL_KGSL_CMDWINDOW_WRITE address/data write interface
 */
enum kgsl_cmdwindow_type {
	KGSL_CMDWINDOW_MIN     = 0x00000000,
	KGSL_CMDWINDOW_2D      = 0x00000000,
	KGSL_CMDWINDOW_3D      = 0x00000001, /* legacy */
	KGSL_CMDWINDOW_MMU     = 0x00000002,
	KGSL_CMDWINDOW_ARBITER = 0x000000FF,
	KGSL_CMDWINDOW_MAX     = 0x000000FF,
};
738
739/* write to the command window */
740struct kgsl_cmdwindow_write {
741	enum kgsl_cmdwindow_type target;
742	unsigned int addr;
743	unsigned int data;
744};
745
746#define IOCTL_KGSL_CMDWINDOW_WRITE \
747	_IOW(KGSL_IOC_TYPE, 0x2e, struct kgsl_cmdwindow_write)
748
/*
 * Allocate GPU-accessible memory; argument to IOCTL_KGSL_GPUMEM_ALLOC.
 * Newer code should prefer IOCTL_KGSL_GPUMEM_ALLOC_ID, which also returns
 * an id and echoes back the adjusted size/flags.
 */
struct kgsl_gpumem_alloc {
	unsigned long gpuaddr; /* output param */
	size_t size;           /* requested allocation size in bytes */
	unsigned int flags;    /* KGSL_MEM* flags (memtype/align/cache hints) */
};
754
755#define IOCTL_KGSL_GPUMEM_ALLOC \
756	_IOWR(KGSL_IOC_TYPE, 0x2f, struct kgsl_gpumem_alloc)
757
/*
 * Argument to IOCTL_KGSL_CFF_SYNCMEM: identifies the GPU address range
 * [gpuaddr, gpuaddr + len) for a CFF capture memory sync.
 */
struct kgsl_cff_syncmem {
	unsigned long gpuaddr;
	size_t len;
	unsigned int __pad[2]; /* For future binary compatibility */
};
763
764#define IOCTL_KGSL_CFF_SYNCMEM \
765	_IOW(KGSL_IOC_TYPE, 0x30, struct kgsl_cff_syncmem)
766
767/*
768 * A timestamp event allows the user space to register an action following an
769 * expired timestamp. Note IOCTL_KGSL_TIMESTAMP_EVENT has been redefined to
770 * _IOWR to support fences which need to return a fd for the priv parameter.
771 */
772
773struct kgsl_timestamp_event {
774	int type;                /* Type of event (see list below) */
775	unsigned int timestamp;  /* Timestamp to trigger event on */
776	unsigned int context_id; /* Context for the timestamp */
777	void __user *priv;	 /* Pointer to the event specific blob */
778	size_t len;              /* Size of the event specific blob */
779};
780
781#define IOCTL_KGSL_TIMESTAMP_EVENT_OLD \
782	_IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_timestamp_event)
783
784/* A genlock timestamp event releases an existing lock on timestamp expire */
785
786#define KGSL_TIMESTAMP_EVENT_GENLOCK 1
787
788struct kgsl_timestamp_event_genlock {
789	int handle; /* Handle of the genlock lock to release */
790};
791
792/* A fence timestamp event releases an existing lock on timestamp expire */
793
794#define KGSL_TIMESTAMP_EVENT_FENCE 2
795
796struct kgsl_timestamp_event_fence {
797	int fence_fd; /* Fence to signal */
798};
799
800/*
801 * Set a property within the kernel.  Uses the same structure as
802 * IOCTL_KGSL_GETPROPERTY
803 */
804
805#define IOCTL_KGSL_SETPROPERTY \
806	_IOW(KGSL_IOC_TYPE, 0x32, struct kgsl_device_getproperty)
807
808#define IOCTL_KGSL_TIMESTAMP_EVENT \
809	_IOWR(KGSL_IOC_TYPE, 0x33, struct kgsl_timestamp_event)
810
811/**
812 * struct kgsl_gpumem_alloc_id - argument to IOCTL_KGSL_GPUMEM_ALLOC_ID
813 * @id: returned id value for this allocation.
814 * @flags: mask of KGSL_MEM* values requested and actual flags on return.
815 * @size: requested size of the allocation and actual size on return.
816 * @mmapsize: returned size to pass to mmap() which may be larger than 'size'
817 * @gpuaddr: returned GPU address for the allocation
818 *
819 * Allocate memory for access by the GPU. The flags and size fields are echoed
820 * back by the kernel, so that the caller can know if the request was
821 * adjusted.
822 *
823 * Supported flags:
824 * KGSL_MEMFLAGS_GPUREADONLY: the GPU will be unable to write to the buffer
825 * KGSL_MEMTYPE*: usage hint for debugging aid
826 * KGSL_MEMALIGN*: alignment hint, may be ignored or adjusted by the kernel.
827 * KGSL_MEMFLAGS_USE_CPU_MAP: If set on call and return, the returned GPU
828 * address will be 0. Calling mmap() will set the GPU address.
829 */
830struct kgsl_gpumem_alloc_id {
831	unsigned int id;
832	unsigned int flags;
833	size_t size;
834	size_t mmapsize;
835	unsigned long gpuaddr;
836/* private: reserved for future use*/
837	unsigned long __pad[2];
838};
839
840#define IOCTL_KGSL_GPUMEM_ALLOC_ID \
841	_IOWR(KGSL_IOC_TYPE, 0x34, struct kgsl_gpumem_alloc_id)
842
843/**
844 * struct kgsl_gpumem_free_id - argument to IOCTL_KGSL_GPUMEM_FREE_ID
845 * @id: GPU allocation id to free
846 *
847 * Free an allocation by id, in case a GPU address has not been assigned or
848 * is unknown. Freeing an allocation by id with this ioctl or by GPU address
849 * with IOCTL_KGSL_SHAREDMEM_FREE are equivalent.
850 */
851struct kgsl_gpumem_free_id {
852	unsigned int id;
853/* private: reserved for future use*/
854	unsigned int __pad;
855};
856
857#define IOCTL_KGSL_GPUMEM_FREE_ID \
858	_IOWR(KGSL_IOC_TYPE, 0x35, struct kgsl_gpumem_free_id)
859
860/**
861 * struct kgsl_gpumem_get_info - argument to IOCTL_KGSL_GPUMEM_GET_INFO
862 * @gpuaddr: GPU address to query. Also set on return.
863 * @id: GPU allocation id to query. Also set on return.
864 * @flags: returned mask of KGSL_MEM* values.
865 * @size: returned size of the allocation.
866 * @mmapsize: returned size to pass mmap(), which may be larger than 'size'
867 * @useraddr: returned address of the userspace mapping for this buffer
868 *
869 * This ioctl allows querying of all user visible attributes of an existing
870 * allocation, by either the GPU address or the id returned by a previous
871 * call to IOCTL_KGSL_GPUMEM_ALLOC_ID. Legacy allocation ioctls may not
872 * return all attributes so this ioctl can be used to look them up if needed.
873 *
874 */
875struct kgsl_gpumem_get_info {
876	unsigned long gpuaddr;
877	unsigned int id;
878	unsigned int flags;
879	size_t size;
880	size_t mmapsize;
881	unsigned long useraddr;
882/* private: reserved for future use*/
883	unsigned long __pad[4];
884};
885
886#define IOCTL_KGSL_GPUMEM_GET_INFO\
887	_IOWR(KGSL_IOC_TYPE, 0x36, struct kgsl_gpumem_get_info)
888
889/**
890 * struct kgsl_gpumem_sync_cache - argument to IOCTL_KGSL_GPUMEM_SYNC_CACHE
891 * @gpuaddr: GPU address of the buffer to sync.
892 * @id: id of the buffer to sync. Either gpuaddr or id is sufficient.
893 * @op: a mask of KGSL_GPUMEM_CACHE_* values
894 * @offset: offset into the buffer
895 * @length: number of bytes starting from offset to perform
896 * the cache operation on
897 *
898 * Sync the L2 cache for memory headed to and from the GPU - this replaces
899 * KGSL_SHAREDMEM_FLUSH_CACHE since it can handle cache management for both
900 * directions
901 *
902 */
903struct kgsl_gpumem_sync_cache {
904	unsigned long gpuaddr;
905	unsigned int id;
906	unsigned int op;
907	size_t offset;
908	size_t length;
909};
910
911#define KGSL_GPUMEM_CACHE_CLEAN (1 << 0)
912#define KGSL_GPUMEM_CACHE_TO_GPU KGSL_GPUMEM_CACHE_CLEAN
913
914#define KGSL_GPUMEM_CACHE_INV (1 << 1)
915#define KGSL_GPUMEM_CACHE_FROM_GPU KGSL_GPUMEM_CACHE_INV
916
917#define KGSL_GPUMEM_CACHE_FLUSH \
918	(KGSL_GPUMEM_CACHE_CLEAN | KGSL_GPUMEM_CACHE_INV)
919
/* Flag to ensure backwards compatibility of kgsl_gpumem_sync_cache struct */
/*
 * Use an unsigned constant for the shift: (1 << 31) overflows a signed int,
 * which is undefined behavior.  1U << 31 yields the same 0x80000000 value
 * with defined semantics, so the ABI is unchanged.
 */
#define KGSL_GPUMEM_CACHE_RANGE (1U << 31)
922
923#define IOCTL_KGSL_GPUMEM_SYNC_CACHE \
924	_IOW(KGSL_IOC_TYPE, 0x37, struct kgsl_gpumem_sync_cache)
925
/**
 * struct kgsl_perfcounter_get - argument to IOCTL_KGSL_PERFCOUNTER_GET
 * @groupid: Performance counter group ID
 * @countable: Countable to select within the group
 * @offset: Return offset of the reserved LO counter
 * @offset_hi: Return offset of the reserved HI counter
 *
 * Get an available performance counter from a specified groupid.  The offset
 * of the performance counter will be returned after successfully assigning
 * the countable to the counter for the specified group.  An error will be
 * returned and an offset of 0 if the groupid is invalid or there are no
 * more counters left.  After successfully getting a perfcounter, the user
 * must call kgsl_perfcounter_put(groupid, countable) when finished with
 * the perfcounter to clear up perfcounter resources.
 *
 */
struct kgsl_perfcounter_get {
	unsigned int groupid;
	unsigned int countable;
	unsigned int offset;
	unsigned int offset_hi;
/* private: reserved for future use */
	unsigned int __pad; /* For future binary compatibility */
};

#define IOCTL_KGSL_PERFCOUNTER_GET \
	_IOWR(KGSL_IOC_TYPE, 0x38, struct kgsl_perfcounter_get)

/**
 * struct kgsl_perfcounter_put - argument to IOCTL_KGSL_PERFCOUNTER_PUT
 * @groupid: Performance counter group ID
 * @countable: Countable to release within the group
 *
 * Put an allocated performance counter to allow others to have access to the
 * resource that was previously taken.  This is only to be called after
 * successfully getting a performance counter from kgsl_perfcounter_get().
 *
 */
struct kgsl_perfcounter_put {
	unsigned int groupid;
	unsigned int countable;
/* private: reserved for future use */
	unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_PERFCOUNTER_PUT \
	_IOW(KGSL_IOC_TYPE, 0x39, struct kgsl_perfcounter_put)
973
/**
 * struct kgsl_perfcounter_query - argument to IOCTL_KGSL_PERFCOUNTER_QUERY
 * @groupid: Performance counter group ID
 * @countables: Return active countables array
 * @count: Number of entries the countables array can hold
 * @max_counters: Return total number counters for the group ID
 *
 * Query the available performance counters given a groupid.  The array
 * *countables is used to return the current active countables in counters.
 * The size of the array is passed in so the kernel will only write at most
 * count entries for the group id.  The total number of available
 * counters for the group ID is returned in max_counters.
 * If the array or size passed in are invalid, then only the maximum number
 * of counters will be returned, no data will be written to *countables.
 * If the groupid is invalid an error code will be returned.
 *
 */
struct kgsl_perfcounter_query {
	unsigned int groupid;
	/* Array to return the current countable for up to count counters */
	unsigned int __user *countables;
	unsigned int count;
	unsigned int max_counters;
/* private: reserved for future use */
	unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_PERFCOUNTER_QUERY \
	_IOWR(KGSL_IOC_TYPE, 0x3A, struct kgsl_perfcounter_query)

/**
 * struct kgsl_perfcounter_read_group - one groupid/countable pair to read
 * @groupid: Performance counter group ID
 * @countable: Performance counter countable ID
 * @value: Returns the 64 bit value read from the counter
 */
struct kgsl_perfcounter_read_group {
	unsigned int groupid;
	unsigned int countable;
	unsigned long long value;
};

/**
 * struct kgsl_perfcounter_read - argument to IOCTL_KGSL_PERFCOUNTER_READ
 * @reads: Array of kgsl_perfcounter_read_group entries; each selects a
 * groupid/countable pair and returns the counter value read
 * @count: Number of entries in the reads array
 *
 * Read in the current value of a performance counter given by the groupid
 * and countable.
 */
struct kgsl_perfcounter_read {
	struct kgsl_perfcounter_read_group __user *reads;
	unsigned int count;
/* private: reserved for future use */
	unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_PERFCOUNTER_READ \
	_IOWR(KGSL_IOC_TYPE, 0x3B, struct kgsl_perfcounter_read)
/*
 * struct kgsl_gpumem_sync_cache_bulk - argument to
 * IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK
 * @id_list: list of GPU buffer ids of the buffers to sync
 * @count: number of GPU buffer ids in id_list
 * @op: a mask of KGSL_GPUMEM_CACHE_* values
 *
 * Sync the cache for memory headed to and from the GPU. Certain
 * optimizations can be made on the cache operation based on the total
 * size of the working set of memory to be managed.
 */
struct kgsl_gpumem_sync_cache_bulk {
	unsigned int __user *id_list;
	unsigned int count;
	unsigned int op;
/* private: reserved for future use */
	unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK \
	_IOWR(KGSL_IOC_TYPE, 0x3C, struct kgsl_gpumem_sync_cache_bulk)

/*
 * struct kgsl_cmd_syncpoint_timestamp
 * @context_id: ID of a KGSL context
 * @timestamp: GPU timestamp
 *
 * This structure defines a syncpoint comprising a context/timestamp pair. A
 * list of these may be passed by IOCTL_KGSL_SUBMIT_COMMANDS to define
 * dependencies that must be met before the command can be submitted to the
 * hardware
 */
struct kgsl_cmd_syncpoint_timestamp {
	unsigned int context_id;
	unsigned int timestamp;
};

/*
 * struct kgsl_cmd_syncpoint_fence - fence-based syncpoint
 * @fd: sync fence file descriptor defining a dependency that must be met
 * before the command can be submitted (see kgsl_cmd_syncpoint below)
 */
struct kgsl_cmd_syncpoint_fence {
	int fd;
};

/**
 * struct kgsl_cmd_syncpoint - Define a sync point for a command batch
 * @type: type of sync point defined here
 * @priv: Pointer to the type specific buffer
 * @size: Size of the type specific buffer
 *
 * This structure contains pointers defining a specific command sync point.
 * The pointer and size should point to a type appropriate structure.
 */
struct kgsl_cmd_syncpoint {
	int type;
	void __user *priv;
	size_t size;
};

/* Flag to indicate that the cmdlist may contain memlists */
#define KGSL_IBDESC_MEMLIST 0x1

/* Flag to point out the cmdbatch profiling buffer in the memlist */
#define KGSL_IBDESC_PROFILING_BUFFER 0x2
1092
/**
 * struct kgsl_submit_commands - Argument to IOCTL_KGSL_SUBMIT_COMMANDS
 * @context_id: KGSL context ID that owns the commands
 * @flags: Command batch flags (see the cmdbatch-capable KGSL_CONTEXT_* /
 * KGSL_IBDESC_* definitions in this header)
 * @cmdlist: User pointer to a list of kgsl_ibdesc structures
 * @numcmds: Number of commands listed in cmdlist
 * @synclist: User pointer to a list of kgsl_cmd_syncpoint structures
 * @numsyncs: Number of sync points listed in synclist
 * @timestamp: On entry, a user defined timestamp; on exit, the timestamp
 * assigned to the command batch (user timestamps presumably require
 * KGSL_CONTEXT_USER_GENERATED_TS - confirm against the driver)
 *
 * This structure specifies a command to send to the GPU hardware.  This is
 * similar to kgsl_issueibcmds except that it doesn't support the legacy way to
 * submit IB lists and it adds sync points to block the IB until the
 * dependencies are satisfied.  This entry point is the new and preferred way
 * to submit commands to the GPU. The memory list can be used to specify all
 * memory that is referenced in the current set of commands.
 */

struct kgsl_submit_commands {
	unsigned int context_id;
	unsigned int flags;
	struct kgsl_ibdesc __user *cmdlist;
	unsigned int numcmds;
	struct kgsl_cmd_syncpoint __user *synclist;
	unsigned int numsyncs;
	unsigned int timestamp;
/* private: reserved for future use */
	unsigned int __pad[4];
};

#define IOCTL_KGSL_SUBMIT_COMMANDS \
	_IOWR(KGSL_IOC_TYPE, 0x3D, struct kgsl_submit_commands)
1126
/**
 * struct kgsl_device_constraint - device constraint argument
 * @type: type of constraint i.e pwrlevel/none (KGSL_CONSTRAINT_* below)
 * @context_id: KGSL context ID
 * @data: constraint data (e.g. struct kgsl_device_constraint_pwrlevel)
 * @size: size of the constraint data
 */
struct kgsl_device_constraint {
	unsigned int type;
	unsigned int context_id;
	void __user *data;
	size_t size;
};

/* Constraint Type*/
#define KGSL_CONSTRAINT_NONE 0
#define KGSL_CONSTRAINT_PWRLEVEL 1

/* L3 constraint Type */
#define KGSL_CONSTRAINT_L3_NONE	2
#define KGSL_CONSTRAINT_L3_PWRLEVEL	3

/* PWRLEVEL constraint level*/
/* set to min frequency */
#define KGSL_CONSTRAINT_PWR_MIN    0
/* set to max frequency */
#define KGSL_CONSTRAINT_PWR_MAX    1

/**
 * struct kgsl_device_constraint_pwrlevel - PWRLEVEL constraint data
 * @level: requested level (KGSL_CONSTRAINT_PWR_MIN or KGSL_CONSTRAINT_PWR_MAX)
 */
struct kgsl_device_constraint_pwrlevel {
	unsigned int level;
};
1158
/**
 * struct kgsl_syncsource_create - Argument to IOCTL_KGSL_SYNCSOURCE_CREATE
 * @id: returned id for the syncsource that was created.
 *
 * This ioctl creates a userspace sync timeline.
 */

struct kgsl_syncsource_create {
	unsigned int id;
/* private: reserved for future use */
	unsigned int __pad[3];
};

#define IOCTL_KGSL_SYNCSOURCE_CREATE \
	_IOWR(KGSL_IOC_TYPE, 0x40, struct kgsl_syncsource_create)

/**
 * struct kgsl_syncsource_destroy - Argument to IOCTL_KGSL_SYNCSOURCE_DESTROY
 * @id: syncsource id to destroy
 *
 * This ioctl destroys a userspace sync timeline.
 */

struct kgsl_syncsource_destroy {
	unsigned int id;
/* private: reserved for future use */
	unsigned int __pad[3];
};

#define IOCTL_KGSL_SYNCSOURCE_DESTROY \
	_IOWR(KGSL_IOC_TYPE, 0x41, struct kgsl_syncsource_destroy)

/**
 * struct kgsl_syncsource_create_fence - Argument to
 *     IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE
 * @id: syncsource id
 * @fence_fd: returned sync_fence fd
 *
 * Create a fence that may be signaled by userspace by calling
 * IOCTL_KGSL_SYNCSOURCE_SIGNAL_FENCE. There are no order dependencies between
 * these fences.
 */
struct kgsl_syncsource_create_fence {
	unsigned int id;
	int fence_fd;
/* private: reserved for future use */
	unsigned int __pad[4];
};

/**
 * struct kgsl_syncsource_signal_fence - Argument to
 *     IOCTL_KGSL_SYNCSOURCE_SIGNAL_FENCE
 * @id: syncsource id
 * @fence_fd: sync_fence fd to signal
 *
 * Signal a fence that was created by a IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE
 * call using the same syncsource id. This allows a fence to be shared
 * to other processes but only signaled by the process owning the fd
 * used to create the fence.
 */
/*
 * NOTE(review): the define below belongs to kgsl_syncsource_create_fence
 * above, not to the signal_fence kernel-doc it sits under; left in place
 * to avoid reordering an established UAPI header.
 */
#define IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE \
	_IOWR(KGSL_IOC_TYPE, 0x42, struct kgsl_syncsource_create_fence)

struct kgsl_syncsource_signal_fence {
	unsigned int id;
	int fence_fd;
/* private: reserved for future use */
	unsigned int __pad[4];
};

#define IOCTL_KGSL_SYNCSOURCE_SIGNAL_FENCE \
	_IOWR(KGSL_IOC_TYPE, 0x43, struct kgsl_syncsource_signal_fence)
1231
/**
 * struct kgsl_cff_sync_gpuobj - Argument to IOCTL_KGSL_CFF_SYNC_GPUOBJ
 * @offset: Offset into the GPU object to sync
 * @length: Number of bytes to sync
 * @id: ID of the GPU object to sync
 */
struct kgsl_cff_sync_gpuobj {
	uint64_t offset;
	uint64_t length;
	unsigned int id;
};

#define IOCTL_KGSL_CFF_SYNC_GPUOBJ \
	_IOW(KGSL_IOC_TYPE, 0x44, struct kgsl_cff_sync_gpuobj)

/**
 * struct kgsl_gpuobj_alloc - Argument to IOCTL_KGSL_GPUOBJ_ALLOC
 * @size: Size in bytes of the object to allocate
 * @flags: mask of KGSL_MEMFLAG_* bits
 * @va_len: Size in bytes of the virtual region to allocate
 * @mmapsize: Returns the mmap() size of the object
 * @id: Returns the GPU object ID of the new object
 * @metadata_len: Length of the metadata to copy from the user
 * @metadata: Pointer to the user specified metadata to store for the object
 */
struct kgsl_gpuobj_alloc {
	uint64_t size;
	uint64_t flags;
	uint64_t va_len;
	uint64_t mmapsize;
	unsigned int id;
	unsigned int metadata_len;
	uint64_t metadata;
};

/* Let the user know that this header supports the gpuobj metadata */
#define KGSL_GPUOBJ_ALLOC_METADATA_MAX 64

#define IOCTL_KGSL_GPUOBJ_ALLOC \
	_IOWR(KGSL_IOC_TYPE, 0x45, struct kgsl_gpuobj_alloc)
1272
/**
 * struct kgsl_gpuobj_free - Argument to IOCTL_KGSL_GPUOBJ_FREE
 * @flags: Mask of: KGSL_GPUOBJ_FREE_ON_EVENT
 * @priv: Pointer to the private object if KGSL_GPUOBJ_FREE_ON_EVENT is
 * specified
 * @id: ID of the GPU object to free
 * @type: If KGSL_GPUOBJ_FREE_ON_EVENT is specified, the type of asynchronous
 * event to free on
 * @len: Length of the data passed in priv
 */
struct kgsl_gpuobj_free {
	uint64_t flags;
	uint64_t __user priv;
	unsigned int id;
	unsigned int type;
	unsigned int len;
};

/* kgsl_gpuobj_free.flags: defer the free until an asynchronous event fires */
#define KGSL_GPUOBJ_FREE_ON_EVENT 1

/* kgsl_gpuobj_free.type: which event structure priv points to */
#define KGSL_GPU_EVENT_TIMESTAMP 1
#define KGSL_GPU_EVENT_FENCE     2

/**
 * struct kgsl_gpu_event_timestamp - Specifies a timestamp event to free a GPU
 * object on
 * @context_id: ID of the timestamp event to wait for
 * @timestamp: Timestamp of the timestamp event to wait for
 */
struct kgsl_gpu_event_timestamp {
	unsigned int context_id;
	unsigned int timestamp;
};

/**
 * struct kgsl_gpu_event_fence - Specifies a fence ID to free a GPU object on
 * @fd: File descriptor for the fence
 */
struct kgsl_gpu_event_fence {
	int fd;
};

#define IOCTL_KGSL_GPUOBJ_FREE \
	_IOW(KGSL_IOC_TYPE, 0x46, struct kgsl_gpuobj_free)
1317
/**
 * struct kgsl_gpuobj_info - argument to IOCTL_KGSL_GPUOBJ_INFO
 * @gpuaddr: GPU address of the object
 * @flags: Current flags for the object
 * @size: Size of the object
 * @va_len: VA size of the object
 * @va_addr: Virtual address of the object (if it is mapped)
 * @id: GPU object ID of the object to query
 */
struct kgsl_gpuobj_info {
	uint64_t gpuaddr;
	uint64_t flags;
	uint64_t size;
	uint64_t va_len;
	uint64_t va_addr;
	unsigned int id;
};

#define IOCTL_KGSL_GPUOBJ_INFO \
	_IOWR(KGSL_IOC_TYPE, 0x47, struct kgsl_gpuobj_info)

/**
 * struct kgsl_gpuobj_import - argument to IOCTL_KGSL_GPUOBJ_IMPORT
 * @priv: Pointer to the private data for the import type (e.g. one of the
 * kgsl_gpuobj_import_* structures below)
 * @priv_len: Length of the private data
 * @flags: Mask of KGSL_MEMFLAG_ flags
 * @type: Type of the import (KGSL_USER_MEM_TYPE_*)
 * @id: Returns the ID of the new GPU object
 */
struct kgsl_gpuobj_import {
	uint64_t __user priv;
	uint64_t priv_len;
	uint64_t flags;
	unsigned int type;
	unsigned int id;
};

/**
 * struct kgsl_gpuobj_import_dma_buf - import a dmabuf object
 * @fd: File descriptor for the dma-buf object
 */
struct kgsl_gpuobj_import_dma_buf {
	int fd;
};

/**
 * struct kgsl_gpuobj_import_useraddr - import an object based on a useraddr
 * @virtaddr: Virtual address of the object to import
 */
struct kgsl_gpuobj_import_useraddr {
	uint64_t virtaddr;
};

#define IOCTL_KGSL_GPUOBJ_IMPORT \
	_IOWR(KGSL_IOC_TYPE, 0x48, struct kgsl_gpuobj_import)
1373
/**
 * struct kgsl_gpuobj_sync_obj - Individual GPU object to sync
 * @offset: Offset within the GPU object to sync
 * @length: Number of bytes to sync
 * @id: ID of the GPU object to sync
 * @op: Cache operation to execute (KGSL_GPUMEM_CACHE_* mask)
 */

struct kgsl_gpuobj_sync_obj {
	uint64_t offset;
	uint64_t length;
	unsigned int id;
	unsigned int op;
};

/**
 * struct kgsl_gpuobj_sync - Argument for IOCTL_KGSL_GPUOBJ_SYNC
 * @objs: Pointer to an array of kgsl_gpuobj_sync_obj structs
 * @obj_len: Size of each item in the array (sizeof(struct
 * kgsl_gpuobj_sync_obj) as the caller compiled it)
 * @count: Number of items in the array
 */

struct kgsl_gpuobj_sync {
	uint64_t __user objs;
	unsigned int obj_len;
	unsigned int count;
};

#define IOCTL_KGSL_GPUOBJ_SYNC \
	_IOW(KGSL_IOC_TYPE, 0x49, struct kgsl_gpuobj_sync)
1404
/**
 * struct kgsl_command_object - GPU command object
 * @offset: GPU address offset of the object
 * @gpuaddr: GPU address of the object
 * @size: Size of the object
 * @flags: Current flags for the object
 * @id: GPU command object ID
 */
struct kgsl_command_object {
	uint64_t offset;
	uint64_t gpuaddr;
	uint64_t size;
	unsigned int flags;
	unsigned int id;
};

/**
 * struct kgsl_command_syncpoint - GPU syncpoint object
 * @priv: Pointer to the type specific buffer
 * @size: Size of the type specific buffer
 * @type: type of sync point defined here
 */
struct kgsl_command_syncpoint {
	uint64_t __user priv;
	uint64_t size;
	unsigned int type;
};

/**
 * struct kgsl_gpu_command - Argument for IOCTL_KGSL_GPU_COMMAND
 * @flags: Current flags for the object
 * @cmdlist: List of kgsl_command_objects for submission
 * @cmdsize: Size of kgsl_command_objects structure
 * @numcmds: Number of kgsl_command_objects in command list
 * @objlist: List of kgsl_command_objects for tracking
 * @objsize: Size of kgsl_command_objects structure
 * @numobjs: Number of kgsl_command_objects in object list
 * @synclist: List of kgsl_command_syncpoints
 * @syncsize: Size of kgsl_command_syncpoint structure
 * @numsyncs: Number of kgsl_command_syncpoints in syncpoint list
 * @context_id: Context ID submitting the kgsl_gpu_command
 * @timestamp: Timestamp for the submitted commands
 */
struct kgsl_gpu_command {
	uint64_t flags;
	uint64_t __user cmdlist;
	unsigned int cmdsize;
	unsigned int numcmds;
	uint64_t __user objlist;
	unsigned int objsize;
	unsigned int numobjs;
	uint64_t __user synclist;
	unsigned int syncsize;
	unsigned int numsyncs;
	unsigned int context_id;
	unsigned int timestamp;
};

#define IOCTL_KGSL_GPU_COMMAND \
	_IOWR(KGSL_IOC_TYPE, 0x4A, struct kgsl_gpu_command)
1465
/**
 * struct kgsl_preemption_counters_query - argument to
 * IOCTL_KGSL_PREEMPTIONCOUNTER_QUERY
 * @counters: Return preemption counters array
 * @size_user: Size allocated by userspace
 * @size_priority_level: Size of preemption counters for each
 * priority level
 * @max_priority_level: Return max number of priority levels
 *
 * Query the available preemption counters. The array counters
 * is used to return preemption counters. The size of the array
 * is passed in so the kernel will only write at most size_user
 * or max available preemption counters.  The total number of
 * preemption counters is returned in max_priority_level. If the
 * array or size passed in are invalid, then an error is
 * returned back.
 */
struct kgsl_preemption_counters_query {
	uint64_t __user counters;
	unsigned int size_user;
	unsigned int size_priority_level;
	unsigned int max_priority_level;
};

#define IOCTL_KGSL_PREEMPTIONCOUNTER_QUERY \
	_IOWR(KGSL_IOC_TYPE, 0x4B, struct kgsl_preemption_counters_query)

/**
 * struct kgsl_gpuobj_set_info - argument for IOCTL_KGSL_GPUOBJ_SET_INFO
 * @flags: Flags to indicate which parameters to change
 * @metadata:  If KGSL_GPUOBJ_SET_INFO_METADATA is set, a pointer to the new
 * metadata
 * @id: GPU memory object ID to change
 * @metadata_len:  If KGSL_GPUOBJ_SET_INFO_METADATA is set, the length of the
 * new metadata string
 * @type: If KGSL_GPUOBJ_SET_INFO_TYPE is set, the new type of the memory object
 */

/* kgsl_gpuobj_set_info.flags bits */
#define KGSL_GPUOBJ_SET_INFO_METADATA (1 << 0)
#define KGSL_GPUOBJ_SET_INFO_TYPE (1 << 1)

struct kgsl_gpuobj_set_info {
	uint64_t flags;
	uint64_t metadata;
	unsigned int id;
	unsigned int metadata_len;
	unsigned int type;
};

#define IOCTL_KGSL_GPUOBJ_SET_INFO \
	_IOW(KGSL_IOC_TYPE, 0x4C, struct kgsl_gpuobj_set_info)
1517
/* Sparse memory management ioctls (0x50 - 0x55) */

/**
 * struct kgsl_sparse_phys_alloc - Argument for IOCTL_KGSL_SPARSE_PHYS_ALLOC
 * @size: Size in bytes to back
 * @pagesize: Pagesize alignment required
 * @flags: Flags for this allocation
 * @id: Returned ID for this allocation
 */
struct kgsl_sparse_phys_alloc {
	uint64_t size;
	uint64_t pagesize;
	uint64_t flags;
	unsigned int id;
};

#define IOCTL_KGSL_SPARSE_PHYS_ALLOC \
	_IOWR(KGSL_IOC_TYPE, 0x50, struct kgsl_sparse_phys_alloc)

/**
 * struct kgsl_sparse_phys_free - Argument for IOCTL_KGSL_SPARSE_PHYS_FREE
 * @id: ID to free
 */
struct kgsl_sparse_phys_free {
	unsigned int id;
};

#define IOCTL_KGSL_SPARSE_PHYS_FREE \
	_IOW(KGSL_IOC_TYPE, 0x51, struct kgsl_sparse_phys_free)

/**
 * struct kgsl_sparse_virt_alloc - Argument for IOCTL_KGSL_SPARSE_VIRT_ALLOC
 * @size: Size in bytes to reserve
 * @pagesize: Pagesize alignment required
 * @flags: Flags for this allocation
 * @id: Returned ID for this allocation
 * @gpuaddr: Returned GPU address for this allocation
 */
struct kgsl_sparse_virt_alloc {
	uint64_t size;
	uint64_t pagesize;
	uint64_t flags;
	uint64_t gpuaddr;
	unsigned int id;
};

#define IOCTL_KGSL_SPARSE_VIRT_ALLOC \
	_IOWR(KGSL_IOC_TYPE, 0x52, struct kgsl_sparse_virt_alloc)

/**
 * struct kgsl_sparse_virt_free - Argument for IOCTL_KGSL_SPARSE_VIRT_FREE
 * @id: ID to free
 */
struct kgsl_sparse_virt_free {
	unsigned int id;
};

#define IOCTL_KGSL_SPARSE_VIRT_FREE \
	_IOW(KGSL_IOC_TYPE, 0x53, struct kgsl_sparse_virt_free)
1575
/**
 * struct kgsl_sparse_binding_object - Argument for kgsl_sparse_bind
 * @virtoffset: Offset into the virtual ID
 * @physoffset: Offset into the physical ID (bind only)
 * @size: Size in bytes to reserve
 * @flags: Flags for this kgsl_sparse_binding_object
 * @id: Physical ID to bind (bind only)
 */
struct kgsl_sparse_binding_object {
	uint64_t virtoffset;
	uint64_t physoffset;
	uint64_t size;
	uint64_t flags;
	unsigned int id;
};

/**
 * struct kgsl_sparse_bind - Argument for IOCTL_KGSL_SPARSE_BIND
 * @list: List of kgsl_sparse_bind_objects to bind/unbind
 * @id: Virtual ID to bind/unbind
 * @size: Size of kgsl_sparse_bind_object
 * @count: Number of elements in list
 *
 */
struct kgsl_sparse_bind {
	uint64_t __user list;
	unsigned int id;
	unsigned int size;
	unsigned int count;
};

#define IOCTL_KGSL_SPARSE_BIND \
	_IOW(KGSL_IOC_TYPE, 0x54, struct kgsl_sparse_bind)

/**
 * struct kgsl_gpu_sparse_command - Argument for
 * IOCTL_KGSL_GPU_SPARSE_COMMAND
 * @flags: Current flags for the object
 * @sparselist: List of kgsl_sparse_binding_object to bind/unbind
 * @synclist: List of kgsl_command_syncpoints
 * @sparsesize: Size of kgsl_sparse_binding_object
 * @numsparse: Number of elements in list
 * @syncsize: Size of kgsl_command_syncpoint structure
 * @numsyncs: Number of kgsl_command_syncpoints in syncpoint list
 * @context_id: Context ID submitting the kgsl_gpu_command
 * @timestamp: Timestamp for the submitted commands
 * @id: Virtual ID to bind/unbind
 */
struct kgsl_gpu_sparse_command {
	uint64_t flags;
	uint64_t __user sparselist;
	uint64_t __user synclist;
	unsigned int sparsesize;
	unsigned int numsparse;
	unsigned int syncsize;
	unsigned int numsyncs;
	unsigned int context_id;
	unsigned int timestamp;
	unsigned int id;
};

#define IOCTL_KGSL_GPU_SPARSE_COMMAND \
	_IOWR(KGSL_IOC_TYPE, 0x55, struct kgsl_gpu_sparse_command)
1639
1640#endif /* _UAPI_MSM_KGSL_H */
1641