/**
 * \file drm.h
 * Header for the Direct Rendering Manager
 *
 * \author Rickard E. (Rik) Faith <faith (at) valinux.com>
 *
 * \par Acknowledgments:
 * Dec 1999, Richard Henderson <rth (at) twiddle.net>, move to generic \c cmpxchg.
 */

/*
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/**
 * \mainpage
 *
 * The Direct Rendering Manager (DRM) is a device-independent kernel-level
 * device driver that provides support for the XFree86 Direct Rendering
 * Infrastructure (DRI).
 *
 * The DRM supports the Direct Rendering Infrastructure (DRI) in four major
 * ways:
 * -# The DRM provides synchronized access to the graphics hardware via
 *    the use of an optimized two-tiered lock.
 * -# The DRM enforces the DRI security policy for access to the graphics
 *    hardware by only allowing authenticated X11 clients access to
 *    restricted regions of memory.
 * -# The DRM provides a generic DMA engine, complete with multiple
 *    queues and the ability to detect the need for an OpenGL context
 *    switch.
 * -# The DRM is extensible via the use of small device-specific modules
 *    that rely extensively on the API exported by the DRM module.
 *
 */

#ifndef _DRM_H_
#define _DRM_H_

#ifndef __user
#define __user
#endif
#ifndef __iomem
#define __iomem
#endif

#ifdef __GNUC__
# define DEPRECATED  __attribute__ ((deprecated))
#else
# define DEPRECATED
# ifndef __FUNCTION__
#  define __FUNCTION__ __func__	/* C99 */
# endif
# ifndef __volatile__
#  define __volatile__ volatile
# endif
#endif

#if defined(__linux__)
#include <asm/ioctl.h>		/* For _IO* macros */
#define DRM_IOCTL_NR(n)		_IOC_NR(n)
#define DRM_IOC_VOID		_IOC_NONE
#define DRM_IOC_READ		_IOC_READ
#define DRM_IOC_WRITE		_IOC_WRITE
#define DRM_IOC_READWRITE	_IOC_READ|_IOC_WRITE
#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__)
#include <sys/ioccom.h>
#define DRM_IOCTL_NR(n)		((n) & 0xff)
#define DRM_IOC_VOID		IOC_VOID
#define DRM_IOC_READ		IOC_OUT
#define DRM_IOC_WRITE		IOC_IN
#define DRM_IOC_READWRITE	IOC_INOUT
#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
#endif

#ifdef __OpenBSD__
#define DRM_MAJOR 81
#endif
#if defined(__linux__) || defined(__NetBSD__)
#define DRM_MAJOR 226
#endif
#define DRM_MAX_MINOR	15

#define DRM_NAME	"drm"	  /**< Name in kernel, /dev, and /proc */
#define DRM_MIN_ORDER	5	  /**< At least 2^5 bytes = 32 bytes */
#define DRM_MAX_ORDER	22	  /**< Up to 2^22 bytes = 4MB */
#define DRM_RAM_PERCENT 10	  /**< How much system ram can we lock? */

#define _DRM_LOCK_HELD	0x80000000U /**< Hardware lock is held */
#define _DRM_LOCK_CONT	0x40000000U /**< Hardware lock is contended */
#define _DRM_LOCK_IS_HELD(lock)	   ((lock) & _DRM_LOCK_HELD)
#define _DRM_LOCK_IS_CONT(lock)	   ((lock) & _DRM_LOCK_CONT)
#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))

#if defined(__linux__)
typedef unsigned int drm_handle_t;
#else
#include <sys/types.h>
typedef unsigned long drm_handle_t;	/**< To mapped regions */
#endif
typedef unsigned int drm_context_t;	/**< GLXContext handle */
typedef unsigned int drm_drawable_t;
typedef unsigned int drm_magic_t;	/**< Magic for authentication */

/**
 * Cliprect.
 *
 * \warning If you change this structure, make sure you change
 * XF86DRIClipRectRec in the server as well
 *
 * \note KW: Actually it's illegal to change either for
 * backwards-compatibility reasons.
 */
struct drm_clip_rect {
	unsigned short x1;
	unsigned short y1;
	unsigned short x2;
	unsigned short y2;
};

/**
 * Texture region.
 */
struct drm_tex_region {
	unsigned char next;
	unsigned char prev;
	unsigned char in_use;
	unsigned char padding;
	unsigned int age;
};

/**
 * Hardware lock.
 *
 * The lock structure is a simple cache-line aligned integer.  To avoid
 * processor bus contention on a multiprocessor system, there should not be any
 * other data stored in the same cache line.
 */
struct drm_hw_lock {
	__volatile__ unsigned int lock;		/**< lock variable */
	char padding[60];			/**< Pad to cache line */
};

/* This is beyond ugly, and only works on GCC.  However, it allows me to use
 * drm.h in places (i.e., in the X-server) where I can't use size_t.  The real
 * fix is to use uint32_t instead of size_t, but that fix will break existing
 * LP64 (i.e., PowerPC64, SPARC64, IA-64, Alpha, etc.) systems.  That *will*
 * eventually happen, though.  I chose 'unsigned long' to be the fallback type
 * because that works on all the platforms I know about.  Hopefully, the
 * real fix will happen before that bites us.
 */

#ifdef __SIZE_TYPE__
# define DRM_SIZE_T __SIZE_TYPE__
#else
# warning "__SIZE_TYPE__ not defined.  Assuming sizeof(size_t) == sizeof(unsigned long)!"
# define DRM_SIZE_T unsigned long
#endif

/**
 * DRM_IOCTL_VERSION ioctl argument type.
 *
 * \sa drmGetVersion().
 */
struct drm_version {
	int version_major;	  /**< Major version */
	int version_minor;	  /**< Minor version */
	int version_patchlevel;	  /**< Patch level */
	DRM_SIZE_T name_len;	  /**< Length of name buffer */
	char __user *name;	  /**< Name of driver */
	DRM_SIZE_T date_len;	  /**< Length of date buffer */
	char __user *date;	  /**< User-space buffer to hold date */
	DRM_SIZE_T desc_len;	  /**< Length of desc buffer */
	char __user *desc;	  /**< User-space buffer to hold desc */
};
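
/*
 * Illustrative sketch (not part of this header): user space typically queries
 * DRM_IOCTL_VERSION in two passes -- once with zeroed length fields to learn
 * how large the strings are, then again with buffers allocated to hold them,
 * which is essentially what drmGetVersion() does.  Assumes <string.h>,
 * <stdlib.h>, <sys/ioctl.h> and an "fd" already opened on a DRM device node.
 *
 *	struct drm_version v;
 *
 *	memset(&v, 0, sizeof(v));
 *	if (ioctl(fd, DRM_IOCTL_VERSION, &v) == 0) {
 *		v.name = malloc(v.name_len + 1);	// lengths now known
 *		v.date = malloc(v.date_len + 1);
 *		v.desc = malloc(v.desc_len + 1);
 *		if (ioctl(fd, DRM_IOCTL_VERSION, &v) == 0) {
 *			v.name[v.name_len] = '\0';	// the kernel does not
 *			v.date[v.date_len] = '\0';	// NUL-terminate the
 *			v.desc[v.desc_len] = '\0';	// returned strings
 *		}
 *	}
 */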

/**
 * DRM_IOCTL_GET_UNIQUE ioctl argument type.
 *
 * \sa drmGetBusid() and drmSetBusId().
 */
struct drm_unique {
	DRM_SIZE_T unique_len;	  /**< Length of unique */
	char __user *unique;	  /**< Unique name for driver instantiation */
};

#undef DRM_SIZE_T

struct drm_list {
	int count;		  /**< Length of user-space structures */
	struct drm_version __user *version;
};

struct drm_block {
	int unused;
};

/**
 * DRM_IOCTL_CONTROL ioctl argument type.
 *
 * \sa drmCtlInstHandler() and drmCtlUninstHandler().
 */
struct drm_control {
	enum {
		DRM_ADD_COMMAND,
		DRM_RM_COMMAND,
		DRM_INST_HANDLER,
		DRM_UNINST_HANDLER
	} func;
	int irq;
};

/**
 * Type of memory to map.
 */
enum drm_map_type {
	_DRM_FRAME_BUFFER = 0,	  /**< WC (no caching), no core dump */
	_DRM_REGISTERS = 1,	  /**< no caching, no core dump */
	_DRM_SHM = 2,		  /**< shared, cached */
	_DRM_AGP = 3,		  /**< AGP/GART */
	_DRM_SCATTER_GATHER = 4,  /**< Scatter/gather memory for PCI DMA */
	_DRM_CONSISTENT = 5,	  /**< Consistent memory for PCI DMA */
	_DRM_GEM = 6,
	_DRM_TTM = 7,
};

/**
 * Memory mapping flags.
 */
enum drm_map_flags {
	_DRM_RESTRICTED = 0x01,	     /**< Cannot be mapped to user-virtual */
	_DRM_READ_ONLY = 0x02,
	_DRM_LOCKED = 0x04,	     /**< shared, cached, locked */
	_DRM_KERNEL = 0x08,	     /**< kernel requires access */
	_DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
	_DRM_CONTAINS_LOCK = 0x20,   /**< SHM page that contains lock */
	_DRM_REMOVABLE = 0x40,	     /**< Removable mapping */
	_DRM_DRIVER = 0x80	     /**< Managed by driver */
};

struct drm_ctx_priv_map {
	unsigned int ctx_id;	 /**< Context requesting private mapping */
	void *handle;		 /**< Handle of map */
};

/**
 * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
 * argument type.
 *
 * \sa drmAddMap().
 */
struct drm_map {
	unsigned long offset;	 /**< Requested physical address (0 for SAREA)*/
	unsigned long size;	 /**< Requested physical size (bytes) */
	enum drm_map_type type;	 /**< Type of memory to map */
	enum drm_map_flags flags;	/**< Flags */
	void *handle;		 /**< User-space: "Handle" to pass to mmap() */
				 /**< Kernel-space: kernel-virtual address */
	int mtrr;		 /**< MTRR slot used */
	/* Private data */
};
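
/*
 * Illustrative sketch (not part of this header): with the legacy map API a
 * region is first registered with DRM_IOCTL_ADD_MAP and the returned handle
 * is then used as the mmap() offset, roughly what drmAddMap() and drmMap()
 * do in libdrm.  Assumes <string.h>, <sys/mman.h>, <sys/ioctl.h> and an
 * authenticated "fd".
 *
 *	struct drm_map map;
 *	void *ptr;
 *
 *	memset(&map, 0, sizeof(map));
 *	map.offset = 0;			// 0 lets the kernel choose (SAREA)
 *	map.size   = 4096;
 *	map.type   = _DRM_SHM;
 *	map.flags  = _DRM_CONTAINS_LOCK;
 *	if (ioctl(fd, DRM_IOCTL_ADD_MAP, &map) == 0)
 *		ptr = mmap(NULL, map.size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, (off_t)(unsigned long)map.handle);
 */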

/**
 * DRM_IOCTL_GET_CLIENT ioctl argument type.
 */
struct drm_client {
	int idx;		/**< Which client desired? */
	int auth;		/**< Is client authenticated? */
	unsigned long pid;	/**< Process ID */
	unsigned long uid;	/**< User ID */
	unsigned long magic;	/**< Magic */
	unsigned long iocs;	/**< Ioctl count */
};

enum drm_stat_type {
	_DRM_STAT_LOCK,
	_DRM_STAT_OPENS,
	_DRM_STAT_CLOSES,
	_DRM_STAT_IOCTLS,
	_DRM_STAT_LOCKS,
	_DRM_STAT_UNLOCKS,
	_DRM_STAT_VALUE,	/**< Generic value */
	_DRM_STAT_BYTE,		/**< Generic byte counter (1024 bytes/K) */
	_DRM_STAT_COUNT,	/**< Generic non-byte counter (1000/k) */

	_DRM_STAT_IRQ,		/**< IRQ */
	_DRM_STAT_PRIMARY,	/**< Primary DMA bytes */
	_DRM_STAT_SECONDARY,	/**< Secondary DMA bytes */
	_DRM_STAT_DMA,		/**< DMA */
	_DRM_STAT_SPECIAL,	/**< Special DMA (e.g., priority or polled) */
	_DRM_STAT_MISSED	/**< Missed DMA opportunity */
	/* Add to the *END* of the list */
};

/**
 * DRM_IOCTL_GET_STATS ioctl argument type.
 */
struct drm_stats {
	unsigned long count;
	struct {
		unsigned long value;
		enum drm_stat_type type;
	} data[15];
};

/**
 * Hardware locking flags.
 */
enum drm_lock_flags {
	_DRM_LOCK_READY = 0x01,	     /**< Wait until hardware is ready for DMA */
	_DRM_LOCK_QUIESCENT = 0x02,  /**< Wait until hardware quiescent */
	_DRM_LOCK_FLUSH = 0x04,	     /**< Flush this context's DMA queue first */
	_DRM_LOCK_FLUSH_ALL = 0x08,  /**< Flush all DMA queues first */
	/* These *HALT* flags aren't supported yet
	   -- they will be used to support the
	   full-screen DGA-like mode. */
	_DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
	_DRM_HALT_CUR_QUEUES = 0x20  /**< Halt all current queues */
};

/**
 * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
 *
 * \sa drmGetLock() and drmUnlock().
 */
struct drm_lock {
	int context;
	enum drm_lock_flags flags;
};
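
/*
 * Illustrative sketch (not part of this header) of the two-tiered lock: user
 * space first tries an atomic compare-and-swap on the shared drm_hw_lock word
 * and only falls back to DRM_IOCTL_LOCK / DRM_IOCTL_UNLOCK when contended.
 * Modeled loosely on the DRM_LOCK/DRM_UNLOCK macros in libdrm's xf86drm.h;
 * the GCC __sync builtins stand in for the platform cmpxchg.  "hw_lock" is
 * assumed to point at the SAREA's struct drm_hw_lock and "ctx" to be this
 * client's context handle.
 *
 *	struct drm_lock lk = { .context = ctx, .flags = _DRM_LOCK_READY };
 *
 *	// fast path: we were the last holder, so the word still contains ctx
 *	if (!__sync_bool_compare_and_swap(&hw_lock->lock, ctx,
 *					  ctx | _DRM_LOCK_HELD))
 *		ioctl(fd, DRM_IOCTL_LOCK, &lk);		// contended: sleep in the kernel
 *
 *	// ... touch the hardware ...
 *
 *	if (!__sync_bool_compare_and_swap(&hw_lock->lock,
 *					  ctx | _DRM_LOCK_HELD, ctx))
 *		ioctl(fd, DRM_IOCTL_UNLOCK, &lk);	// someone marked it contended
 */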

/**
 * DMA flags
 *
 * \warning
 * These values \e must match xf86drm.h.
 *
 * \sa drm_dma.
 */
enum drm_dma_flags {
	/* Flags for DMA buffer dispatch */
	_DRM_DMA_BLOCK = 0x01,	      /**<
				       * Block until buffer dispatched.
				       *
				       * \note The buffer may not yet have
				       * been processed by the hardware --
				       * getting a hardware lock with the
				       * hardware quiescent will ensure
				       * that the buffer has been
				       * processed.
				       */
	_DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
	_DRM_DMA_PRIORITY = 0x04,     /**< High priority dispatch */

	/* Flags for DMA buffer request */
	_DRM_DMA_WAIT = 0x10,	      /**< Wait for free buffers */
	_DRM_DMA_SMALLER_OK = 0x20,   /**< Smaller-than-requested buffers OK */
	_DRM_DMA_LARGER_OK = 0x40     /**< Larger-than-requested buffers OK */
};

/**
 * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
 *
 * \sa drmAddBufs().
 */
struct drm_buf_desc {
	int count;		 /**< Number of buffers of this size */
	int size;		 /**< Size in bytes */
	int low_mark;		 /**< Low water mark */
	int high_mark;		 /**< High water mark */
	enum {
		_DRM_PAGE_ALIGN = 0x01,	  /**< Align on page boundaries for DMA */
		_DRM_AGP_BUFFER = 0x02,	  /**< Buffer is in AGP space */
		_DRM_SG_BUFFER = 0x04,	  /**< Scatter/gather memory buffer */
		_DRM_FB_BUFFER = 0x08,	  /**< Buffer is in frame buffer */
		_DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */
	} flags;
	unsigned long agp_start; /**<
				  * Start address of where the AGP buffers are
				  * in the AGP aperture
				  */
};

/**
 * DRM_IOCTL_INFO_BUFS ioctl argument type.
 */
struct drm_buf_info {
	int count;		/**< Number of buffers described in list */
	struct drm_buf_desc __user *list;	/**< List of buffer descriptions */
};

/**
 * DRM_IOCTL_FREE_BUFS ioctl argument type.
 */
struct drm_buf_free {
	int count;
	int __user *list;
};

/**
 * Buffer information
 *
 * \sa drm_buf_map.
 */
struct drm_buf_pub {
	int idx;		       /**< Index into the master buffer list */
	int total;		       /**< Buffer size */
	int used;		       /**< Amount of buffer in use (for DMA) */
	void __user *address;	       /**< Address of buffer */
};

/**
 * DRM_IOCTL_MAP_BUFS ioctl argument type.
 */
struct drm_buf_map {
	int count;		/**< Length of the buffer list */
#if defined(__cplusplus)
	void __user *c_virtual;
#else
	void __user *virtual;	/**< Mmap'd area in user-virtual */
#endif
	struct drm_buf_pub __user *list;	/**< Buffer information */
};
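
/*
 * Illustrative sketch (not part of this header): DRM_IOCTL_MAP_BUFS is
 * normally issued twice, much as drmMapBufs() does -- once with count = 0 to
 * learn how many buffers exist, then again with a list array large enough to
 * receive one drm_buf_pub per buffer.  Assumes <stdlib.h> for calloc().
 *
 *	struct drm_buf_map bm;
 *
 *	memset(&bm, 0, sizeof(bm));
 *	if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &bm) == 0 && bm.count > 0) {
 *		bm.list = calloc(bm.count, sizeof(*bm.list));
 *		if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &bm) == 0) {
 *			// bm.list[i].address now points at buffer i and
 *			// bm.list[i].idx is its index for struct drm_dma
 *		}
 *	}
 */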

/**
 * DRM_IOCTL_DMA ioctl argument type.
 *
 * Indices here refer to the offset into the buffer list in drm_buf_get.
 *
 * \sa drmDMA().
 */
struct drm_dma {
	int context;			  /**< Context handle */
	int send_count;			  /**< Number of buffers to send */
	int __user *send_indices;	  /**< List of handles to buffers */
	int __user *send_sizes;		  /**< Lengths of data to send */
	enum drm_dma_flags flags;	  /**< Flags */
	int request_count;		  /**< Number of buffers requested */
	int request_size;		  /**< Desired size for buffers */
	int __user *request_indices;	  /**< Buffer information */
	int __user *request_sizes;
	int granted_count;		  /**< Number of buffers granted */
};
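
/*
 * Illustrative sketch (not part of this header): requesting a free DMA buffer
 * and later dispatching it, in the spirit of drmDMA().  "ctx", "fd" and
 * "bytes_used" are placeholders; the index written to idx[0] refers into the
 * drm_buf_map list above.
 *
 *	int idx[1], sizes[1];
 *	struct drm_dma d;
 *
 *	memset(&d, 0, sizeof(d));
 *	d.context	  = ctx;
 *	d.request_count	  = 1;
 *	d.request_size	  = 4096;
 *	d.request_indices = idx;
 *	d.request_sizes	  = sizes;
 *	d.flags		  = _DRM_DMA_WAIT;
 *	if (ioctl(fd, DRM_IOCTL_DMA, &d) == 0 && d.granted_count == 1) {
 *		// ... fill the buffer through its drm_buf_map mapping ...
 *		sizes[0]	= bytes_used;
 *		d.send_count	= 1;
 *		d.send_indices	= idx;
 *		d.send_sizes	= sizes;
 *		d.request_count = 0;
 *		ioctl(fd, DRM_IOCTL_DMA, &d);	// dispatch the filled buffer
 *	}
 */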

enum drm_ctx_flags {
	_DRM_CONTEXT_PRESERVED = 0x01,
	_DRM_CONTEXT_2DONLY = 0x02
};

/**
 * DRM_IOCTL_ADD_CTX ioctl argument type.
 *
 * \sa drmCreateContext() and drmDestroyContext().
 */
struct drm_ctx {
	drm_context_t handle;
	enum drm_ctx_flags flags;
};

/**
 * DRM_IOCTL_RES_CTX ioctl argument type.
 */
struct drm_ctx_res {
	int count;
	struct drm_ctx __user *contexts;
};

/**
 * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
 */
struct drm_draw {
	drm_drawable_t handle;
};

/**
 * DRM_IOCTL_UPDATE_DRAW ioctl argument type.
 */
typedef enum {
	DRM_DRAWABLE_CLIPRECTS,
} drm_drawable_info_type_t;

struct drm_update_draw {
	drm_drawable_t handle;
	unsigned int type;
	unsigned int num;
	unsigned long long data;
};

/**
 * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
 */
struct drm_auth {
	drm_magic_t magic;
};
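
/*
 * Illustrative sketch (not part of this header) of the authentication
 * handshake: an unprivileged client fetches a magic token and passes it, out
 * of band, to the DRM master (historically the X server), which then
 * authenticates it.  This mirrors drmGetMagic()/drmAuthMagic() in libdrm.
 *
 *	// client side
 *	struct drm_auth auth;
 *	ioctl(client_fd, DRM_IOCTL_GET_MAGIC, &auth);
 *	// ... send auth.magic to the master over some IPC channel ...
 *
 *	// master side
 *	ioctl(master_fd, DRM_IOCTL_AUTH_MAGIC, &auth);
 */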

/**
 * DRM_IOCTL_IRQ_BUSID ioctl argument type.
 *
 * \sa drmGetInterruptFromBusID().
 */
struct drm_irq_busid {
	int irq;	/**< IRQ number */
	int busnum;	/**< bus number */
	int devnum;	/**< device number */
	int funcnum;	/**< function number */
};

enum drm_vblank_seq_type {
	_DRM_VBLANK_ABSOLUTE = 0x0,	/**< Wait for specific vblank sequence number */
	_DRM_VBLANK_RELATIVE = 0x1,	/**< Wait for given number of vblanks */
	_DRM_VBLANK_FLIP = 0x8000000,	/**< Scheduled buffer swap should flip */
	_DRM_VBLANK_NEXTONMISS = 0x10000000,	/**< If missed, wait for next vblank */
	_DRM_VBLANK_SECONDARY = 0x20000000,	/**< Secondary display controller */
	_DRM_VBLANK_SIGNAL = 0x40000000	/**< Send signal instead of blocking */
};

#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_SIGNAL | _DRM_VBLANK_SECONDARY | \
				_DRM_VBLANK_NEXTONMISS)

struct drm_wait_vblank_request {
	enum drm_vblank_seq_type type;
	unsigned int sequence;
	unsigned long signal;
};

struct drm_wait_vblank_reply {
	enum drm_vblank_seq_type type;
	unsigned int sequence;
	long tval_sec;
	long tval_usec;
};

/**
 * DRM_IOCTL_WAIT_VBLANK ioctl argument type.
 *
 * \sa drmWaitVBlank().
 */
union drm_wait_vblank {
	struct drm_wait_vblank_request request;
	struct drm_wait_vblank_reply reply;
};
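
/*
 * Illustrative sketch (not part of this header): blocking until the next
 * vertical blank on the primary display controller, as drmWaitVBlank() would.
 *
 *	union drm_wait_vblank vbl;
 *
 *	memset(&vbl, 0, sizeof(vbl));
 *	vbl.request.type     = _DRM_VBLANK_RELATIVE;
 *	vbl.request.sequence = 1;		// "one vblank from now"
 *	if (ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl) == 0) {
 *		// vbl.reply.sequence is the counter value reached,
 *		// vbl.reply.tval_sec/tval_usec the timestamp of the event
 *	}
 */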


#define _DRM_PRE_MODESET 1
#define _DRM_POST_MODESET 2

/**
 * DRM_IOCTL_MODESET_CTL ioctl argument type
 *
 * \sa drmModesetCtl().
 */
struct drm_modeset_ctl {
	uint32_t crtc;
	uint32_t cmd;
};
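
/*
 * Illustrative sketch (not part of this header): bracketing a mode set so the
 * kernel can keep the vblank counter consistent across the change, in the
 * manner of drmModesetCtl().
 *
 *	struct drm_modeset_ctl ctl = { .crtc = 0, .cmd = _DRM_PRE_MODESET };
 *
 *	ioctl(fd, DRM_IOCTL_MODESET_CTL, &ctl);
 *	// ... program the new mode ...
 *	ctl.cmd = _DRM_POST_MODESET;
 *	ioctl(fd, DRM_IOCTL_MODESET_CTL, &ctl);
 */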

/**
 * DRM_IOCTL_AGP_ENABLE ioctl argument type.
 *
 * \sa drmAgpEnable().
 */
struct drm_agp_mode {
	unsigned long mode;	/**< AGP mode */
};

/**
 * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
 *
 * \sa drmAgpAlloc() and drmAgpFree().
 */
struct drm_agp_buffer {
	unsigned long size;	/**< In bytes -- will round to page boundary */
	unsigned long handle;	/**< Used for binding / unbinding */
	unsigned long type;	/**< Type of memory to allocate */
	unsigned long physical;	/**< Physical used by i810 */
};

/**
 * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
 *
 * \sa drmAgpBind() and drmAgpUnbind().
 */
struct drm_agp_binding {
	unsigned long handle;	/**< From drm_agp_buffer */
	unsigned long offset;	/**< In bytes -- will round to page boundary */
};

/**
 * DRM_IOCTL_AGP_INFO ioctl argument type.
 *
 * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
 * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(),
 * drmAgpVendorId() and drmAgpDeviceId().
 */
struct drm_agp_info {
	int agp_version_major;
	int agp_version_minor;
	unsigned long mode;
	unsigned long aperture_base;   /**< physical address */
	unsigned long aperture_size;   /**< bytes */
	unsigned long memory_allowed;  /**< bytes */
	unsigned long memory_used;

	/** \name PCI information */
	/*@{ */
	unsigned short id_vendor;
	unsigned short id_device;
	/*@} */
};

/**
 * DRM_IOCTL_SG_ALLOC ioctl argument type.
 */
struct drm_scatter_gather {
	unsigned long size;	/**< In bytes -- will round to page boundary */
	unsigned long handle;	/**< Used for mapping / unmapping */
};

/**
 * DRM_IOCTL_SET_VERSION ioctl argument type.
 */
struct drm_set_version {
	int drm_di_major;
	int drm_di_minor;
	int drm_dd_major;
	int drm_dd_minor;
};
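
/*
 * Illustrative sketch (not part of this header): negotiating the interface
 * version, as drmSetInterfaceVersion() does.  Passing -1 means "no
 * preference"; on return the struct holds the versions actually in effect.
 *
 *	struct drm_set_version sv;
 *
 *	sv.drm_di_major = 1;	// DRM interface version 1.1
 *	sv.drm_di_minor = 1;
 *	sv.drm_dd_major = -1;	// don't care about the driver version
 *	sv.drm_dd_minor = -1;
 *	ioctl(fd, DRM_IOCTL_SET_VERSION, &sv);
 */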


#define DRM_FENCE_FLAG_EMIT                0x00000001
#define DRM_FENCE_FLAG_SHAREABLE           0x00000002
/**
 * On hardware with no interrupt events for operation completion,
 * indicates that the kernel should sleep while waiting for any blocking
 * operation to complete rather than spinning.
 *
 * Has no effect otherwise.
 */
#define DRM_FENCE_FLAG_WAIT_LAZY           0x00000004
#define DRM_FENCE_FLAG_NO_USER             0x00000010

/* Reserved for driver use */
#define DRM_FENCE_MASK_DRIVER              0xFF000000

#define DRM_FENCE_TYPE_EXE                 0x00000001

struct drm_fence_arg {
	unsigned int handle;
	unsigned int fence_class;
	unsigned int type;
	unsigned int flags;
	unsigned int signaled;
	unsigned int error;
	unsigned int sequence;
	unsigned int pad64;
	uint64_t expand_pad[2];	/* Future expansion */
};

/* Buffer permissions, referring to how the GPU uses the buffers.
 * These translate to fence types used for the buffers.
 * Typically a texture buffer is read, a destination buffer is write, and
 * a command (batch) buffer is exe.  The flags can be or-ed together.
 */

#define DRM_BO_FLAG_READ        (1ULL << 0)
#define DRM_BO_FLAG_WRITE       (1ULL << 1)
#define DRM_BO_FLAG_EXE         (1ULL << 2)

/*
 * All of the bits related to access mode
 */
#define DRM_BO_MASK_ACCESS	(DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE)
/*
 * Status flags. Can be read to determine the actual state of a buffer.
 * Can also be set in the buffer mask before validation.
 */

/*
 * Mask: Never evict this buffer. Not even with force. This type of buffer is only
 * available to root and must be manually removed before buffer manager shutdown
 * or lock.
 * Flags: Acknowledge
 */
#define DRM_BO_FLAG_NO_EVICT    (1ULL << 4)

/*
 * Mask: Require that the buffer is placed in mappable memory when validated.
 * If not set the buffer may or may not be in mappable memory when validated.
 * Flags: If set, the buffer is in mappable memory.
 */
#define DRM_BO_FLAG_MAPPABLE    (1ULL << 5)

/* Mask: The buffer should be shareable with other processes.
 * Flags: The buffer is shareable with other processes.
 */
#define DRM_BO_FLAG_SHAREABLE   (1ULL << 6)

/* Mask: If set, place the buffer in cache-coherent memory if available.
 * If clear, never place the buffer in cache coherent memory if validated.
 * Flags: The buffer is currently in cache-coherent memory.
 */
#define DRM_BO_FLAG_CACHED      (1ULL << 7)

/* Mask: Make sure that every time this buffer is validated,
 * it ends up on the same location provided that the memory mask is the same.
 * The buffer will also not be evicted when claiming space for
 * other buffers. Basically a pinned buffer but it may be thrown out as
 * part of buffer manager shutdown or locking.
 * Flags: Acknowledge.
 */
#define DRM_BO_FLAG_NO_MOVE     (1ULL << 8)

/* Mask: Make sure the buffer is in cached memory when mapped.  In conjunction
 * with DRM_BO_FLAG_CACHED it also allows the buffer to be bound into the GART
 * with unsnooped PTEs instead of snooped, by using chipset-specific cache
 * flushing at bind time. A better name might be DRM_BO_FLAG_TT_UNSNOOPED,
 * as the eviction to local memory (TTM unbind) on map is just a side effect
 * to prevent aggressive cache prefetch from the GPU disturbing the cache
 * management that the DRM is doing.
 *
 * Flags: Acknowledge.
 * Buffers allocated with this flag should not be used for suballocators
 * This type may have issues on CPUs with over-aggressive caching
 * http://marc.info/?l=linux-kernel&m=102376926732464&w=2
 */
#define DRM_BO_FLAG_CACHED_MAPPED    (1ULL << 19)


/* Mask: Force DRM_BO_FLAG_CACHED flag strictly also if it is set.
 * Flags: Acknowledge.
 */
#define DRM_BO_FLAG_FORCE_CACHING  (1ULL << 13)

/*
 * Mask: Force DRM_BO_FLAG_MAPPABLE flag strictly also if it is clear.
 * Flags: Acknowledge.
 */
#define DRM_BO_FLAG_FORCE_MAPPABLE (1ULL << 14)
#define DRM_BO_FLAG_TILE           (1ULL << 15)

/*
 * Memory type flags that can be or'ed together in the mask, but only
 * one appears in flags.
 */

/* System memory */
#define DRM_BO_FLAG_MEM_LOCAL  (1ULL << 24)
/* Translation table memory */
#define DRM_BO_FLAG_MEM_TT     (1ULL << 25)
/* Vram memory */
#define DRM_BO_FLAG_MEM_VRAM   (1ULL << 26)
/* Up to the driver to define. */
#define DRM_BO_FLAG_MEM_PRIV0  (1ULL << 27)
#define DRM_BO_FLAG_MEM_PRIV1  (1ULL << 28)
#define DRM_BO_FLAG_MEM_PRIV2  (1ULL << 29)
#define DRM_BO_FLAG_MEM_PRIV3  (1ULL << 30)
#define DRM_BO_FLAG_MEM_PRIV4  (1ULL << 31)
/* We can add more of these now with a 64-bit flag type */

/*
 * This is a mask covering all of the memory type flags; easier to just
 * use a single constant than a bunch of | values. It covers
 * DRM_BO_FLAG_MEM_LOCAL through DRM_BO_FLAG_MEM_PRIV4
 */
#define DRM_BO_MASK_MEM         0x00000000FF000000ULL
/*
 * This adds all of the CPU-mapping options in with the memory
 * type to label all bits which change how the page gets mapped
 */
#define DRM_BO_MASK_MEMTYPE     (DRM_BO_MASK_MEM | \
				 DRM_BO_FLAG_CACHED_MAPPED | \
				 DRM_BO_FLAG_CACHED | \
				 DRM_BO_FLAG_MAPPABLE)

/* Driver-private flags */
#define DRM_BO_MASK_DRIVER      0xFFFF000000000000ULL

/*
 * Don't block on validate and map. Instead, return EBUSY.
 */
#define DRM_BO_HINT_DONT_BLOCK  0x00000002
/*
 * Don't place this buffer on the unfenced list. This means
 * that the buffer will not end up having a fence associated
 * with it as a result of this operation
 */
#define DRM_BO_HINT_DONT_FENCE  0x00000004
/**
 * On hardware with no interrupt events for operation completion,
 * indicates that the kernel should sleep while waiting for any blocking
 * operation to complete rather than spinning.
 *
 * Has no effect otherwise.
 */
#define DRM_BO_HINT_WAIT_LAZY   0x00000008
/*
 * The client has computed relocations referring to this buffer using the
 * offset in the presumed_offset field.  If that offset ends up matching
 * where this buffer lands, the kernel is free to skip executing those
 * relocations.
 */
#define DRM_BO_HINT_PRESUMED_OFFSET 0x00000010

#define DRM_BO_INIT_MAGIC 0xfe769812
#define DRM_BO_INIT_MAJOR 1
#define DRM_BO_INIT_MINOR 0
#define DRM_BO_INIT_PATCH 0


struct drm_bo_info_req {
	uint64_t mask;
	uint64_t flags;
	unsigned int handle;
	unsigned int hint;
	unsigned int fence_class;
	unsigned int desired_tile_stride;
	unsigned int tile_info;
	unsigned int pad64;
	uint64_t presumed_offset;
};

struct drm_bo_create_req {
	uint64_t flags;
	uint64_t size;
	uint64_t buffer_start;
	unsigned int hint;
	unsigned int page_alignment;
};


/*
 * Reply flags
 */

#define DRM_BO_REP_BUSY 0x00000001

struct drm_bo_info_rep {
	uint64_t flags;
	uint64_t proposed_flags;
	uint64_t size;
	uint64_t offset;
	uint64_t arg_handle;
	uint64_t buffer_start;
	unsigned int handle;
	unsigned int fence_flags;
	unsigned int rep_flags;
	unsigned int page_alignment;
	unsigned int desired_tile_stride;
	unsigned int hw_tile_stride;
	unsigned int tile_info;
	unsigned int pad64;
	uint64_t expand_pad[4];	/* Future expansion */
};

struct drm_bo_arg_rep {
	struct drm_bo_info_rep bo_info;
	int ret;
	unsigned int pad64;
};

struct drm_bo_create_arg {
	union {
		struct drm_bo_create_req req;
		struct drm_bo_info_rep rep;
	} d;
};
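
/*
 * Illustrative sketch (not part of this header): creating a buffer object
 * through this legacy TTM interface, roughly what drmBOCreate() did in the
 * libdrm of that era.  The request and reply overlay each other in the
 * union, so the same argument is reused for both directions.
 *
 *	struct drm_bo_create_arg arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.d.req.size	= 65536;
 *	arg.d.req.flags = DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
 *			  DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MAPPABLE;
 *	arg.d.req.page_alignment = 0;
 *	if (ioctl(fd, DRM_IOCTL_BO_CREATE, &arg) == 0) {
 *		// arg.d.rep.handle names the new object;
 *		// arg.d.rep.flags/size report its actual placement
 *	}
 */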

struct drm_bo_handle_arg {
	unsigned int handle;
};

struct drm_bo_reference_info_arg {
	union {
		struct drm_bo_handle_arg req;
		struct drm_bo_info_rep rep;
	} d;
};

struct drm_bo_map_wait_idle_arg {
	union {
		struct drm_bo_info_req req;
		struct drm_bo_info_rep rep;
	} d;
};

struct drm_bo_op_req {
	enum {
		drm_bo_validate,
		drm_bo_fence,
		drm_bo_ref_fence,
	} op;
	unsigned int arg_handle;
	struct drm_bo_info_req bo_req;
};


struct drm_bo_op_arg {
	uint64_t next;
	union {
		struct drm_bo_op_req req;
		struct drm_bo_arg_rep rep;
	} d;
	int handled;
	unsigned int pad64;
};


#define DRM_BO_MEM_LOCAL 0
#define DRM_BO_MEM_TT 1
#define DRM_BO_MEM_VRAM 2
#define DRM_BO_MEM_PRIV0 3
#define DRM_BO_MEM_PRIV1 4
#define DRM_BO_MEM_PRIV2 5
#define DRM_BO_MEM_PRIV3 6
#define DRM_BO_MEM_PRIV4 7

#define DRM_BO_MEM_TYPES 8 /* For now. */

#define DRM_BO_LOCK_UNLOCK_BM       (1 << 0)
#define DRM_BO_LOCK_IGNORE_NO_EVICT (1 << 1)

struct drm_bo_version_arg {
	uint32_t major;
	uint32_t minor;
	uint32_t patchlevel;
};

struct drm_mm_type_arg {
	unsigned int mem_type;
	unsigned int lock_flags;
};

struct drm_mm_init_arg {
	unsigned int magic;
	unsigned int major;
	unsigned int minor;
	unsigned int mem_type;
	uint64_t p_offset;
	uint64_t p_size;
};

struct drm_mm_info_arg {
	unsigned int mem_type;
	uint64_t p_size;
};

struct drm_gem_close {
	/** Handle of the object to be closed. */
	uint32_t handle;
	uint32_t pad;
};

struct drm_gem_flink {
	/** Handle for the object being named */
	uint32_t handle;

	/** Returned global name */
	uint32_t name;
};

struct drm_gem_open {
	/** Name of object being opened */
	uint32_t name;

	/** Returned handle for the object */
	uint32_t handle;

	/** Returned size of the object */
	uint64_t size;
};
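
/*
 * Illustrative sketch (not part of this header): sharing a GEM object between
 * two processes via a global (flink) name.  "handle" is assumed to be a GEM
 * handle obtained from a driver-specific allocation ioctl, and "other_fd" a
 * second open of the same device.
 *
 *	// exporting process: publish a global name for the handle
 *	struct drm_gem_flink flink = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);		// flink.name is global
 *
 *	// importing process: turn the name back into a local handle
 *	struct drm_gem_open op = { .name = flink.name };
 *	ioctl(other_fd, DRM_IOCTL_GEM_OPEN, &op);	// fills op.handle, op.size
 *
 *	// drop the local handle when done with it
 *	struct drm_gem_close cl = { .handle = op.handle };
 *	ioctl(other_fd, DRM_IOCTL_GEM_CLOSE, &cl);
 */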

#include "drm_mode.h"

/**
 * \name Ioctls Definitions
 */
/*@{*/

#define DRM_IOCTL_BASE			'd'
#define DRM_IO(nr)			_IO(DRM_IOCTL_BASE,nr)
#define DRM_IOR(nr,type)		_IOR(DRM_IOCTL_BASE,nr,type)
#define DRM_IOW(nr,type)		_IOW(DRM_IOCTL_BASE,nr,type)
#define DRM_IOWR(nr,type)		_IOWR(DRM_IOCTL_BASE,nr,type)

#define DRM_IOCTL_VERSION		DRM_IOWR(0x00, struct drm_version)
#define DRM_IOCTL_GET_UNIQUE		DRM_IOWR(0x01, struct drm_unique)
#define DRM_IOCTL_GET_MAGIC		DRM_IOR( 0x02, struct drm_auth)
#define DRM_IOCTL_IRQ_BUSID		DRM_IOWR(0x03, struct drm_irq_busid)
#define DRM_IOCTL_GET_MAP		DRM_IOWR(0x04, struct drm_map)
#define DRM_IOCTL_GET_CLIENT		DRM_IOWR(0x05, struct drm_client)
#define DRM_IOCTL_GET_STATS		DRM_IOR( 0x06, struct drm_stats)
#define DRM_IOCTL_SET_VERSION		DRM_IOWR(0x07, struct drm_set_version)
#define DRM_IOCTL_MODESET_CTL		DRM_IOW( 0x08, struct drm_modeset_ctl)

#define DRM_IOCTL_GEM_CLOSE		DRM_IOW( 0x09, struct drm_gem_close)
#define DRM_IOCTL_GEM_FLINK		DRM_IOWR(0x0a, struct drm_gem_flink)
#define DRM_IOCTL_GEM_OPEN		DRM_IOWR(0x0b, struct drm_gem_open)

#define DRM_IOCTL_SET_UNIQUE		DRM_IOW( 0x10, struct drm_unique)
#define DRM_IOCTL_AUTH_MAGIC		DRM_IOW( 0x11, struct drm_auth)
#define DRM_IOCTL_BLOCK			DRM_IOWR(0x12, struct drm_block)
#define DRM_IOCTL_UNBLOCK		DRM_IOWR(0x13, struct drm_block)
#define DRM_IOCTL_CONTROL		DRM_IOW( 0x14, struct drm_control)
#define DRM_IOCTL_ADD_MAP		DRM_IOWR(0x15, struct drm_map)
#define DRM_IOCTL_ADD_BUFS		DRM_IOWR(0x16, struct drm_buf_desc)
#define DRM_IOCTL_MARK_BUFS		DRM_IOW( 0x17, struct drm_buf_desc)
#define DRM_IOCTL_INFO_BUFS		DRM_IOWR(0x18, struct drm_buf_info)
#define DRM_IOCTL_MAP_BUFS		DRM_IOWR(0x19, struct drm_buf_map)
#define DRM_IOCTL_FREE_BUFS		DRM_IOW( 0x1a, struct drm_buf_free)

#define DRM_IOCTL_RM_MAP		DRM_IOW( 0x1b, struct drm_map)

#define DRM_IOCTL_SET_SAREA_CTX		DRM_IOW( 0x1c, struct drm_ctx_priv_map)
#define DRM_IOCTL_GET_SAREA_CTX		DRM_IOWR(0x1d, struct drm_ctx_priv_map)

#define DRM_IOCTL_SET_MASTER		DRM_IO(  0x1e)
#define DRM_IOCTL_DROP_MASTER		DRM_IO(  0x1f)

#define DRM_IOCTL_ADD_CTX		DRM_IOWR(0x20, struct drm_ctx)
#define DRM_IOCTL_RM_CTX		DRM_IOWR(0x21, struct drm_ctx)
#define DRM_IOCTL_MOD_CTX		DRM_IOW( 0x22, struct drm_ctx)
#define DRM_IOCTL_GET_CTX		DRM_IOWR(0x23, struct drm_ctx)
#define DRM_IOCTL_SWITCH_CTX		DRM_IOW( 0x24, struct drm_ctx)
#define DRM_IOCTL_NEW_CTX		DRM_IOW( 0x25, struct drm_ctx)
#define DRM_IOCTL_RES_CTX		DRM_IOWR(0x26, struct drm_ctx_res)
#define DRM_IOCTL_ADD_DRAW		DRM_IOWR(0x27, struct drm_draw)
#define DRM_IOCTL_RM_DRAW		DRM_IOWR(0x28, struct drm_draw)
#define DRM_IOCTL_DMA			DRM_IOWR(0x29, struct drm_dma)
#define DRM_IOCTL_LOCK			DRM_IOW( 0x2a, struct drm_lock)
#define DRM_IOCTL_UNLOCK		DRM_IOW( 0x2b, struct drm_lock)
#define DRM_IOCTL_FINISH		DRM_IOW( 0x2c, struct drm_lock)

#define DRM_IOCTL_AGP_ACQUIRE		DRM_IO(  0x30)
#define DRM_IOCTL_AGP_RELEASE		DRM_IO(  0x31)
#define DRM_IOCTL_AGP_ENABLE		DRM_IOW( 0x32, struct drm_agp_mode)
#define DRM_IOCTL_AGP_INFO		DRM_IOR( 0x33, struct drm_agp_info)
#define DRM_IOCTL_AGP_ALLOC		DRM_IOWR(0x34, struct drm_agp_buffer)
#define DRM_IOCTL_AGP_FREE		DRM_IOW( 0x35, struct drm_agp_buffer)
#define DRM_IOCTL_AGP_BIND		DRM_IOW( 0x36, struct drm_agp_binding)
#define DRM_IOCTL_AGP_UNBIND		DRM_IOW( 0x37, struct drm_agp_binding)

#define DRM_IOCTL_SG_ALLOC		DRM_IOWR(0x38, struct drm_scatter_gather)
#define DRM_IOCTL_SG_FREE		DRM_IOW( 0x39, struct drm_scatter_gather)

#define DRM_IOCTL_WAIT_VBLANK		DRM_IOWR(0x3a, union drm_wait_vblank)

#define DRM_IOCTL_UPDATE_DRAW		DRM_IOW( 0x3f, struct drm_update_draw)

#define DRM_IOCTL_MM_INIT		DRM_IOWR(0xc0, struct drm_mm_init_arg)
#define DRM_IOCTL_MM_TAKEDOWN		DRM_IOWR(0xc1, struct drm_mm_type_arg)
#define DRM_IOCTL_MM_LOCK		DRM_IOWR(0xc2, struct drm_mm_type_arg)
#define DRM_IOCTL_MM_UNLOCK		DRM_IOWR(0xc3, struct drm_mm_type_arg)

#define DRM_IOCTL_FENCE_CREATE		DRM_IOWR(0xc4, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_REFERENCE	DRM_IOWR(0xc6, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_UNREFERENCE	DRM_IOWR(0xc7, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_SIGNALED	DRM_IOWR(0xc8, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_FLUSH		DRM_IOWR(0xc9, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_WAIT		DRM_IOWR(0xca, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_EMIT		DRM_IOWR(0xcb, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_BUFFERS		DRM_IOWR(0xcc, struct drm_fence_arg)

#define DRM_IOCTL_BO_CREATE		DRM_IOWR(0xcd, struct drm_bo_create_arg)
#define DRM_IOCTL_BO_MAP		DRM_IOWR(0xcf, struct drm_bo_map_wait_idle_arg)
#define DRM_IOCTL_BO_UNMAP		DRM_IOWR(0xd0, struct drm_bo_handle_arg)
#define DRM_IOCTL_BO_REFERENCE		DRM_IOWR(0xd1, struct drm_bo_reference_info_arg)
#define DRM_IOCTL_BO_UNREFERENCE	DRM_IOWR(0xd2, struct drm_bo_handle_arg)
#define DRM_IOCTL_BO_SETSTATUS		DRM_IOWR(0xd3, struct drm_bo_map_wait_idle_arg)
#define DRM_IOCTL_BO_INFO		DRM_IOWR(0xd4, struct drm_bo_reference_info_arg)
#define DRM_IOCTL_BO_WAIT_IDLE		DRM_IOWR(0xd5, struct drm_bo_map_wait_idle_arg)
#define DRM_IOCTL_BO_VERSION		DRM_IOR( 0xd6, struct drm_bo_version_arg)
#define DRM_IOCTL_MM_INFO		DRM_IOWR(0xd7, struct drm_mm_info_arg)

#define DRM_IOCTL_MODE_GETRESOURCES	DRM_IOWR(0xA0, struct drm_mode_card_res)

#define DRM_IOCTL_MODE_GETCRTC		DRM_IOWR(0xA1, struct drm_mode_crtc)
#define DRM_IOCTL_MODE_SETCRTC		DRM_IOWR(0xA2, struct drm_mode_crtc)
#define DRM_IOCTL_MODE_CURSOR		DRM_IOWR(0xA3, struct drm_mode_cursor)
#define DRM_IOCTL_MODE_GETGAMMA		DRM_IOWR(0xA4, struct drm_mode_crtc_lut)
#define DRM_IOCTL_MODE_SETGAMMA		DRM_IOWR(0xA5, struct drm_mode_crtc_lut)

#define DRM_IOCTL_MODE_GETENCODER	DRM_IOWR(0xA6, struct drm_mode_get_encoder)

#define DRM_IOCTL_MODE_GETCONNECTOR	DRM_IOWR(0xA7, struct drm_mode_get_connector)
#define DRM_IOCTL_MODE_ATTACHMODE	DRM_IOWR(0xA8, struct drm_mode_mode_cmd)
#define DRM_IOCTL_MODE_DETACHMODE	DRM_IOWR(0xA9, struct drm_mode_mode_cmd)
#define DRM_IOCTL_MODE_GETPROPERTY	DRM_IOWR(0xAA, struct drm_mode_get_property)
#define DRM_IOCTL_MODE_SETPROPERTY	DRM_IOWR(0xAB, struct drm_mode_connector_set_property)
#define DRM_IOCTL_MODE_GETPROPBLOB	DRM_IOWR(0xAC, struct drm_mode_get_blob)

#define DRM_IOCTL_MODE_GETFB		DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
#define DRM_IOCTL_MODE_ADDFB		DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
#define DRM_IOCTL_MODE_RMFB		DRM_IOWR(0xAF, uint32_t)
#define DRM_IOCTL_MODE_REPLACEFB	DRM_IOWR(0xB0, struct drm_mode_fb_cmd)

/*@}*/

/**
 * Device-specific ioctls should only be in their respective headers.
 * The device-specific ioctl range is from 0x40 to 0x99.
 * Generic ioctls restart at 0xA0.
 *
 * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
 * drmCommandReadWrite().
 */
#define DRM_COMMAND_BASE                0x40
#define DRM_COMMAND_END                 0xA0
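
/*
 * Illustrative sketch (not part of this header): a driver defines its private
 * ioctls relative to DRM_COMMAND_BASE in its own header.  The command index
 * (0x02) and argument struct below are hypothetical.
 *
 *	struct my_driver_arg {
 *		uint32_t param;
 *		uint32_t value;
 *	};
 *
 *	#define MY_DRIVER_GETPARAM		0x02
 *	#define MY_DRIVER_IOCTL_GETPARAM	DRM_IOWR(DRM_COMMAND_BASE + MY_DRIVER_GETPARAM, struct my_driver_arg)
 */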

/* typedef area */
#ifndef __KERNEL__
typedef struct drm_clip_rect drm_clip_rect_t;
typedef struct drm_tex_region drm_tex_region_t;
typedef struct drm_hw_lock drm_hw_lock_t;
typedef struct drm_version drm_version_t;
typedef struct drm_unique drm_unique_t;
typedef struct drm_list drm_list_t;
typedef struct drm_block drm_block_t;
typedef struct drm_control drm_control_t;
typedef enum drm_map_type drm_map_type_t;
typedef enum drm_map_flags drm_map_flags_t;
typedef struct drm_ctx_priv_map drm_ctx_priv_map_t;
typedef struct drm_map drm_map_t;
typedef struct drm_client drm_client_t;
typedef enum drm_stat_type drm_stat_type_t;
typedef struct drm_stats drm_stats_t;
typedef enum drm_lock_flags drm_lock_flags_t;
typedef struct drm_lock drm_lock_t;
typedef enum drm_dma_flags drm_dma_flags_t;
typedef struct drm_buf_desc drm_buf_desc_t;
typedef struct drm_buf_info drm_buf_info_t;
typedef struct drm_buf_free drm_buf_free_t;
typedef struct drm_buf_pub drm_buf_pub_t;
typedef struct drm_buf_map drm_buf_map_t;
typedef struct drm_dma drm_dma_t;
typedef union drm_wait_vblank drm_wait_vblank_t;
typedef struct drm_agp_mode drm_agp_mode_t;
typedef enum drm_ctx_flags drm_ctx_flags_t;
typedef struct drm_ctx drm_ctx_t;
typedef struct drm_ctx_res drm_ctx_res_t;
typedef struct drm_draw drm_draw_t;
typedef struct drm_update_draw drm_update_draw_t;
typedef struct drm_auth drm_auth_t;
typedef struct drm_irq_busid drm_irq_busid_t;
typedef enum drm_vblank_seq_type drm_vblank_seq_type_t;
typedef struct drm_agp_buffer drm_agp_buffer_t;
typedef struct drm_agp_binding drm_agp_binding_t;
typedef struct drm_agp_info drm_agp_info_t;
typedef struct drm_scatter_gather drm_scatter_gather_t;
typedef struct drm_set_version drm_set_version_t;

typedef struct drm_fence_arg drm_fence_arg_t;
typedef struct drm_mm_type_arg drm_mm_type_arg_t;
typedef struct drm_mm_init_arg drm_mm_init_arg_t;
typedef enum drm_bo_type drm_bo_type_t;
#endif

#endif