Home | History | Annotate | Line # | Download | only in drm
      1 /*	$NetBSD: vmwgfx_drm.h,v 1.2 2021/12/18 23:45:46 riastradh Exp $	*/
      2 
      3 /**************************************************************************
      4  *
      5  * Copyright  2009-2015 VMware, Inc., Palo Alto, CA., USA
      6  * All Rights Reserved.
      7  *
      8  * Permission is hereby granted, free of charge, to any person obtaining a
      9  * copy of this software and associated documentation files (the
     10  * "Software"), to deal in the Software without restriction, including
     11  * without limitation the rights to use, copy, modify, merge, publish,
     12  * distribute, sub license, and/or sell copies of the Software, and to
     13  * permit persons to whom the Software is furnished to do so, subject to
     14  * the following conditions:
     15  *
     16  * The above copyright notice and this permission notice (including the
     17  * next paragraph) shall be included in all copies or substantial portions
     18  * of the Software.
     19  *
     20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     21  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     22  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
     23  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
     24  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
     25  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
     26  * USE OR OTHER DEALINGS IN THE SOFTWARE.
     27  *
     28  **************************************************************************/
     29 
     30 #ifndef __VMWGFX_DRM_H__
     31 #define __VMWGFX_DRM_H__
     32 
     33 #include "drm.h"
     34 
     35 #if defined(__cplusplus)
     36 extern "C" {
     37 #endif
     38 
/* Surface limits understood by this interface. */
#define DRM_VMW_MAX_SURFACE_FACES 6
#define DRM_VMW_MAX_MIP_LEVELS 24


/*
 * Ioctl ordinals.  Note that DRM_VMW_ALLOC_DMABUF and DRM_VMW_UNREF_DMABUF
 * are legacy names sharing the ordinals of DRM_VMW_ALLOC_BO and
 * DRM_VMW_HANDLE_CLOSE respectively.
 */
#define DRM_VMW_GET_PARAM            0
#define DRM_VMW_ALLOC_DMABUF         1
#define DRM_VMW_ALLOC_BO             1
#define DRM_VMW_UNREF_DMABUF         2
#define DRM_VMW_HANDLE_CLOSE         2
#define DRM_VMW_CURSOR_BYPASS        3
/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0*/
#define DRM_VMW_CONTROL_STREAM       4
#define DRM_VMW_CLAIM_STREAM         5
#define DRM_VMW_UNREF_STREAM         6
/* guarded by DRM_VMW_PARAM_3D == 1 */
#define DRM_VMW_CREATE_CONTEXT       7
#define DRM_VMW_UNREF_CONTEXT        8
#define DRM_VMW_CREATE_SURFACE       9
#define DRM_VMW_UNREF_SURFACE        10
#define DRM_VMW_REF_SURFACE          11
#define DRM_VMW_EXECBUF              12
#define DRM_VMW_GET_3D_CAP           13
#define DRM_VMW_FENCE_WAIT           14
#define DRM_VMW_FENCE_SIGNALED       15
#define DRM_VMW_FENCE_UNREF          16
#define DRM_VMW_FENCE_EVENT          17
#define DRM_VMW_PRESENT              18
#define DRM_VMW_PRESENT_READBACK     19
#define DRM_VMW_UPDATE_LAYOUT        20
#define DRM_VMW_CREATE_SHADER        21
#define DRM_VMW_UNREF_SHADER         22
#define DRM_VMW_GB_SURFACE_CREATE    23
#define DRM_VMW_GB_SURFACE_REF       24
#define DRM_VMW_SYNCCPU              25
#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26
#define DRM_VMW_GB_SURFACE_CREATE_EXT   27
#define DRM_VMW_GB_SURFACE_REF_EXT      28
#define DRM_VMW_MSG                     29
     77 
     78 /*************************************************************************/
     79 /**
     80  * DRM_VMW_GET_PARAM - get device information.
     81  *
     82  * DRM_VMW_PARAM_FIFO_OFFSET:
     83  * Offset to use to map the first page of the FIFO read-only.
     84  * The fifo is mapped using the mmap() system call on the drm device.
     85  *
     86  * DRM_VMW_PARAM_OVERLAY_IOCTL:
     87  * Does the driver support the overlay ioctl.
     88  *
     89  * DRM_VMW_PARAM_SM4_1
     90  * SM4_1 support is enabled.
     91  */
     92 
/* Parameter ids accepted by the DRM_VMW_GET_PARAM ioctl. */
#define DRM_VMW_PARAM_NUM_STREAMS      0
#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1
#define DRM_VMW_PARAM_3D               2
#define DRM_VMW_PARAM_HW_CAPS          3
#define DRM_VMW_PARAM_FIFO_CAPS        4
#define DRM_VMW_PARAM_MAX_FB_SIZE      5
#define DRM_VMW_PARAM_FIFO_HW_VERSION  6
#define DRM_VMW_PARAM_MAX_SURF_MEMORY  7
#define DRM_VMW_PARAM_3D_CAPS_SIZE     8
#define DRM_VMW_PARAM_MAX_MOB_MEMORY   9
#define DRM_VMW_PARAM_MAX_MOB_SIZE     10
#define DRM_VMW_PARAM_SCREEN_TARGET    11
#define DRM_VMW_PARAM_DX               12
#define DRM_VMW_PARAM_HW_CAPS2         13
#define DRM_VMW_PARAM_SM4_1            14
    108 
/**
 * enum drm_vmw_handle_type - handle type for ref ioctls
 *
 * Selects the namespace in which a handle passed to the ref ioctls
 * is interpreted (see struct drm_vmw_surface_arg).
 */
enum drm_vmw_handle_type {
	DRM_VMW_HANDLE_LEGACY = 0,
	DRM_VMW_HANDLE_PRIME = 1
};
    117 
/**
 * struct drm_vmw_getparam_arg
 *
 * @value: Returned value. //Out
 * @param: Parameter to query (one of DRM_VMW_PARAM_*). //In.
 * @pad64: Padding to make the struct size a multiple of 64 bits.
 *
 * Argument to the DRM_VMW_GET_PARAM Ioctl.
 */

struct drm_vmw_getparam_arg {
	__u64 value;
	__u32 param;
	__u32 pad64;
};
    132 
    133 /*************************************************************************/
    134 /**
    135  * DRM_VMW_CREATE_CONTEXT - Create a host context.
    136  *
    137  * Allocates a device unique context id, and queues a create context command
    138  * for the host. Does not wait for host completion.
    139  */
    140 
/**
 * struct drm_vmw_context_arg
 *
 * @cid: Device unique context ID.
 * @pad64: Padding to make the struct size a multiple of 64 bits.
 *
 * Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl.
 * Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl.
 */

struct drm_vmw_context_arg {
	__s32 cid;
	__u32 pad64;
};
    154 
    155 /*************************************************************************/
    156 /**
 * DRM_VMW_UNREF_CONTEXT - Free a host context.
    158  *
    159  * Frees a global context id, and queues a destroy host command for the host.
    160  * Does not wait for host completion. The context ID can be used directly
    161  * in the command stream and shows up as the same context ID on the host.
    162  */
    163 
    164 /*************************************************************************/
    165 /**
 * DRM_VMW_CREATE_SURFACE - Create a host surface.
    167  *
    168  * Allocates a device unique surface id, and queues a create surface command
    169  * for the host. Does not wait for host completion. The surface ID can be
    170  * used directly in the command stream and shows up as the same surface
    171  * ID on the host.
    172  */
    173 
/**
 * struct drm_vmw_surface_create_req
 *
 * @flags: Surface flags as understood by the host.
 * @format: Surface format as understood by the host.
 * @mip_levels: Number of mip levels for each face.
 * An unused face should have 0 encoded.
 * @size_addr: Address of a user-space array of struct drm_vmw_size
 * cast to an __u64 for 32-64 bit compatibility.
 * The size of the array should equal the total number of mipmap levels.
 * @shareable: Boolean whether other clients (as identified by file descriptors)
 * may reference this surface.
 * @scanout: Boolean whether the surface is intended to be used as a
 * scanout.
 *
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 * Output data from the DRM_VMW_REF_SURFACE Ioctl.
 */

struct drm_vmw_surface_create_req {
	__u32 flags;
	__u32 format;
	__u32 mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	__u64 size_addr;
	__s32 shareable;
	__s32 scanout;
};
    201 
/**
 * struct drm_vmw_surface_arg
 *
 * @sid: Surface id of created surface or surface to destroy or reference.
 * @handle_type: Handle type for DRM_VMW_REF_SURFACE Ioctl.
 *
 * Output data from the DRM_VMW_CREATE_SURFACE Ioctl.
 * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl.
 * Input argument to the DRM_VMW_REF_SURFACE Ioctl.
 */

struct drm_vmw_surface_arg {
	__s32 sid;
	enum drm_vmw_handle_type handle_type;
};
    217 
/**
 * struct drm_vmw_size ioctl.
 *
 * @width - mip level width
 * @height - mip level height
 * @depth - mip level depth
 * @pad64 - padding to make the struct size a multiple of 64 bits
 *
 * Description of a mip level.
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 */

struct drm_vmw_size {
	__u32 width;
	__u32 height;
	__u32 depth;
	__u32 pad64;
};
    235 
/**
 * union drm_vmw_surface_create_arg
 *
 * @rep: Output data as described above (struct drm_vmw_surface_arg).
 * @req: Input data as described above (struct drm_vmw_surface_create_req).
 *
 * Argument to the DRM_VMW_CREATE_SURFACE Ioctl.
 */

union drm_vmw_surface_create_arg {
	struct drm_vmw_surface_arg rep;
	struct drm_vmw_surface_create_req req;
};
    249 
    250 /*************************************************************************/
    251 /**
    252  * DRM_VMW_REF_SURFACE - Reference a host surface.
    253  *
 * Puts a reference on a host surface with a given sid, as previously
    255  * returned by the DRM_VMW_CREATE_SURFACE ioctl.
    256  * A reference will make sure the surface isn't destroyed while we hold
    257  * it and will allow the calling client to use the surface ID in the command
    258  * stream.
    259  *
    260  * On successful return, the Ioctl returns the surface information given
    261  * in the DRM_VMW_CREATE_SURFACE ioctl.
    262  */
    263 
/**
 * union drm_vmw_surface_reference_arg
 *
 * @rep: Output data as described above (struct drm_vmw_surface_create_req).
 * @req: Input data as described above (struct drm_vmw_surface_arg).
 *
 * Argument to the DRM_VMW_REF_SURFACE Ioctl.
 */

union drm_vmw_surface_reference_arg {
	struct drm_vmw_surface_create_req rep;
	struct drm_vmw_surface_arg req;
};
    277 
    278 /*************************************************************************/
    279 /**
    280  * DRM_VMW_UNREF_SURFACE - Unreference a host surface.
    281  *
    282  * Clear a reference previously put on a host surface.
    283  * When all references are gone, including the one implicitly placed
    284  * on creation,
    285  * a destroy surface command will be queued for the host.
    286  * Does not wait for completion.
    287  */
    288 
    289 /*************************************************************************/
    290 /**
    291  * DRM_VMW_EXECBUF
    292  *
    293  * Submit a command buffer for execution on the host, and return a
    294  * fence seqno that when signaled, indicates that the command buffer has
    295  * executed.
    296  */
    297 
/**
 * struct drm_vmw_execbuf_arg
 *
 * @commands: User-space address of a command buffer cast to an __u64.
 * @command_size: Size in bytes of the command buffer.
 * @throttle_us: Sleep until software is less than @throttle_us
 * microseconds ahead of hardware. The driver may round this value
 * to the nearest kernel tick.
 * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an
 * __u64.
 * @version: Allows expanding the execbuf ioctl parameters without breaking
 * backwards compatibility, since user-space will always tell the kernel
 * which version it uses.
 * @flags: Execbuf flags (DRM_VMW_EXECBUF_FLAG_*).
 * @context_handle: Context to submit the commands to.
 * NOTE(review): exact semantics not visible in this header - confirm
 * against the driver implementation.
 * @imported_fence_fd:  FD for a fence imported from another device
 *
 * Argument to the DRM_VMW_EXECBUF Ioctl.
 */

#define DRM_VMW_EXECBUF_VERSION 2

#define DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD (1 << 0)
#define DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD (1 << 1)

struct drm_vmw_execbuf_arg {
	__u64 commands;
	__u32 command_size;
	__u32 throttle_us;
	__u64 fence_rep;
	__u32 version;
	__u32 flags;
	__u32 context_handle;
	__s32 imported_fence_fd;
};
    332 
/**
 * struct drm_vmw_fence_rep
 *
 * @handle: Fence object handle for fence associated with a command submission.
 * @mask: Fence flags relevant for this fence object.
 * @seqno: Fence sequence number in fifo. A fence object with a lower
 * seqno will signal the EXEC flag before a fence object with a higher
 * seqno. This can be used by user-space to avoid kernel calls to determine
 * whether a fence has signaled the EXEC flag. Note that @seqno will
 * wrap at 32-bit.
 * @passed_seqno: The highest seqno number processed by the hardware
 * so far. This can be used to mark user-space fence objects as signaled, and
 * to determine whether a fence seqno might be stale.
 * @fd: FD associated with the fence, -1 if not exported
 * @error: This member should've been set to -EFAULT on submission.
 * The following actions should be taken on completion:
 * error == -EFAULT: Fence communication failed. The host is synchronized.
 * Use the last fence id read from the FIFO fence register.
 * error != 0 && error != -EFAULT:
 * Fence submission failed. The host is synchronized. Use the fence_seq member.
 * error == 0: All is OK, The host may not be synchronized.
 * Use the fence_seq member.
 *
 * Input / Output data to the DRM_VMW_EXECBUF Ioctl.
 */

struct drm_vmw_fence_rep {
	__u32 handle;
	__u32 mask;
	__u32 seqno;
	__u32 passed_seqno;
	__s32 fd;
	__s32 error;
};
    367 
    368 /*************************************************************************/
    369 /**
    370  * DRM_VMW_ALLOC_BO
    371  *
    372  * Allocate a buffer object that is visible also to the host.
    373  * NOTE: The buffer is
    374  * identified by a handle and an offset, which are private to the guest, but
    375  * useable in the command stream. The guest kernel may translate these
    376  * and patch up the command stream accordingly. In the future, the offset may
    377  * be zero at all times, or it may disappear from the interface before it is
    378  * fixed.
    379  *
    380  * The buffer object may stay user-space mapped in the guest at all times,
    381  * and is thus suitable for sub-allocation.
    382  *
    383  * Buffer objects are mapped using the mmap() syscall on the drm device.
    384  */
    385 
/**
 * struct drm_vmw_alloc_bo_req
 *
 * @size: Required minimum size of the buffer.
 * @pad64: Padding to make the struct size a multiple of 64 bits.
 *
 * Input data to the DRM_VMW_ALLOC_BO Ioctl.
 */

struct drm_vmw_alloc_bo_req {
	__u32 size;
	__u32 pad64;
};
/* Legacy name kept for backwards compatibility. */
#define drm_vmw_alloc_dmabuf_req drm_vmw_alloc_bo_req
    399 
/**
 * struct drm_vmw_bo_rep
 *
 * @map_handle: Offset to use in the mmap() call used to map the buffer.
 * @handle: Handle unique to this buffer. Used for unreferencing.
 * @cur_gmr_id: GMR id to use in the command stream when this buffer is
 * referenced. See note above.
 * @cur_gmr_offset: Offset to use in the command stream when this buffer is
 * referenced. See note above.
 * @pad64: Padding to make the struct size a multiple of 64 bits.
 *
 * Output data from the DRM_VMW_ALLOC_BO Ioctl.
 */

struct drm_vmw_bo_rep {
	__u64 map_handle;
	__u32 handle;
	__u32 cur_gmr_id;
	__u32 cur_gmr_offset;
	__u32 pad64;
};
/* Legacy name kept for backwards compatibility. */
#define drm_vmw_dmabuf_rep drm_vmw_bo_rep
    421 
/**
 * union drm_vmw_alloc_bo_arg
 *
 * @req: Input data as described above (struct drm_vmw_alloc_bo_req).
 * @rep: Output data as described above (struct drm_vmw_bo_rep).
 *
 * Argument to the DRM_VMW_ALLOC_BO Ioctl.
 */

union drm_vmw_alloc_bo_arg {
	struct drm_vmw_alloc_bo_req req;
	struct drm_vmw_bo_rep rep;
};
/* Legacy name kept for backwards compatibility. */
#define drm_vmw_alloc_dmabuf_arg drm_vmw_alloc_bo_arg
    436 
    437 /*************************************************************************/
    438 /**
    439  * DRM_VMW_CONTROL_STREAM - Control overlays, aka streams.
    440  *
    441  * This IOCTL controls the overlay units of the svga device.
 * The SVGA overlay units do not work like regular hardware units in
 * that they do not automatically read back the contents of the given dma
    444  * buffer. But instead only read back for each call to this ioctl, and
    445  * at any point between this call being made and a following call that
    446  * either changes the buffer or disables the stream.
    447  */
    448 
/**
 * struct drm_vmw_rect
 *
 * @x: Horizontal position of the upper-left corner. May be negative.
 * @y: Vertical position of the upper-left corner. May be negative.
 * @w: Width of the rectangle.
 * @h: Height of the rectangle.
 *
 * Defines a rectangle. Used in the overlay ioctl to define
 * source and destination rectangle.
 */

struct drm_vmw_rect {
	__s32 x;
	__s32 y;
	__u32 w;
	__u32 h;
};
    462 
/**
 * struct drm_vmw_control_stream_arg
 *
 * @stream_id: Stream to control
 * @enabled: If false all following arguments are ignored.
 * @flags: Stream flags. NOTE(review): undocumented in the original -
 * confirm semantics against the driver.
 * @color_key: Overlay color key. NOTE(review): undocumented in the
 * original - confirm semantics against the driver.
 * @handle: Handle to buffer for getting data from.
 * @format: Format of the overlay as understood by the host.
 * @width: Width of the overlay.
 * @height: Height of the overlay.
 * @size: Size of the overlay in bytes.
 * @pitch: Array of pitches, the two last are only used for YUV12 formats.
 * @offset: Offset from start of dma buffer to overlay.
 * @src: Source rect, must be within the defined area above.
 * @dst: Destination rect, x and y may be negative.
 * @pad64: Padding to make the struct size a multiple of 64 bits.
 *
 * Argument to the DRM_VMW_CONTROL_STREAM Ioctl.
 */

struct drm_vmw_control_stream_arg {
	__u32 stream_id;
	__u32 enabled;

	__u32 flags;
	__u32 color_key;

	__u32 handle;
	__u32 offset;
	__s32 format;
	__u32 size;
	__u32 width;
	__u32 height;
	__u32 pitch[3];

	__u32 pad64;
	struct drm_vmw_rect src;
	struct drm_vmw_rect dst;
};
    500 
/*************************************************************************/
/**
 * DRM_VMW_CURSOR_BYPASS - Give extra information about cursor bypass.
 *
 */

#define DRM_VMW_CURSOR_BYPASS_ALL    (1 << 0)
#define DRM_VMW_CURSOR_BYPASS_FLAGS       (1)

/**
 * struct drm_vmw_cursor_bypass_arg
 *
 * @flags: Flags (DRM_VMW_CURSOR_BYPASS_*).
 * @crtc_id: Crtc id, only used if DRM_VMW_CURSOR_BYPASS_ALL isn't passed.
 * @xpos: X position of cursor.
 * @ypos: Y position of cursor.
 * @xhot: X hotspot.
 * @yhot: Y hotspot.
 *
 * Argument to the DRM_VMW_CURSOR_BYPASS Ioctl.
 */

struct drm_vmw_cursor_bypass_arg {
	__u32 flags;
	__u32 crtc_id;
	__s32 xpos;
	__s32 ypos;
	__s32 xhot;
	__s32 yhot;
};
    531 
    532 /*************************************************************************/
    533 /**
    534  * DRM_VMW_CLAIM_STREAM - Claim a single stream.
    535  */
    536 
/**
 * struct drm_vmw_stream_arg
 *
 * @stream_id: Device unique stream ID.
 * @pad64: Padding to make the struct size a multiple of 64 bits.
 *
 * Output argument to the DRM_VMW_CLAIM_STREAM Ioctl.
 * Input argument to the DRM_VMW_UNREF_STREAM Ioctl.
 */

struct drm_vmw_stream_arg {
	__u32 stream_id;
	__u32 pad64;
};
    550 
    551 /*************************************************************************/
    552 /**
    553  * DRM_VMW_UNREF_STREAM - Unclaim a stream.
    554  *
    555  * Return a single stream that was claimed by this process. Also makes
    556  * sure that the stream has been stopped.
    557  */
    558 
    559 /*************************************************************************/
    560 /**
    561  * DRM_VMW_GET_3D_CAP
    562  *
    563  * Read 3D capabilities from the FIFO
    564  *
    565  */
    566 
/**
 * struct drm_vmw_get_3d_cap_arg
 *
 * @buffer: Pointer to a buffer for capability data, cast to an __u64
 * @max_size: Maximum number of bytes to copy
 * @pad64: Padding to make the struct size a multiple of 64 bits.
 *
 * Input argument to the DRM_VMW_GET_3D_CAP
 * ioctl.
 */

struct drm_vmw_get_3d_cap_arg {
	__u64 buffer;
	__u32 max_size;
	__u32 pad64;
};
    582 
    583 /*************************************************************************/
    584 /**
    585  * DRM_VMW_FENCE_WAIT
    586  *
    587  * Waits for a fence object to signal. The wait is interruptible, so that
    588  * signals may be delivered during the interrupt. The wait may timeout,
 * in which case the call returns -EBUSY. If the wait is restarted,
    590  * that is restarting without resetting @cookie_valid to zero,
    591  * the timeout is computed from the first call.
    592  *
    593  * The flags argument to the DRM_VMW_FENCE_WAIT ioctl indicates what to wait
    594  * on:
    595  * DRM_VMW_FENCE_FLAG_EXEC: All commands ahead of the fence in the command
    596  * stream
    597  * have executed.
    598  * DRM_VMW_FENCE_FLAG_QUERY: All query results resulting from query finish
    599  * commands
    600  * in the buffer given to the EXECBUF ioctl returning the fence object handle
    601  * are available to user-space.
    602  *
    603  * DRM_VMW_WAIT_OPTION_UNREF: If this wait option is given, and the
 * fence wait ioctl returns 0, the fence object has been unreferenced after
    605  * the wait.
    606  */
    607 
#define DRM_VMW_FENCE_FLAG_EXEC   (1 << 0)
#define DRM_VMW_FENCE_FLAG_QUERY  (1 << 1)

#define DRM_VMW_WAIT_OPTION_UNREF (1 << 0)

/**
 * struct drm_vmw_fence_wait_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @cookie_valid: Must be reset to 0 on first call. Left alone on restart.
 * @kernel_cookie: Set to 0 on first call. Left alone on restart.
 * @timeout_us: Wait timeout in microseconds. 0 for indefinite timeout.
 * @lazy: Set to 1 if timing is not critical. Allow more than a kernel tick
 * before returning.
 * @flags: Fence flags to wait on (DRM_VMW_FENCE_FLAG_*).
 * @wait_options: Options that control the behaviour of the wait ioctl
 * (DRM_VMW_WAIT_OPTION_*).
 * @pad64: Padding to make the struct size a multiple of 64 bits.
 *
 * Input argument to the DRM_VMW_FENCE_WAIT ioctl.
 */

struct drm_vmw_fence_wait_arg {
	__u32 handle;
	__s32  cookie_valid;
	__u64 kernel_cookie;
	__u64 timeout_us;
	__s32 lazy;
	__s32 flags;
	__s32 wait_options;
	__s32 pad64;
};
    638 
    639 /*************************************************************************/
    640 /**
    641  * DRM_VMW_FENCE_SIGNALED
    642  *
 * Checks if a fence object is signaled.
    644  */
    645 
/**
 * struct drm_vmw_fence_signaled_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @flags: Fence object flags input to DRM_VMW_FENCE_SIGNALED ioctl
 * @signaled: Out: Nonzero if the fence has signaled.
 * NOTE(review): inferred from the field name - confirm against the driver.
 * @passed_seqno: Out: Highest sequence passed so far. Can be used to signal the
 * EXEC flag of user-space fence objects.
 * @signaled_flags: Out: Flags signaled.
 * @pad64: Padding to make the struct size a multiple of 64 bits.
 *
 * Input/Output argument to the DRM_VMW_FENCE_SIGNALED and DRM_VMW_FENCE_UNREF
 * ioctls.
 */

struct drm_vmw_fence_signaled_arg {
	 __u32 handle;
	 __u32 flags;
	 __s32 signaled;
	 __u32 passed_seqno;
	 __u32 signaled_flags;
	 __u32 pad64;
};
    667 
    668 /*************************************************************************/
    669 /**
    670  * DRM_VMW_FENCE_UNREF
    671  *
    672  * Unreferences a fence object, and causes it to be destroyed if there are no
    673  * other references to it.
    674  *
    675  */
    676 
/**
 * struct drm_vmw_fence_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @pad64: Padding to make the struct size a multiple of 64 bits.
 *
 * Input/Output argument to the DRM_VMW_FENCE_UNREF ioctl.
 */

struct drm_vmw_fence_arg {
	 __u32 handle;
	 __u32 pad64;
};
    689 
    690 
    691 /*************************************************************************/
    692 /**
    693  * DRM_VMW_FENCE_EVENT
    694  *
    695  * Queues an event on a fence to be delivered on the drm character device
    696  * when the fence has signaled the DRM_VMW_FENCE_FLAG_EXEC flag.
    697  * Optionally the approximate time when the fence signaled is
    698  * given by the event.
    699  */
    700 
/*
 * The event type
 */
#define DRM_VMW_EVENT_FENCE_SIGNALED 0x80000000

/**
 * struct drm_vmw_event_fence
 *
 * @base: Common drm event header.
 * @user_data: User data supplied in struct drm_vmw_fence_event_arg.
 * @tv_sec: Seconds part of the approximate time the fence signaled.
 * @tv_usec: Microseconds part of the approximate time the fence signaled.
 */
struct drm_vmw_event_fence {
	struct drm_event base;
	__u64 user_data;
	__u32 tv_sec;
	__u32 tv_usec;
};
    712 
/*
 * Flags that may be given to the command.
 */
/* Request fence signaled time on the event. */
#define DRM_VMW_FE_FLAG_REQ_TIME (1 << 0)

/**
 * struct drm_vmw_fence_event_arg
 *
 * @fence_rep: Pointer to fence_rep structure cast to __u64 or 0 if
 * the fence is not supposed to be referenced by user-space.
 * @user_data: Info to be delivered with the event.
 * @handle: Attach the event to this fence only.
 * @flags: A set of flags as defined above.
 */
struct drm_vmw_fence_event_arg {
	__u64 fence_rep;
	__u64 user_data;
	__u32 handle;
	__u32 flags;
};
    734 
    735 
    736 /*************************************************************************/
    737 /**
    738  * DRM_VMW_PRESENT
    739  *
    740  * Executes an SVGA present on a given fb for a given surface. The surface
    741  * is placed on the framebuffer. Cliprects are given relative to the given
 * point (the point designated by dest_{x|y}).
    743  *
    744  */
    745 
/**
 * struct drm_vmw_present_arg
 * @fb_id: framebuffer id to present / read back from.
 * @sid: Surface id to present from.
 * @dest_x: X placement coordinate for surface.
 * @dest_y: Y placement coordinate for surface.
 * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
 * @num_clips: Number of cliprects given relative to the framebuffer origin,
 * in the same coordinate space as the frame buffer.
 * @pad64: Unused 64-bit padding.
 *
 * Input argument to the DRM_VMW_PRESENT ioctl.
 */

struct drm_vmw_present_arg {
	__u32 fb_id;
	__u32 sid;
	__s32 dest_x;
	__s32 dest_y;
	__u64 clips_ptr;
	__u32 num_clips;
	__u32 pad64;
};
    769 
    770 
    771 /*************************************************************************/
    772 /**
    773  * DRM_VMW_PRESENT_READBACK
    774  *
    775  * Executes an SVGA present readback from a given fb to the dma buffer
    776  * currently bound as the fb. If there is no dma buffer bound to the fb,
    777  * an error will be returned.
    778  *
    779  */
    780 
/**
 * struct drm_vmw_present_readback_arg
 * @fb_id: fb_id to present / read back from.
 * @num_clips: Number of cliprects.
 * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
 * @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an __u64.
 * If this member is NULL, then the ioctl should not return a fence.
 *
 * Input argument to the DRM_VMW_PRESENT_READBACK ioctl.
 */

struct drm_vmw_present_readback_arg {
	 __u32 fb_id;
	 __u32 num_clips;
	 __u64 clips_ptr;
	 __u64 fence_rep;
};
    796 
    797 /*************************************************************************/
    798 /**
    799  * DRM_VMW_UPDATE_LAYOUT - Update layout
    800  *
    801  * Updates the preferred modes and connection status for connectors. The
    802  * command consists of one drm_vmw_update_layout_arg pointing to an array
    803  * of num_outputs drm_vmw_rect's.
    804  */
    805 
/**
 * struct drm_vmw_update_layout_arg
 *
 * @num_outputs: number of active connectors
 * @pad64: padding to make the struct size a multiple of 64 bits
 * @rects: pointer to array of drm_vmw_rect cast to an __u64
 *
 * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl.
 */
struct drm_vmw_update_layout_arg {
	__u32 num_outputs;
	__u32 pad64;
	__u64 rects;
};
    819 
    820 
    821 /*************************************************************************/
    822 /**
    823  * DRM_VMW_CREATE_SHADER - Create shader
    824  *
    825  * Creates a shader and optionally binds it to a dma buffer containing
    826  * the shader byte-code.
    827  */
    828 
/**
 * enum drm_vmw_shader_type - Shader types
 *
 * @drm_vmw_shader_type_vs: Vertex shader.
 * @drm_vmw_shader_type_ps: Pixel shader.
 */
enum drm_vmw_shader_type {
	drm_vmw_shader_type_vs = 0,
	drm_vmw_shader_type_ps,
};
    836 
    837 
/**
 * struct drm_vmw_shader_create_arg
 *
 * @shader_type: Shader type of the shader to create.
 * @size: Size of the byte-code in bytes.
 * @buffer_handle: Buffer handle identifying the buffer containing the
 * shader byte-code
 * @shader_handle: On successful completion contains a handle that
 * can be used to subsequently identify the shader.
 * @offset: Offset in bytes into the buffer given by @buffer_handle,
 * where the shader byte-code starts.
 *
 * Input / Output argument to the DRM_VMW_CREATE_SHADER Ioctl.
 */
struct drm_vmw_shader_create_arg {
	enum drm_vmw_shader_type shader_type;
	__u32 size;
	__u32 buffer_handle;
	__u32 shader_handle;
	__u64 offset;
};
    859 
    860 /*************************************************************************/
    861 /**
    862  * DRM_VMW_UNREF_SHADER - Unreferences a shader
    863  *
    864  * Destroys a user-space reference to a shader, optionally destroying
    865  * it.
    866  */
    867 
/**
 * struct drm_vmw_shader_arg
 *
 * @handle: Handle identifying the shader to destroy.
 * @pad64: Padding to make the struct size a multiple of 64 bits.
 *
 * Input argument to the DRM_VMW_UNREF_SHADER ioctl.
 */
struct drm_vmw_shader_arg {
	__u32 handle;
	__u32 pad64;
};
    879 
    880 /*************************************************************************/
    881 /**
    882  * DRM_VMW_GB_SURFACE_CREATE - Create a host guest-backed surface.
    883  *
    884  * Allocates a surface handle and queues a create surface command
    885  * for the host on the first use of the surface. The surface ID can
    886  * be used as the surface ID in commands referencing the surface.
    887  */
    888 
    889 /**
    890  * enum drm_vmw_surface_flags
    891  *
    892  * @drm_vmw_surface_flag_shareable:     Whether the surface is shareable
    893  * @drm_vmw_surface_flag_scanout:       Whether the surface is a scanout
    894  *                                      surface.
    895  * @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is
    896  *                                      given.
    897  * @drm_vmw_surface_flag_coherent:      Back surface with coherent memory.
    898  */
/* Bit flags (ABI values unchanged); semantics documented above. */
enum drm_vmw_surface_flags {
	drm_vmw_surface_flag_shareable     = 0x1,	/* Surface may be shared */
	drm_vmw_surface_flag_scanout       = 0x2,	/* Scanout surface */
	drm_vmw_surface_flag_create_buffer = 0x4,	/* Create backup buffer if none given */
	drm_vmw_surface_flag_coherent      = 0x8,	/* Back surface with coherent memory */
};
    905 
    906 /**
    907  * struct drm_vmw_gb_surface_create_req
    908  *
    909  * @svga3d_flags:     SVGA3d surface flags for the device.
    910  * @format:           SVGA3d format.
 * @mip_levels:       Number of mip levels for all faces.
 * @drm_surface_flags: Flags as described above.
 * @multisample_count: Future use. Set to 0.
 * @autogen_filter:   Future use. Set to 0.
 * @buffer_handle:    Buffer handle of backup buffer. SVGA3D_INVALID_ID
 *                    if none.
 * @base_size:        Size of the base mip level for all faces.
 * @array_size:       Must be zero for non-DX hardware, and if non-zero
 *                    svga3d_flags must have proper bind flags setup.
 *
 * Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl.
    922  * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
    923  */
struct drm_vmw_gb_surface_create_req {
	__u32 svga3d_flags;		/* SVGA3d surface flags for the device */
	__u32 format;			/* SVGA3d format */
	__u32 mip_levels;		/* Number of mip levels for all faces */
	enum drm_vmw_surface_flags drm_surface_flags;	/* See enum drm_vmw_surface_flags */
	__u32 multisample_count;	/* Future use; set to 0 */
	__u32 autogen_filter;		/* Future use; set to 0 */
	__u32 buffer_handle;		/* Backup buffer handle; SVGA3D_INVALID_ID if none */
	__u32 array_size;		/* 0 on non-DX hardware; else svga3d_flags needs bind flags */
	struct drm_vmw_size base_size;	/* Size of the base mip level for all faces */
};
    935 
    936 /**
    937  * struct drm_vmw_gb_surface_create_rep
    938  *
    939  * @handle:            Surface handle.
    940  * @backup_size:       Size of backup buffers for this surface.
    941  * @buffer_handle:     Handle of backup buffer. SVGA3D_INVALID_ID if none.
    942  * @buffer_size:       Actual size of the buffer identified by
    943  *                     @buffer_handle
    944  * @buffer_map_handle: Offset into device address space for the buffer
    945  *                     identified by @buffer_handle.
    946  *
    947  * Part of output argument for the DRM_VMW_GB_SURFACE_REF ioctl.
    948  * Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl.
    949  */
struct drm_vmw_gb_surface_create_rep {
	__u32 handle;		/* Surface handle */
	__u32 backup_size;	/* Size of backup buffers for this surface */
	__u32 buffer_handle;	/* Backup buffer handle; SVGA3D_INVALID_ID if none */
	__u32 buffer_size;	/* Actual size of the buffer identified by @buffer_handle */
	__u64 buffer_map_handle;	/* Device-address-space offset for the buffer */
};
    957 
    958 /**
    959  * union drm_vmw_gb_surface_create_arg
    960  *
    961  * @req: Input argument as described above.
    962  * @rep: Output argument as described above.
    963  *
    964  * Argument to the DRM_VMW_GB_SURFACE_CREATE ioctl.
    965  */
union drm_vmw_gb_surface_create_arg {
	struct drm_vmw_gb_surface_create_rep rep;	/* Out: created surface info */
	struct drm_vmw_gb_surface_create_req req;	/* In: creation parameters */
};
    970 
    971 /*************************************************************************/
    972 /**
    973  * DRM_VMW_GB_SURFACE_REF - Reference a host surface.
    974  *
    975  * Puts a reference on a host surface with a given handle, as previously
    976  * returned by the DRM_VMW_GB_SURFACE_CREATE ioctl.
    977  * A reference will make sure the surface isn't destroyed while we hold
    978  * it and will allow the calling client to use the surface handle in
    979  * the command stream.
    980  *
    981  * On successful return, the Ioctl returns the surface information given
    982  * to and returned from the DRM_VMW_GB_SURFACE_CREATE ioctl.
    983  */
    984 
    985 /**
 * struct drm_vmw_gb_surface_ref_rep
    987  *
    988  * @creq: The data used as input when the surface was created, as described
    989  *        above at "struct drm_vmw_gb_surface_create_req"
    990  * @crep: Additional data output when the surface was created, as described
    991  *        above at "struct drm_vmw_gb_surface_create_rep"
    992  *
    993  * Output Argument to the DRM_VMW_GB_SURFACE_REF ioctl.
    994  */
struct drm_vmw_gb_surface_ref_rep {
	struct drm_vmw_gb_surface_create_req creq;	/* Input data used when the surface was created */
	struct drm_vmw_gb_surface_create_rep crep;	/* Additional output data from creation */
};
    999 
   1000 /**
   1001  * union drm_vmw_gb_surface_reference_arg
   1002  *
   1003  * @req: Input data as described above at "struct drm_vmw_surface_arg"
   1004  * @rep: Output data as described above at "struct drm_vmw_gb_surface_ref_rep"
   1005  *
   1006  * Argument to the DRM_VMW_GB_SURFACE_REF Ioctl.
   1007  */
union drm_vmw_gb_surface_reference_arg {
	struct drm_vmw_gb_surface_ref_rep rep;	/* Out: creation-time req + rep data */
	struct drm_vmw_surface_arg req;		/* In: see struct drm_vmw_surface_arg */
};
   1012 
   1013 
   1014 /*************************************************************************/
   1015 /**
   1016  * DRM_VMW_SYNCCPU - Sync a DMA buffer / MOB for CPU access.
   1017  *
   1018  * Idles any previously submitted GPU operations on the buffer and
   1019  * by default blocks command submissions that reference the buffer.
   1020  * If the file descriptor used to grab a blocking CPU sync is closed, the
   1021  * cpu sync is released.
   1022  * The flags argument indicates how the grab / release operation should be
   1023  * performed:
   1024  */
   1025 
   1026 /**
   1027  * enum drm_vmw_synccpu_flags - Synccpu flags:
   1028  *
   1029  * @drm_vmw_synccpu_read: Sync for read. If sync is done for read only, it's a
   1030  * hint to the kernel to allow command submissions that references the buffer
   1031  * for read-only.
   1032  * @drm_vmw_synccpu_write: Sync for write. Block all command submissions
   1033  * referencing this buffer.
   1034  * @drm_vmw_synccpu_dontblock: Dont wait for GPU idle, but rather return
   1035  * -EBUSY should the buffer be busy.
   1036  * @drm_vmw_synccpu_allow_cs: Allow command submission that touches the buffer
   1037  * while the buffer is synced for CPU. This is similar to the GEM bo idle
   1038  * behavior.
   1039  */
/* Bit flags (ABI values unchanged); semantics documented above. */
enum drm_vmw_synccpu_flags {
	drm_vmw_synccpu_read      = 0x1,	/* Sync for read */
	drm_vmw_synccpu_write     = 0x2,	/* Sync for write; blocks command submission */
	drm_vmw_synccpu_dontblock = 0x4,	/* Return -EBUSY instead of waiting for idle */
	drm_vmw_synccpu_allow_cs  = 0x8		/* Allow command submission while synced */
};
   1046 
   1047 /**
   1048  * enum drm_vmw_synccpu_op - Synccpu operations:
   1049  *
   1050  * @drm_vmw_synccpu_grab:    Grab the buffer for CPU operations
   1051  * @drm_vmw_synccpu_release: Release a previous grab.
   1052  */
/* Explicit enumerator values: these are part of the ioctl ABI. */
enum drm_vmw_synccpu_op {
	drm_vmw_synccpu_grab = 0,	/* Grab the buffer for CPU operations */
	drm_vmw_synccpu_release = 1	/* Release a previous grab */
};
   1057 
   1058 /**
   1059  * struct drm_vmw_synccpu_arg
   1060  *
   1061  * @op:			     The synccpu operation as described above.
   1062  * @handle:		     Handle identifying the buffer object.
   1063  * @flags:		     Flags as described above.
   1064  */
struct drm_vmw_synccpu_arg {
	enum drm_vmw_synccpu_op op;		/* Grab or release, see enum above */
	enum drm_vmw_synccpu_flags flags;	/* Read/write/dontblock/allow_cs flags */
	__u32 handle;				/* Handle identifying the buffer object */
	__u32 pad64;				/* Explicit padding to a 64-bit multiple */
};
   1071 
   1072 /*************************************************************************/
   1073 /**
   1074  * DRM_VMW_CREATE_EXTENDED_CONTEXT - Create a host context.
   1075  *
   1076  * Allocates a device unique context id, and queues a create context command
   1077  * for the host. Does not wait for host completion.
   1078  */
/* Explicit enumerator values: these are part of the ioctl ABI. */
enum drm_vmw_extended_context {
	drm_vmw_context_legacy = 0,	/* Pre-DX context */
	drm_vmw_context_dx = 1		/* DX context */
};
   1083 
   1084 /**
   1085  * union drm_vmw_extended_context_arg
   1086  *
   1087  * @req: Context type.
   1088  * @rep: Context identifier.
   1089  *
   1090  * Argument to the DRM_VMW_CREATE_EXTENDED_CONTEXT Ioctl.
   1091  */
union drm_vmw_extended_context_arg {
	enum drm_vmw_extended_context req;	/* In: context type (legacy or dx) */
	struct drm_vmw_context_arg rep;		/* Out: context identifier */
};
   1096 
   1097 /*************************************************************************/
   1098 /*
   1099  * DRM_VMW_HANDLE_CLOSE - Close a user-space handle and release its
   1100  * underlying resource.
   1101  *
   1102  * Note that this ioctl is overlaid on the deprecated DRM_VMW_UNREF_DMABUF
   1103  * Ioctl.
   1104  */
   1105 
   1106 /**
   1107  * struct drm_vmw_handle_close_arg
   1108  *
   1109  * @handle: Handle to close.
   1110  *
   1111  * Argument to the DRM_VMW_HANDLE_CLOSE Ioctl.
   1112  */
struct drm_vmw_handle_close_arg {
	__u32 handle;	/* Handle to close */
	__u32 pad64;	/* Explicit padding to a 64-bit multiple */
};
   1117 #define drm_vmw_unref_dmabuf_arg drm_vmw_handle_close_arg
   1118 
   1119 /*************************************************************************/
   1120 /**
   1121  * DRM_VMW_GB_SURFACE_CREATE_EXT - Create a host guest-backed surface.
   1122  *
   1123  * Allocates a surface handle and queues a create surface command
   1124  * for the host on the first use of the surface. The surface ID can
   1125  * be used as the surface ID in commands referencing the surface.
   1126  *
   1127  * This new command extends DRM_VMW_GB_SURFACE_CREATE by adding version
   1128  * parameter and 64 bit svga flag.
   1129  */
   1130 
   1131 /**
   1132  * enum drm_vmw_surface_version
   1133  *
   1134  * @drm_vmw_surface_gb_v1: Corresponds to current gb surface format with
   1135  * svga3d surface flags split into 2, upper half and lower half.
   1136  */
/* Explicit enumerator value: part of the ioctl ABI. */
enum drm_vmw_surface_version {
	drm_vmw_gb_surface_v1 = 0	/* gb surface with split 64-bit svga3d flags */
};
   1140 
   1141 /**
   1142  * struct drm_vmw_gb_surface_create_ext_req
   1143  *
   1144  * @base: Surface create parameters.
   1145  * @version: Version of surface create ioctl.
   1146  * @svga3d_flags_upper_32_bits: Upper 32 bits of svga3d flags.
   1147  * @multisample_pattern: Multisampling pattern when msaa is supported.
   1148  * @quality_level: Precision settings for each sample.
   1149  * @must_be_zero: Reserved for future usage.
   1150  *
   1151  * Input argument to the  DRM_VMW_GB_SURFACE_CREATE_EXT Ioctl.
   1152  * Part of output argument for the DRM_VMW_GB_SURFACE_REF_EXT Ioctl.
   1153  */
   1154 struct drm_vmw_gb_surface_create_ext_req {
   1155 	struct drm_vmw_gb_surface_create_req base;
   1156 	enum drm_vmw_surface_version version;
   1157 	uint32_t svga3d_flags_upper_32_bits;
   1158 	SVGA3dMSPattern multisample_pattern;
   1159 	SVGA3dMSQualityLevel quality_level;
   1160 	uint64_t must_be_zero;
   1161 };
   1162 
   1163 /**
   1164  * union drm_vmw_gb_surface_create_ext_arg
   1165  *
   1166  * @req: Input argument as described above.
   1167  * @rep: Output argument as described above.
   1168  *
   1169  * Argument to the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
   1170  */
union drm_vmw_gb_surface_create_ext_arg {
	struct drm_vmw_gb_surface_create_rep rep;	/* Out: created surface info */
	struct drm_vmw_gb_surface_create_ext_req req;	/* In: extended creation parameters */
};
   1175 
   1176 /*************************************************************************/
   1177 /**
   1178  * DRM_VMW_GB_SURFACE_REF_EXT - Reference a host surface.
   1179  *
   1180  * Puts a reference on a host surface with a given handle, as previously
   1181  * returned by the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
   1182  * A reference will make sure the surface isn't destroyed while we hold
   1183  * it and will allow the calling client to use the surface handle in
   1184  * the command stream.
   1185  *
   1186  * On successful return, the Ioctl returns the surface information given
   1187  * to and returned from the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
   1188  */
   1189 
   1190 /**
   1191  * struct drm_vmw_gb_surface_ref_ext_rep
   1192  *
   1193  * @creq: The data used as input when the surface was created, as described
   1194  *        above at "struct drm_vmw_gb_surface_create_ext_req"
   1195  * @crep: Additional data output when the surface was created, as described
   1196  *        above at "struct drm_vmw_gb_surface_create_rep"
   1197  *
   1198  * Output Argument to the DRM_VMW_GB_SURFACE_REF_EXT ioctl.
   1199  */
struct drm_vmw_gb_surface_ref_ext_rep {
	struct drm_vmw_gb_surface_create_ext_req creq;	/* Input data used when the surface was created */
	struct drm_vmw_gb_surface_create_rep crep;	/* Additional output data from creation */
};
   1204 
   1205 /**
   1206  * union drm_vmw_gb_surface_reference_ext_arg
   1207  *
   1208  * @req: Input data as described above at "struct drm_vmw_surface_arg"
   1209  * @rep: Output data as described above at
   1210  *       "struct drm_vmw_gb_surface_ref_ext_rep"
   1211  *
 * Argument to the DRM_VMW_GB_SURFACE_REF_EXT Ioctl.
   1213  */
union drm_vmw_gb_surface_reference_ext_arg {
	struct drm_vmw_gb_surface_ref_ext_rep rep;	/* Out: creation-time req + rep data */
	struct drm_vmw_surface_arg req;			/* In: see struct drm_vmw_surface_arg */
};
   1218 
   1219 /**
   1220  * struct drm_vmw_msg_arg
   1221  *
   1222  * @send: Pointer to user-space msg string (null terminated).
   1223  * @receive: Pointer to user-space receive buffer.
 * @send_only: Boolean whether this is only sending or receiving too.
 * @receive_len: Size in bytes of the receive buffer (presumably also used
 *               to return the received length -- verify against the ioctl
 *               implementation).
   1225  *
   1226  * Argument to the DRM_VMW_MSG ioctl.
   1227  */
struct drm_vmw_msg_arg {
	__u64 send;		/* Userspace pointer to null-terminated msg string */
	__u64 receive;		/* Userspace pointer to receive buffer */
	__s32 send_only;	/* Boolean: send only, or send and receive */
	__u32 receive_len;	/* Size of the receive buffer -- in/out semantics
				 * not visible here; confirm against the driver */
};
   1234 
   1235 #if defined(__cplusplus)
   1236 }
   1237 #endif
   1238 
   1239 #endif
   1240