/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
237cdc0497Smrg */ 247cdc0497Smrg 257cdc0497Smrg#ifndef __MSM_DRM_H__ 267cdc0497Smrg#define __MSM_DRM_H__ 277cdc0497Smrg 287cdc0497Smrg#include "drm.h" 297cdc0497Smrg 307cdc0497Smrg#if defined(__cplusplus) 317cdc0497Smrgextern "C" { 327cdc0497Smrg#endif 337cdc0497Smrg 347cdc0497Smrg/* Please note that modifications to all structs defined here are 357cdc0497Smrg * subject to backwards-compatibility constraints: 367cdc0497Smrg * 1) Do not use pointers, use __u64 instead for 32 bit / 64 bit 377cdc0497Smrg * user/kernel compatibility 387cdc0497Smrg * 2) Keep fields aligned to their size 397cdc0497Smrg * 3) Because of how drm_ioctl() works, we can add new fields at 407cdc0497Smrg * the end of an ioctl if some care is taken: drm_ioctl() will 417cdc0497Smrg * zero out the new fields at the tail of the ioctl, so a zero 427cdc0497Smrg * value should have a backwards compatible meaning. And for 437cdc0497Smrg * output params, userspace won't see the newly added output 447cdc0497Smrg * fields.. so that has to be somehow ok. 457cdc0497Smrg */ 467cdc0497Smrg 477cdc0497Smrg#define MSM_PIPE_NONE 0x00 487cdc0497Smrg#define MSM_PIPE_2D0 0x01 497cdc0497Smrg#define MSM_PIPE_2D1 0x02 507cdc0497Smrg#define MSM_PIPE_3D0 0x10 517cdc0497Smrg 527cdc0497Smrg/* The pipe-id just uses the lower bits, so can be OR'd with flags in 537cdc0497Smrg * the upper 16 bits (which could be extended further, if needed, maybe 547cdc0497Smrg * we extend/overload the pipe-id some day to deal with multiple rings, 557cdc0497Smrg * but even then I don't think we need the full lower 16 bits). 567cdc0497Smrg */ 577cdc0497Smrg#define MSM_PIPE_ID_MASK 0xffff 587cdc0497Smrg#define MSM_PIPE_ID(x) ((x) & MSM_PIPE_ID_MASK) 597cdc0497Smrg#define MSM_PIPE_FLAGS(x) ((x) & ~MSM_PIPE_ID_MASK) 607cdc0497Smrg 617cdc0497Smrg/* timeouts are specified in clock-monotonic absolute times (to simplify 627cdc0497Smrg * restarting interrupted ioctls). 
The following struct is logically the 637cdc0497Smrg * same as 'struct timespec' but 32/64b ABI safe. 647cdc0497Smrg */ 657cdc0497Smrgstruct drm_msm_timespec { 667cdc0497Smrg __s64 tv_sec; /* seconds */ 677cdc0497Smrg __s64 tv_nsec; /* nanoseconds */ 687cdc0497Smrg}; 697cdc0497Smrg 707cdc0497Smrg#define MSM_PARAM_GPU_ID 0x01 717cdc0497Smrg#define MSM_PARAM_GMEM_SIZE 0x02 727cdc0497Smrg#define MSM_PARAM_CHIP_ID 0x03 737cdc0497Smrg#define MSM_PARAM_MAX_FREQ 0x04 747cdc0497Smrg#define MSM_PARAM_TIMESTAMP 0x05 757cdc0497Smrg#define MSM_PARAM_GMEM_BASE 0x06 767cdc0497Smrg#define MSM_PARAM_NR_RINGS 0x07 777cdc0497Smrg 787cdc0497Smrgstruct drm_msm_param { 797cdc0497Smrg __u32 pipe; /* in, MSM_PIPE_x */ 807cdc0497Smrg __u32 param; /* in, MSM_PARAM_x */ 817cdc0497Smrg __u64 value; /* out (get_param) or in (set_param) */ 827cdc0497Smrg}; 837cdc0497Smrg 847cdc0497Smrg/* 857cdc0497Smrg * GEM buffers: 867cdc0497Smrg */ 877cdc0497Smrg 887cdc0497Smrg#define MSM_BO_SCANOUT 0x00000001 /* scanout capable */ 897cdc0497Smrg#define MSM_BO_GPU_READONLY 0x00000002 907cdc0497Smrg#define MSM_BO_CACHE_MASK 0x000f0000 917cdc0497Smrg/* cache modes */ 927cdc0497Smrg#define MSM_BO_CACHED 0x00010000 937cdc0497Smrg#define MSM_BO_WC 0x00020000 947cdc0497Smrg#define MSM_BO_UNCACHED 0x00040000 957cdc0497Smrg 967cdc0497Smrg#define MSM_BO_FLAGS (MSM_BO_SCANOUT | \ 977cdc0497Smrg MSM_BO_GPU_READONLY | \ 987cdc0497Smrg MSM_BO_CACHED | \ 997cdc0497Smrg MSM_BO_WC | \ 1007cdc0497Smrg MSM_BO_UNCACHED) 1017cdc0497Smrg 1027cdc0497Smrgstruct drm_msm_gem_new { 1037cdc0497Smrg __u64 size; /* in */ 1047cdc0497Smrg __u32 flags; /* in, mask of MSM_BO_x */ 1057cdc0497Smrg __u32 handle; /* out */ 1067cdc0497Smrg}; 1077cdc0497Smrg 1087cdc0497Smrg#define MSM_INFO_IOVA 0x01 1097cdc0497Smrg 1107cdc0497Smrg#define MSM_INFO_FLAGS (MSM_INFO_IOVA) 1117cdc0497Smrg 1127cdc0497Smrgstruct drm_msm_gem_info { 1137cdc0497Smrg __u32 handle; /* in */ 1147cdc0497Smrg __u32 flags; /* in - combination of MSM_INFO_* flags */ 
1157cdc0497Smrg __u64 offset; /* out, mmap() offset or iova */ 1167cdc0497Smrg}; 1177cdc0497Smrg 1187cdc0497Smrg#define MSM_PREP_READ 0x01 1197cdc0497Smrg#define MSM_PREP_WRITE 0x02 1207cdc0497Smrg#define MSM_PREP_NOSYNC 0x04 1217cdc0497Smrg 1227cdc0497Smrg#define MSM_PREP_FLAGS (MSM_PREP_READ | MSM_PREP_WRITE | MSM_PREP_NOSYNC) 1237cdc0497Smrg 1247cdc0497Smrgstruct drm_msm_gem_cpu_prep { 1257cdc0497Smrg __u32 handle; /* in */ 1267cdc0497Smrg __u32 op; /* in, mask of MSM_PREP_x */ 1277cdc0497Smrg struct drm_msm_timespec timeout; /* in */ 1287cdc0497Smrg}; 1297cdc0497Smrg 1307cdc0497Smrgstruct drm_msm_gem_cpu_fini { 1317cdc0497Smrg __u32 handle; /* in */ 1327cdc0497Smrg}; 1337cdc0497Smrg 1347cdc0497Smrg/* 1357cdc0497Smrg * Cmdstream Submission: 1367cdc0497Smrg */ 1377cdc0497Smrg 1387cdc0497Smrg/* The value written into the cmdstream is logically: 1397cdc0497Smrg * 1407cdc0497Smrg * ((relocbuf->gpuaddr + reloc_offset) << shift) | or 1417cdc0497Smrg * 1427cdc0497Smrg * When we have GPU's w/ >32bit ptrs, it should be possible to deal 1437cdc0497Smrg * with this by emit'ing two reloc entries with appropriate shift 1447cdc0497Smrg * values. Or a new MSM_SUBMIT_CMD_x type would also be an option. 1457cdc0497Smrg * 1467cdc0497Smrg * NOTE that reloc's must be sorted by order of increasing submit_offset, 1477cdc0497Smrg * otherwise EINVAL. 1487cdc0497Smrg */ 1497cdc0497Smrgstruct drm_msm_gem_submit_reloc { 1507cdc0497Smrg __u32 submit_offset; /* in, offset from submit_bo */ 1517cdc0497Smrg __u32 or; /* in, value OR'd with result */ 1527cdc0497Smrg __s32 shift; /* in, amount of left shift (can be negative) */ 1537cdc0497Smrg __u32 reloc_idx; /* in, index of reloc_bo buffer */ 1547cdc0497Smrg __u64 reloc_offset; /* in, offset from start of reloc_bo */ 1557cdc0497Smrg}; 1567cdc0497Smrg 1577cdc0497Smrg/* submit-types: 1587cdc0497Smrg * BUF - this cmd buffer is executed normally. 1597cdc0497Smrg * IB_TARGET_BUF - this cmd buffer is an IB target. 
Reloc's are 1607cdc0497Smrg * processed normally, but the kernel does not setup an IB to 1617cdc0497Smrg * this buffer in the first-level ringbuffer 1627cdc0497Smrg * CTX_RESTORE_BUF - only executed if there has been a GPU context 1637cdc0497Smrg * switch since the last SUBMIT ioctl 1647cdc0497Smrg */ 1657cdc0497Smrg#define MSM_SUBMIT_CMD_BUF 0x0001 1667cdc0497Smrg#define MSM_SUBMIT_CMD_IB_TARGET_BUF 0x0002 1677cdc0497Smrg#define MSM_SUBMIT_CMD_CTX_RESTORE_BUF 0x0003 1687cdc0497Smrgstruct drm_msm_gem_submit_cmd { 1697cdc0497Smrg __u32 type; /* in, one of MSM_SUBMIT_CMD_x */ 1707cdc0497Smrg __u32 submit_idx; /* in, index of submit_bo cmdstream buffer */ 1717cdc0497Smrg __u32 submit_offset; /* in, offset into submit_bo */ 1727cdc0497Smrg __u32 size; /* in, cmdstream size */ 1737cdc0497Smrg __u32 pad; 1747cdc0497Smrg __u32 nr_relocs; /* in, number of submit_reloc's */ 1757cdc0497Smrg __u64 relocs; /* in, ptr to array of submit_reloc's */ 1767cdc0497Smrg}; 1777cdc0497Smrg 1787cdc0497Smrg/* Each buffer referenced elsewhere in the cmdstream submit (ie. the 1797cdc0497Smrg * cmdstream buffer(s) themselves or reloc entries) has one (and only 1807cdc0497Smrg * one) entry in the submit->bos[] table. 1817cdc0497Smrg * 1827cdc0497Smrg * As a optimization, the current buffer (gpu virtual address) can be 1837cdc0497Smrg * passed back through the 'presumed' field. If on a subsequent reloc, 1847cdc0497Smrg * userspace passes back a 'presumed' address that is still valid, 1857cdc0497Smrg * then patching the cmdstream for this entry is skipped. This can 1867cdc0497Smrg * avoid kernel needing to map/access the cmdstream bo in the common 1877cdc0497Smrg * case. 
1887cdc0497Smrg */ 1897cdc0497Smrg#define MSM_SUBMIT_BO_READ 0x0001 1907cdc0497Smrg#define MSM_SUBMIT_BO_WRITE 0x0002 1917cdc0497Smrg 1927cdc0497Smrg#define MSM_SUBMIT_BO_FLAGS (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE) 1937cdc0497Smrg 1947cdc0497Smrgstruct drm_msm_gem_submit_bo { 1957cdc0497Smrg __u32 flags; /* in, mask of MSM_SUBMIT_BO_x */ 1967cdc0497Smrg __u32 handle; /* in, GEM handle */ 1977cdc0497Smrg __u64 presumed; /* in/out, presumed buffer address */ 1987cdc0497Smrg}; 1997cdc0497Smrg 2007cdc0497Smrg/* Valid submit ioctl flags: */ 2017cdc0497Smrg#define MSM_SUBMIT_NO_IMPLICIT 0x80000000 /* disable implicit sync */ 2027cdc0497Smrg#define MSM_SUBMIT_FENCE_FD_IN 0x40000000 /* enable input fence_fd */ 2037cdc0497Smrg#define MSM_SUBMIT_FENCE_FD_OUT 0x20000000 /* enable output fence_fd */ 2047cdc0497Smrg#define MSM_SUBMIT_SUDO 0x10000000 /* run submitted cmds from RB */ 2057cdc0497Smrg#define MSM_SUBMIT_FLAGS ( \ 2067cdc0497Smrg MSM_SUBMIT_NO_IMPLICIT | \ 2077cdc0497Smrg MSM_SUBMIT_FENCE_FD_IN | \ 2087cdc0497Smrg MSM_SUBMIT_FENCE_FD_OUT | \ 2097cdc0497Smrg MSM_SUBMIT_SUDO | \ 2107cdc0497Smrg 0) 2117cdc0497Smrg 2127cdc0497Smrg/* Each cmdstream submit consists of a table of buffers involved, and 2137cdc0497Smrg * one or more cmdstream buffers. This allows for conditional execution 2147cdc0497Smrg * (context-restore), and IB buffers needed for per tile/bin draw cmds. 
2157cdc0497Smrg */ 2167cdc0497Smrgstruct drm_msm_gem_submit { 2177cdc0497Smrg __u32 flags; /* MSM_PIPE_x | MSM_SUBMIT_x */ 2187cdc0497Smrg __u32 fence; /* out */ 2197cdc0497Smrg __u32 nr_bos; /* in, number of submit_bo's */ 2207cdc0497Smrg __u32 nr_cmds; /* in, number of submit_cmd's */ 2217cdc0497Smrg __u64 bos; /* in, ptr to array of submit_bo's */ 2227cdc0497Smrg __u64 cmds; /* in, ptr to array of submit_cmd's */ 2237cdc0497Smrg __s32 fence_fd; /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */ 2247cdc0497Smrg __u32 queueid; /* in, submitqueue id */ 2257cdc0497Smrg}; 2267cdc0497Smrg 2277cdc0497Smrg/* The normal way to synchronize with the GPU is just to CPU_PREP on 2287cdc0497Smrg * a buffer if you need to access it from the CPU (other cmdstream 2297cdc0497Smrg * submission from same or other contexts, PAGE_FLIP ioctl, etc, all 2307cdc0497Smrg * handle the required synchronization under the hood). This ioctl 2317cdc0497Smrg * mainly just exists as a way to implement the gallium pipe_fence 2327cdc0497Smrg * APIs without requiring a dummy bo to synchronize on. 2337cdc0497Smrg */ 2347cdc0497Smrgstruct drm_msm_wait_fence { 2357cdc0497Smrg __u32 fence; /* in */ 2367cdc0497Smrg __u32 pad; 2377cdc0497Smrg struct drm_msm_timespec timeout; /* in */ 2387cdc0497Smrg __u32 queueid; /* in, submitqueue id */ 2397cdc0497Smrg}; 2407cdc0497Smrg 2417cdc0497Smrg/* madvise provides a way to tell the kernel in case a buffers contents 2427cdc0497Smrg * can be discarded under memory pressure, which is useful for userspace 2437cdc0497Smrg * bo cache where we want to optimistically hold on to buffer allocate 2447cdc0497Smrg * and potential mmap, but allow the pages to be discarded under memory 2457cdc0497Smrg * pressure. 2467cdc0497Smrg * 2477cdc0497Smrg * Typical usage would involve madvise(DONTNEED) when buffer enters BO 2487cdc0497Smrg * cache, and madvise(WILLNEED) if trying to recycle buffer from BO cache. 
2497cdc0497Smrg * In the WILLNEED case, 'retained' indicates to userspace whether the 2507cdc0497Smrg * backing pages still exist. 2517cdc0497Smrg */ 2527cdc0497Smrg#define MSM_MADV_WILLNEED 0 /* backing pages are needed, status returned in 'retained' */ 2537cdc0497Smrg#define MSM_MADV_DONTNEED 1 /* backing pages not needed */ 2547cdc0497Smrg#define __MSM_MADV_PURGED 2 /* internal state */ 2557cdc0497Smrg 2567cdc0497Smrgstruct drm_msm_gem_madvise { 2577cdc0497Smrg __u32 handle; /* in, GEM handle */ 2587cdc0497Smrg __u32 madv; /* in, MSM_MADV_x */ 2597cdc0497Smrg __u32 retained; /* out, whether backing store still exists */ 2607cdc0497Smrg}; 2617cdc0497Smrg 2627cdc0497Smrg/* 2637cdc0497Smrg * Draw queues allow the user to set specific submission parameter. Command 2647cdc0497Smrg * submissions specify a specific submitqueue to use. ID 0 is reserved for 2657cdc0497Smrg * backwards compatibility as a "default" submitqueue 2667cdc0497Smrg */ 2677cdc0497Smrg 2687cdc0497Smrg#define MSM_SUBMITQUEUE_FLAGS (0) 2697cdc0497Smrg 2707cdc0497Smrgstruct drm_msm_submitqueue { 2717cdc0497Smrg __u32 flags; /* in, MSM_SUBMITQUEUE_x */ 2727cdc0497Smrg __u32 prio; /* in, Priority level */ 2737cdc0497Smrg __u32 id; /* out, identifier */ 2747cdc0497Smrg}; 2757cdc0497Smrg 2767cdc0497Smrg#define DRM_MSM_GET_PARAM 0x00 2777cdc0497Smrg/* placeholder: 2787cdc0497Smrg#define DRM_MSM_SET_PARAM 0x01 2797cdc0497Smrg */ 2807cdc0497Smrg#define DRM_MSM_GEM_NEW 0x02 2817cdc0497Smrg#define DRM_MSM_GEM_INFO 0x03 2827cdc0497Smrg#define DRM_MSM_GEM_CPU_PREP 0x04 2837cdc0497Smrg#define DRM_MSM_GEM_CPU_FINI 0x05 2847cdc0497Smrg#define DRM_MSM_GEM_SUBMIT 0x06 2857cdc0497Smrg#define DRM_MSM_WAIT_FENCE 0x07 2867cdc0497Smrg#define DRM_MSM_GEM_MADVISE 0x08 2877cdc0497Smrg/* placeholder: 2887cdc0497Smrg#define DRM_MSM_GEM_SVM_NEW 0x09 2897cdc0497Smrg */ 2907cdc0497Smrg#define DRM_MSM_SUBMITQUEUE_NEW 0x0A 2917cdc0497Smrg#define DRM_MSM_SUBMITQUEUE_CLOSE 0x0B 2927cdc0497Smrg 2937cdc0497Smrg#define 
DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param) 2947cdc0497Smrg#define DRM_IOCTL_MSM_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new) 2957cdc0497Smrg#define DRM_IOCTL_MSM_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_INFO, struct drm_msm_gem_info) 2967cdc0497Smrg#define DRM_IOCTL_MSM_GEM_CPU_PREP DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_GEM_CPU_PREP, struct drm_msm_gem_cpu_prep) 2977cdc0497Smrg#define DRM_IOCTL_MSM_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_GEM_CPU_FINI, struct drm_msm_gem_cpu_fini) 2987cdc0497Smrg#define DRM_IOCTL_MSM_GEM_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_SUBMIT, struct drm_msm_gem_submit) 2997cdc0497Smrg#define DRM_IOCTL_MSM_WAIT_FENCE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_WAIT_FENCE, struct drm_msm_wait_fence) 3007cdc0497Smrg#define DRM_IOCTL_MSM_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_MADVISE, struct drm_msm_gem_madvise) 3017cdc0497Smrg#define DRM_IOCTL_MSM_SUBMITQUEUE_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_NEW, struct drm_msm_submitqueue) 3027cdc0497Smrg#define DRM_IOCTL_MSM_SUBMITQUEUE_CLOSE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_CLOSE, __u32) 3037cdc0497Smrg 3047cdc0497Smrg#if defined(__cplusplus) 3057cdc0497Smrg} 3067cdc0497Smrg#endif 3077cdc0497Smrg 3087cdc0497Smrg#endif /* __MSM_DRM_H__ */ 309