/*
 * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
 * Copyright 2010 Marek Olšák <maraeo@gmail.com>
 * Copyright 2018 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE. */

#ifndef RADEON_WINSYS_H
#define RADEON_WINSYS_H

/* The public winsys interface header for the radeon driver. */

/* Skip command submission. Same as RADEON_NOOP=1. */
#define RADEON_FLUSH_NOOP                     (1u << 29)

/* Toggle the secure submission boolean after the flush */
#define RADEON_FLUSH_TOGGLE_SECURE_SUBMISSION (1u << 30)

/* Whether the next IB can start immediately and not wait for draws and
 * dispatches from the current IB to finish. */
#define RADEON_FLUSH_START_NEXT_GFX_IB_NOW    (1u << 31)

#define RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW                                                   \
   (PIPE_FLUSH_ASYNC | RADEON_FLUSH_START_NEXT_GFX_IB_NOW)

#include "amd/common/ac_gpu_info.h"
#include "amd/common/ac_surface.h"
#include "pipebuffer/pb_buffer.h"

/* Tiling flags. */
enum radeon_bo_layout
{
   RADEON_LAYOUT_LINEAR = 0,
   RADEON_LAYOUT_TILED,
   RADEON_LAYOUT_SQUARETILED,

   RADEON_LAYOUT_UNKNOWN
};

enum radeon_bo_domain
{ /* bitfield */
  RADEON_DOMAIN_GTT = 2,
  RADEON_DOMAIN_VRAM = 4,
  RADEON_DOMAIN_VRAM_GTT = RADEON_DOMAIN_VRAM | RADEON_DOMAIN_GTT,
  RADEON_DOMAIN_GDS = 8,
  RADEON_DOMAIN_OA = 16,
};

enum radeon_bo_flag
{ /* bitfield */
  RADEON_FLAG_GTT_WC = (1 << 0),
  RADEON_FLAG_NO_CPU_ACCESS = (1 << 1),
  RADEON_FLAG_NO_SUBALLOC = (1 << 2),
  RADEON_FLAG_SPARSE = (1 << 3),
  RADEON_FLAG_NO_INTERPROCESS_SHARING = (1 << 4),
  RADEON_FLAG_READ_ONLY = (1 << 5),
  RADEON_FLAG_32BIT = (1 << 6),
  RADEON_FLAG_ENCRYPTED = (1 << 7),
  RADEON_FLAG_UNCACHED = (1 << 8), /* only gfx9 and newer */
  RADEON_FLAG_DRIVER_INTERNAL = (1 << 9),
};

enum radeon_dependency_flag
{
   /* Instead of waiting for a job to finish execution, the dependency will
    * be signaled when the job starts execution.
    */
   RADEON_DEPENDENCY_START_FENCE = 1 << 1,
};

enum radeon_bo_usage
{ /* bitfield */
  RADEON_USAGE_READ = 2,
  RADEON_USAGE_WRITE = 4,
  RADEON_USAGE_READWRITE = RADEON_USAGE_READ | RADEON_USAGE_WRITE,

  /* The winsys ensures that the CS submission will be scheduled after
   * previously flushed CSs referencing this BO in a conflicting way.
   */
  RADEON_USAGE_SYNCHRONIZED = 8,

  /* When used, an implicit sync is done to make sure a compute shader
   * will read the written values from a previous draw.
   */
  RADEON_USAGE_NEEDS_IMPLICIT_SYNC = 16,
};

enum radeon_map_flags
{
   /* Indicates that the caller will unmap the buffer.
    *
    * Not unmapping buffers is an important performance optimization for
    * OpenGL (avoids kernel overhead for frequently mapped buffers).
    */
   RADEON_MAP_TEMPORARY = (PIPE_MAP_DRV_PRV << 0),
};

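/* Illustrative sketch (not part of the interface): intended use of
 * RADEON_MAP_TEMPORARY with the buffer_map/buffer_unmap callbacks declared
 * further below, assuming an existing winsys "ws", buffer "buf" and CS "cs".
 *
 *    // Default: the winsys may keep the mapping cached; no unmap is required.
 *    void *ptr = ws->buffer_map(ws, buf, cs, PIPE_MAP_WRITE);
 *
 *    // Temporary: the caller promises to unmap as soon as it is done.
 *    void *tmp = ws->buffer_map(ws, buf, cs, PIPE_MAP_READ | RADEON_MAP_TEMPORARY);
 *    ...
 *    ws->buffer_unmap(ws, buf);
 */
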
#define RADEON_SPARSE_PAGE_SIZE (64 * 1024)

enum radeon_value_id
{
   RADEON_REQUESTED_VRAM_MEMORY,
   RADEON_REQUESTED_GTT_MEMORY,
   RADEON_MAPPED_VRAM,
   RADEON_MAPPED_GTT,
   RADEON_SLAB_WASTED_VRAM,
   RADEON_SLAB_WASTED_GTT,
   RADEON_BUFFER_WAIT_TIME_NS,
   RADEON_NUM_MAPPED_BUFFERS,
   RADEON_TIMESTAMP,
   RADEON_NUM_GFX_IBS,
   RADEON_NUM_SDMA_IBS,
   RADEON_GFX_BO_LIST_COUNTER, /* number of BOs submitted in gfx IBs */
   RADEON_GFX_IB_SIZE_COUNTER,
   RADEON_NUM_BYTES_MOVED,
   RADEON_NUM_EVICTIONS,
   RADEON_NUM_VRAM_CPU_PAGE_FAULTS,
   RADEON_VRAM_USAGE,
   RADEON_VRAM_VIS_USAGE,
   RADEON_GTT_USAGE,
   RADEON_GPU_TEMPERATURE, /* DRM 2.42.0 */
   RADEON_CURRENT_SCLK,
   RADEON_CURRENT_MCLK,
   RADEON_CS_THREAD_TIME,
};

enum radeon_bo_priority
{
   /* Each group of two has the same priority. */
   RADEON_PRIO_FENCE = 0,
   RADEON_PRIO_TRACE,

   RADEON_PRIO_SO_FILLED_SIZE = 2,
   RADEON_PRIO_QUERY,

   RADEON_PRIO_IB1 = 4, /* main IB submitted to the kernel */
   RADEON_PRIO_IB2,     /* IB executed with INDIRECT_BUFFER */

   RADEON_PRIO_DRAW_INDIRECT = 6,
   RADEON_PRIO_INDEX_BUFFER,

   RADEON_PRIO_CP_DMA = 8,
   RADEON_PRIO_BORDER_COLORS,

   RADEON_PRIO_CONST_BUFFER = 10,
   RADEON_PRIO_DESCRIPTORS,

   RADEON_PRIO_SAMPLER_BUFFER = 12,
   RADEON_PRIO_VERTEX_BUFFER,

   RADEON_PRIO_SHADER_RW_BUFFER = 14,
   RADEON_PRIO_COMPUTE_GLOBAL,

   RADEON_PRIO_SAMPLER_TEXTURE = 16,
   RADEON_PRIO_SHADER_RW_IMAGE,

   RADEON_PRIO_SAMPLER_TEXTURE_MSAA = 18,
   RADEON_PRIO_COLOR_BUFFER,

   RADEON_PRIO_DEPTH_BUFFER = 20,

   RADEON_PRIO_COLOR_BUFFER_MSAA = 22,

   RADEON_PRIO_DEPTH_BUFFER_MSAA = 24,

   RADEON_PRIO_SEPARATE_META = 26,
   RADEON_PRIO_SHADER_BINARY, /* the hw can't hide instruction cache misses */

   RADEON_PRIO_SHADER_RINGS = 28,

   RADEON_PRIO_SCRATCH_BUFFER = 30,
   /* 31 is the maximum value */
};

struct winsys_handle;
struct radeon_winsys_ctx;

struct radeon_cmdbuf_chunk {
   unsigned cdw;    /* Number of used dwords. */
   unsigned max_dw; /* Maximum number of dwords. */
   uint32_t *buf;   /* The base pointer of the chunk. */
};

struct radeon_cmdbuf {
   struct radeon_cmdbuf_chunk current;
   struct radeon_cmdbuf_chunk *prev;
   uint16_t num_prev; /* Number of previous chunks. */
   uint16_t max_prev; /* Space in array pointed to by prev. */
   unsigned prev_dw;  /* Total number of dwords in previous chunks. */

   /* Memory usage of the buffer list. These are always 0 for preamble IBs. */
   uint32_t used_vram_kb;
   uint32_t used_gart_kb;
   uint64_t gpu_address;

   /* Private winsys data. */
   void *priv;
};

/* Tiling info for display code, DRI sharing, and other data. */
struct radeon_bo_metadata {
   /* Tiling flags describing the texture layout for display code
    * and DRI sharing.
    */
   union {
      struct {
         enum radeon_bo_layout microtile;
         enum radeon_bo_layout macrotile;
         unsigned pipe_config;
         unsigned bankw;
         unsigned bankh;
         unsigned tile_split;
         unsigned mtilea;
         unsigned num_banks;
         unsigned stride;
         bool scanout;
      } legacy;
   } u;

   enum radeon_surf_mode mode;   /* Output from buffer_get_metadata */

   /* Additional metadata associated with the buffer, in bytes.
    * The maximum size is 64 * 4. This is opaque for the winsys & kernel.
    * Supported by amdgpu only.
    */
   uint32_t size_metadata;
   uint32_t metadata[64];
};

enum radeon_feature_id
{
   RADEON_FID_R300_HYPERZ_ACCESS, /* ZMask + HiZ */
   RADEON_FID_R300_CMASK_ACCESS,
};

struct radeon_bo_list_item {
   uint64_t bo_size;
   uint64_t vm_address;
   uint32_t priority_usage; /* mask of (1 << RADEON_PRIO_*) */
};

struct radeon_winsys {
   /**
    * The screen object this winsys was created for
    */
   struct pipe_screen *screen;
   /**
    * True if the application has created at least one TMZ buffer.
    */
   const bool uses_secure_bos;

   /**
    * Decrement the winsys reference count.
    *
    * \param ws  The winsys this function is called for.
    * \return    True if the winsys and screen should be destroyed.
    */
   bool (*unref)(struct radeon_winsys *ws);

   /**
    * Destroy this winsys.
    *
    * \param ws        The winsys this function is called from.
    */
   void (*destroy)(struct radeon_winsys *ws);

   /**
    * Query an info structure from winsys.
    *
    * \param ws        The winsys this function is called from.
    * \param info      Return structure
    */
   void (*query_info)(struct radeon_winsys *ws, struct radeon_info *info,
                      bool enable_smart_access_memory,
                      bool disable_smart_access_memory);

   /**
    * A hint for the winsys that it should pin its execution threads to
    * a group of cores sharing a specific L3 cache if the CPU has multiple
    * L3 caches. This is needed for good multithreading performance on
    * AMD Zen CPUs.
    */
   void (*pin_threads_to_L3_cache)(struct radeon_winsys *ws, unsigned cache);

   /**************************************************************************
    * Buffer management. A buffer's attributes are mostly fixed over its lifetime.
    *
    * Remember that gallium gets to choose the interface it needs, and the
    * window systems must then implement that interface (rather than the
    * other way around...).
    *************************************************************************/

   /**
    * Create a buffer object.
    *
    * \param ws        The winsys this function is called from.
    * \param size      The size to allocate.
    * \param alignment An alignment of the buffer in memory.
    * \param domain    A bitmask of the RADEON_DOMAIN_* flags.
    * \param flags     A bitmask of the RADEON_FLAG_* flags.
    * \return          The created buffer object.
    */
   struct pb_buffer *(*buffer_create)(struct radeon_winsys *ws, uint64_t size, unsigned alignment,
                                      enum radeon_bo_domain domain, enum radeon_bo_flag flags);

   /**
    * Map the entire data store of a buffer object into the client's address
    * space.
    *
    * Callers are expected to unmap buffers again if and only if the
    * RADEON_MAP_TEMPORARY flag is set in \p usage.
    *
    * \param buf       A winsys buffer object to map.
    * \param cs        A command stream to flush if the buffer is referenced by it.
    * \param usage     A bitmask of the PIPE_MAP_* and RADEON_MAP_* flags.
    * \return          The pointer at the beginning of the buffer.
    */
   void *(*buffer_map)(struct radeon_winsys *ws, struct pb_buffer *buf,
                       struct radeon_cmdbuf *cs, enum pipe_map_flags usage);

   /**
    * Unmap a buffer object from the client's address space.
    *
    * \param buf       A winsys buffer object to unmap.
    */
   void (*buffer_unmap)(struct radeon_winsys *ws, struct pb_buffer *buf);

   /**
    * Wait for the buffer and return true if the buffer is not used
    * by the device.
    *
    * The timeout of 0 will only return the status.
    * The timeout of PIPE_TIMEOUT_INFINITE will always wait until the buffer
    * is idle.
    */
   bool (*buffer_wait)(struct radeon_winsys *ws, struct pb_buffer *buf,
                       uint64_t timeout, enum radeon_bo_usage usage);

   /**
    * Return buffer metadata.
    * (tiling info for display code, DRI sharing, and other data)
    *
    * \param buf       A winsys buffer object to get the flags from.
    * \param md        Metadata
    */
   void (*buffer_get_metadata)(struct radeon_winsys *ws, struct pb_buffer *buf,
                               struct radeon_bo_metadata *md, struct radeon_surf *surf);

   /**
    * Set buffer metadata.
    * (tiling info for display code, DRI sharing, and other data)
    *
    * \param buf       A winsys buffer object to set the flags for.
    * \param md        Metadata
    */
   void (*buffer_set_metadata)(struct radeon_winsys *ws, struct pb_buffer *buf,
                               struct radeon_bo_metadata *md, struct radeon_surf *surf);

   /**
    * Get a winsys buffer from a winsys handle. The internal structure
    * of the handle is platform-specific and only a winsys should access it.
    *
    * \param ws        The winsys this function is called from.
    * \param whandle   A winsys handle pointer as was received from a state
    *                  tracker.
    */
   struct pb_buffer *(*buffer_from_handle)(struct radeon_winsys *ws, struct winsys_handle *whandle,
                                           unsigned vm_alignment);

   /**
    * Get a winsys buffer from a user pointer. The resulting buffer can't
    * be exported. Both pointer and size must be page aligned.
    *
    * \param ws        The winsys this function is called from.
    * \param pointer   User pointer to turn into a buffer object.
    * \param size      Size in bytes for the new buffer.
    */
   struct pb_buffer *(*buffer_from_ptr)(struct radeon_winsys *ws, void *pointer, uint64_t size);

   /**
    * Whether the buffer was created from a user pointer.
    *
    * \param buf       A winsys buffer object
    * \return          whether \p buf was created via buffer_from_ptr
    */
   bool (*buffer_is_user_ptr)(struct pb_buffer *buf);

   /** Whether the buffer was suballocated. */
   bool (*buffer_is_suballocated)(struct pb_buffer *buf);

   /**
    * Get a winsys handle from a winsys buffer. The internal structure
    * of the handle is platform-specific and only a winsys should access it.
    *
    * \param ws        The winsys instance for which the handle is to be valid
    * \param buf       A winsys buffer object to get the handle from.
    * \param whandle   A winsys handle pointer.
    * \return          true on success.
    */
   bool (*buffer_get_handle)(struct radeon_winsys *ws, struct pb_buffer *buf,
                             struct winsys_handle *whandle);

   /**
    * Change the commitment of a (64KB-page aligned) region of the given
    * sparse buffer.
    *
    * \warning There is no automatic synchronization with command submission.
    *
    * \note Only implemented by the amdgpu winsys.
    *
    * \return false on out of memory or other failure, true on success.
    */
   bool (*buffer_commit)(struct radeon_winsys *ws, struct pb_buffer *buf,
                         uint64_t offset, uint64_t size, bool commit);

   /**
    * Return the virtual address of a buffer.
    *
    * When virtual memory is not in use, this is the offset relative to the
    * relocation base (non-zero for sub-allocated buffers).
    *
    * \param buf       A winsys buffer object
    * \return          virtual address
    */
   uint64_t (*buffer_get_virtual_address)(struct pb_buffer *buf);

   /**
    * Return the offset of this buffer relative to the relocation base.
    * This is only non-zero for sub-allocated buffers.
    *
    * This is only supported in the radeon winsys, since amdgpu uses virtual
    * addresses in submissions even for the video engines.
    *
    * \param buf      A winsys buffer object
    * \return         the offset for relocations
    */
   unsigned (*buffer_get_reloc_offset)(struct pb_buffer *buf);

   /**
    * Query the initial placement of the buffer from the kernel driver.
    */
   enum radeon_bo_domain (*buffer_get_initial_domain)(struct pb_buffer *buf);

   /**
    * Query the flags used for creation of this buffer.
    *
    * Note that for imported buffers this may be lossy since not all flags
    * are passed 1:1.
    */
   enum radeon_bo_flag (*buffer_get_flags)(struct pb_buffer *buf);

   /**************************************************************************
    * Command submission.
    *
    * Each pipe context should create its own command stream and submit
    * commands independently of other contexts.
    *************************************************************************/

   /**
    * Create a command submission context.
    * Various command streams can be submitted to the same context.
    */
   struct radeon_winsys_ctx *(*ctx_create)(struct radeon_winsys *ws);

   /**
    * Destroy a context.
    */
   void (*ctx_destroy)(struct radeon_winsys_ctx *ctx);

   /**
    * Query a GPU reset status.
    */
   enum pipe_reset_status (*ctx_query_reset_status)(struct radeon_winsys_ctx *ctx,
                                                    bool full_reset_only,
                                                    bool *needs_reset);

   /**
    * Create a command stream.
    *
    * \param cs        The returned structure that is initialized by cs_create.
    * \param ctx       The submission context
    * \param ring_type The ring type (GFX, DMA, UVD)
    * \param flush     Flush callback function associated with the command stream.
    * \param user      User pointer that will be passed to the flush callback.
    *
    * \return true on success
    */
   bool (*cs_create)(struct radeon_cmdbuf *cs,
                     struct radeon_winsys_ctx *ctx, enum ring_type ring_type,
                     void (*flush)(void *ctx, unsigned flags,
                                   struct pipe_fence_handle **fence),
                     void *flush_ctx, bool stop_exec_on_failure);

   /**
    * Set up and enable mid command buffer preemption for the command stream.
    *
    * \param cs               Command stream
    * \param preamble_ib      Non-preemptible preamble IB for the context.
    * \param preamble_num_dw  Number of dwords in the preamble IB.
    */
   bool (*cs_setup_preemption)(struct radeon_cmdbuf *cs, const uint32_t *preamble_ib,
                               unsigned preamble_num_dw);

   /**
    * Destroy a command stream.
    *
    * \param cs        A command stream to destroy.
    */
   void (*cs_destroy)(struct radeon_cmdbuf *cs);

   /**
    * Add a buffer. Each buffer used by a CS must be added using this function.
    *
    * \param cs      Command stream
    * \param buf     Buffer
    * \param usage   Whether the buffer is used for read and/or write.
    * \param domain  Bitmask of the RADEON_DOMAIN_* flags.
    * \param priority  A higher number means a greater chance of being
    *                  placed in the requested domain. 15 is the maximum.
    * \return Buffer index.
    */
   unsigned (*cs_add_buffer)(struct radeon_cmdbuf *cs, struct pb_buffer *buf,
                             enum radeon_bo_usage usage, enum radeon_bo_domain domain,
                             enum radeon_bo_priority priority);

   /**
    * Return the index of an already-added buffer.
    *
    * Not supported on amdgpu. Drivers with GPUVM should not care about
    * buffer indices.
    *
    * \param cs        Command stream
    * \param buf       Buffer
    * \return          The buffer index, or -1 if the buffer has not been added.
    */
   int (*cs_lookup_buffer)(struct radeon_cmdbuf *cs, struct pb_buffer *buf);

   /**
    * Return true if there is enough memory in VRAM and GTT for the buffers
    * added so far. If the validation fails, all buffers which have
    * been added since the last call of cs_validate will be removed and
    * the CS will be flushed (provided there are still any buffers).
    *
    * \param cs        A command stream to validate.
    */
   bool (*cs_validate)(struct radeon_cmdbuf *cs);

   /**
    * Check whether the given number of dwords is available in the IB.
    * Optionally chain a new chunk of the IB if necessary and supported.
    *
    * \param cs        A command stream.
    * \param dw        Number of CS dwords requested by the caller.
    * \param force_chaining  Chain the IB into a new buffer now to discard
    *                        the CP prefetch cache (to emulate PKT3_REWIND)
    * \return true if there is enough space
    */
   bool (*cs_check_space)(struct radeon_cmdbuf *cs, unsigned dw, bool force_chaining);

   /**
    * Return the buffer list.
    *
    * This is the buffer list as passed to the kernel, i.e. it only contains
    * the parent buffers of sub-allocated buffers.
    *
    * \param cs    Command stream
    * \param list  Returned buffer list. Set to NULL to query the count only.
    * \return      The buffer count.
    */
   unsigned (*cs_get_buffer_list)(struct radeon_cmdbuf *cs, struct radeon_bo_list_item *list);

   /**
    * Flush a command stream.
    *
    * \param cs          A command stream to flush.
    * \param flags       PIPE_FLUSH_* flags.
    * \param fence       Pointer to a fence. If non-NULL, a fence is inserted
    *                    after the CS and is returned through this parameter.
    * \return Negative POSIX error code or 0 for success.
    *         Asynchronous submissions never return an error.
    */
   int (*cs_flush)(struct radeon_cmdbuf *cs, unsigned flags, struct pipe_fence_handle **fence);

   /**
    * Create a fence before the CS is flushed.
    * The user must flush manually to complete the initialization of the fence.
    *
    * The fence must not be used for anything except \ref cs_add_fence_dependency
    * before the flush.
    */
   struct pipe_fence_handle *(*cs_get_next_fence)(struct radeon_cmdbuf *cs);

   /**
    * Return true if a buffer is referenced by a command stream.
    *
    * \param cs        A command stream.
    * \param buf       A winsys buffer.
    */
   bool (*cs_is_buffer_referenced)(struct radeon_cmdbuf *cs, struct pb_buffer *buf,
                                   enum radeon_bo_usage usage);

   /**
    * Request access to a feature for a command stream.
    *
    * \param cs        A command stream.
    * \param fid       Feature ID, one of RADEON_FID_*
    * \param enable    Whether to enable or disable the feature.
    */
   bool (*cs_request_feature)(struct radeon_cmdbuf *cs, enum radeon_feature_id fid, bool enable);
   /**
    * Make sure all asynchronous flushes of the CS have completed.
    *
    * \param cs        A command stream.
    */
   void (*cs_sync_flush)(struct radeon_cmdbuf *cs);

   /**
    * Add a fence dependency to the CS, so that the CS will wait for
    * the fence before execution.
    *
    * \param dependency_flags  Bitmask of RADEON_DEPENDENCY_*
    */
   void (*cs_add_fence_dependency)(struct radeon_cmdbuf *cs, struct pipe_fence_handle *fence,
                                   unsigned dependency_flags);

   /**
    * Signal a syncobj when the CS finishes execution.
    */
   void (*cs_add_syncobj_signal)(struct radeon_cmdbuf *cs, struct pipe_fence_handle *fence);

   /**
    * Wait for the fence and return true if the fence has been signalled.
    * The timeout of 0 will only return the status.
    * The timeout of PIPE_TIMEOUT_INFINITE will always wait until the fence
    * is signalled.
    */
   bool (*fence_wait)(struct radeon_winsys *ws, struct pipe_fence_handle *fence, uint64_t timeout);

   /**
    * Reference counting for fences.
    */
   void (*fence_reference)(struct pipe_fence_handle **dst, struct pipe_fence_handle *src);

   /**
    * Create a new fence object corresponding to the given syncobj fd.
    */
   struct pipe_fence_handle *(*fence_import_syncobj)(struct radeon_winsys *ws, int fd);

   /**
    * Create a new fence object corresponding to the given sync_file.
    */
   struct pipe_fence_handle *(*fence_import_sync_file)(struct radeon_winsys *ws, int fd);

   /**
    * Return a sync_file FD corresponding to the given fence object.
    */
   int (*fence_export_sync_file)(struct radeon_winsys *ws, struct pipe_fence_handle *fence);

   /**
    * Return a sync file FD that is already signalled.
    */
   int (*export_signalled_sync_file)(struct radeon_winsys *ws);

   /**
    * Initialize surface
    *
    * \param ws        The winsys this function is called from.
    * \param tex       Input texture description
    * \param flags     Bitmask of RADEON_SURF_* flags
    * \param bpe       Bytes per pixel; it can be different for Z buffers.
    * \param mode      Preferred tile mode. (linear, 1D, or 2D)
    * \param surf      Output structure
    */
   int (*surface_init)(struct radeon_winsys *ws, const struct pipe_resource *tex, unsigned flags,
                       unsigned bpe, enum radeon_surf_mode mode, struct radeon_surf *surf);

   uint64_t (*query_value)(struct radeon_winsys *ws, enum radeon_value_id value);

   bool (*read_registers)(struct radeon_winsys *ws, unsigned reg_offset, unsigned num_registers,
                          uint32_t *out);

   /**
    * Secure context
    */
   bool (*cs_is_secure)(struct radeon_cmdbuf *cs);
};
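
/* Illustrative sketch (not part of the interface): one possible submission flow
 * through the callbacks above, assuming an already-created winsys "ws".
 * "my_flush_cb" is a hypothetical flush callback, RING_GFX is the gfx ring
 * value from enum ring_type (defined outside this header), and error handling
 * is omitted.
 *
 *    struct radeon_winsys_ctx *ctx = ws->ctx_create(ws);
 *    struct radeon_cmdbuf cs;
 *    ws->cs_create(&cs, ctx, RING_GFX, my_flush_cb, NULL, false);
 *
 *    struct pb_buffer *buf =
 *       ws->buffer_create(ws, 64 * 1024, 4096, RADEON_DOMAIN_VRAM,
 *                         RADEON_FLAG_GTT_WC | RADEON_FLAG_NO_INTERPROCESS_SHARING);
 *    ws->cs_add_buffer(&cs, buf, RADEON_USAGE_READWRITE | RADEON_USAGE_SYNCHRONIZED,
 *                      RADEON_DOMAIN_VRAM, RADEON_PRIO_SHADER_RW_BUFFER);
 *
 *    ... emit packets referencing the buffer ...
 *
 *    struct pipe_fence_handle *fence = NULL;
 *    ws->cs_flush(&cs, PIPE_FLUSH_ASYNC, &fence);
 *    ws->fence_wait(ws, fence, PIPE_TIMEOUT_INFINITE);
 *    ws->fence_reference(&fence, NULL);
 */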

static inline bool radeon_emitted(struct radeon_cmdbuf *cs, unsigned num_dw)
{
   return cs && (cs->prev_dw + cs->current.cdw > num_dw);
}

static inline void radeon_emit(struct radeon_cmdbuf *cs, uint32_t value)
{
   cs->current.buf[cs->current.cdw++] = value;
}

static inline void radeon_emit_array(struct radeon_cmdbuf *cs, const uint32_t *values,
                                     unsigned count)
{
   memcpy(cs->current.buf + cs->current.cdw, values, count * 4);
   cs->current.cdw += count;
}
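
/* Illustrative sketch (not part of the interface): space must be reserved with
 * cs_check_space() before writing dwords, since radeon_emit() and
 * radeon_emit_array() do not grow the IB themselves. "ws", "cs", "header_dw"
 * and "payload" are assumed to exist; the values are placeholders, not a real
 * packet encoding.
 *
 *    if (ws->cs_check_space(&cs, 3, false)) {
 *       radeon_emit(&cs, header_dw);         // packet header dword
 *       radeon_emit_array(&cs, payload, 2);  // followed by two payload dwords
 *    }
 */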

static inline bool radeon_uses_secure_bos(struct radeon_winsys* ws)
{
  return ws->uses_secure_bos;
}

static inline void
radeon_bo_reference(struct radeon_winsys *rws, struct pb_buffer **dst, struct pb_buffer *src)
{
   pb_reference_with_winsys(rws, dst, src);
}

enum radeon_heap
{
   RADEON_HEAP_VRAM_NO_CPU_ACCESS,
   RADEON_HEAP_VRAM_READ_ONLY,
   RADEON_HEAP_VRAM_READ_ONLY_32BIT,
   RADEON_HEAP_VRAM_32BIT,
   RADEON_HEAP_VRAM,
   RADEON_HEAP_GTT_WC,
   RADEON_HEAP_GTT_WC_READ_ONLY,
   RADEON_HEAP_GTT_WC_READ_ONLY_32BIT,
   RADEON_HEAP_GTT_WC_32BIT,
   RADEON_HEAP_GTT,
   RADEON_HEAP_GTT_UNCACHED_WC,
   RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY,
   RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY_32BIT,
   RADEON_HEAP_GTT_UNCACHED_WC_32BIT,
   RADEON_HEAP_GTT_UNCACHED,
   RADEON_MAX_SLAB_HEAPS,
   RADEON_MAX_CACHED_HEAPS = RADEON_MAX_SLAB_HEAPS,
};

static inline enum radeon_bo_domain radeon_domain_from_heap(enum radeon_heap heap)
{
   switch (heap) {
   case RADEON_HEAP_VRAM_NO_CPU_ACCESS:
   case RADEON_HEAP_VRAM_READ_ONLY:
   case RADEON_HEAP_VRAM_READ_ONLY_32BIT:
   case RADEON_HEAP_VRAM_32BIT:
   case RADEON_HEAP_VRAM:
      return RADEON_DOMAIN_VRAM;
   case RADEON_HEAP_GTT_WC:
   case RADEON_HEAP_GTT_WC_READ_ONLY:
   case RADEON_HEAP_GTT_WC_READ_ONLY_32BIT:
   case RADEON_HEAP_GTT_WC_32BIT:
   case RADEON_HEAP_GTT:
   case RADEON_HEAP_GTT_UNCACHED_WC:
   case RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY:
   case RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY_32BIT:
   case RADEON_HEAP_GTT_UNCACHED_WC_32BIT:
   case RADEON_HEAP_GTT_UNCACHED:
      return RADEON_DOMAIN_GTT;
   default:
      assert(0);
      return (enum radeon_bo_domain)0;
   }
}

static inline unsigned radeon_flags_from_heap(enum radeon_heap heap)
{
   unsigned flags = RADEON_FLAG_NO_INTERPROCESS_SHARING;

   switch (heap) {
   case RADEON_HEAP_GTT:
   case RADEON_HEAP_GTT_UNCACHED:
      break;
   default:
      flags |= RADEON_FLAG_GTT_WC;
   }

   switch (heap) {
   case RADEON_HEAP_GTT_UNCACHED_WC:
   case RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY:
   case RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY_32BIT:
   case RADEON_HEAP_GTT_UNCACHED_WC_32BIT:
   case RADEON_HEAP_GTT_UNCACHED:
      flags |= RADEON_FLAG_UNCACHED;
      break;
   default:
      break;
   }

   switch (heap) {
   case RADEON_HEAP_VRAM_READ_ONLY:
   case RADEON_HEAP_VRAM_READ_ONLY_32BIT:
   case RADEON_HEAP_GTT_WC_READ_ONLY:
   case RADEON_HEAP_GTT_WC_READ_ONLY_32BIT:
   case RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY:
   case RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY_32BIT:
      flags |= RADEON_FLAG_READ_ONLY;
      break;
   default:
      break;
   }

   switch (heap) {
   case RADEON_HEAP_VRAM_READ_ONLY_32BIT:
   case RADEON_HEAP_VRAM_32BIT:
   case RADEON_HEAP_GTT_WC_READ_ONLY_32BIT:
   case RADEON_HEAP_GTT_WC_32BIT:
   case RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY_32BIT:
   case RADEON_HEAP_GTT_UNCACHED_WC_32BIT:
      flags |= RADEON_FLAG_32BIT;
      FALLTHROUGH;
   default:
      break;
   }

   switch (heap) {
   case RADEON_HEAP_VRAM_NO_CPU_ACCESS:
      flags |= RADEON_FLAG_NO_CPU_ACCESS;
      break;
   default:
      break;
   }

   return flags;
}
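
/* Illustrative example (not part of the interface): for the read-only, 32-bit,
 * write-combined GTT heap, the helper above evaluates to
 *
 *    radeon_flags_from_heap(RADEON_HEAP_GTT_WC_READ_ONLY_32BIT) ==
 *       RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_GTT_WC |
 *       RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT
 */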

/* Return the heap index for winsys allocators, or -1 on failure. */
static inline int radeon_get_heap_index(enum radeon_bo_domain domain, enum radeon_bo_flag flags)
{
   bool uncached;

   /* VRAM implies WC (write combining) */
   assert(!(domain & RADEON_DOMAIN_VRAM) || flags & RADEON_FLAG_GTT_WC);
   /* NO_CPU_ACCESS implies VRAM only. */
   assert(!(flags & RADEON_FLAG_NO_CPU_ACCESS) || domain == RADEON_DOMAIN_VRAM);

   /* Resources with interprocess sharing don't use any winsys allocators. */
   if (!(flags & RADEON_FLAG_NO_INTERPROCESS_SHARING))
      return -1;

   /* Unsupported flags: NO_SUBALLOC, SPARSE. */
   if (flags & ~(RADEON_FLAG_GTT_WC | RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_UNCACHED |
                 RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT |
                 RADEON_FLAG_DRIVER_INTERNAL))
      return -1;

   switch (domain) {
   case RADEON_DOMAIN_VRAM:
      switch (flags & (RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT)) {
      case RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT:
      case RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_READ_ONLY:
         assert(!"NO_CPU_ACCESS | READ_ONLY doesn't make sense");
         return -1;
      case RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_32BIT:
         assert(!"NO_CPU_ACCESS with 32BIT is disallowed");
         return -1;
      case RADEON_FLAG_NO_CPU_ACCESS:
         return RADEON_HEAP_VRAM_NO_CPU_ACCESS;
      case RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT:
         return RADEON_HEAP_VRAM_READ_ONLY_32BIT;
      case RADEON_FLAG_READ_ONLY:
         return RADEON_HEAP_VRAM_READ_ONLY;
      case RADEON_FLAG_32BIT:
         return RADEON_HEAP_VRAM_32BIT;
      case 0:
         return RADEON_HEAP_VRAM;
      }
      break;
   case RADEON_DOMAIN_GTT:
      uncached = flags & RADEON_FLAG_UNCACHED;

      switch (flags & (RADEON_FLAG_GTT_WC | RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT)) {
      case RADEON_FLAG_GTT_WC | RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT:
         return uncached ? RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY_32BIT
                         : RADEON_HEAP_GTT_WC_READ_ONLY_32BIT;
      case RADEON_FLAG_GTT_WC | RADEON_FLAG_READ_ONLY:
         return uncached ? RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY
                         : RADEON_HEAP_GTT_WC_READ_ONLY;
      case RADEON_FLAG_GTT_WC | RADEON_FLAG_32BIT:
         return uncached ? RADEON_HEAP_GTT_UNCACHED_WC_32BIT
                         : RADEON_HEAP_GTT_WC_32BIT;
      case RADEON_FLAG_GTT_WC:
         return uncached ? RADEON_HEAP_GTT_UNCACHED_WC : RADEON_HEAP_GTT_WC;
      case RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT:
      case RADEON_FLAG_READ_ONLY:
         assert(!"READ_ONLY without WC is disallowed");
         return -1;
      case RADEON_FLAG_32BIT:
         assert(!"32BIT without WC is disallowed");
         return -1;
      case 0:
         return uncached ? RADEON_HEAP_GTT_UNCACHED : RADEON_HEAP_GTT;
      }
      break;
   default:
      break;
   }
   return -1;
}
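
/* Illustrative example (not part of the interface): radeon_get_heap_index and
 * radeon_flags_from_heap are consistent with each other, e.g.
 *
 *    radeon_get_heap_index(RADEON_DOMAIN_GTT,
 *                          radeon_flags_from_heap(RADEON_HEAP_GTT_WC_READ_ONLY_32BIT))
 *       == RADEON_HEAP_GTT_WC_READ_ONLY_32BIT
 *
 * whereas flags without RADEON_FLAG_NO_INTERPROCESS_SHARING, or containing
 * RADEON_FLAG_SPARSE or RADEON_FLAG_NO_SUBALLOC, make it return -1.
 */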

#endif