19f464c52Smaya/*
29f464c52Smaya * Copyright © 2017 Intel Corporation
39f464c52Smaya *
49f464c52Smaya * Permission is hereby granted, free of charge, to any person obtaining a
59f464c52Smaya * copy of this software and associated documentation files (the "Software"),
69f464c52Smaya * to deal in the Software without restriction, including without limitation
79f464c52Smaya * the rights to use, copy, modify, merge, publish, distribute, sublicense,
89f464c52Smaya * and/or sell copies of the Software, and to permit persons to whom the
99f464c52Smaya * Software is furnished to do so, subject to the following conditions:
109f464c52Smaya *
119f464c52Smaya * The above copyright notice and this permission notice (including the next
129f464c52Smaya * paragraph) shall be included in all copies or substantial portions of the
139f464c52Smaya * Software.
149f464c52Smaya *
159f464c52Smaya * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
169f464c52Smaya * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
179f464c52Smaya * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
189f464c52Smaya * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
199f464c52Smaya * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
209f464c52Smaya * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
219f464c52Smaya * IN THE SOFTWARE.
229f464c52Smaya */
239f464c52Smaya
249f464c52Smaya#ifndef IRIS_BUFMGR_H
259f464c52Smaya#define IRIS_BUFMGR_H
269f464c52Smaya
279f464c52Smaya#include <stdbool.h>
289f464c52Smaya#include <stdint.h>
299f464c52Smaya#include <stdio.h>
309f464c52Smaya#include <sys/types.h>
317ec681f3Smrg#include "c11/threads.h"
329f464c52Smaya#include "util/macros.h"
339f464c52Smaya#include "util/u_atomic.h"
347ec681f3Smrg#include "util/u_dynarray.h"
359f464c52Smaya#include "util/list.h"
367ec681f3Smrg#include "util/simple_mtx.h"
379f464c52Smaya#include "pipe/p_defines.h"
387ec681f3Smrg#include "pipebuffer/pb_slab.h"
399f464c52Smaya
407ec681f3Smrgstruct intel_device_info;
419f464c52Smayastruct pipe_debug_callback;
427ec681f3Smrgstruct isl_surf;
437ec681f3Smrgstruct iris_syncobj;
449f464c52Smaya
459f464c52Smaya/**
469f464c52Smaya * Memory zones.  When allocating a buffer, you can request that it is
479f464c52Smaya * placed into a specific region of the virtual address space (PPGTT).
489f464c52Smaya *
499f464c52Smaya * Most buffers can go anywhere (IRIS_MEMZONE_OTHER).  Some buffers are
509f464c52Smaya * accessed via an offset from a base address.  STATE_BASE_ADDRESS has
519f464c52Smaya * a maximum 4GB size for each region, so we need to restrict those
529f464c52Smaya * buffers to be within 4GB of the base.  Each memory zone corresponds
539f464c52Smaya * to a particular base address.
549f464c52Smaya *
559f464c52Smaya * We lay out the virtual address space as follows:
569f464c52Smaya *
579f464c52Smaya * - [0,   4K): Nothing            (empty page for null address)
589f464c52Smaya * - [4K,  4G): Shaders            (Instruction Base Address)
599f464c52Smaya * - [4G,  8G): Surfaces & Binders (Surface State Base Address, Bindless ...)
609f464c52Smaya * - [8G, 12G): Dynamic            (Dynamic State Base Address)
619f464c52Smaya * - [12G, *):  Other              (everything else in the full 48-bit VMA)
629f464c52Smaya *
639f464c52Smaya * A special buffer for border color lives at the start of the dynamic state
649f464c52Smaya * memory zone.  This unfortunately has to be handled specially because the
659f464c52Smaya * SAMPLER_STATE "Indirect State Pointer" field is only a 24-bit pointer.
669f464c52Smaya *
679f464c52Smaya * Each GL context uses a separate GEM context, which technically gives them
689f464c52Smaya * each a separate VMA.  However, we assign address globally, so buffers will
699f464c52Smaya * have the same address in all GEM contexts.  This lets us have a single BO
709f464c52Smaya * field for the address, which is easy and cheap.
719f464c52Smaya */
enum iris_memory_zone {
   IRIS_MEMZONE_SHADER,   /**< [4K, 4G): Instruction Base Address */
   IRIS_MEMZONE_BINDER,   /**< Start of [4G, 8G): binder tables */
   IRIS_MEMZONE_BINDLESS, /**< Bindless surface-state heap, after the binders */
   IRIS_MEMZONE_SURFACE,  /**< Rest of [4G, 8G): Surface State Base Address */
   IRIS_MEMZONE_DYNAMIC,  /**< [8G, 12G): Dynamic State Base Address */
   IRIS_MEMZONE_OTHER,    /**< [12G, *): everything else in the 48-bit VMA */

   /* A single-buffer "zone": the border color pool, which lives at the very
    * start of the dynamic state zone (IRIS_BORDER_COLOR_POOL_ADDRESS).
    * Deliberately placed after IRIS_MEMZONE_OTHER so it is excluded from
    * IRIS_MEMZONE_COUNT.
    */
   IRIS_MEMZONE_BORDER_COLOR_POOL,
};
829f464c52Smaya
839f464c52Smaya/* Intentionally exclude single buffer "zones" */
849f464c52Smaya#define IRIS_MEMZONE_COUNT (IRIS_MEMZONE_OTHER + 1)
859f464c52Smaya
869f464c52Smaya#define IRIS_BINDER_SIZE (64 * 1024)
879f464c52Smaya#define IRIS_MAX_BINDERS 100
887ec681f3Smrg#define IRIS_BINDLESS_SIZE (8 * 1024 * 1024)
899f464c52Smaya
909f464c52Smaya#define IRIS_MEMZONE_SHADER_START     (0ull * (1ull << 32))
919f464c52Smaya#define IRIS_MEMZONE_BINDER_START     (1ull * (1ull << 32))
927ec681f3Smrg#define IRIS_MEMZONE_BINDLESS_START   (IRIS_MEMZONE_BINDER_START + IRIS_MAX_BINDERS * IRIS_BINDER_SIZE)
937ec681f3Smrg#define IRIS_MEMZONE_SURFACE_START    (IRIS_MEMZONE_BINDLESS_START + IRIS_BINDLESS_SIZE)
949f464c52Smaya#define IRIS_MEMZONE_DYNAMIC_START    (2ull * (1ull << 32))
959f464c52Smaya#define IRIS_MEMZONE_OTHER_START      (3ull * (1ull << 32))
969f464c52Smaya
979f464c52Smaya#define IRIS_BORDER_COLOR_POOL_ADDRESS IRIS_MEMZONE_DYNAMIC_START
989f464c52Smaya#define IRIS_BORDER_COLOR_POOL_SIZE (64 * 1024)
999f464c52Smaya
/**
 * Classification of the various incoherent caches of the GPU into a number of
 * caching domains.
 *
 * The write-capable domains are listed before the read-only ones (see
 * iris_domain_is_read_only()), and NUM_IRIS_DOMAINS sizes per-domain arrays
 * such as iris_bo::last_seqnos.
 */
enum iris_domain {
   /** Render color cache. */
   IRIS_DOMAIN_RENDER_WRITE = 0,
   /** (Hi)Z/stencil cache. */
   IRIS_DOMAIN_DEPTH_WRITE,
   /** Data port (HDC) cache. */
   IRIS_DOMAIN_DATA_WRITE,
   /** Any other read-write cache. */
   IRIS_DOMAIN_OTHER_WRITE,
   /** Vertex cache. */
   IRIS_DOMAIN_VF_READ,
   /** Any other read-only cache. */
   IRIS_DOMAIN_OTHER_READ,
   /** Number of caching domains. */
   NUM_IRIS_DOMAINS,
   /** Not a real cache, use to opt out of the cache tracking mechanism. */
   IRIS_DOMAIN_NONE = NUM_IRIS_DOMAINS
};
1227ec681f3Smrg
1237ec681f3Smrg/**
1247ec681f3Smrg * Whether a caching domain is guaranteed not to write any data to memory.
1257ec681f3Smrg */
1267ec681f3Smrgstatic inline bool
1277ec681f3Smrgiris_domain_is_read_only(enum iris_domain access)
1287ec681f3Smrg{
1297ec681f3Smrg   return access == IRIS_DOMAIN_OTHER_READ ||
1307ec681f3Smrg          access == IRIS_DOMAIN_VF_READ;
1317ec681f3Smrg}
1327ec681f3Smrg
/**
 * CPU-mapping cacheability of a BO, chosen once at allocation time and
 * recorded in iris_bo::real.mmap_mode (see iris_bo_mmap_mode()).
 */
enum iris_mmap_mode {
   IRIS_MMAP_NONE, /**< Cannot be mapped */
   IRIS_MMAP_UC, /**< Fully uncached memory map */
   IRIS_MMAP_WC, /**< Write-combining map with no caching of reads */
   IRIS_MMAP_WB, /**< Write-back mapping with CPU caches enabled */
};
1397ec681f3Smrg
1407ec681f3Smrg#define IRIS_BATCH_COUNT 2
1417ec681f3Smrg
/**
 * Per-screen synchronization objects for a BO, one write and one read slot
 * per batch (IRIS_BATCH_COUNT).  Stored in iris_bo::deps.
 */
struct iris_bo_screen_deps {
   /* Syncobj for the last write to this BO from each batch. */
   struct iris_syncobj *write_syncobjs[IRIS_BATCH_COUNT];
   /* Syncobj for the last read of this BO from each batch. */
   struct iris_syncobj *read_syncobjs[IRIS_BATCH_COUNT];
};
1467ec681f3Smrg
struct iris_bo {
   /**
    * Size in bytes of the buffer object.
    *
    * The size may be larger than the size originally requested for the
    * allocation, such as being aligned to page size.
    */
   uint64_t size;

   /** Buffer manager context associated with this buffer object */
   struct iris_bufmgr *bufmgr;

   /** Pre-computed hash using _mesa_hash_pointer for cache tracking sets */
   uint32_t hash;

   /** The GEM handle for this buffer object. */
   uint32_t gem_handle;

   /**
    * Virtual address of the buffer inside the PPGTT (Per-Process Graphics
    * Translation Table).
    *
    * Although each hardware context has its own VMA, we assign BO's to the
    * same address in all contexts, for simplicity.
    */
   uint64_t address;

   /**
    * If non-zero, then this bo has an aux-map translation to this address.
    */
   uint64_t aux_map_address;

   /**
    * If this BO is referenced by a batch, this _may_ be the index into the
    * batch->exec_bos[] list.
    *
    * Note that a single buffer may be used by multiple batches/contexts,
    * and thus appear in multiple lists, but we only track one index here.
    * In the common case one can guess that batch->exec_bos[bo->index] == bo
    * and double check if that's true to avoid a linear list walk.
    *
    * XXX: this is not ideal now that we have more than one batch per context,
    * XXX: as the index will flop back and forth between the render index and
    * XXX: compute index...
    */
   unsigned index;

   /** Reference count; manipulated atomically (see iris_bo_reference()). */
   int refcount;

   /** Human-readable name for this BO, as passed to iris_bo_alloc(). */
   const char *name;

   /** BO cache list */
   struct list_head head;

   /**
    * Synchronization sequence number of most recent access of this BO from
    * each caching domain.
    *
    * Although this is a global field, use in multiple contexts should be
    * safe, see iris_emit_buffer_barrier_for() for details.
    *
    * Also align it to 64 bits. This will make atomic operations faster on 32
    * bit platforms.
    */
   uint64_t last_seqnos[NUM_IRIS_DOMAINS] __attribute__ ((aligned (8)));

   /** Up to one per screen, may need realloc. */
   struct iris_bo_screen_deps *deps;
   int deps_size;

   /**
    * Boolean of whether the GPU is definitely not accessing the buffer.
    *
    * This is only valid when reusable, since non-reusable
    * buffers are those that have been shared with other
    * processes, so we don't know their state.
    */
   bool idle;

   /* Exactly one arm of this union is valid, depending on whether this BO
    * is a real GEM object or a slab suballocation — see iris_bo_is_real().
    */
   union {
      /** Fields valid only for real, GEM-backed BOs. */
      struct {
         /* Extra kernel flags for execbuf; presumably EXEC_OBJECT_* bits —
          * TODO confirm against iris_bufmgr.c.
          */
         uint64_t kflags;

         /* NOTE(review): looks like the time the BO entered the reuse
          * cache, used for eviction — verify against the bufmgr code.
          */
         time_t free_time;

         /** Mapped address for the buffer, saved across map/unmap cycles */
         void *map;

         /** List of GEM handle exports of this buffer (bo_export) */
         struct list_head exports;

         /**
          * Kernel-assigned global name for this object
          *
          * List contains both flink named and prime fd'd objects
          */
         unsigned global_name;

         /** The mmap coherency mode selected at BO allocation time */
         enum iris_mmap_mode mmap_mode;

         /** Was this buffer imported from an external client? */
         bool imported;

         /** Has this buffer been exported to external clients? */
         bool exported;

         /** Boolean of whether this buffer can be re-used */
         bool reusable;

         /** Boolean of whether this buffer points into user memory */
         bool userptr;

         /** Boolean of whether this was allocated from local memory */
         bool local;
      } real;
      /** Fields valid only for slab-suballocated BOs. */
      struct {
         /** This BO's entry in the pb_slab allocator. */
         struct pb_slab_entry entry;
         /** The real BO providing the backing storage. */
         struct iris_bo *real;
      } slab;
   };
};
2689f464c52Smaya
2697ec681f3Smrg#define BO_ALLOC_ZEROED      (1<<0)
2707ec681f3Smrg#define BO_ALLOC_COHERENT    (1<<1)
2717ec681f3Smrg#define BO_ALLOC_SMEM        (1<<2)
2727ec681f3Smrg#define BO_ALLOC_SCANOUT     (1<<3)
2737ec681f3Smrg#define BO_ALLOC_NO_SUBALLOC (1<<4)
2749f464c52Smaya
2759f464c52Smaya/**
2769f464c52Smaya * Allocate a buffer object.
2779f464c52Smaya *
2789f464c52Smaya * Buffer objects are not necessarily initially mapped into CPU virtual
2799f464c52Smaya * address space or graphics device aperture.  They must be mapped
2809f464c52Smaya * using iris_bo_map() to be used by the CPU.
2819f464c52Smaya */
2829f464c52Smayastruct iris_bo *iris_bo_alloc(struct iris_bufmgr *bufmgr,
2839f464c52Smaya                              const char *name,
2849f464c52Smaya                              uint64_t size,
2857ec681f3Smrg                              uint32_t alignment,
2867ec681f3Smrg                              enum iris_memory_zone memzone,
2877ec681f3Smrg                              unsigned flags);
2889f464c52Smaya
2899f464c52Smayastruct iris_bo *
2909f464c52Smayairis_bo_create_userptr(struct iris_bufmgr *bufmgr, const char *name,
2919f464c52Smaya                       void *ptr, size_t size,
2929f464c52Smaya                       enum iris_memory_zone memzone);
2939f464c52Smaya
/**
 * Takes a reference on a buffer object.
 *
 * The increment is atomic (p_atomic_inc), so this is safe without locking.
 * Pair with iris_bo_unreference().
 */
static inline void
iris_bo_reference(struct iris_bo *bo)
{
   p_atomic_inc(&bo->refcount);
}
3009f464c52Smaya
3019f464c52Smaya/**
3029f464c52Smaya * Releases a reference on a buffer object, freeing the data if
3039f464c52Smaya * no references remain.
3049f464c52Smaya */
3059f464c52Smayavoid iris_bo_unreference(struct iris_bo *bo);
3069f464c52Smaya
3077ec681f3Smrg#define MAP_READ          PIPE_MAP_READ
3087ec681f3Smrg#define MAP_WRITE         PIPE_MAP_WRITE
3097ec681f3Smrg#define MAP_ASYNC         PIPE_MAP_UNSYNCHRONIZED
3107ec681f3Smrg#define MAP_PERSISTENT    PIPE_MAP_PERSISTENT
3117ec681f3Smrg#define MAP_COHERENT      PIPE_MAP_COHERENT
3129f464c52Smaya/* internal */
3137ec681f3Smrg#define MAP_RAW           (PIPE_MAP_DRV_PRV << 0)
3147ec681f3Smrg#define MAP_INTERNAL_MASK (MAP_RAW)
3159f464c52Smaya
3169f464c52Smaya#define MAP_FLAGS         (MAP_READ | MAP_WRITE | MAP_ASYNC | \
3179f464c52Smaya                           MAP_PERSISTENT | MAP_COHERENT | MAP_INTERNAL_MASK)
3189f464c52Smaya
3199f464c52Smaya/**
3209f464c52Smaya * Maps the buffer into userspace.
3219f464c52Smaya *
3229f464c52Smaya * This function will block waiting for any existing execution on the
3239f464c52Smaya * buffer to complete, first.  The resulting mapping is returned.
3249f464c52Smaya */
3259f464c52SmayaMUST_CHECK void *iris_bo_map(struct pipe_debug_callback *dbg,
3269f464c52Smaya                             struct iris_bo *bo, unsigned flags);
3279f464c52Smaya
/**
 * Reduces the refcount on the userspace mapping of the buffer object.
 *
 * In this driver mappings persist for the lifetime of the BO (see
 * iris_bo::real.map), so this is a no-op kept for API symmetry with
 * iris_bo_map().  Always returns 0 (success).
 */
static inline int
iris_bo_unmap(struct iris_bo *bo)
{
   (void) bo;
   return 0;
}
3339f464c52Smaya
3349f464c52Smaya/**
3359f464c52Smaya * Waits for rendering to an object by the GPU to have completed.
3369f464c52Smaya *
3379f464c52Smaya * This is not required for any access to the BO by bo_map,
3389f464c52Smaya * bo_subdata, etc.  It is merely a way for the driver to implement
3399f464c52Smaya * glFinish.
3409f464c52Smaya */
3419f464c52Smayavoid iris_bo_wait_rendering(struct iris_bo *bo);
3429f464c52Smaya
3439f464c52Smaya
3449f464c52Smaya/**
3457ec681f3Smrg * Unref a buffer manager instance.
3469f464c52Smaya */
3477ec681f3Smrgvoid iris_bufmgr_unref(struct iris_bufmgr *bufmgr);
3489f464c52Smaya
3499f464c52Smaya/**
3509f464c52Smaya * Create a visible name for a buffer which can be used by other apps
3519f464c52Smaya *
3529f464c52Smaya * \param buf Buffer to create a name for
3539f464c52Smaya * \param name Returned name
3549f464c52Smaya */
3559f464c52Smayaint iris_bo_flink(struct iris_bo *bo, uint32_t *name);
3569f464c52Smaya
3579f464c52Smaya/**
3587ec681f3Smrg * Returns true if the BO is backed by a real GEM object, false if it's
3597ec681f3Smrg * a wrapper that's suballocated from a larger BO.
3607ec681f3Smrg */
3617ec681f3Smrgstatic inline bool
3627ec681f3Smrgiris_bo_is_real(struct iris_bo *bo)
3637ec681f3Smrg{
3647ec681f3Smrg   return bo->gem_handle != 0;
3657ec681f3Smrg}
3667ec681f3Smrg
3677ec681f3Smrg/**
3687ec681f3Smrg * Unwrap any slab-allocated wrapper BOs to get the BO for the underlying
3697ec681f3Smrg * backing storage, which is a real BO associated with a GEM object.
3707ec681f3Smrg */
3717ec681f3Smrgstatic inline struct iris_bo *
3727ec681f3Smrgiris_get_backing_bo(struct iris_bo *bo)
3737ec681f3Smrg{
3747ec681f3Smrg   if (!iris_bo_is_real(bo))
3757ec681f3Smrg      bo = bo->slab.real;
3767ec681f3Smrg
3777ec681f3Smrg   /* We only allow one level of wrapping. */
3787ec681f3Smrg   assert(iris_bo_is_real(bo));
3797ec681f3Smrg
3807ec681f3Smrg   return bo;
3817ec681f3Smrg}
3827ec681f3Smrg
3837ec681f3Smrg/**
3847ec681f3Smrg * Is this buffer shared with external clients (imported or exported)?
3857ec681f3Smrg */
3867ec681f3Smrgstatic inline bool
3877ec681f3Smrgiris_bo_is_external(const struct iris_bo *bo)
3887ec681f3Smrg{
3897ec681f3Smrg   bo = iris_get_backing_bo((struct iris_bo *) bo);
3907ec681f3Smrg   return bo->real.exported || bo->real.imported;
3917ec681f3Smrg}
3927ec681f3Smrg
3937ec681f3Smrgstatic inline bool
3947ec681f3Smrgiris_bo_is_imported(const struct iris_bo *bo)
3957ec681f3Smrg{
3967ec681f3Smrg   bo = iris_get_backing_bo((struct iris_bo *) bo);
3977ec681f3Smrg   return bo->real.imported;
3987ec681f3Smrg}
3997ec681f3Smrg
4007ec681f3Smrgstatic inline bool
4017ec681f3Smrgiris_bo_is_exported(const struct iris_bo *bo)
4027ec681f3Smrg{
4037ec681f3Smrg   bo = iris_get_backing_bo((struct iris_bo *) bo);
4047ec681f3Smrg   return bo->real.exported;
4057ec681f3Smrg}
4067ec681f3Smrg
4077ec681f3Smrgstatic inline enum iris_mmap_mode
4087ec681f3Smrgiris_bo_mmap_mode(const struct iris_bo *bo)
4097ec681f3Smrg{
4107ec681f3Smrg   bo = iris_get_backing_bo((struct iris_bo *) bo);
4117ec681f3Smrg   return bo->real.mmap_mode;
4127ec681f3Smrg}
4137ec681f3Smrg
4147ec681f3Smrg/**
4157ec681f3Smrg * Mark a buffer as being shared with other external clients.
4167ec681f3Smrg */
4177ec681f3Smrgvoid iris_bo_mark_exported(struct iris_bo *bo);
4187ec681f3Smrg
4197ec681f3Smrg/**
4207ec681f3Smrg * Returns true  if mapping the buffer for write could cause the process
4219f464c52Smaya * to block, due to the object being active in the GPU.
4229f464c52Smaya */
4237ec681f3Smrgbool iris_bo_busy(struct iris_bo *bo);
4249f464c52Smaya
4259f464c52Smaya/**
4269f464c52Smaya * Specify the volatility of the buffer.
4279f464c52Smaya * \param bo Buffer to create a name for
4289f464c52Smaya * \param madv The purgeable status
4299f464c52Smaya *
4309f464c52Smaya * Use I915_MADV_DONTNEED to mark the buffer as purgeable, and it will be
4319f464c52Smaya * reclaimed under memory pressure. If you subsequently require the buffer,
4329f464c52Smaya * then you must pass I915_MADV_WILLNEED to mark the buffer as required.
4339f464c52Smaya *
4349f464c52Smaya * Returns 1 if the buffer was retained, or 0 if it was discarded whilst
4359f464c52Smaya * marked as I915_MADV_DONTNEED.
4369f464c52Smaya */
4379f464c52Smayaint iris_bo_madvise(struct iris_bo *bo, int madv);
4389f464c52Smaya
4397ec681f3Smrgstruct iris_bufmgr *iris_bufmgr_get_for_fd(struct intel_device_info *devinfo,
4407ec681f3Smrg                                           int fd, bool bo_reuse);
4417ec681f3Smrgint iris_bufmgr_get_fd(struct iris_bufmgr *bufmgr);
4427ec681f3Smrg
4439f464c52Smayastruct iris_bo *iris_bo_gem_create_from_name(struct iris_bufmgr *bufmgr,
4449f464c52Smaya                                             const char *name,
4459f464c52Smaya                                             unsigned handle);
4467ec681f3Smrg
4477ec681f3Smrgvoid* iris_bufmgr_get_aux_map_context(struct iris_bufmgr *bufmgr);
4489f464c52Smaya
4499f464c52Smayaint iris_bo_wait(struct iris_bo *bo, int64_t timeout_ns);
4509f464c52Smaya
4519f464c52Smayauint32_t iris_create_hw_context(struct iris_bufmgr *bufmgr);
4527ec681f3Smrguint32_t iris_clone_hw_context(struct iris_bufmgr *bufmgr, uint32_t ctx_id);
4539f464c52Smaya
4549f464c52Smaya#define IRIS_CONTEXT_LOW_PRIORITY    ((I915_CONTEXT_MIN_USER_PRIORITY-1)/2)
4559f464c52Smaya#define IRIS_CONTEXT_MEDIUM_PRIORITY (I915_CONTEXT_DEFAULT_PRIORITY)
4569f464c52Smaya#define IRIS_CONTEXT_HIGH_PRIORITY   ((I915_CONTEXT_MAX_USER_PRIORITY+1)/2)
4579f464c52Smaya
4589f464c52Smayaint iris_hw_context_set_priority(struct iris_bufmgr *bufmgr,
4599f464c52Smaya                                 uint32_t ctx_id, int priority);
4609f464c52Smaya
4619f464c52Smayavoid iris_destroy_hw_context(struct iris_bufmgr *bufmgr, uint32_t ctx_id);
4629f464c52Smaya
4637ec681f3Smrgint iris_gem_get_tiling(struct iris_bo *bo, uint32_t *tiling);
4647ec681f3Smrgint iris_gem_set_tiling(struct iris_bo *bo, const struct isl_surf *surf);
4657ec681f3Smrg
4669f464c52Smayaint iris_bo_export_dmabuf(struct iris_bo *bo, int *prime_fd);
4679f464c52Smayastruct iris_bo *iris_bo_import_dmabuf(struct iris_bufmgr *bufmgr, int prime_fd);
4689f464c52Smaya
4697ec681f3Smrg/**
4707ec681f3Smrg * Exports a bo as a GEM handle into a given DRM file descriptor
4717ec681f3Smrg * \param bo Buffer to export
4727ec681f3Smrg * \param drm_fd File descriptor where the new handle is created
4737ec681f3Smrg * \param out_handle Pointer to store the new handle
4747ec681f3Smrg *
4757ec681f3Smrg * Returns 0 if the buffer was successfully exported, a non zero error code
4767ec681f3Smrg * otherwise.
4777ec681f3Smrg */
4787ec681f3Smrgint iris_bo_export_gem_handle_for_device(struct iris_bo *bo, int drm_fd,
4797ec681f3Smrg                                         uint32_t *out_handle);
4807ec681f3Smrg
4819f464c52Smayauint32_t iris_bo_export_gem_handle(struct iris_bo *bo);
4829f464c52Smaya
4839f464c52Smayaint iris_reg_read(struct iris_bufmgr *bufmgr, uint32_t offset, uint64_t *out);
4849f464c52Smaya
4859f464c52Smaya/**
4869f464c52Smaya * Returns the BO's address relative to the appropriate base address.
4879f464c52Smaya *
4889f464c52Smaya * All of our base addresses are programmed to the start of a 4GB region,
4899f464c52Smaya * so simply returning the bottom 32 bits of the BO address will give us
4909f464c52Smaya * the offset from whatever base address corresponds to that memory region.
4919f464c52Smaya */
4929f464c52Smayastatic inline uint32_t
4939f464c52Smayairis_bo_offset_from_base_address(struct iris_bo *bo)
4949f464c52Smaya{
4959f464c52Smaya   /* This only works for buffers in the memory zones corresponding to a
4969f464c52Smaya    * base address - the top, unbounded memory zone doesn't have a base.
4979f464c52Smaya    */
4987ec681f3Smrg   assert(bo->address < IRIS_MEMZONE_OTHER_START);
4997ec681f3Smrg   return bo->address;
5007ec681f3Smrg}
5017ec681f3Smrg
/**
 * Track access of a BO from the specified caching domain and sequence number.
 *
 * Can be used without locking.  Only the most recent access (i.e. highest
 * seqno) is tracked.
 */
static inline void
iris_bo_bump_seqno(struct iris_bo *bo, uint64_t seqno,
                   enum iris_domain type)
{
   uint64_t *const last_seqno = &bo->last_seqnos[type];
   uint64_t tmp, prev_seqno = p_atomic_read(last_seqno);

   /* Lock-free CAS loop: keep retrying until either the stored seqno is
    * already >= ours (another thread published a newer access), or our
    * compare-and-exchange succeeds.  p_atomic_cmpxchg returns the previous
    * value, which equals prev_seqno exactly when the swap took effect.
    */
   while (prev_seqno < seqno &&
          prev_seqno != (tmp = p_atomic_cmpxchg(last_seqno, prev_seqno, seqno)))
      prev_seqno = tmp;
}
5199f464c52Smaya
5209f464c52Smayaenum iris_memory_zone iris_memzone_for_address(uint64_t address);
5219f464c52Smaya
5227ec681f3Smrgint iris_bufmgr_create_screen_id(struct iris_bufmgr *bufmgr);
5237ec681f3Smrg
5247ec681f3Smrgsimple_mtx_t *iris_bufmgr_get_bo_deps_lock(struct iris_bufmgr *bufmgr);
5257ec681f3Smrg
5269f464c52Smaya#endif /* IRIS_BUFMGR_H */
527