1af69d88dSmrg/*
2af69d88dSmrg * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
3af69d88dSmrg *
4af69d88dSmrg * Permission is hereby granted, free of charge, to any person obtaining a
5af69d88dSmrg * copy of this software and associated documentation files (the "Software"),
6af69d88dSmrg * to deal in the Software without restriction, including without limitation
7af69d88dSmrg * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8af69d88dSmrg * and/or sell copies of the Software, and to permit persons to whom the
9af69d88dSmrg * Software is furnished to do so, subject to the following conditions:
10af69d88dSmrg *
11af69d88dSmrg * The above copyright notice and this permission notice (including the next
12af69d88dSmrg * paragraph) shall be included in all copies or substantial portions of the
13af69d88dSmrg * Software.
14af69d88dSmrg *
15af69d88dSmrg * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16af69d88dSmrg * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17af69d88dSmrg * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18af69d88dSmrg * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19af69d88dSmrg * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20af69d88dSmrg * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21af69d88dSmrg * SOFTWARE.
22af69d88dSmrg *
23af69d88dSmrg * Authors:
24af69d88dSmrg *    Rob Clark <robclark@freedesktop.org>
25af69d88dSmrg */
26af69d88dSmrg
277ec681f3Smrg#include "util/format/u_format.h"
287ec681f3Smrg#include "util/format/u_format_rgtc.h"
297ec681f3Smrg#include "util/format/u_format_zs.h"
307ec681f3Smrg#include "util/set.h"
317ec681f3Smrg#include "util/u_drm.h"
32af69d88dSmrg#include "util/u_inlines.h"
33af69d88dSmrg#include "util/u_string.h"
34af69d88dSmrg#include "util/u_surface.h"
357ec681f3Smrg#include "util/u_transfer.h"
367ec681f3Smrg
377ec681f3Smrg#include "decode/util.h"
38af69d88dSmrg
3901e04c3fSmrg#include "freedreno_batch_cache.h"
409f464c52Smaya#include "freedreno_blitter.h"
417ec681f3Smrg#include "freedreno_context.h"
4201e04c3fSmrg#include "freedreno_fence.h"
437ec681f3Smrg#include "freedreno_query_hw.h"
447ec681f3Smrg#include "freedreno_resource.h"
45af69d88dSmrg#include "freedreno_screen.h"
46af69d88dSmrg#include "freedreno_surface.h"
47af69d88dSmrg#include "freedreno_util.h"
48af69d88dSmrg
49af69d88dSmrg#include <errno.h>
507ec681f3Smrg#include "drm-uapi/drm_fourcc.h"
51af69d88dSmrg
5201e04c3fSmrg/* XXX this should go away, needed for 'struct winsys_handle' */
537ec681f3Smrg#include "frontend/drm_driver.h"
547ec681f3Smrg
557ec681f3Smrg/* A private modifier for now, so we have a way to request tiled but not
567ec681f3Smrg * compressed.  It would perhaps be good to get real modifiers for the
577ec681f3Smrg * tiled formats, but would probably need to do some work to figure out
587ec681f3Smrg * the layout(s) of the tiled modes, and whether they are the same
597ec681f3Smrg * across generations.
607ec681f3Smrg */
617ec681f3Smrg#define FD_FORMAT_MOD_QCOM_TILED fourcc_mod_code(QCOM, 0xffffffff)
6201e04c3fSmrg
/**
 * Go through the entire state and see if the resource is bound
 * anywhere. If it is, mark the relevant state as dirty. This is
 * called on realloc_bo to ensure the necessary state is re-
 * emitted so the GPU looks at the new backing bo.
 *
 * rsc->dirty is a cached bitmask of which state groups *may*
 * reference this resource, used below to skip scanning state
 * that cannot possibly bind it.
 */
static void
rebind_resource_in_ctx(struct fd_context *ctx,
                       struct fd_resource *rsc) assert_dt
{
   struct pipe_resource *prsc = &rsc->b.b;

   /* Give the generation-specific backend first crack, if it has one: */
   if (ctx->rebind_resource)
      ctx->rebind_resource(ctx, rsc);

   /* VBOs */
   if (rsc->dirty & FD_DIRTY_VTXBUF) {
      struct fd_vertexbuf_stateobj *vb = &ctx->vtx.vertexbuf;
      /* Loop condition also bails as soon as VTXBUF is already dirty,
       * since marking it once is enough:
       */
      for (unsigned i = 0; i < vb->count && !(ctx->dirty & FD_DIRTY_VTXBUF);
           i++) {
         if (vb->vb[i].buffer.resource == prsc)
            fd_context_dirty(ctx, FD_DIRTY_VTXBUF);
      }
   }

   const enum fd_dirty_3d_state per_stage_dirty =
      FD_DIRTY_CONST | FD_DIRTY_TEX | FD_DIRTY_IMAGE | FD_DIRTY_SSBO;

   /* If the resource can't be bound in any per-stage slot, we are done: */
   if (!(rsc->dirty & per_stage_dirty))
      return;

   /* per-shader-stage resources: */
   for (unsigned stage = 0; stage < PIPE_SHADER_TYPES; stage++) {
      /* Constbufs.. note that constbuf[0] is normal uniforms emitted in
       * cmdstream rather than by pointer.. hence the loop starts at 1.
       */
      if ((rsc->dirty & FD_DIRTY_CONST) &&
          !(ctx->dirty_shader[stage] & FD_DIRTY_CONST)) {
         struct fd_constbuf_stateobj *cb = &ctx->constbuf[stage];
         const unsigned num_ubos = util_last_bit(cb->enabled_mask);
         for (unsigned i = 1; i < num_ubos; i++) {
            if (cb->cb[i].buffer == prsc) {
               fd_context_dirty_shader(ctx, stage, FD_DIRTY_SHADER_CONST);
               break;
            }
         }
      }

      /* Textures */
      if ((rsc->dirty & FD_DIRTY_TEX) &&
          !(ctx->dirty_shader[stage] & FD_DIRTY_TEX)) {
         struct fd_texture_stateobj *tex = &ctx->tex[stage];
         for (unsigned i = 0; i < tex->num_textures; i++) {
            if (tex->textures[i] && (tex->textures[i]->texture == prsc)) {
               fd_context_dirty_shader(ctx, stage, FD_DIRTY_SHADER_TEX);
               break;
            }
         }
      }

      /* Images */
      if ((rsc->dirty & FD_DIRTY_IMAGE) &&
          !(ctx->dirty_shader[stage] & FD_DIRTY_IMAGE)) {
         struct fd_shaderimg_stateobj *si = &ctx->shaderimg[stage];
         const unsigned num_images = util_last_bit(si->enabled_mask);
         for (unsigned i = 0; i < num_images; i++) {
            if (si->si[i].resource == prsc) {
               fd_context_dirty_shader(ctx, stage, FD_DIRTY_SHADER_IMAGE);
               break;
            }
         }
      }

      /* SSBOs */
      if ((rsc->dirty & FD_DIRTY_SSBO) &&
          !(ctx->dirty_shader[stage] & FD_DIRTY_SSBO)) {
         struct fd_shaderbuf_stateobj *sb = &ctx->shaderbuf[stage];
         const unsigned num_ssbos = util_last_bit(sb->enabled_mask);
         for (unsigned i = 0; i < num_ssbos; i++) {
            if (sb->sb[i].buffer == prsc) {
               fd_context_dirty_shader(ctx, stage, FD_DIRTY_SHADER_SSBO);
               break;
            }
         }
      }
   }
}
1507ec681f3Smrg
1517ec681f3Smrgstatic void
1527ec681f3Smrgrebind_resource(struct fd_resource *rsc) assert_dt
1537ec681f3Smrg{
1547ec681f3Smrg   struct fd_screen *screen = fd_screen(rsc->b.b.screen);
1557ec681f3Smrg
1567ec681f3Smrg   fd_screen_lock(screen);
1577ec681f3Smrg   fd_resource_lock(rsc);
1587ec681f3Smrg
1597ec681f3Smrg   if (rsc->dirty)
1607ec681f3Smrg      list_for_each_entry (struct fd_context, ctx, &screen->context_list, node)
1617ec681f3Smrg         rebind_resource_in_ctx(ctx, rsc);
1627ec681f3Smrg
1637ec681f3Smrg   fd_resource_unlock(rsc);
1647ec681f3Smrg   fd_screen_unlock(screen);
1657ec681f3Smrg}
1667ec681f3Smrg
1677ec681f3Smrgstatic inline void
1687ec681f3Smrgfd_resource_set_bo(struct fd_resource *rsc, struct fd_bo *bo)
16901e04c3fSmrg{
1707ec681f3Smrg   struct fd_screen *screen = fd_screen(rsc->b.b.screen);
1717ec681f3Smrg
1727ec681f3Smrg   rsc->bo = bo;
1737ec681f3Smrg   rsc->seqno = p_atomic_inc_return(&screen->rsc_seqno);
1747ec681f3Smrg}
1757ec681f3Smrg
/* Wait until CPU access to the resource's bo is possible.
 *
 * @op: bitmask of FD_BO_PREP_* flags describing the desired access
 *
 * Returns the result of fd_bo_cpu_prep() (zero on success).  For
 * synchronous waits, a perf warning naming @func is logged if the
 * wait stalls longer than 10000 units (presumably usec -- see
 * perf_time_ctx; TODO confirm).
 */
int
__fd_resource_wait(struct fd_context *ctx, struct fd_resource *rsc, unsigned op,
                   const char *func)
{
   /* Non-blocking prep: no stall possible, skip the instrumentation: */
   if (op & FD_BO_PREP_NOSYNC)
      return fd_bo_cpu_prep(rsc->bo, ctx->pipe, op);

   int ret;

   perf_time_ctx (ctx, 10000, "%s: a busy \"%" PRSC_FMT "\" BO stalled", func,
                  PRSC_ARGS(&rsc->b.b)) {
      ret = fd_bo_cpu_prep(rsc->bo, ctx->pipe, op);
   }

   return ret;
}
19201e04c3fSmrg
/* Replace the resource's backing bo with a freshly allocated one of
 * @size bytes.  The old bo (if any) is dropped, the valid range is
 * reset, and any batch-cache entries referencing the resource are
 * invalidated, since they describe the old storage.
 */
static void
realloc_bo(struct fd_resource *rsc, uint32_t size)
{
   struct pipe_resource *prsc = &rsc->b.b;
   struct fd_screen *screen = fd_screen(rsc->b.b.screen);
   /* Staging resources get CPU-cached-coherent memory (they are read
    * back on the CPU); scanout resources need kernel SCANOUT handling:
    */
   uint32_t flags =
      COND(prsc->usage & PIPE_USAGE_STAGING, FD_BO_CACHED_COHERENT) |
      COND(prsc->bind & PIPE_BIND_SCANOUT, FD_BO_SCANOUT);
   /* TODO other flags? */

   /* if we start using things other than write-combine,
    * be sure to check for PIPE_RESOURCE_FLAG_MAP_COHERENT
    */

   if (rsc->bo)
      fd_bo_del(rsc->bo);

   struct fd_bo *bo =
      fd_bo_new(screen->dev, size, flags, "%ux%ux%u@%u:%x", prsc->width0,
                prsc->height0, prsc->depth0, rsc->layout.cpp, prsc->bind);
   fd_resource_set_bo(rsc, bo);

   /* Zero out the UBWC area on allocation.  This fixes intermittent failures
    * with UBWC, which I suspect are due to the HW having a hard time
    * interpreting arbitrary values populating the flags buffer when the BO
    * was recycled through the bo cache (instead of fresh allocations from
    * the kernel, which are zeroed).  sleep(1) in this spot didn't work
    * around the issue, but any memset value seems to.
    */
   if (rsc->layout.ubwc) {
      rsc->needs_ubwc_clear = true;
   }

   util_range_set_empty(&rsc->valid_buffer_range);
   fd_bc_invalidate_resource(rsc, true);
}
22901e04c3fSmrg
23001e04c3fSmrgstatic void
2317ec681f3Smrgdo_blit(struct fd_context *ctx, const struct pipe_blit_info *blit,
2327ec681f3Smrg        bool fallback) assert_dt
23301e04c3fSmrg{
2347ec681f3Smrg   struct pipe_context *pctx = &ctx->base;
2357ec681f3Smrg
2367ec681f3Smrg   assert(!ctx->in_blit);
2377ec681f3Smrg   ctx->in_blit = true;
2387ec681f3Smrg
2397ec681f3Smrg   /* TODO size threshold too?? */
2407ec681f3Smrg   if (fallback || !fd_blit(pctx, blit)) {
2417ec681f3Smrg      /* do blit on cpu: */
2427ec681f3Smrg      util_resource_copy_region(pctx, blit->dst.resource, blit->dst.level,
2437ec681f3Smrg                                blit->dst.box.x, blit->dst.box.y,
2447ec681f3Smrg                                blit->dst.box.z, blit->src.resource,
2457ec681f3Smrg                                blit->src.level, &blit->src.box);
2467ec681f3Smrg   }
2477ec681f3Smrg
2487ec681f3Smrg   ctx->in_blit = false;
24901e04c3fSmrg}
25001e04c3fSmrg
/**
 * Replace the storage of dst with src.  This is only used by TC in the
 * DISCARD_WHOLE_RESOURCE path, and src is a freshly allocated buffer.
 */
void
fd_replace_buffer_storage(struct pipe_context *pctx, struct pipe_resource *pdst,
                          struct pipe_resource *psrc, unsigned num_rebinds, uint32_t rebind_mask,
                          uint32_t delete_buffer_id)
{
   struct fd_context *ctx = fd_context(pctx);
   struct fd_resource *dst = fd_resource(pdst);
   struct fd_resource *src = fd_resource(psrc);

   DBG("pdst=%p, psrc=%p", pdst, psrc);

   /* This should only be called with buffers.. which side-steps some tricker
    * cases, like a rsc that is in a batch-cache key...
    */
   assert(pdst->target == PIPE_BUFFER);
   assert(psrc->target == PIPE_BUFFER);
   assert(dst->track->bc_batch_mask == 0);
   assert(src->track->bc_batch_mask == 0);
   assert(src->track->batch_mask == 0);
   assert(src->track->write_batch == NULL);
   assert(memcmp(&dst->layout, &src->layout, sizeof(dst->layout)) == 0);

   /* get rid of any references that batch-cache might have to us (which
    * should empty/destroy rsc->batches hashset)
    *
    * Note that we aren't actually destroying dst, but we are replacing
    * it's storage so we want to go thru the same motions of decoupling
    * it's batch connections.
    */
   fd_bc_invalidate_resource(dst, true);
   rebind_resource(dst);

   /* Return the buffer id to TC's id pool: */
   util_idalloc_mt_free(&ctx->screen->buffer_ids, delete_buffer_id);

   /* Lock while we adopt src's bo/tracking into dst and bump the seqno: */
   fd_screen_lock(ctx->screen);

   fd_bo_del(dst->bo);
   dst->bo = fd_bo_ref(src->bo);

   fd_resource_tracking_reference(&dst->track, src->track);
   src->is_replacement = true;

   dst->seqno = p_atomic_inc_return(&ctx->screen->rsc_seqno);

   fd_screen_unlock(ctx->screen);
}
3017ec681f3Smrg
3027ec681f3Smrgstatic unsigned
3037ec681f3Smrgtranslate_usage(unsigned usage)
3047ec681f3Smrg{
3057ec681f3Smrg   uint32_t op = 0;
3067ec681f3Smrg
3077ec681f3Smrg   if (usage & PIPE_MAP_READ)
3087ec681f3Smrg      op |= FD_BO_PREP_READ;
3097ec681f3Smrg
3107ec681f3Smrg   if (usage & PIPE_MAP_WRITE)
3117ec681f3Smrg      op |= FD_BO_PREP_WRITE;
3127ec681f3Smrg
3137ec681f3Smrg   return op;
3147ec681f3Smrg}
3157ec681f3Smrg
3167ec681f3Smrgbool
3177ec681f3Smrgfd_resource_busy(struct pipe_screen *pscreen, struct pipe_resource *prsc,
3187ec681f3Smrg                 unsigned usage)
3197ec681f3Smrg{
3207ec681f3Smrg   struct fd_resource *rsc = fd_resource(prsc);
3217ec681f3Smrg
3227ec681f3Smrg   if (pending(rsc, !!(usage & PIPE_MAP_WRITE)))
3237ec681f3Smrg      return true;
3247ec681f3Smrg
3257ec681f3Smrg   if (resource_busy(rsc, translate_usage(usage)))
3267ec681f3Smrg      return true;
3277ec681f3Smrg
3287ec681f3Smrg   return false;
3297ec681f3Smrg}
3307ec681f3Smrg
3317ec681f3Smrgstatic void flush_resource(struct fd_context *ctx, struct fd_resource *rsc,
3327ec681f3Smrg                           unsigned usage);
3337ec681f3Smrg
3347ec681f3Smrg/**
3357ec681f3Smrg * Helper to check if the format is something that we can blit/render
3367ec681f3Smrg * to.. if the format is not renderable, there is no point in trying
3377ec681f3Smrg * to do a staging blit (as it will still end up being a cpu copy)
3387ec681f3Smrg */
3397ec681f3Smrgstatic bool
3407ec681f3Smrgis_renderable(struct pipe_resource *prsc)
3417ec681f3Smrg{
3427ec681f3Smrg   struct pipe_screen *pscreen = prsc->screen;
3437ec681f3Smrg   return pscreen->is_format_supported(
3447ec681f3Smrg         pscreen, prsc->format, prsc->target, prsc->nr_samples,
3457ec681f3Smrg         prsc->nr_storage_samples, PIPE_BIND_RENDER_TARGET);
3467ec681f3Smrg}
3477ec681f3Smrg
/**
 * Shadow the resource: allocate a replacement buffer with the given
 * @modifier, swap it in as the resource's storage, and blit the old
 * contents (except the discarded region) into the new storage.
 *
 * @rsc: the resource to shadow
 * @level: the level to discard (if box != NULL, otherwise ignored)
 * @box: the box to discard (or NULL if none)
 * @modifier: the modifier for the new buffer state
 *
 * Returns false if shadowing is not possible for this resource (in
 * which case nothing is modified); true once the swap has been done.
 */
static bool
fd_try_shadow_resource(struct fd_context *ctx, struct fd_resource *rsc,
                       unsigned level, const struct pipe_box *box,
                       uint64_t modifier) assert_dt
{
   struct pipe_context *pctx = &ctx->base;
   struct pipe_resource *prsc = &rsc->b.b;
   struct fd_screen *screen = fd_screen(pctx->screen);
   struct fd_batch *batch;
   bool fallback = false;

   /* Resources with a linked "next" (multi-plane/modifier chain --
    * TODO confirm) are not shadowed:
    */
   if (prsc->next)
      return false;

   /* Flush any pending batches writing the resource before we go mucking around
    * in its insides.  The blit would immediately cause the batch to be flushed,
    * anyway.
    */
   fd_bc_flush_writer(ctx, rsc);

   /* Because IB1 ("gmem") cmdstream is built only when we flush the
    * batch, we need to flush any batches that reference this rsc as
    * a render target.  Otherwise the framebuffer state emitted in
    * IB1 will reference the resources new state, and not the state
    * at the point in time that the earlier draws referenced it.
    *
    * Note that being in the gmem key doesn't necessarily mean the
    * batch was considered a writer!
    */
   foreach_batch (batch, &screen->batch_cache, rsc->track->bc_batch_mask) {
      fd_batch_flush(batch);
   }

   /* TODO: somehow munge dimensions and format to copy unsupported
    * render target format to something that is supported?
    */
   if (!is_renderable(prsc))
      fallback = true;

   /* do shadowing back-blits on the cpu for buffers -- requires about a page of
    * DMA to make GPU copies worth it according to robclark.  Note, if you
    * decide to do it on the GPU then you'll need to update valid_buffer_range
    * in the swap()s below.
    */
   if (prsc->target == PIPE_BUFFER)
      fallback = true;

   bool discard_whole_level = box && util_texrange_covers_whole_level(
                                        prsc, level, box->x, box->y, box->z,
                                        box->width, box->height, box->depth);

   /* TODO need to be more clever about current level */
   if ((prsc->target >= PIPE_TEXTURE_2D) && box && !discard_whole_level)
      return false;

   struct pipe_resource *pshadow = pctx->screen->resource_create_with_modifiers(
      pctx->screen, prsc, &modifier, 1);

   if (!pshadow)
      return false;

   assert(!ctx->in_shadow);
   ctx->in_shadow = true;

   /* get rid of any references that batch-cache might have to us (which
    * should empty/destroy rsc->batches hashset)
    */
   fd_bc_invalidate_resource(rsc, false);
   rebind_resource(rsc);

   fd_screen_lock(ctx->screen);

   /* Swap the backing bo's, so shadow becomes the old buffer,
    * blit from shadow to new buffer.  From here on out, we
    * cannot fail.
    *
    * Note that we need to do it in this order, otherwise if
    * we go down cpu blit path, the recursive transfer_map()
    * sees the wrong status..
    */
   struct fd_resource *shadow = fd_resource(pshadow);

   DBG("shadow: %p (%d, %p) -> %p (%d, %p)", rsc, rsc->b.b.reference.count,
       rsc->track, shadow, shadow->b.b.reference.count, shadow->track);

   swap(rsc->bo, shadow->bo);
   swap(rsc->valid, shadow->valid);

   /* swap() doesn't work because you can't typeof() the bitfield. */
   bool temp = shadow->needs_ubwc_clear;
   shadow->needs_ubwc_clear = rsc->needs_ubwc_clear;
   rsc->needs_ubwc_clear = temp;

   swap(rsc->layout, shadow->layout);
   rsc->seqno = p_atomic_inc_return(&ctx->screen->rsc_seqno);

   /* at this point, the newly created shadow buffer is not referenced
    * by any batches, but the existing rsc (probably) is.  We need to
    * transfer those references over:
    */
   debug_assert(shadow->track->batch_mask == 0);
   foreach_batch (batch, &ctx->screen->batch_cache, rsc->track->batch_mask) {
      struct set_entry *entry = _mesa_set_search_pre_hashed(batch->resources, rsc->hash, rsc);
      _mesa_set_remove(batch->resources, entry);
      _mesa_set_add_pre_hashed(batch->resources, shadow->hash, shadow);
   }
   swap(rsc->track, shadow->track);

   fd_screen_unlock(ctx->screen);

   /* Build the blit template reused for all the per-level copies below;
    * only the level/box fields vary per iteration:
    */
   struct pipe_blit_info blit = {};
   blit.dst.resource = prsc;
   blit.dst.format = prsc->format;
   blit.src.resource = pshadow;
   blit.src.format = pshadow->format;
   blit.mask = util_format_get_mask(prsc->format);
   blit.filter = PIPE_TEX_FILTER_NEAREST;

/* Set a field identically on both src and dst sides of the blit: */
#define set_box(field, val)                                                    \
   do {                                                                        \
      blit.dst.field = (val);                                                  \
      blit.src.field = (val);                                                  \
   } while (0)

   /* Disable occlusion queries during shadow blits. */
   bool saved_active_queries = ctx->active_queries;
   pctx->set_active_query_state(pctx, false);

   /* blit the other levels in their entirety: */
   for (unsigned l = 0; l <= prsc->last_level; l++) {
      if (box && l == level)
         continue;

      /* just blit whole level: */
      set_box(level, l);
      set_box(box.width, u_minify(prsc->width0, l));
      set_box(box.height, u_minify(prsc->height0, l));
      set_box(box.depth, u_minify(prsc->depth0, l));

      for (int i = 0; i < prsc->array_size; i++) {
         set_box(box.z, i);
         do_blit(ctx, &blit, fallback);
      }
   }

   /* deal w/ current level specially, since we might need to split
    * it up into a couple blits:
    */
   if (box && !discard_whole_level) {
      set_box(level, level);

      switch (prsc->target) {
      case PIPE_BUFFER:
      case PIPE_TEXTURE_1D:
         set_box(box.y, 0);
         set_box(box.z, 0);
         set_box(box.height, 1);
         set_box(box.depth, 1);

         /* Copy the region before the discarded box, if any: */
         if (box->x > 0) {
            set_box(box.x, 0);
            set_box(box.width, box->x);

            do_blit(ctx, &blit, fallback);
         }
         /* ..and the region after it: */
         if ((box->x + box->width) < u_minify(prsc->width0, level)) {
            set_box(box.x, box->x + box->width);
            set_box(box.width,
                    u_minify(prsc->width0, level) - (box->x + box->width));

            do_blit(ctx, &blit, fallback);
         }
         break;
      case PIPE_TEXTURE_2D:
         /* TODO */
      default:
         unreachable("TODO");
      }
   }

   pctx->set_active_query_state(pctx, saved_active_queries);

   ctx->in_shadow = false;

   pipe_resource_reference(&pshadow, NULL);

   return true;
}
5427ec681f3Smrg
5437ec681f3Smrg/**
5447ec681f3Smrg * Uncompress an UBWC compressed buffer "in place".  This works basically
5457ec681f3Smrg * like resource shadowing, creating a new resource, and doing an uncompress
5467ec681f3Smrg * blit, and swapping the state between shadow and original resource so it
5477ec681f3Smrg * appears to the gallium frontends as if nothing changed.
5487ec681f3Smrg */
5497ec681f3Smrgvoid
5507ec681f3Smrgfd_resource_uncompress(struct fd_context *ctx, struct fd_resource *rsc, bool linear)
55101e04c3fSmrg{
5527ec681f3Smrg   tc_assert_driver_thread(ctx->tc);
5537ec681f3Smrg
5547ec681f3Smrg   uint64_t modifier = linear ? DRM_FORMAT_MOD_LINEAR : FD_FORMAT_MOD_QCOM_TILED;
5557ec681f3Smrg
5567ec681f3Smrg   bool success = fd_try_shadow_resource(ctx, rsc, 0, NULL, modifier);
5577ec681f3Smrg
5587ec681f3Smrg   /* shadow should not fail in any cases where we need to uncompress: */
5597ec681f3Smrg   debug_assert(success);
5607ec681f3Smrg}
5617ec681f3Smrg
5627ec681f3Smrg/**
5637ec681f3Smrg * Debug helper to hexdump a resource.
5647ec681f3Smrg */
5657ec681f3Smrgvoid
5667ec681f3Smrgfd_resource_dump(struct fd_resource *rsc, const char *name)
5677ec681f3Smrg{
5687ec681f3Smrg   fd_bo_cpu_prep(rsc->bo, NULL, FD_BO_PREP_READ);
5697ec681f3Smrg   printf("%s: \n", name);
5707ec681f3Smrg   dump_hex(fd_bo_map(rsc->bo), fd_bo_size(rsc->bo));
57101e04c3fSmrg}
57201e04c3fSmrg
/* Allocate a linear staging resource sized to the transfer box, used
 * to avoid CPU access to tiled/compressed storage.  Returns NULL when
 * a staging blit would not help (and the caller should fall back to
 * direct CPU access).
 */
static struct fd_resource *
fd_alloc_staging(struct fd_context *ctx, struct fd_resource *rsc,
                 unsigned level, const struct pipe_box *box)
   assert_dt
{
   struct pipe_context *pctx = &ctx->base;
   struct pipe_resource tmpl = rsc->b.b;

   /* We cannot currently do stencil export on earlier gens, and
    * u_blitter cannot do blits involving stencil otherwise:
    */
   if ((ctx->screen->gen < 6) && !ctx->blit &&
       (util_format_get_mask(tmpl.format) & PIPE_MASK_S))
      return NULL;

   tmpl.width0 = box->width;
   tmpl.height0 = box->height;
   /* for array textures, box->depth is the array_size, otherwise
    * for 3d textures, it is the depth:
    */
   if (tmpl.array_size > 1) {
      /* A staged cube sub-range is no longer a full cube: */
      if (tmpl.target == PIPE_TEXTURE_CUBE)
         tmpl.target = PIPE_TEXTURE_2D_ARRAY;
      tmpl.array_size = box->depth;
      tmpl.depth0 = 1;
   } else {
      tmpl.array_size = 1;
      tmpl.depth0 = box->depth;
   }
   /* Staging holds just the one mip level, linear, CPU-friendly: */
   tmpl.last_level = 0;
   tmpl.bind |= PIPE_BIND_LINEAR;
   tmpl.usage = PIPE_USAGE_STAGING;

   struct pipe_resource *pstaging =
      pctx->screen->resource_create(pctx->screen, &tmpl);
   if (!pstaging)
      return NULL;

   return fd_resource(pstaging);
}
61301e04c3fSmrg
61401e04c3fSmrgstatic void
6157ec681f3Smrgfd_blit_from_staging(struct fd_context *ctx,
6167ec681f3Smrg                     struct fd_transfer *trans) assert_dt
61701e04c3fSmrg{
6187ec681f3Smrg   DBG("");
6197ec681f3Smrg   struct pipe_resource *dst = trans->b.b.resource;
6207ec681f3Smrg   struct pipe_blit_info blit = {};
6217ec681f3Smrg
6227ec681f3Smrg   blit.dst.resource = dst;
6237ec681f3Smrg   blit.dst.format = dst->format;
6247ec681f3Smrg   blit.dst.level = trans->b.b.level;
6257ec681f3Smrg   blit.dst.box = trans->b.b.box;
6267ec681f3Smrg   blit.src.resource = trans->staging_prsc;
6277ec681f3Smrg   blit.src.format = trans->staging_prsc->format;
6287ec681f3Smrg   blit.src.level = 0;
6297ec681f3Smrg   blit.src.box = trans->staging_box;
6307ec681f3Smrg   blit.mask = util_format_get_mask(trans->staging_prsc->format);
6317ec681f3Smrg   blit.filter = PIPE_TEX_FILTER_NEAREST;
6327ec681f3Smrg
6337ec681f3Smrg   do_blit(ctx, &blit, false);
63401e04c3fSmrg}
63501e04c3fSmrg
63601e04c3fSmrgstatic void
6377ec681f3Smrgfd_blit_to_staging(struct fd_context *ctx, struct fd_transfer *trans) assert_dt
63801e04c3fSmrg{
6397ec681f3Smrg   DBG("");
6407ec681f3Smrg   struct pipe_resource *src = trans->b.b.resource;
6417ec681f3Smrg   struct pipe_blit_info blit = {};
6427ec681f3Smrg
6437ec681f3Smrg   blit.src.resource = src;
6447ec681f3Smrg   blit.src.format = src->format;
6457ec681f3Smrg   blit.src.level = trans->b.b.level;
6467ec681f3Smrg   blit.src.box = trans->b.b.box;
6477ec681f3Smrg   blit.dst.resource = trans->staging_prsc;
6487ec681f3Smrg   blit.dst.format = trans->staging_prsc->format;
6497ec681f3Smrg   blit.dst.level = 0;
6507ec681f3Smrg   blit.dst.box = trans->staging_box;
6517ec681f3Smrg   blit.mask = util_format_get_mask(trans->staging_prsc->format);
6527ec681f3Smrg   blit.filter = PIPE_TEX_FILTER_NEAREST;
6537ec681f3Smrg
6547ec681f3Smrg   do_blit(ctx, &blit, false);
65501e04c3fSmrg}
65601e04c3fSmrg
6577ec681f3Smrgstatic void
6587ec681f3Smrgfd_resource_transfer_flush_region(struct pipe_context *pctx,
6597ec681f3Smrg                                  struct pipe_transfer *ptrans,
6607ec681f3Smrg                                  const struct pipe_box *box)
661af69d88dSmrg{
6627ec681f3Smrg   struct fd_resource *rsc = fd_resource(ptrans->resource);
663af69d88dSmrg
6647ec681f3Smrg   if (ptrans->resource->target == PIPE_BUFFER)
6657ec681f3Smrg      util_range_add(&rsc->b.b, &rsc->valid_buffer_range,
6667ec681f3Smrg                     ptrans->box.x + box->x,
6677ec681f3Smrg                     ptrans->box.x + box->x + box->width);
66801e04c3fSmrg}
669af69d88dSmrg
67001e04c3fSmrgstatic void
6717ec681f3Smrgflush_resource(struct fd_context *ctx, struct fd_resource *rsc,
6727ec681f3Smrg               unsigned usage) assert_dt
67301e04c3fSmrg{
6747ec681f3Smrg   if (usage & PIPE_MAP_WRITE) {
6757ec681f3Smrg      fd_bc_flush_readers(ctx, rsc);
6767ec681f3Smrg   } else {
6777ec681f3Smrg      fd_bc_flush_writer(ctx, rsc);
6787ec681f3Smrg   }
67901e04c3fSmrg}
68001e04c3fSmrg
68101e04c3fSmrgstatic void
68201e04c3fSmrgfd_flush_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
6837ec681f3Smrg   in_dt
68401e04c3fSmrg{
6857ec681f3Smrg   struct fd_context *ctx = fd_context(pctx);
6867ec681f3Smrg   struct fd_resource *rsc = fd_resource(prsc);
6877ec681f3Smrg
6887ec681f3Smrg   flush_resource(ctx, rsc, PIPE_MAP_READ);
6897ec681f3Smrg
6907ec681f3Smrg   /* If we had to flush a batch, make sure it makes it's way all the
6917ec681f3Smrg    * way to the kernel:
6927ec681f3Smrg    */
6937ec681f3Smrg   fd_resource_wait(ctx, rsc, FD_BO_PREP_FLUSH);
694af69d88dSmrg}
695af69d88dSmrg
696af69d88dSmrgstatic void
697af69d88dSmrgfd_resource_transfer_unmap(struct pipe_context *pctx,
6987ec681f3Smrg                           struct pipe_transfer *ptrans)
6997ec681f3Smrg   in_dt /* TODO for threaded-ctx we'll need to split out unsynchronized path */
700af69d88dSmrg{
7017ec681f3Smrg   struct fd_context *ctx = fd_context(pctx);
7027ec681f3Smrg   struct fd_resource *rsc = fd_resource(ptrans->resource);
7037ec681f3Smrg   struct fd_transfer *trans = fd_transfer(ptrans);
704af69d88dSmrg
7057ec681f3Smrg   if (trans->staging_prsc) {
7067ec681f3Smrg      if (ptrans->usage & PIPE_MAP_WRITE)
7077ec681f3Smrg         fd_blit_from_staging(ctx, trans);
7087ec681f3Smrg      pipe_resource_reference(&trans->staging_prsc, NULL);
7097ec681f3Smrg   }
710af69d88dSmrg
7117ec681f3Smrg   if (!(ptrans->usage & PIPE_MAP_UNSYNCHRONIZED)) {
7127ec681f3Smrg      fd_bo_cpu_fini(rsc->bo);
7137ec681f3Smrg   }
7147ec681f3Smrg
7157ec681f3Smrg   util_range_add(&rsc->b.b, &rsc->valid_buffer_range, ptrans->box.x,
7167ec681f3Smrg                  ptrans->box.x + ptrans->box.width);
7177ec681f3Smrg
7187ec681f3Smrg   pipe_resource_reference(&ptrans->resource, NULL);
7197ec681f3Smrg
7207ec681f3Smrg   assert(trans->b.staging == NULL); /* for threaded context only */
7217ec681f3Smrg
7227ec681f3Smrg   /* Don't use pool_transfers_unsync. We are always in the driver
7237ec681f3Smrg    * thread. Freeing an object into a different pool is allowed.
7247ec681f3Smrg    */
7257ec681f3Smrg   slab_free(&ctx->transfer_pool, ptrans);
726af69d88dSmrg}
727af69d88dSmrg
728af69d88dSmrgstatic void
7297ec681f3Smrginvalidate_resource(struct fd_resource *rsc, unsigned usage) assert_dt
730af69d88dSmrg{
7317ec681f3Smrg   bool needs_flush = pending(rsc, !!(usage & PIPE_MAP_WRITE));
7327ec681f3Smrg   unsigned op = translate_usage(usage);
7337ec681f3Smrg
7347ec681f3Smrg   if (needs_flush || resource_busy(rsc, op)) {
7357ec681f3Smrg      rebind_resource(rsc);
7367ec681f3Smrg      realloc_bo(rsc, fd_bo_size(rsc->bo));
7377ec681f3Smrg   } else {
7387ec681f3Smrg      util_range_set_empty(&rsc->valid_buffer_range);
7397ec681f3Smrg   }
740af69d88dSmrg}
741af69d88dSmrg
7427ec681f3Smrgstatic void *
7437ec681f3Smrgresource_transfer_map_unsync(struct pipe_context *pctx,
7447ec681f3Smrg                             struct pipe_resource *prsc, unsigned level,
7457ec681f3Smrg                             unsigned usage, const struct pipe_box *box,
7467ec681f3Smrg                             struct fd_transfer *trans)
7479f464c52Smaya{
7487ec681f3Smrg   struct fd_resource *rsc = fd_resource(prsc);
7497ec681f3Smrg   enum pipe_format format = prsc->format;
7507ec681f3Smrg   uint32_t offset;
7517ec681f3Smrg   char *buf;
7527ec681f3Smrg
7537ec681f3Smrg   buf = fd_bo_map(rsc->bo);
7547ec681f3Smrg   offset = box->y / util_format_get_blockheight(format) * trans->b.b.stride +
7557ec681f3Smrg            box->x / util_format_get_blockwidth(format) * rsc->layout.cpp +
7567ec681f3Smrg            fd_resource_offset(rsc, level, box->z);
7577ec681f3Smrg
7587ec681f3Smrg   if (usage & PIPE_MAP_WRITE)
7597ec681f3Smrg      rsc->valid = true;
7609f464c52Smaya
7617ec681f3Smrg   return buf + offset;
7629f464c52Smaya}
7639f464c52Smaya
7647ec681f3Smrg/**
7657ec681f3Smrg * Note, with threaded_context, resource_transfer_map() is only called
7667ec681f3Smrg * in driver thread, but resource_transfer_map_unsync() can be called in
7677ec681f3Smrg * either driver or frontend thread.
7687ec681f3Smrg */
7697ec681f3Smrgstatic void *
7707ec681f3Smrgresource_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
7717ec681f3Smrg                      unsigned level, unsigned usage,
7727ec681f3Smrg                      const struct pipe_box *box,
7737ec681f3Smrg                      struct fd_transfer *trans) in_dt
774af69d88dSmrg{
7757ec681f3Smrg   struct fd_context *ctx = fd_context(pctx);
7767ec681f3Smrg   struct fd_resource *rsc = fd_resource(prsc);
7777ec681f3Smrg   char *buf;
7787ec681f3Smrg   int ret = 0;
7797ec681f3Smrg
7807ec681f3Smrg   tc_assert_driver_thread(ctx->tc);
7817ec681f3Smrg
7827ec681f3Smrg   /* Strip the read flag if the buffer has been invalidated (or is freshly
7837ec681f3Smrg    * created). Avoids extra staging blits of undefined data on glTexSubImage of
7847ec681f3Smrg    * a fresh DEPTH_COMPONENT or STENCIL_INDEX texture being stored as z24s8.
7857ec681f3Smrg    */
7867ec681f3Smrg   if (!rsc->valid)
7877ec681f3Smrg      usage &= ~PIPE_MAP_READ;
7887ec681f3Smrg
7897ec681f3Smrg   /* we always need a staging texture for tiled buffers:
7907ec681f3Smrg    *
7917ec681f3Smrg    * TODO we might sometimes want to *also* shadow the resource to avoid
7927ec681f3Smrg    * splitting a batch.. for ex, mid-frame texture uploads to a tiled
7937ec681f3Smrg    * texture.
7947ec681f3Smrg    */
7957ec681f3Smrg   if (rsc->layout.tile_mode) {
7967ec681f3Smrg      struct fd_resource *staging_rsc;
7977ec681f3Smrg
7987ec681f3Smrg      assert(prsc->target != PIPE_BUFFER);
7997ec681f3Smrg
8007ec681f3Smrg      staging_rsc = fd_alloc_staging(ctx, rsc, level, box);
8017ec681f3Smrg      if (staging_rsc) {
8027ec681f3Smrg         trans->staging_prsc = &staging_rsc->b.b;
8037ec681f3Smrg         trans->b.b.stride = fd_resource_pitch(staging_rsc, 0);
8047ec681f3Smrg         trans->b.b.layer_stride = fd_resource_layer_stride(staging_rsc, 0);
8057ec681f3Smrg         trans->staging_box = *box;
8067ec681f3Smrg         trans->staging_box.x = 0;
8077ec681f3Smrg         trans->staging_box.y = 0;
8087ec681f3Smrg         trans->staging_box.z = 0;
8097ec681f3Smrg
8107ec681f3Smrg         if (usage & PIPE_MAP_READ) {
8117ec681f3Smrg            fd_blit_to_staging(ctx, trans);
8127ec681f3Smrg
8137ec681f3Smrg            fd_resource_wait(ctx, staging_rsc, FD_BO_PREP_READ);
8147ec681f3Smrg         }
8157ec681f3Smrg
8167ec681f3Smrg         buf = fd_bo_map(staging_rsc->bo);
8177ec681f3Smrg
8187ec681f3Smrg         ctx->stats.staging_uploads++;
8197ec681f3Smrg
8207ec681f3Smrg         return buf;
8217ec681f3Smrg      }
8227ec681f3Smrg   } else if ((usage & PIPE_MAP_READ) && !fd_bo_is_cached(rsc->bo)) {
8237ec681f3Smrg      perf_debug_ctx(ctx, "wc readback: prsc=%p, level=%u, usage=%x, box=%dx%d+%d,%d",
8247ec681f3Smrg                     prsc, level, usage, box->width, box->height, box->x, box->y);
8257ec681f3Smrg   }
8267ec681f3Smrg
8277ec681f3Smrg   if (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE) {
8287ec681f3Smrg      invalidate_resource(rsc, usage);
8297ec681f3Smrg   } else {
8307ec681f3Smrg      unsigned op = translate_usage(usage);
8317ec681f3Smrg      bool needs_flush = pending(rsc, !!(usage & PIPE_MAP_WRITE));
8327ec681f3Smrg
8337ec681f3Smrg      /* If the GPU is writing to the resource, or if it is reading from the
8347ec681f3Smrg       * resource and we're trying to write to it, flush the renders.
8357ec681f3Smrg       */
8367ec681f3Smrg      bool busy = needs_flush || resource_busy(rsc, op);
8377ec681f3Smrg
8387ec681f3Smrg      /* if we need to flush/stall, see if we can make a shadow buffer
8397ec681f3Smrg       * to avoid this:
8407ec681f3Smrg       *
8417ec681f3Smrg       * TODO we could go down this path !reorder && !busy_for_read
8427ec681f3Smrg       * ie. we only *don't* want to go down this path if the blit
8437ec681f3Smrg       * will trigger a flush!
8447ec681f3Smrg       */
8457ec681f3Smrg      if (ctx->screen->reorder && busy && !(usage & PIPE_MAP_READ) &&
8467ec681f3Smrg          (usage & PIPE_MAP_DISCARD_RANGE)) {
8477ec681f3Smrg
8487ec681f3Smrg         /* try shadowing only if it avoids a flush, otherwise staging would
8497ec681f3Smrg          * be better:
8507ec681f3Smrg          */
8517ec681f3Smrg         if (needs_flush && fd_try_shadow_resource(ctx, rsc, level, box,
8527ec681f3Smrg                                                   DRM_FORMAT_MOD_LINEAR)) {
8537ec681f3Smrg            needs_flush = busy = false;
8547ec681f3Smrg            ctx->stats.shadow_uploads++;
8557ec681f3Smrg         } else {
8567ec681f3Smrg            struct fd_resource *staging_rsc = NULL;
8577ec681f3Smrg
8587ec681f3Smrg            if (needs_flush) {
8597ec681f3Smrg               flush_resource(ctx, rsc, usage);
8607ec681f3Smrg               needs_flush = false;
8617ec681f3Smrg            }
8627ec681f3Smrg
8637ec681f3Smrg            /* in this case, we don't need to shadow the whole resource,
8647ec681f3Smrg             * since any draw that references the previous contents has
8657ec681f3Smrg             * already had rendering flushed for all tiles.  So we can
8667ec681f3Smrg             * use a staging buffer to do the upload.
8677ec681f3Smrg             */
8687ec681f3Smrg            if (is_renderable(prsc))
8697ec681f3Smrg               staging_rsc = fd_alloc_staging(ctx, rsc, level, box);
8707ec681f3Smrg            if (staging_rsc) {
8717ec681f3Smrg               trans->staging_prsc = &staging_rsc->b.b;
8727ec681f3Smrg               trans->b.b.stride = fd_resource_pitch(staging_rsc, 0);
8737ec681f3Smrg               trans->b.b.layer_stride =
8747ec681f3Smrg                  fd_resource_layer_stride(staging_rsc, 0);
8757ec681f3Smrg               trans->staging_box = *box;
8767ec681f3Smrg               trans->staging_box.x = 0;
8777ec681f3Smrg               trans->staging_box.y = 0;
8787ec681f3Smrg               trans->staging_box.z = 0;
8797ec681f3Smrg               buf = fd_bo_map(staging_rsc->bo);
8807ec681f3Smrg
8817ec681f3Smrg               ctx->stats.staging_uploads++;
8827ec681f3Smrg
8837ec681f3Smrg               return buf;
8847ec681f3Smrg            }
8857ec681f3Smrg         }
8867ec681f3Smrg      }
8877ec681f3Smrg
8887ec681f3Smrg      if (needs_flush) {
8897ec681f3Smrg         flush_resource(ctx, rsc, usage);
8907ec681f3Smrg         needs_flush = false;
8917ec681f3Smrg      }
8927ec681f3Smrg
8937ec681f3Smrg      /* The GPU keeps track of how the various bo's are being used, and
8947ec681f3Smrg       * will wait if necessary for the proper operation to have
8957ec681f3Smrg       * completed.
8967ec681f3Smrg       */
8977ec681f3Smrg      if (busy) {
8987ec681f3Smrg         ret = fd_resource_wait(ctx, rsc, op);
8997ec681f3Smrg         if (ret)
9007ec681f3Smrg            return NULL;
9017ec681f3Smrg      }
9027ec681f3Smrg   }
9037ec681f3Smrg
9047ec681f3Smrg   return resource_transfer_map_unsync(pctx, prsc, level, usage, box, trans);
9057ec681f3Smrg}
906af69d88dSmrg
9077ec681f3Smrgstatic unsigned
9087ec681f3Smrgimprove_transfer_map_usage(struct fd_context *ctx, struct fd_resource *rsc,
9097ec681f3Smrg                           unsigned usage, const struct pipe_box *box)
9107ec681f3Smrg   /* Not *strictly* true, but the access to things that must only be in driver-
9117ec681f3Smrg    * thread are protected by !(usage & TC_TRANSFER_MAP_THREADED_UNSYNC):
9127ec681f3Smrg    */
9137ec681f3Smrg   in_dt
9147ec681f3Smrg{
9157ec681f3Smrg   if (usage & TC_TRANSFER_MAP_NO_INVALIDATE) {
9167ec681f3Smrg      usage &= ~PIPE_MAP_DISCARD_WHOLE_RESOURCE;
9177ec681f3Smrg   }
9187ec681f3Smrg
9197ec681f3Smrg   if (usage & TC_TRANSFER_MAP_THREADED_UNSYNC)
9207ec681f3Smrg      usage |= PIPE_MAP_UNSYNCHRONIZED;
9217ec681f3Smrg
9227ec681f3Smrg   if (!(usage &
9237ec681f3Smrg         (TC_TRANSFER_MAP_NO_INFER_UNSYNCHRONIZED | PIPE_MAP_UNSYNCHRONIZED))) {
9247ec681f3Smrg      if (ctx->in_shadow && !(usage & PIPE_MAP_READ)) {
9257ec681f3Smrg         usage |= PIPE_MAP_UNSYNCHRONIZED;
9267ec681f3Smrg      } else if ((usage & PIPE_MAP_WRITE) && (rsc->b.b.target == PIPE_BUFFER) &&
9277ec681f3Smrg                 !util_ranges_intersect(&rsc->valid_buffer_range, box->x,
9287ec681f3Smrg                                        box->x + box->width)) {
9297ec681f3Smrg         /* We are trying to write to a previously uninitialized range. No need
9307ec681f3Smrg          * to synchronize.
9317ec681f3Smrg          */
9327ec681f3Smrg         usage |= PIPE_MAP_UNSYNCHRONIZED;
9337ec681f3Smrg      }
9347ec681f3Smrg   }
9357ec681f3Smrg
9367ec681f3Smrg   return usage;
9377ec681f3Smrg}
9389f464c52Smaya
9397ec681f3Smrgstatic void *
9407ec681f3Smrgfd_resource_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
9417ec681f3Smrg                         unsigned level, unsigned usage,
9427ec681f3Smrg                         const struct pipe_box *box,
9437ec681f3Smrg                         struct pipe_transfer **pptrans)
9447ec681f3Smrg{
9457ec681f3Smrg   struct fd_context *ctx = fd_context(pctx);
9467ec681f3Smrg   struct fd_resource *rsc = fd_resource(prsc);
9477ec681f3Smrg   struct fd_transfer *trans;
9487ec681f3Smrg   struct pipe_transfer *ptrans;
9497ec681f3Smrg
9507ec681f3Smrg   DBG("prsc=%p, level=%u, usage=%x, box=%dx%d+%d,%d", prsc, level, usage,
9517ec681f3Smrg       box->width, box->height, box->x, box->y);
9527ec681f3Smrg
9537ec681f3Smrg   if ((usage & PIPE_MAP_DIRECTLY) && rsc->layout.tile_mode) {
9547ec681f3Smrg      DBG("CANNOT MAP DIRECTLY!\n");
9557ec681f3Smrg      return NULL;
9567ec681f3Smrg   }
9577ec681f3Smrg
9587ec681f3Smrg   if (usage & TC_TRANSFER_MAP_THREADED_UNSYNC) {
9597ec681f3Smrg      ptrans = slab_alloc(&ctx->transfer_pool_unsync);
9607ec681f3Smrg   } else {
9617ec681f3Smrg      ptrans = slab_alloc(&ctx->transfer_pool);
9627ec681f3Smrg   }
9637ec681f3Smrg
9647ec681f3Smrg   if (!ptrans)
9657ec681f3Smrg      return NULL;
9667ec681f3Smrg
9677ec681f3Smrg   /* slab_alloc_st() doesn't zero: */
9687ec681f3Smrg   trans = fd_transfer(ptrans);
9697ec681f3Smrg   memset(trans, 0, sizeof(*trans));
9707ec681f3Smrg
9717ec681f3Smrg   usage = improve_transfer_map_usage(ctx, rsc, usage, box);
9727ec681f3Smrg
9737ec681f3Smrg   pipe_resource_reference(&ptrans->resource, prsc);
9747ec681f3Smrg   ptrans->level = level;
9757ec681f3Smrg   ptrans->usage = usage;
9767ec681f3Smrg   ptrans->box = *box;
9777ec681f3Smrg   ptrans->stride = fd_resource_pitch(rsc, level);
9787ec681f3Smrg   ptrans->layer_stride = fd_resource_layer_stride(rsc, level);
9797ec681f3Smrg
9807ec681f3Smrg   void *ret;
9817ec681f3Smrg   if (usage & PIPE_MAP_UNSYNCHRONIZED) {
9827ec681f3Smrg      ret = resource_transfer_map_unsync(pctx, prsc, level, usage, box, trans);
9837ec681f3Smrg   } else {
9847ec681f3Smrg      ret = resource_transfer_map(pctx, prsc, level, usage, box, trans);
9857ec681f3Smrg   }
9867ec681f3Smrg
9877ec681f3Smrg   if (ret) {
9887ec681f3Smrg      *pptrans = ptrans;
9897ec681f3Smrg   } else {
9907ec681f3Smrg      fd_resource_transfer_unmap(pctx, ptrans);
9917ec681f3Smrg   }
9927ec681f3Smrg
9937ec681f3Smrg   return ret;
994af69d88dSmrg}
995af69d88dSmrg
9967ec681f3Smrgstatic void
9977ec681f3Smrgfd_resource_destroy(struct pipe_screen *pscreen, struct pipe_resource *prsc)
998af69d88dSmrg{
9997ec681f3Smrg   struct fd_screen *screen = fd_screen(prsc->screen);
10007ec681f3Smrg   struct fd_resource *rsc = fd_resource(prsc);
10017ec681f3Smrg
10027ec681f3Smrg   if (!rsc->is_replacement)
10037ec681f3Smrg      fd_bc_invalidate_resource(rsc, true);
10047ec681f3Smrg   if (rsc->bo)
10057ec681f3Smrg      fd_bo_del(rsc->bo);
10067ec681f3Smrg   if (rsc->lrz)
10077ec681f3Smrg      fd_bo_del(rsc->lrz);
10087ec681f3Smrg   if (rsc->scanout)
10097ec681f3Smrg      renderonly_scanout_destroy(rsc->scanout, fd_screen(pscreen)->ro);
10107ec681f3Smrg
10117ec681f3Smrg   if (prsc->target == PIPE_BUFFER)
10127ec681f3Smrg      util_idalloc_mt_free(&screen->buffer_ids, rsc->b.buffer_id_unique);
10137ec681f3Smrg
10147ec681f3Smrg   threaded_resource_deinit(prsc);
10157ec681f3Smrg
10167ec681f3Smrg   util_range_destroy(&rsc->valid_buffer_range);
10177ec681f3Smrg   simple_mtx_destroy(&rsc->lock);
10187ec681f3Smrg   fd_resource_tracking_reference(&rsc->track, NULL);
10197ec681f3Smrg
10207ec681f3Smrg   FREE(rsc);
1021af69d88dSmrg}
1022af69d88dSmrg
10237ec681f3Smrgstatic uint64_t
10247ec681f3Smrgfd_resource_modifier(struct fd_resource *rsc)
1025af69d88dSmrg{
10267ec681f3Smrg   if (!rsc->layout.tile_mode)
10277ec681f3Smrg      return DRM_FORMAT_MOD_LINEAR;
10287ec681f3Smrg
10297ec681f3Smrg   if (rsc->layout.ubwc_layer_size)
10307ec681f3Smrg      return DRM_FORMAT_MOD_QCOM_COMPRESSED;
10317ec681f3Smrg
10327ec681f3Smrg   /* TODO invent a modifier for tiled but not UBWC buffers: */
10337ec681f3Smrg   return DRM_FORMAT_MOD_INVALID;
103401e04c3fSmrg}
1035af69d88dSmrg
10367ec681f3Smrgstatic bool
10377ec681f3Smrgfd_resource_get_handle(struct pipe_screen *pscreen, struct pipe_context *pctx,
10387ec681f3Smrg                       struct pipe_resource *prsc, struct winsys_handle *handle,
10397ec681f3Smrg                       unsigned usage)
104001e04c3fSmrg{
10417ec681f3Smrg   struct fd_resource *rsc = fd_resource(prsc);
10427ec681f3Smrg
10437ec681f3Smrg   rsc->b.is_shared = true;
10447ec681f3Smrg
10457ec681f3Smrg   handle->modifier = fd_resource_modifier(rsc);
10467ec681f3Smrg
10477ec681f3Smrg   DBG("%" PRSC_FMT ", modifier=%" PRIx64, PRSC_ARGS(prsc), handle->modifier);
10487ec681f3Smrg
10497ec681f3Smrg   return fd_screen_bo_get_handle(pscreen, rsc->bo, rsc->scanout,
10507ec681f3Smrg                                  fd_resource_pitch(rsc, 0), handle);
105101e04c3fSmrg}
1052af69d88dSmrg
105301e04c3fSmrg/* special case to resize query buf after allocated.. */
105401e04c3fSmrgvoid
105501e04c3fSmrgfd_resource_resize(struct pipe_resource *prsc, uint32_t sz)
105601e04c3fSmrg{
10577ec681f3Smrg   struct fd_resource *rsc = fd_resource(prsc);
1058af69d88dSmrg
10597ec681f3Smrg   debug_assert(prsc->width0 == 0);
10607ec681f3Smrg   debug_assert(prsc->target == PIPE_BUFFER);
10617ec681f3Smrg   debug_assert(prsc->bind == PIPE_BIND_QUERY_BUFFER);
1062af69d88dSmrg
10637ec681f3Smrg   prsc->width0 = sz;
10647ec681f3Smrg   realloc_bo(rsc, fd_screen(prsc->screen)->setup_slices(rsc));
106501e04c3fSmrg}
106601e04c3fSmrg
10677ec681f3Smrgstatic void
10687ec681f3Smrgfd_resource_layout_init(struct pipe_resource *prsc)
10697ec681f3Smrg{
10707ec681f3Smrg   struct fd_resource *rsc = fd_resource(prsc);
10717ec681f3Smrg   struct fdl_layout *layout = &rsc->layout;
10727ec681f3Smrg
10737ec681f3Smrg   layout->format = prsc->format;
10747ec681f3Smrg
10757ec681f3Smrg   layout->width0 = prsc->width0;
10767ec681f3Smrg   layout->height0 = prsc->height0;
10777ec681f3Smrg   layout->depth0 = prsc->depth0;
10787ec681f3Smrg
10797ec681f3Smrg   layout->cpp = util_format_get_blocksize(prsc->format);
10807ec681f3Smrg   layout->cpp *= fd_resource_nr_samples(prsc);
10817ec681f3Smrg   layout->cpp_shift = ffs(layout->cpp) - 1;
10827ec681f3Smrg}
10837ec681f3Smrg
10847ec681f3Smrgstatic struct fd_resource *
10857ec681f3Smrgalloc_resource_struct(struct pipe_screen *pscreen,
10867ec681f3Smrg                      const struct pipe_resource *tmpl)
10877ec681f3Smrg{
10887ec681f3Smrg   struct fd_screen *screen = fd_screen(pscreen);
10897ec681f3Smrg   struct fd_resource *rsc = CALLOC_STRUCT(fd_resource);
10907ec681f3Smrg
10917ec681f3Smrg   if (!rsc)
10927ec681f3Smrg      return NULL;
10937ec681f3Smrg
10947ec681f3Smrg   struct pipe_resource *prsc = &rsc->b.b;
10957ec681f3Smrg   *prsc = *tmpl;
10967ec681f3Smrg
10977ec681f3Smrg   pipe_reference_init(&prsc->reference, 1);
10987ec681f3Smrg   prsc->screen = pscreen;
10997ec681f3Smrg   rsc->hash = _mesa_hash_pointer(rsc);
11007ec681f3Smrg
11017ec681f3Smrg   util_range_init(&rsc->valid_buffer_range);
11027ec681f3Smrg   simple_mtx_init(&rsc->lock, mtx_plain);
11037ec681f3Smrg
11047ec681f3Smrg   rsc->track = CALLOC_STRUCT(fd_resource_tracking);
11057ec681f3Smrg   if (!rsc->track) {
11067ec681f3Smrg      free(rsc);
11077ec681f3Smrg      return NULL;
11087ec681f3Smrg   }
11097ec681f3Smrg
11107ec681f3Smrg   pipe_reference_init(&rsc->track->reference, 1);
11117ec681f3Smrg
11127ec681f3Smrg   threaded_resource_init(prsc);
11137ec681f3Smrg
11147ec681f3Smrg   if (tmpl->target == PIPE_BUFFER)
11157ec681f3Smrg      rsc->b.buffer_id_unique = util_idalloc_mt_alloc(&screen->buffer_ids);
11167ec681f3Smrg
11177ec681f3Smrg   return rsc;
11187ec681f3Smrg}
11197ec681f3Smrg
11207ec681f3Smrgenum fd_layout_type {
11217ec681f3Smrg   ERROR,
11227ec681f3Smrg   LINEAR,
11237ec681f3Smrg   TILED,
11247ec681f3Smrg   UBWC,
11257ec681f3Smrg};
11267ec681f3Smrg
11277ec681f3Smrgstatic enum fd_layout_type
11287ec681f3Smrgget_best_layout(struct fd_screen *screen, struct pipe_resource *prsc,
11297ec681f3Smrg                const struct pipe_resource *tmpl, const uint64_t *modifiers,
11307ec681f3Smrg                int count)
11317ec681f3Smrg{
11327ec681f3Smrg   bool implicit_modifiers =
11337ec681f3Smrg      (count == 0 ||
11347ec681f3Smrg       drm_find_modifier(DRM_FORMAT_MOD_INVALID, modifiers, count));
11357ec681f3Smrg
11367ec681f3Smrg   /* First, find all the conditions which would force us to linear */
11377ec681f3Smrg   if (!screen->tile_mode)
11387ec681f3Smrg      return LINEAR;
11397ec681f3Smrg
11407ec681f3Smrg   if (!screen->tile_mode(prsc))
11417ec681f3Smrg      return LINEAR;
11427ec681f3Smrg
11437ec681f3Smrg   if (tmpl->target == PIPE_BUFFER)
11447ec681f3Smrg      return LINEAR;
11457ec681f3Smrg
11467ec681f3Smrg   if (tmpl->bind & PIPE_BIND_LINEAR) {
11477ec681f3Smrg      if (tmpl->usage != PIPE_USAGE_STAGING)
11487ec681f3Smrg         perf_debug("%" PRSC_FMT ": forcing linear: bind flags",
11497ec681f3Smrg                    PRSC_ARGS(prsc));
11507ec681f3Smrg      return LINEAR;
11517ec681f3Smrg   }
11527ec681f3Smrg
11537ec681f3Smrg   if (FD_DBG(NOTILE))
11547ec681f3Smrg       return LINEAR;
11557ec681f3Smrg
11567ec681f3Smrg   /* Shared resources with implicit modifiers must always be linear */
11577ec681f3Smrg   if (implicit_modifiers && (tmpl->bind & PIPE_BIND_SHARED)) {
11587ec681f3Smrg      perf_debug("%" PRSC_FMT
11597ec681f3Smrg                 ": forcing linear: shared resource + implicit modifiers",
11607ec681f3Smrg                 PRSC_ARGS(prsc));
11617ec681f3Smrg      return LINEAR;
11627ec681f3Smrg   }
11637ec681f3Smrg
11647ec681f3Smrg   bool ubwc_ok = is_a6xx(screen);
11657ec681f3Smrg   if (FD_DBG(NOUBWC))
11667ec681f3Smrg      ubwc_ok = false;
11677ec681f3Smrg
11687ec681f3Smrg   if (ubwc_ok && !implicit_modifiers &&
11697ec681f3Smrg       !drm_find_modifier(DRM_FORMAT_MOD_QCOM_COMPRESSED, modifiers, count)) {
11707ec681f3Smrg      perf_debug("%" PRSC_FMT
11717ec681f3Smrg                 ": not using UBWC: not in acceptable modifier set",
11727ec681f3Smrg                 PRSC_ARGS(prsc));
11737ec681f3Smrg      ubwc_ok = false;
11747ec681f3Smrg   }
11757ec681f3Smrg
11767ec681f3Smrg   if (ubwc_ok)
11777ec681f3Smrg      return UBWC;
11787ec681f3Smrg
11797ec681f3Smrg   /* We can't use tiled with explicit modifiers, as there is no modifier token
11807ec681f3Smrg    * defined for it. But we might internally force tiled allocation using a
11817ec681f3Smrg    * private modifier token.
11827ec681f3Smrg    *
11837ec681f3Smrg    * TODO we should probably also limit TILED in a similar way to UBWC above,
11847ec681f3Smrg    * once we have a public modifier token defined.
11857ec681f3Smrg    */
11867ec681f3Smrg   if (implicit_modifiers ||
11877ec681f3Smrg       drm_find_modifier(FD_FORMAT_MOD_QCOM_TILED, modifiers, count))
11887ec681f3Smrg      return TILED;
11897ec681f3Smrg
11907ec681f3Smrg   if (!drm_find_modifier(DRM_FORMAT_MOD_LINEAR, modifiers, count)) {
11917ec681f3Smrg      perf_debug("%" PRSC_FMT ": need linear but not in modifier set",
11927ec681f3Smrg                 PRSC_ARGS(prsc));
11937ec681f3Smrg      return ERROR;
11947ec681f3Smrg   }
11957ec681f3Smrg
11967ec681f3Smrg   perf_debug("%" PRSC_FMT ": not using tiling: explicit modifiers and no UBWC",
11977ec681f3Smrg              PRSC_ARGS(prsc));
11987ec681f3Smrg   return LINEAR;
11997ec681f3Smrg}
12007ec681f3Smrg
12017ec681f3Smrg/**
12027ec681f3Smrg * Helper that allocates a resource and resolves its layout (but doesn't
12037ec681f3Smrg * allocate its bo).
12047ec681f3Smrg *
12057ec681f3Smrg * It returns a pipe_resource (as fd_resource_create_with_modifiers()
12067ec681f3Smrg * would do), and also bo's minimum required size as an output argument.
12077ec681f3Smrg */
12087ec681f3Smrgstatic struct pipe_resource *
12097ec681f3Smrgfd_resource_allocate_and_resolve(struct pipe_screen *pscreen,
12107ec681f3Smrg                                 const struct pipe_resource *tmpl,
12117ec681f3Smrg                                 const uint64_t *modifiers, int count,
12127ec681f3Smrg                                 uint32_t *psize)
121301e04c3fSmrg{
12147ec681f3Smrg   struct fd_screen *screen = fd_screen(pscreen);
12157ec681f3Smrg   struct fd_resource *rsc;
12167ec681f3Smrg   struct pipe_resource *prsc;
12177ec681f3Smrg   enum pipe_format format = tmpl->format;
12187ec681f3Smrg   uint32_t size;
12197ec681f3Smrg
12207ec681f3Smrg   rsc = alloc_resource_struct(pscreen, tmpl);
12217ec681f3Smrg   if (!rsc)
12227ec681f3Smrg      return NULL;
12237ec681f3Smrg
12247ec681f3Smrg   prsc = &rsc->b.b;
12257ec681f3Smrg
12267ec681f3Smrg   /* Clover creates buffers with PIPE_FORMAT_NONE: */
12277ec681f3Smrg   if ((prsc->target == PIPE_BUFFER) && (format == PIPE_FORMAT_NONE))
12287ec681f3Smrg      format = prsc->format = PIPE_FORMAT_R8_UNORM;
12297ec681f3Smrg
12307ec681f3Smrg   DBG("%" PRSC_FMT, PRSC_ARGS(prsc));
12317ec681f3Smrg
12327ec681f3Smrg   if (tmpl->bind & PIPE_BIND_SHARED)
12337ec681f3Smrg      rsc->b.is_shared = true;
12347ec681f3Smrg
12357ec681f3Smrg   fd_resource_layout_init(prsc);
12367ec681f3Smrg
12377ec681f3Smrg   enum fd_layout_type layout =
12387ec681f3Smrg      get_best_layout(screen, prsc, tmpl, modifiers, count);
12397ec681f3Smrg   if (layout == ERROR) {
12407ec681f3Smrg      free(prsc);
12417ec681f3Smrg      return NULL;
12427ec681f3Smrg   }
12437ec681f3Smrg
12447ec681f3Smrg   if (layout >= TILED)
12457ec681f3Smrg      rsc->layout.tile_mode = screen->tile_mode(prsc);
12467ec681f3Smrg   if (layout == UBWC)
12477ec681f3Smrg      rsc->layout.ubwc = true;
12487ec681f3Smrg
12497ec681f3Smrg   rsc->internal_format = format;
12507ec681f3Smrg
12517ec681f3Smrg   if (prsc->target == PIPE_BUFFER) {
12527ec681f3Smrg      assert(prsc->format == PIPE_FORMAT_R8_UNORM);
12537ec681f3Smrg      size = prsc->width0;
12547ec681f3Smrg      fdl_layout_buffer(&rsc->layout, size);
12557ec681f3Smrg   } else {
12567ec681f3Smrg      size = screen->setup_slices(rsc);
12577ec681f3Smrg   }
12587ec681f3Smrg
12597ec681f3Smrg   /* special case for hw-query buffer, which we need to allocate before we
12607ec681f3Smrg    * know the size:
12617ec681f3Smrg    */
12627ec681f3Smrg   if (size == 0) {
12637ec681f3Smrg      /* note, semi-intention == instead of & */
12647ec681f3Smrg      debug_assert(prsc->bind == PIPE_BIND_QUERY_BUFFER);
12657ec681f3Smrg      *psize = 0;
12667ec681f3Smrg      return prsc;
12677ec681f3Smrg   }
12687ec681f3Smrg
12697ec681f3Smrg   /* Set the layer size if the (non-a6xx) backend hasn't done so. */
12707ec681f3Smrg   if (rsc->layout.layer_first && !rsc->layout.layer_size) {
12717ec681f3Smrg      rsc->layout.layer_size = align(size, 4096);
12727ec681f3Smrg      size = rsc->layout.layer_size * prsc->array_size;
12737ec681f3Smrg   }
12747ec681f3Smrg
12757ec681f3Smrg   if (FD_DBG(LAYOUT))
12767ec681f3Smrg      fdl_dump_layout(&rsc->layout);
12777ec681f3Smrg
12787ec681f3Smrg   /* Hand out the resolved size. */
12797ec681f3Smrg   if (psize)
12807ec681f3Smrg      *psize = size;
12817ec681f3Smrg
12827ec681f3Smrg   return prsc;
1283af69d88dSmrg}
1284af69d88dSmrg
1285af69d88dSmrg/**
1286af69d88dSmrg * Create a new texture object, using the given template info.
1287af69d88dSmrg */
1288af69d88dSmrgstatic struct pipe_resource *
12899f464c52Smayafd_resource_create_with_modifiers(struct pipe_screen *pscreen,
12907ec681f3Smrg                                  const struct pipe_resource *tmpl,
12917ec681f3Smrg                                  const uint64_t *modifiers, int count)
1292af69d88dSmrg{
12937ec681f3Smrg   struct fd_screen *screen = fd_screen(pscreen);
12947ec681f3Smrg   struct fd_resource *rsc;
12957ec681f3Smrg   struct pipe_resource *prsc;
12967ec681f3Smrg   uint32_t size;
12977ec681f3Smrg
12987ec681f3Smrg   /* when using kmsro, scanout buffers are allocated on the display device
12997ec681f3Smrg    * create_with_modifiers() doesn't give us usage flags, so we have to
13007ec681f3Smrg    * assume that all calls with modifiers are scanout-possible
13017ec681f3Smrg    */
13027ec681f3Smrg   if (screen->ro &&
13037ec681f3Smrg       ((tmpl->bind & PIPE_BIND_SCANOUT) ||
13047ec681f3Smrg        !(count == 1 && modifiers[0] == DRM_FORMAT_MOD_INVALID))) {
13057ec681f3Smrg      struct pipe_resource scanout_templat = *tmpl;
13067ec681f3Smrg      struct renderonly_scanout *scanout;
13077ec681f3Smrg      struct winsys_handle handle;
13087ec681f3Smrg
13097ec681f3Smrg      /* note: alignment is wrong for a6xx */
13107ec681f3Smrg      scanout_templat.width0 = align(tmpl->width0, screen->info->gmem_align_w);
13117ec681f3Smrg
13127ec681f3Smrg      scanout =
13137ec681f3Smrg         renderonly_scanout_for_resource(&scanout_templat, screen->ro, &handle);
13147ec681f3Smrg      if (!scanout)
13157ec681f3Smrg         return NULL;
13167ec681f3Smrg
13177ec681f3Smrg      renderonly_scanout_destroy(scanout, screen->ro);
13187ec681f3Smrg
13197ec681f3Smrg      assert(handle.type == WINSYS_HANDLE_TYPE_FD);
13207ec681f3Smrg      rsc = fd_resource(pscreen->resource_from_handle(
13217ec681f3Smrg         pscreen, tmpl, &handle, PIPE_HANDLE_USAGE_FRAMEBUFFER_WRITE));
13227ec681f3Smrg      close(handle.handle);
13237ec681f3Smrg      if (!rsc)
13247ec681f3Smrg         return NULL;
13257ec681f3Smrg
13267ec681f3Smrg      return &rsc->b.b;
13277ec681f3Smrg   }
13287ec681f3Smrg
13297ec681f3Smrg   prsc =
13307ec681f3Smrg      fd_resource_allocate_and_resolve(pscreen, tmpl, modifiers, count, &size);
13317ec681f3Smrg   if (!prsc)
13327ec681f3Smrg      return NULL;
13337ec681f3Smrg   rsc = fd_resource(prsc);
13347ec681f3Smrg
13357ec681f3Smrg   realloc_bo(rsc, size);
13367ec681f3Smrg   if (!rsc->bo)
13377ec681f3Smrg      goto fail;
13387ec681f3Smrg
13397ec681f3Smrg   return prsc;
1340af69d88dSmrgfail:
13417ec681f3Smrg   fd_resource_destroy(pscreen, prsc);
13427ec681f3Smrg   return NULL;
1343af69d88dSmrg}
1344af69d88dSmrg
13459f464c52Smayastatic struct pipe_resource *
13469f464c52Smayafd_resource_create(struct pipe_screen *pscreen,
13477ec681f3Smrg                   const struct pipe_resource *tmpl)
13489f464c52Smaya{
13497ec681f3Smrg   const uint64_t mod = DRM_FORMAT_MOD_INVALID;
13507ec681f3Smrg   return fd_resource_create_with_modifiers(pscreen, tmpl, &mod, 1);
13519f464c52Smaya}
13529f464c52Smaya
1353af69d88dSmrg/**
1354af69d88dSmrg * Create a texture from a winsys_handle. The handle is often created in
1355af69d88dSmrg * another process by first creating a pipe texture and then calling
1356af69d88dSmrg * resource_get_handle.
1357af69d88dSmrg */
static struct pipe_resource *
fd_resource_from_handle(struct pipe_screen *pscreen,
                        const struct pipe_resource *tmpl,
                        struct winsys_handle *handle, unsigned usage)
{
   struct fd_screen *screen = fd_screen(pscreen);
   struct fd_resource *rsc = alloc_resource_struct(pscreen, tmpl);

   if (!rsc)
      return NULL;

   /* Imported handles describe only a single (level 0) slice: */
   struct fdl_slice *slice = fd_resource_slice(rsc, 0);
   struct pipe_resource *prsc = &rsc->b.b;

   DBG("%" PRSC_FMT ", modifier=%" PRIx64, PRSC_ARGS(prsc), handle->modifier);

   rsc->b.is_shared = true;

   fd_resource_layout_init(prsc);

   struct fd_bo *bo = fd_screen_bo_from_handle(pscreen, handle);
   if (!bo)
      goto fail;

   fd_resource_set_bo(rsc, bo);

   /* Fill in level-0 layout from the handle.  NOTE(review): stride/offset
    * are assumed to be in bytes — confirm against winsys_handle conventions.
    */
   rsc->internal_format = tmpl->format;
   rsc->layout.pitch0 = handle->stride;
   slice->offset = handle->offset;
   slice->size0 = handle->stride * prsc->height0;

   /* use a pitchalign of gmem_align_w pixels, because GMEM resolve for
    * lower alignments is not implemented (but possible for a6xx at least)
    *
    * for UBWC-enabled resources, layout_resource_for_modifier will further
    * validate the pitch and set the right pitchalign
    */
   rsc->layout.pitchalign =
      fdl_cpp_shift(&rsc->layout) + util_logbase2(screen->info->gmem_align_w);

   /* apply the minimum pitchalign (note: actually 4 for a3xx but doesn't
    * matter) */
   if (is_a6xx(screen) || is_a5xx(screen))
      rsc->layout.pitchalign = MAX2(rsc->layout.pitchalign, 6);
   else
      rsc->layout.pitchalign = MAX2(rsc->layout.pitchalign, 5);

   /* Reject imports whose stride is too small for the width, or which is
    * not representable with the pitchalign computed above:
    */
   if (rsc->layout.pitch0 < (prsc->width0 * rsc->layout.cpp) ||
       fd_resource_pitch(rsc, 0) != rsc->layout.pitch0)
      goto fail;

   assert(rsc->layout.cpp);

   /* Let the generation-specific hook validate/extend the layout for the
    * imported modifier (eg. UBWC):
    */
   if (screen->layout_resource_for_modifier(rsc, handle->modifier) < 0)
      goto fail;

   if (screen->ro) {
      rsc->scanout =
         renderonly_create_gpu_import_for_resource(prsc, screen->ro, NULL);
      /* failure is expected in some cases.. */
   }

   rsc->valid = true;

   return prsc;

fail:
   fd_resource_destroy(pscreen, prsc);
   return NULL;
}
1428af69d88dSmrg
142901e04c3fSmrgbool
143001e04c3fSmrgfd_render_condition_check(struct pipe_context *pctx)
143101e04c3fSmrg{
14327ec681f3Smrg   struct fd_context *ctx = fd_context(pctx);
14337ec681f3Smrg
14347ec681f3Smrg   if (!ctx->cond_query)
14357ec681f3Smrg      return true;
143601e04c3fSmrg
14377ec681f3Smrg   perf_debug("Implementing conditional rendering using a CPU read instaed of HW conditional rendering.");
143801e04c3fSmrg
14397ec681f3Smrg   union pipe_query_result res = {0};
14407ec681f3Smrg   bool wait = ctx->cond_mode != PIPE_RENDER_COND_NO_WAIT &&
14417ec681f3Smrg               ctx->cond_mode != PIPE_RENDER_COND_BY_REGION_NO_WAIT;
144201e04c3fSmrg
14437ec681f3Smrg   if (pctx->get_query_result(pctx, ctx->cond_query, wait, &res))
14447ec681f3Smrg      return (bool)res.u64 != ctx->cond_cond;
144501e04c3fSmrg
14467ec681f3Smrg   return true;
144701e04c3fSmrg}
144801e04c3fSmrg
1449af69d88dSmrgstatic void
14507ec681f3Smrgfd_invalidate_resource(struct pipe_context *pctx,
14517ec681f3Smrg                       struct pipe_resource *prsc) in_dt
1452af69d88dSmrg{
14537ec681f3Smrg   struct fd_context *ctx = fd_context(pctx);
14547ec681f3Smrg   struct fd_resource *rsc = fd_resource(prsc);
14557ec681f3Smrg
14567ec681f3Smrg   if (prsc->target == PIPE_BUFFER) {
14577ec681f3Smrg      /* Handle the glInvalidateBufferData() case:
14587ec681f3Smrg       */
14597ec681f3Smrg      invalidate_resource(rsc, PIPE_MAP_READ | PIPE_MAP_WRITE);
14607ec681f3Smrg   } else if (rsc->track->write_batch) {
14617ec681f3Smrg      /* Handle the glInvalidateFramebuffer() case, telling us that
14627ec681f3Smrg       * we can skip resolve.
14637ec681f3Smrg       */
14647ec681f3Smrg
14657ec681f3Smrg      struct fd_batch *batch = rsc->track->write_batch;
14667ec681f3Smrg      struct pipe_framebuffer_state *pfb = &batch->framebuffer;
14677ec681f3Smrg
14687ec681f3Smrg      if (pfb->zsbuf && pfb->zsbuf->texture == prsc) {
14697ec681f3Smrg         batch->resolve &= ~(FD_BUFFER_DEPTH | FD_BUFFER_STENCIL);
14707ec681f3Smrg         fd_context_dirty(ctx, FD_DIRTY_ZSA);
14717ec681f3Smrg      }
14727ec681f3Smrg
14737ec681f3Smrg      for (unsigned i = 0; i < pfb->nr_cbufs; i++) {
14747ec681f3Smrg         if (pfb->cbufs[i] && pfb->cbufs[i]->texture == prsc) {
14757ec681f3Smrg            batch->resolve &= ~(PIPE_CLEAR_COLOR0 << i);
14767ec681f3Smrg            fd_context_dirty(ctx, FD_DIRTY_FRAMEBUFFER);
14777ec681f3Smrg         }
14787ec681f3Smrg      }
14797ec681f3Smrg   }
14807ec681f3Smrg
14817ec681f3Smrg   rsc->valid = false;
148201e04c3fSmrg}
148301e04c3fSmrg
148401e04c3fSmrgstatic enum pipe_format
148501e04c3fSmrgfd_resource_get_internal_format(struct pipe_resource *prsc)
148601e04c3fSmrg{
14877ec681f3Smrg   return fd_resource(prsc)->internal_format;
148801e04c3fSmrg}
148901e04c3fSmrg
149001e04c3fSmrgstatic void
149101e04c3fSmrgfd_resource_set_stencil(struct pipe_resource *prsc,
14927ec681f3Smrg                        struct pipe_resource *stencil)
149301e04c3fSmrg{
14947ec681f3Smrg   fd_resource(prsc)->stencil = fd_resource(stencil);
149501e04c3fSmrg}
149601e04c3fSmrg
149701e04c3fSmrgstatic struct pipe_resource *
149801e04c3fSmrgfd_resource_get_stencil(struct pipe_resource *prsc)
149901e04c3fSmrg{
15007ec681f3Smrg   struct fd_resource *rsc = fd_resource(prsc);
15017ec681f3Smrg   if (rsc->stencil)
15027ec681f3Smrg      return &rsc->stencil->b.b;
15037ec681f3Smrg   return NULL;
1504af69d88dSmrg}
1505af69d88dSmrg
/* vtbl handed to u_transfer_helper (see fd_resource_screen_init) so it can
 * layer format fallbacks on top of the raw resource/transfer implementation:
 */
static const struct u_transfer_vtbl transfer_vtbl = {
   .resource_create = fd_resource_create,
   .resource_destroy = fd_resource_destroy,
   .transfer_map = fd_resource_transfer_map,
   .transfer_flush_region = fd_resource_transfer_flush_region,
   .transfer_unmap = fd_resource_transfer_unmap,
   .get_internal_format = fd_resource_get_internal_format,
   .set_stencil = fd_resource_set_stencil,
   .get_stencil = fd_resource_get_stencil,
};
151601e04c3fSmrg
/* Fallback modifier list, installed when the generation-specific screen
 * setup does not provide its own (see fd_resource_screen_init): only plain
 * linear layout.
 */
static const uint64_t supported_modifiers[] = {
   DRM_FORMAT_MOD_LINEAR,
};
15207ec681f3Smrg
15217ec681f3Smrgstatic int
15227ec681f3Smrgfd_layout_resource_for_modifier(struct fd_resource *rsc, uint64_t modifier)
15237ec681f3Smrg{
15247ec681f3Smrg   switch (modifier) {
15257ec681f3Smrg   case DRM_FORMAT_MOD_LINEAR:
15267ec681f3Smrg      /* The dri gallium frontend will pass DRM_FORMAT_MOD_INVALID to us
15277ec681f3Smrg       * when it's called through any of the non-modifier BO create entry
15287ec681f3Smrg       * points.  Other drivers will determine tiling from the kernel or
15297ec681f3Smrg       * other legacy backchannels, but for freedreno it just means
15307ec681f3Smrg       * LINEAR. */
15317ec681f3Smrg   case DRM_FORMAT_MOD_INVALID:
15327ec681f3Smrg      return 0;
15337ec681f3Smrg   default:
15347ec681f3Smrg      return -1;
15357ec681f3Smrg   }
15367ec681f3Smrg}
15377ec681f3Smrg
/**
 * GL_EXT_memory_object: create a resource backed by an existing memory
 * object's BO.
 *
 * NOTE(review): the 'offset' parameter is not referenced in this body —
 * presumably only offset=0 imports are supported; confirm against callers.
 */
static struct pipe_resource *
fd_resource_from_memobj(struct pipe_screen *pscreen,
                        const struct pipe_resource *tmpl,
                        struct pipe_memory_object *pmemobj, uint64_t offset)
{
   struct fd_screen *screen = fd_screen(pscreen);
   struct fd_memory_object *memobj = fd_memory_object(pmemobj);
   struct pipe_resource *prsc;
   struct fd_resource *rsc;
   uint32_t size;
   assert(memobj->bo);

   /* We shouldn't get a scanout buffer here. */
   assert(!(tmpl->bind & PIPE_BIND_SCANOUT));

   /* Pick the layout modifier: explicit-linear requests stay linear,
    * otherwise wide-enough images on a6xx use UBWC compression:
    */
   uint64_t modifiers = DRM_FORMAT_MOD_INVALID;
   if (tmpl->bind & PIPE_BIND_LINEAR) {
      modifiers = DRM_FORMAT_MOD_LINEAR;
   } else if (is_a6xx(screen) && tmpl->width0 >= FDL_MIN_UBWC_WIDTH) {
      modifiers = DRM_FORMAT_MOD_QCOM_COMPRESSED;
   }

   /* Allocate new pipe resource. */
   prsc = fd_resource_allocate_and_resolve(pscreen, tmpl, &modifiers, 1, &size);
   if (!prsc)
      return NULL;
   rsc = fd_resource(prsc);
   rsc->b.is_shared = true;

   /* bo's size has to be large enough, otherwise cleanup resource and fail
    * gracefully.
    */
   if (fd_bo_size(memobj->bo) < size) {
      fd_resource_destroy(pscreen, prsc);
      return NULL;
   }

   /* Share the bo with the memory object. */
   fd_resource_set_bo(rsc, fd_bo_ref(memobj->bo));

   return prsc;
}
15807ec681f3Smrg
15817ec681f3Smrgstatic struct pipe_memory_object *
15827ec681f3Smrgfd_memobj_create_from_handle(struct pipe_screen *pscreen,
15837ec681f3Smrg                             struct winsys_handle *whandle, bool dedicated)
15847ec681f3Smrg{
15857ec681f3Smrg   struct fd_memory_object *memobj = CALLOC_STRUCT(fd_memory_object);
15867ec681f3Smrg   if (!memobj)
15877ec681f3Smrg      return NULL;
15887ec681f3Smrg
15897ec681f3Smrg   struct fd_bo *bo = fd_screen_bo_from_handle(pscreen, whandle);
15907ec681f3Smrg   if (!bo) {
15917ec681f3Smrg      free(memobj);
15927ec681f3Smrg      return NULL;
15937ec681f3Smrg   }
15947ec681f3Smrg
15957ec681f3Smrg   memobj->b.dedicated = dedicated;
15967ec681f3Smrg   memobj->bo = bo;
15977ec681f3Smrg
15987ec681f3Smrg   return &memobj->b;
15997ec681f3Smrg}
16007ec681f3Smrg
16017ec681f3Smrgstatic void
16027ec681f3Smrgfd_memobj_destroy(struct pipe_screen *pscreen,
16037ec681f3Smrg                  struct pipe_memory_object *pmemobj)
16047ec681f3Smrg{
16057ec681f3Smrg   struct fd_memory_object *memobj = fd_memory_object(pmemobj);
16067ec681f3Smrg
16077ec681f3Smrg   assert(memobj->bo);
16087ec681f3Smrg   fd_bo_del(memobj->bo);
16097ec681f3Smrg
16107ec681f3Smrg   free(pmemobj);
16117ec681f3Smrg}
16127ec681f3Smrg
void
fd_resource_screen_init(struct pipe_screen *pscreen)
{
   struct fd_screen *screen = fd_screen(pscreen);
   /* pre-a4xx: have u_transfer_helper fake RGTC formats */
   bool fake_rgtc = screen->gen < 4;

   /* Route create/destroy through u_transfer_helper so it can interpose
    * for format fallbacks:
    */
   pscreen->resource_create = u_transfer_helper_resource_create;
   /* NOTE: u_transfer_helper does not yet support the _with_modifiers()
    * variant:
    */
   pscreen->resource_create_with_modifiers = fd_resource_create_with_modifiers;
   pscreen->resource_from_handle = fd_resource_from_handle;
   pscreen->resource_get_handle = fd_resource_get_handle;
   pscreen->resource_destroy = u_transfer_helper_resource_destroy;

   pscreen->transfer_helper =
      u_transfer_helper_create(&transfer_vtbl, true, false, fake_rgtc, true);

   /* Install generic fallbacks for anything the generation-specific
    * screen setup did not provide:
    */
   if (!screen->layout_resource_for_modifier)
      screen->layout_resource_for_modifier = fd_layout_resource_for_modifier;
   if (!screen->supported_modifiers) {
      screen->supported_modifiers = supported_modifiers;
      screen->num_supported_modifiers = ARRAY_SIZE(supported_modifiers);
   }

   /* GL_EXT_memory_object */
   pscreen->memobj_create_from_handle = fd_memobj_create_from_handle;
   pscreen->memobj_destroy = fd_memobj_destroy;
   pscreen->resource_from_memobj = fd_resource_from_memobj;
}
1643af69d88dSmrg
static void
fd_get_sample_position(struct pipe_context *context, unsigned sample_count,
                       unsigned sample_index, float *pos_out)
{
   /* Sample positions in 1/16-pixel units.  Table layout copied from
    * nouveau/nv50; the position values themselves come from the blob
    * driver.
    */
   static const uint8_t pos1[1][2] = {{0x8, 0x8}};
   static const uint8_t pos2[2][2] = {{0xc, 0xc}, {0x4, 0x4}};
   static const uint8_t pos4[4][2] = {{0x6, 0x2},
                                      {0xe, 0x6},
                                      {0x2, 0xa},
                                      {0xa, 0xe}};
   /* TODO needs to be verified on supported hw */
   static const uint8_t pos8[8][2] = {{0x9, 0x5}, {0x7, 0xb}, {0xd, 0x9},
                                      {0x5, 0x3}, {0x3, 0xd}, {0x1, 0x7},
                                      {0xb, 0xf}, {0xf, 0x1}};

   const uint8_t(*table)[2];

   if (sample_count == 1) {
      table = pos1;
   } else if (sample_count == 2) {
      table = pos2;
   } else if (sample_count == 4) {
      table = pos4;
   } else if (sample_count == 8) {
      table = pos8;
   } else {
      assert(0);
      return;
   }

   /* Scale from 1/16-pixel units to pixel-relative [0, 1) coordinates: */
   pos_out[0] = table[sample_index][0] / 16.0f;
   pos_out[1] = table[sample_index][1] / 16.0f;
}
16849f464c52Smaya
16859f464c52Smayastatic void
16867ec681f3Smrgfd_blit_pipe(struct pipe_context *pctx,
16877ec681f3Smrg             const struct pipe_blit_info *blit_info) in_dt
16889f464c52Smaya{
16897ec681f3Smrg   /* wrap fd_blit to return void */
16907ec681f3Smrg   fd_blit(pctx, blit_info);
16919f464c52Smaya}
16929f464c52Smaya
void
fd_resource_context_init(struct pipe_context *pctx)
{
   /* Map/unmap/flush go through u_transfer_helper so the format fallbacks
    * configured in fd_resource_screen_init() also apply to context
    * transfers:
    */
   pctx->buffer_map = u_transfer_helper_transfer_map;
   pctx->texture_map = u_transfer_helper_transfer_map;
   pctx->transfer_flush_region = u_transfer_helper_transfer_flush_region;
   pctx->buffer_unmap = u_transfer_helper_transfer_unmap;
   pctx->texture_unmap = u_transfer_helper_transfer_unmap;
   /* subdata uses the generic map/copy/unmap implementation: */
   pctx->buffer_subdata = u_default_buffer_subdata;
   pctx->texture_subdata = u_default_texture_subdata;
   pctx->create_surface = fd_create_surface;
   pctx->surface_destroy = fd_surface_destroy;
   pctx->resource_copy_region = fd_resource_copy_region;
   pctx->blit = fd_blit_pipe;
   pctx->flush_resource = fd_flush_resource;
   pctx->invalidate_resource = fd_invalidate_resource;
   pctx->get_sample_position = fd_get_sample_position;
}
1711