etnaviv_transfer.c revision 01e04c3f
/*
 * Copyright (c) 2012-2015 Etnaviv Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Wladimir J. van der Laan <laanwj@gmail.com>
 */

#include "etnaviv_transfer.h"
#include "etnaviv_clear_blit.h"
#include "etnaviv_context.h"
#include "etnaviv_debug.h"
#include "etnaviv_screen.h"

#include "pipe/p_defines.h"
#include "pipe/p_format.h"
#include "pipe/p_screen.h"
#include "pipe/p_state.h"
#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_surface.h"
#include "util/u_transfer.h"

#include "hw/common_3d.xml.h"

#include <drm_fourcc.h>

/* Compute the byte offset of a box into a 1D/2D/3D buffer. The box must be
 * aligned to the block width and height of the underlying format. */
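/* A worked example with hypothetical values: for PIPE_FORMAT_B8G8R8A8_UNORM
 * (1x1 blocks of 4 bytes), stride 2048 and box (x=8, y=4, z=0) this yields
 * 0 + 4 * 2048 + 8 * 4 = 8224 bytes. */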
static inline size_t
etna_compute_offset(enum pipe_format format, const struct pipe_box *box,
                    size_t stride, size_t layer_stride)
{
   return box->z * layer_stride +
          box->y / util_format_get_blockheight(format) * stride +
          box->x / util_format_get_blockwidth(format) *
             util_format_get_blocksize(format);
}

static void
etna_transfer_unmap(struct pipe_context *pctx, struct pipe_transfer *ptrans)
{
   struct etna_context *ctx = etna_context(pctx);
   struct etna_transfer *trans = etna_transfer(ptrans);
   struct etna_resource *rsc = etna_resource(ptrans->resource);

   /* XXX
    * When writing to a resource that is already in use, we should replace it
    * with a completely new buffer and free the old one using a fenced free.
    * The trickiest case to implement will be a tiled or supertiled surface
    * with a partial write whose target is not aligned to 4/64. */
   assert(ptrans->level <= rsc->base.last_level);

   if (rsc->texture && !etna_resource_newer(rsc, etna_resource(rsc->texture)))
      rsc = etna_resource(rsc->texture); /* switch to using the texture resource */

   /*
    * Temporary resources are always pulled into the CPU domain, so they must
    * be pushed back into the GPU domain before the RS executes the blit to
    * the base resource.
    */
   if (trans->rsc)
      etna_bo_cpu_fini(etna_resource(trans->rsc)->bo);

   if (ptrans->usage & PIPE_TRANSFER_WRITE) {
      if (trans->rsc) {
         /* We have a temporary resource due to either tile status or
          * tiling format. Write back the updated buffer contents.
          * FIXME: we need to invalidate the tile status. */
         etna_copy_resource_box(pctx, ptrans->resource, trans->rsc, ptrans->level, &ptrans->box);
      } else if (trans->staging) {
         /* Map the buffer object and write the staging data back into it. */
         struct etna_resource_level *res_level = &rsc->levels[ptrans->level];
         void *mapped = etna_bo_map(rsc->bo) + res_level->offset;

         if (rsc->layout == ETNA_LAYOUT_TILED) {
            etna_texture_tile(
               mapped + ptrans->box.z * res_level->layer_stride,
               trans->staging, ptrans->box.x, ptrans->box.y,
               res_level->stride, ptrans->box.width, ptrans->box.height,
               ptrans->stride, util_format_get_blocksize(rsc->base.format));
         } else if (rsc->layout == ETNA_LAYOUT_LINEAR) {
            util_copy_box(mapped, rsc->base.format, res_level->stride,
                          res_level->layer_stride, ptrans->box.x,
                          ptrans->box.y, ptrans->box.z, ptrans->box.width,
                          ptrans->box.height, ptrans->box.depth,
                          trans->staging, ptrans->stride,
                          ptrans->layer_stride, 0, 0, 0 /* src x,y,z */);
         } else {
            BUG("unsupported tiling %i", rsc->layout);
         }

         FREE(trans->staging);
      }

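      /* Bump the modification seqno; etna_resource_newer() compares these to
       * decide which of a texture/render resource pair holds the latest
       * contents. */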
      rsc->seqno++;

      if (rsc->base.bind & PIPE_BIND_SAMPLER_VIEW) {
         ctx->dirty |= ETNA_DIRTY_TEXTURE_CACHES;
      }
   }

   /*
    * Transfers without a temporary are only pulled into the CPU domain if they
    * are not mapped unsynchronized. In that case they must be pushed back into
    * the GPU domain once CPU access is finished.
    */
   if (!trans->rsc && !(ptrans->usage & PIPE_TRANSFER_UNSYNCHRONIZED))
      etna_bo_cpu_fini(rsc->bo);

   pipe_resource_reference(&trans->rsc, NULL);
   pipe_resource_reference(&ptrans->resource, NULL);
   slab_free(&ctx->transfer_pool, trans);
}

static void *
etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
                  unsigned level,
                  unsigned usage,
                  const struct pipe_box *box,
                  struct pipe_transfer **out_transfer)
{
   struct etna_context *ctx = etna_context(pctx);
   struct etna_resource *rsc = etna_resource(prsc);
   struct etna_transfer *trans;
   struct pipe_transfer *ptrans;
   enum pipe_format format = prsc->format;

   trans = slab_alloc(&ctx->transfer_pool);
   if (!trans)
      return NULL;

   /* slab_alloc() doesn't zero */
   memset(trans, 0, sizeof(*trans));

   ptrans = &trans->base;
   pipe_resource_reference(&ptrans->resource, prsc);
   ptrans->level = level;
   ptrans->usage = usage;
   ptrans->box = *box;

   assert(level <= prsc->last_level);

   /* Upgrade DISCARD_RANGE to WHOLE_RESOURCE if the whole resource is
    * being mapped. If we add buffer reallocation to avoid CPU/GPU sync this
    * check needs to be extended to coherent mappings and shared resources.
    */
   if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
       !(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
       prsc->last_level == 0 &&
       prsc->width0 == box->width &&
       prsc->height0 == box->height &&
       prsc->depth0 == box->depth &&
       prsc->array_size == 1) {
      usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
   }

   if (rsc->texture && !etna_resource_newer(rsc, etna_resource(rsc->texture))) {
      /* We have a texture resource which is the same age or newer than the
       * render resource. Use the texture resource, which avoids bouncing
       * pixels between the two, and which we can de-tile in software. */
      rsc = etna_resource(rsc->texture);
   } else if (rsc->ts_bo ||
              (rsc->layout != ETNA_LAYOUT_LINEAR &&
               util_format_get_blocksize(format) > 1 &&
               /* HALIGN 4 resources are incompatible with the resolve engine,
                * so fall back to using software to detile this resource. */
               rsc->halign != TEXTURE_HALIGN_FOUR)) {
      /* If the surface has tile status, we need to resolve it first.
       * The strategy we implement here is to use the RS to copy the
       * depth buffer, filling in the "holes" where the tile status
       * indicates that it's clear. We also do this for tiled
       * resources, but only if the RS can blit them. */
      if (usage & PIPE_TRANSFER_MAP_DIRECTLY) {
         slab_free(&ctx->transfer_pool, trans);
         BUG("unsupported transfer flags %#x with tile status/tiled layout", usage);
         return NULL;
      }

      if (prsc->depth0 > 1) {
         slab_free(&ctx->transfer_pool, trans);
         BUG("resource has depth >1 with tile status");
         return NULL;
      }

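      /* Allocate a linear temporary bound as a render target, so the copy to
       * and from the base resource can take the RS blit path (hence also the
       * alignment fixup below). */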
      struct pipe_resource templ = *prsc;
      templ.nr_samples = 0;
      templ.bind = PIPE_BIND_RENDER_TARGET;

      trans->rsc = etna_resource_alloc(pctx->screen, ETNA_LAYOUT_LINEAR,
                                       DRM_FORMAT_MOD_LINEAR, &templ);
      if (!trans->rsc) {
         slab_free(&ctx->transfer_pool, trans);
         return NULL;
      }

      if (!ctx->specs.use_blt) {
         /* Need to align the transfer region to satisfy RS restrictions, as we
          * really want to hit the RS blit path here.
          */
         unsigned w_align, h_align;

         if (rsc->layout & ETNA_LAYOUT_BIT_SUPER) {
            w_align = h_align = 64;
         } else {
            w_align = ETNA_RS_WIDTH_MASK + 1;
            h_align = ETNA_RS_HEIGHT_MASK + 1;
         }
         h_align *= ctx->screen->specs.pixel_pipes;

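         /* Grow the box to cover the aligned origin: first widen it by the
          * distance the origin moves when rounded down, then round the origin
          * down and pad the size up to the RS block granularity. */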
         ptrans->box.width += ptrans->box.x & (w_align - 1);
         ptrans->box.x = ptrans->box.x & ~(w_align - 1);
         ptrans->box.width = align(ptrans->box.width, (ETNA_RS_WIDTH_MASK + 1));
         ptrans->box.height += ptrans->box.y & (h_align - 1);
         ptrans->box.y = ptrans->box.y & ~(h_align - 1);
         ptrans->box.height = align(ptrans->box.height,
                                    (ETNA_RS_HEIGHT_MASK + 1) *
                                     ctx->screen->specs.pixel_pipes);
      }

      if (!(usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE))
         etna_copy_resource_box(pctx, trans->rsc, prsc, level, &ptrans->box);

      /* Switch to using the temporary resource instead */
      rsc = etna_resource(trans->rsc);
   }

   struct etna_resource_level *res_level = &rsc->levels[level];

   /* XXX we don't handle PIPE_TRANSFER_FLUSH_EXPLICIT; this flag can be
    * ignored when mapping in-place, but when using a staging copy we need to
    * fire off the copy operation in transfer_flush_region (currently a no-op)
    * instead of in unmap. This needs to be handled to support at least the
    * ARB_map_buffer_range extension.
    */
   /* XXX we don't take care of current operations on the resource; at some
      point in the pipeline that has not yet been executed, the resource can
      be:

      - bound as surface
      - bound through vertex buffer
      - bound through index buffer
      - bound in sampler view
      - used in clear_render_target / clear_depth_stencil operation
      - used in blit
      - used in resource_copy_region

      How do other drivers record this information over the course of the
      rendering pipeline? Is it necessary at all? It only matters if we want to
      provide a fast path that maps the resource directly (and for
      PIPE_TRANSFER_MAP_DIRECTLY) without forcing a sync. We also need to know
      whether the resource is in use to determine whether a sync is needed at
      all (or we could always sync, at the expense of performance).

      A conservative approximation without too much overhead would be to mark
      all resources that have been bound at some point as busy. The drawback is
      that accessing resources that were bound once but have not been used for
      a while would still carry a performance penalty. On the other hand, the
      program could use PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE or
      PIPE_TRANSFER_UNSYNCHRONIZED to avoid this in the first place...

      A) Use an in-pipe copy engine and queue the copy operation after unmap,
         so that the copy is performed once all current commands have been
         executed. Using the RS is possible, though not necessarily always
         efficient; it can also do any kind of tiling for us. Only possible
         when PIPE_TRANSFER_DISCARD_RANGE is set.
      B) Discard the entire resource (or at least the mipmap level) and
         allocate new memory for it. Only possible when mapping the entire
         resource or when PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE is set.
    */

   /*
    * Pull resources into the CPU domain. Only skipped for unsynchronized
    * transfers without a temporary resource.
    */
   if (trans->rsc || !(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
      uint32_t prep_flags = 0;

      /*
       * Always flush if we have the temporary resource and a copy to it is
       * outstanding. Otherwise infer the flush requirement from resource
       * access and current GPU usage (reads must wait for GPU writes, writes
       * must have exclusive access to the buffer).
       */
      if ((trans->rsc && (etna_resource(trans->rsc)->status & ETNA_PENDING_WRITE)) ||
          (!trans->rsc &&
           (((usage & PIPE_TRANSFER_READ) && (rsc->status & ETNA_PENDING_WRITE)) ||
           ((usage & PIPE_TRANSFER_WRITE) && rsc->status))))
         pctx->flush(pctx, NULL, 0);

      if (usage & PIPE_TRANSFER_READ)
         prep_flags |= DRM_ETNA_PREP_READ;
      if (usage & PIPE_TRANSFER_WRITE)
         prep_flags |= DRM_ETNA_PREP_WRITE;

      if (etna_bo_cpu_prep(rsc->bo, prep_flags))
         goto fail_prep;
   }

   /* map buffer object */
   void *mapped = etna_bo_map(rsc->bo);
   if (!mapped)
      goto fail;

   *out_transfer = ptrans;

   if (rsc->layout == ETNA_LAYOUT_LINEAR) {
      ptrans->stride = res_level->stride;
      ptrans->layer_stride = res_level->layer_stride;

      return mapped + res_level->offset +
             etna_compute_offset(prsc->format, box, res_level->stride,
                                 res_level->layer_stride);
   } else {
      unsigned divSizeX = util_format_get_blockwidth(format);
      unsigned divSizeY = util_format_get_blockheight(format);

      /* No direct mappings of tiled, since we need to manually
       * tile/untile.
       */
      if (usage & PIPE_TRANSFER_MAP_DIRECTLY)
         goto fail;

      mapped += res_level->offset;
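      /* The staging buffer is tightly packed for the requested box: one
       * block-aligned row per line and one 2D slice per layer of depth. */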
      ptrans->stride = align(box->width, divSizeX) * util_format_get_blocksize(format); /* row stride in bytes */
      ptrans->layer_stride = align(box->height, divSizeY) * ptrans->stride;
      size_t size = ptrans->layer_stride * box->depth;

      trans->staging = MALLOC(size);
      if (!trans->staging)
         goto fail;

      if (usage & PIPE_TRANSFER_READ) {
         if (rsc->layout == ETNA_LAYOUT_TILED) {
            etna_texture_untile(trans->staging,
                                mapped + ptrans->box.z * res_level->layer_stride,
                                ptrans->box.x, ptrans->box.y, res_level->stride,
                                ptrans->box.width, ptrans->box.height, ptrans->stride,
                                util_format_get_blocksize(rsc->base.format));
         } else if (rsc->layout == ETNA_LAYOUT_LINEAR) {
            util_copy_box(trans->staging, rsc->base.format, ptrans->stride,
                          ptrans->layer_stride, 0, 0, 0, /* dst x,y,z */
                          ptrans->box.width, ptrans->box.height,
                          ptrans->box.depth, mapped, res_level->stride,
                          res_level->layer_stride, ptrans->box.x,
                          ptrans->box.y, ptrans->box.z);
         } else {
            /* TODO supertiling */
            BUG("unsupported tiling %i for reading", rsc->layout);
         }
      }

      return trans->staging;
   }

fail:
   etna_bo_cpu_fini(rsc->bo);
fail_prep:
   etna_transfer_unmap(pctx, ptrans);
   return NULL;
}

static void
etna_transfer_flush_region(struct pipe_context *pctx,
                           struct pipe_transfer *transfer,
                           const struct pipe_box *box)
{
   /* NOOP for now */
}

void
etna_transfer_init(struct pipe_context *pctx)
{
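   /* Install the Gallium transfer entry points; the subdata hooks fall back
    * to the generic u_transfer helpers, which implement subdata in terms of
    * map/unmap. */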
   pctx->transfer_map = etna_transfer_map;
   pctx->transfer_flush_region = etna_transfer_flush_region;
   pctx->transfer_unmap = etna_transfer_unmap;
   pctx->buffer_subdata = u_default_buffer_subdata;
   pctx->texture_subdata = u_default_texture_subdata;
}