/*
 * Copyright (c) 2012-2015 Etnaviv Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Wladimir J. van der Laan <laanwj@gmail.com>
 */
26b8e80941Smrg
27b8e80941Smrg#include "etnaviv_transfer.h"
28b8e80941Smrg#include "etnaviv_clear_blit.h"
29b8e80941Smrg#include "etnaviv_context.h"
30b8e80941Smrg#include "etnaviv_debug.h"
31b8e80941Smrg#include "etnaviv_etc2.h"
32b8e80941Smrg#include "etnaviv_screen.h"
33b8e80941Smrg
34b8e80941Smrg#include "pipe/p_defines.h"
35b8e80941Smrg#include "pipe/p_format.h"
36b8e80941Smrg#include "pipe/p_screen.h"
37b8e80941Smrg#include "pipe/p_state.h"
38b8e80941Smrg#include "util/u_format.h"
39b8e80941Smrg#include "util/u_inlines.h"
40b8e80941Smrg#include "util/u_memory.h"
41b8e80941Smrg#include "util/u_surface.h"
42b8e80941Smrg#include "util/u_transfer.h"
43b8e80941Smrg
44b8e80941Smrg#include "hw/common_3d.xml.h"
45b8e80941Smrg
46b8e80941Smrg#include "drm-uapi/drm_fourcc.h"
47b8e80941Smrg
48b8e80941Smrg/* Compute offset into a 1D/2D/3D buffer of a certain box.
49b8e80941Smrg * This box must be aligned to the block width and height of the
50b8e80941Smrg * underlying format. */
51b8e80941Smrgstatic inline size_t
52b8e80941Smrgetna_compute_offset(enum pipe_format format, const struct pipe_box *box,
53b8e80941Smrg                    size_t stride, size_t layer_stride)
54b8e80941Smrg{
55b8e80941Smrg   return box->z * layer_stride +
56b8e80941Smrg          box->y / util_format_get_blockheight(format) * stride +
57b8e80941Smrg          box->x / util_format_get_blockwidth(format) *
58b8e80941Smrg             util_format_get_blocksize(format);
59b8e80941Smrg}
60b8e80941Smrg
61b8e80941Smrgstatic void etna_patch_data(void *buffer, const struct pipe_transfer *ptrans)
62b8e80941Smrg{
63b8e80941Smrg   struct pipe_resource *prsc = ptrans->resource;
64b8e80941Smrg   struct etna_resource *rsc = etna_resource(prsc);
65b8e80941Smrg   struct etna_resource_level *level = &rsc->levels[ptrans->level];
66b8e80941Smrg
67b8e80941Smrg   if (likely(!etna_etc2_needs_patching(prsc)))
68b8e80941Smrg      return;
69b8e80941Smrg
70b8e80941Smrg   if (level->patched)
71b8e80941Smrg      return;
72b8e80941Smrg
73b8e80941Smrg   /* do have the offsets of blocks to patch? */
74b8e80941Smrg   if (!level->patch_offsets) {
75b8e80941Smrg      level->patch_offsets = CALLOC_STRUCT(util_dynarray);
76b8e80941Smrg
77b8e80941Smrg      etna_etc2_calculate_blocks(buffer, ptrans->stride,
78b8e80941Smrg                                         ptrans->box.width, ptrans->box.height,
79b8e80941Smrg                                         prsc->format, level->patch_offsets);
80b8e80941Smrg   }
81b8e80941Smrg
82b8e80941Smrg   etna_etc2_patch(buffer, level->patch_offsets);
83b8e80941Smrg
84b8e80941Smrg   level->patched = true;
85b8e80941Smrg}
86b8e80941Smrg
87b8e80941Smrgstatic void etna_unpatch_data(void *buffer, const struct pipe_transfer *ptrans)
88b8e80941Smrg{
89b8e80941Smrg   struct pipe_resource *prsc = ptrans->resource;
90b8e80941Smrg   struct etna_resource *rsc = etna_resource(prsc);
91b8e80941Smrg   struct etna_resource_level *level = &rsc->levels[ptrans->level];
92b8e80941Smrg
93b8e80941Smrg   if (!level->patched)
94b8e80941Smrg      return;
95b8e80941Smrg
96b8e80941Smrg   etna_etc2_patch(buffer, level->patch_offsets);
97b8e80941Smrg
98b8e80941Smrg   level->patched = false;
99b8e80941Smrg}
100b8e80941Smrg
/*
 * pipe_context::transfer_unmap implementation. For write transfers this
 * pushes the CPU-side changes back to the GPU resource: either by blitting
 * the temporary linear resource back into place (trans->rsc), or by
 * (re)tiling the malloc'ed staging buffer into the BO. It then releases
 * the CPU-domain access taken in etna_transfer_map and frees the transfer.
 */
static void
etna_transfer_unmap(struct pipe_context *pctx, struct pipe_transfer *ptrans)
{
   struct etna_context *ctx = etna_context(pctx);
   struct etna_transfer *trans = etna_transfer(ptrans);
   struct etna_resource *rsc = etna_resource(ptrans->resource);

   /* XXX
    * When writing to a resource that is already in use, replace the resource
    * with a completely new buffer
    * and free the old one using a fenced free.
    * The most tricky case to implement will be: tiled or supertiled surface,
    * partial write, target not aligned to 4/64. */
   assert(ptrans->level <= rsc->base.last_level);

   /* Mirror the resource selection done in etna_transfer_map, so we operate
    * on the same resource that was actually mapped. */
   if (rsc->texture && !etna_resource_newer(rsc, etna_resource(rsc->texture)))
      rsc = etna_resource(rsc->texture); /* switch to using the texture resource */

   /*
    * Temporary resources are always pulled into the CPU domain, must push them
    * back into GPU domain before the RS execs the blit to the base resource.
    */
   if (trans->rsc)
      etna_bo_cpu_fini(etna_resource(trans->rsc)->bo);

   if (ptrans->usage & PIPE_TRANSFER_WRITE) {
      if (trans->rsc) {
         /* We have a temporary resource due to either tile status or
          * tiling format. Write back the updated buffer contents.
          * FIXME: we need to invalidate the tile status. */
         etna_copy_resource_box(pctx, ptrans->resource, trans->rsc, ptrans->level, &ptrans->box);
      } else if (trans->staging) {
         /* map buffer object */
         struct etna_resource_level *res_level = &rsc->levels[ptrans->level];

         /* Write the staging buffer back: software tiling for TILED,
          * plain box copy for LINEAR. Other layouts never get a staging
          * buffer in etna_transfer_map, so hitting them here is a bug. */
         if (rsc->layout == ETNA_LAYOUT_TILED) {
            etna_texture_tile(
               trans->mapped + ptrans->box.z * res_level->layer_stride,
               trans->staging, ptrans->box.x, ptrans->box.y,
               res_level->stride, ptrans->box.width, ptrans->box.height,
               ptrans->stride, util_format_get_blocksize(rsc->base.format));
         } else if (rsc->layout == ETNA_LAYOUT_LINEAR) {
            util_copy_box(trans->mapped, rsc->base.format, res_level->stride,
                          res_level->layer_stride, ptrans->box.x,
                          ptrans->box.y, ptrans->box.z, ptrans->box.width,
                          ptrans->box.height, ptrans->box.depth,
                          trans->staging, ptrans->stride,
                          ptrans->layer_stride, 0, 0, 0 /* src x,y,z */);
         } else {
            BUG("unsupported tiling %i", rsc->layout);
         }

         FREE(trans->staging);
      }

      /* Bump the sequence number so etna_resource_newer() sees this copy
       * as more recent than any shadow resource. */
      rsc->seqno++;

      if (rsc->base.bind & PIPE_BIND_SAMPLER_VIEW) {
         ctx->dirty |= ETNA_DIRTY_TEXTURE_CACHES;
      }
   }

   /* We need to have the patched data ready for the GPU. */
   etna_patch_data(trans->mapped, ptrans);

   /*
    * Transfers without a temporary are only pulled into the CPU domain if they
    * are not mapped unsynchronized. If they are, must push them back into GPU
    * domain after CPU access is finished.
    */
   if (!trans->rsc && !(ptrans->usage & PIPE_TRANSFER_UNSYNCHRONIZED))
      etna_bo_cpu_fini(rsc->bo);

   pipe_resource_reference(&trans->rsc, NULL);
   pipe_resource_reference(&ptrans->resource, NULL);
   slab_free(&ctx->transfer_pool, trans);
}
178b8e80941Smrg
/*
 * pipe_context::transfer_map implementation. Depending on layout and usage
 * this either maps the BO directly (linear layout), maps via a malloc'ed
 * staging buffer that is software (un)tiled, or first blits into a
 * temporary linear GPU resource (resources with tile status, or tiled
 * layouts the RS can resolve). etna_transfer_unmap performs the reverse.
 */
static void *
etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
                  unsigned level,
                  unsigned usage,
                  const struct pipe_box *box,
                  struct pipe_transfer **out_transfer)
{
   struct etna_context *ctx = etna_context(pctx);
   struct etna_resource *rsc = etna_resource(prsc);
   struct etna_transfer *trans;
   struct pipe_transfer *ptrans;
   enum pipe_format format = prsc->format;

   trans = slab_alloc(&ctx->transfer_pool);
   if (!trans)
      return NULL;

   /* slab_alloc() doesn't zero */
   memset(trans, 0, sizeof(*trans));

   ptrans = &trans->base;
   pipe_resource_reference(&ptrans->resource, prsc);
   ptrans->level = level;
   ptrans->usage = usage;
   ptrans->box = *box;

   assert(level <= prsc->last_level);

   /* Upgrade DISCARD_RANGE to WHOLE_RESOURCE if the whole resource is
    * being mapped. If we add buffer reallocation to avoid CPU/GPU sync this
    * check needs to be extended to coherent mappings and shared resources.
    */
   if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
       !(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
       prsc->last_level == 0 &&
       prsc->width0 == box->width &&
       prsc->height0 == box->height &&
       prsc->depth0 == box->depth &&
       prsc->array_size == 1) {
      usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
   }

   if (rsc->texture && !etna_resource_newer(rsc, etna_resource(rsc->texture))) {
      /* We have a texture resource which is the same age or newer than the
       * render resource. Use the texture resource, which avoids bouncing
       * pixels between the two resources, and we can de-tile it in s/w. */
      rsc = etna_resource(rsc->texture);
   } else if (rsc->ts_bo ||
              (rsc->layout != ETNA_LAYOUT_LINEAR &&
               util_format_get_blocksize(format) > 1 &&
               /* HALIGN 4 resources are incompatible with the resolve engine,
                * so fall back to using software to detile this resource. */
               rsc->halign != TEXTURE_HALIGN_FOUR)) {
      /* If the surface has tile status, we need to resolve it first.
       * The strategy we implement here is to use the RS to copy the
       * depth buffer, filling in the "holes" where the tile status
       * indicates that it's clear. We also do this for tiled
       * resources, but only if the RS can blit them. */
      if (usage & PIPE_TRANSFER_MAP_DIRECTLY) {
         slab_free(&ctx->transfer_pool, trans);
         BUG("unsupported transfer flags %#x with tile status/tiled layout", usage);
         return NULL;
      }

      if (prsc->depth0 > 1) {
         slab_free(&ctx->transfer_pool, trans);
         BUG("resource has depth >1 with tile status");
         return NULL;
      }

      /* Allocate a temporary linear render target the RS/BLT can blit
       * into; the transfer then maps this temporary instead. */
      struct pipe_resource templ = *prsc;
      templ.nr_samples = 0;
      templ.bind = PIPE_BIND_RENDER_TARGET;

      trans->rsc = etna_resource_alloc(pctx->screen, ETNA_LAYOUT_LINEAR,
                                       ETNA_ADDRESSING_MODE_TILED, DRM_FORMAT_MOD_LINEAR,
                                       &templ);
      if (!trans->rsc) {
         slab_free(&ctx->transfer_pool, trans);
         return NULL;
      }

      if (!ctx->specs.use_blt) {
         /* Need to align the transfer region to satisfy RS restrictions, as we
          * really want to hit the RS blit path here.
          */
         unsigned w_align, h_align;

         if (rsc->layout & ETNA_LAYOUT_BIT_SUPER) {
            w_align = h_align = 64;
         } else {
            w_align = ETNA_RS_WIDTH_MASK + 1;
            h_align = ETNA_RS_HEIGHT_MASK + 1;
         }
         h_align *= ctx->screen->specs.pixel_pipes;

         /* Grow the box to aligned origins, then pad width/height up to
          * the RS granularity (height also multiplied by pixel pipes). */
         ptrans->box.width += ptrans->box.x & (w_align - 1);
         ptrans->box.x = ptrans->box.x & ~(w_align - 1);
         ptrans->box.width = align(ptrans->box.width, (ETNA_RS_WIDTH_MASK + 1));
         ptrans->box.height += ptrans->box.y & (h_align - 1);
         ptrans->box.y = ptrans->box.y & ~(h_align - 1);
         ptrans->box.height = align(ptrans->box.height,
                                    (ETNA_RS_HEIGHT_MASK + 1) *
                                     ctx->screen->specs.pixel_pipes);
      }

      /* Unless the whole resource is discarded, populate the temporary
       * with the current contents so partial writes stay correct. */
      if (!(usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE))
         etna_copy_resource_box(pctx, trans->rsc, prsc, level, &ptrans->box);

      /* Switch to using the temporary resource instead */
      rsc = etna_resource(trans->rsc);
   }

   struct etna_resource_level *res_level = &rsc->levels[level];

   /* XXX we don't handle PIPE_TRANSFER_FLUSH_EXPLICIT; this flag can be ignored
    * when mapping in-place,
    * but when not in place we need to fire off the copy operation in
    * transfer_flush_region (currently
    * a no-op) instead of unmap. Need to handle this to support
    * ARB_map_buffer_range extension at least.
    */
   /* XXX we don't take care of current operations on the resource; which can
      be, at some point in the pipeline
      which is not yet executed:

      - bound as surface
      - bound through vertex buffer
      - bound through index buffer
      - bound in sampler view
      - used in clear_render_target / clear_depth_stencil operation
      - used in blit
      - used in resource_copy_region

      How do other drivers record this information over course of the rendering
      pipeline?
      Is it necessary at all? Only in case we want to provide a fast path and
      map the resource directly
      (and for PIPE_TRANSFER_MAP_DIRECTLY) and we don't want to force a sync.
      We also need to know whether the resource is in use to determine if a sync
      is needed (or just do it
      always, but that comes at the expense of performance).

      A conservative approximation without too much overhead would be to mark
      all resources that have
      been bound at some point as busy. A drawback would be that accessing
      resources that have
      been bound but are no longer in use for a while still carry a performance
      penalty. On the other hand,
      the program could be using PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE or
      PIPE_TRANSFER_UNSYNCHRONIZED to
      avoid this in the first place...

      A) We use an in-pipe copy engine, and queue the copy operation after unmap
      so that the copy
         will be performed when all current commands have been executed.
         Using the RS is possible, not sure if always efficient. This can also
      do any kind of tiling for us.
         Only possible when PIPE_TRANSFER_DISCARD_RANGE is set.
      B) We discard the entire resource (or at least, the mipmap level) and
      allocate new memory for it.
         Only possible when mapping the entire resource or
      PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE is set.
    */

   /*
    * Pull resources into the CPU domain. Only skipped for unsynchronized
    * transfers without a temporary resource.
    */
   if (trans->rsc || !(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
      struct etna_screen *screen = ctx->screen;
      uint32_t prep_flags = 0;

      /*
       * Always flush if we have the temporary resource and have a copy to this
       * outstanding. Otherwise infer flush requirement from resource access and
       * current GPU usage (reads must wait for GPU writes, writes must have
       * exclusive access to the buffer).
       */
      mtx_lock(&screen->lock);

      if ((trans->rsc && (etna_resource(trans->rsc)->status & ETNA_PENDING_WRITE)) ||
          (!trans->rsc &&
           (((usage & PIPE_TRANSFER_READ) && (rsc->status & ETNA_PENDING_WRITE)) ||
           ((usage & PIPE_TRANSFER_WRITE) && rsc->status)))) {
         /* Flush every context that still has pending commands touching
          * this resource before we stall on it. */
         set_foreach(rsc->pending_ctx, entry) {
            struct etna_context *pend_ctx = (struct etna_context *)entry->key;
            struct pipe_context *pend_pctx = &pend_ctx->base;

            pend_pctx->flush(pend_pctx, NULL, 0);
         }
      }

      mtx_unlock(&screen->lock);

      if (usage & PIPE_TRANSFER_READ)
         prep_flags |= DRM_ETNA_PREP_READ;
      if (usage & PIPE_TRANSFER_WRITE)
         prep_flags |= DRM_ETNA_PREP_WRITE;

      /*
       * The ETC2 patching operates in-place on the resource, so the resource will
       * get written even on read-only transfers. This blocks the GPU to sample
       * from this resource.
       */
      if ((usage & PIPE_TRANSFER_READ) && etna_etc2_needs_patching(prsc))
         prep_flags |= DRM_ETNA_PREP_WRITE;

      if (etna_bo_cpu_prep(rsc->bo, prep_flags))
         goto fail_prep;
   }

   /* map buffer object */
   trans->mapped = etna_bo_map(rsc->bo);
   if (!trans->mapped)
      goto fail;

   *out_transfer = ptrans;

   if (rsc->layout == ETNA_LAYOUT_LINEAR) {
      ptrans->stride = res_level->stride;
      ptrans->layer_stride = res_level->layer_stride;

      trans->mapped += res_level->offset +
             etna_compute_offset(prsc->format, box, res_level->stride,
                                 res_level->layer_stride);

      /* We need to have the unpatched data ready for the gfx stack. */
      if (usage & PIPE_TRANSFER_READ)
         etna_unpatch_data(trans->mapped, ptrans);

      return trans->mapped;
   } else {
      unsigned divSizeX = util_format_get_blockwidth(format);
      unsigned divSizeY = util_format_get_blockheight(format);

      /* No direct mappings of tiled, since we need to manually
       * tile/untile.
       */
      if (usage & PIPE_TRANSFER_MAP_DIRECTLY)
         goto fail;

      trans->mapped += res_level->offset;
      ptrans->stride = align(box->width, divSizeX) * util_format_get_blocksize(format); /* row stride in bytes */
      ptrans->layer_stride = align(box->height, divSizeY) * ptrans->stride;
      size_t size = ptrans->layer_stride * box->depth;

      trans->staging = MALLOC(size);
      if (!trans->staging)
         goto fail;

      if (usage & PIPE_TRANSFER_READ) {
         if (rsc->layout == ETNA_LAYOUT_TILED) {
            etna_texture_untile(trans->staging,
                                trans->mapped + ptrans->box.z * res_level->layer_stride,
                                ptrans->box.x, ptrans->box.y, res_level->stride,
                                ptrans->box.width, ptrans->box.height, ptrans->stride,
                                util_format_get_blocksize(rsc->base.format));
         } else if (rsc->layout == ETNA_LAYOUT_LINEAR) {
            util_copy_box(trans->staging, rsc->base.format, ptrans->stride,
                          ptrans->layer_stride, 0, 0, 0, /* dst x,y,z */
                          ptrans->box.width, ptrans->box.height,
                          ptrans->box.depth, trans->mapped, res_level->stride,
                          res_level->layer_stride, ptrans->box.x,
                          ptrans->box.y, ptrans->box.z);
         } else {
            /* TODO supertiling */
            BUG("unsupported tiling %i for reading", rsc->layout);
         }
      }

      return trans->staging;
   }

/* NOTE(review): when trans->rsc is set, rsc points at the temporary, so the
 * fail path finis rsc->bo here AND etna_transfer_unmap finis the temporary's
 * bo again — looks like a double etna_bo_cpu_fini; verify against the bo
 * refcounting semantics. */
fail:
   etna_bo_cpu_fini(rsc->bo);
fail_prep:
   etna_transfer_unmap(pctx, ptrans);
   return NULL;
}
459b8e80941Smrg
/*
 * pipe_context::transfer_flush_region implementation. Intentionally empty:
 * any write-back of staging/temporary data happens in etna_transfer_unmap
 * instead (see the FLUSH_EXPLICIT XXX note in etna_transfer_map).
 */
static void
etna_transfer_flush_region(struct pipe_context *pctx,
                           struct pipe_transfer *transfer,
                           const struct pipe_box *box)
{
   /* NOOP for now */
}
467b8e80941Smrg
468b8e80941Smrgvoid
469b8e80941Smrgetna_transfer_init(struct pipe_context *pctx)
470b8e80941Smrg{
471b8e80941Smrg   pctx->transfer_map = etna_transfer_map;
472b8e80941Smrg   pctx->transfer_flush_region = etna_transfer_flush_region;
473b8e80941Smrg   pctx->transfer_unmap = etna_transfer_unmap;
474b8e80941Smrg   pctx->buffer_subdata = u_default_buffer_subdata;
475b8e80941Smrg   pctx->texture_subdata = u_default_texture_subdata;
476b8e80941Smrg}
477