1/**********************************************************
2 * Copyright 2008-2009 VMware, Inc.  All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26#include "svga3d_reg.h"
27#include "svga3d_surfacedefs.h"
28
29#include "pipe/p_state.h"
30#include "pipe/p_defines.h"
31#include "os/os_thread.h"
32#include "util/format/u_format.h"
33#include "util/u_inlines.h"
34#include "util/u_math.h"
35#include "util/u_memory.h"
36#include "util/u_resource.h"
37#include "util/u_upload_mgr.h"
38
39#include "svga_cmd.h"
40#include "svga_format.h"
41#include "svga_screen.h"
42#include "svga_context.h"
43#include "svga_resource_texture.h"
44#include "svga_resource_buffer.h"
45#include "svga_sampler_view.h"
46#include "svga_winsys.h"
47#include "svga_debug.h"
48
49
50static void
51svga_transfer_dma_band(struct svga_context *svga,
52                       struct svga_transfer *st,
53                       SVGA3dTransferType transfer,
54                       unsigned x, unsigned y, unsigned z,
55                       unsigned w, unsigned h, unsigned d,
56                       unsigned srcx, unsigned srcy, unsigned srcz,
57                       SVGA3dSurfaceDMAFlags flags)
58{
59   struct svga_texture *texture = svga_texture(st->base.resource);
60   SVGA3dCopyBox box;
61
62   assert(!st->use_direct_map);
63
64   box.x = x;
65   box.y = y;
66   box.z = z;
67   box.w = w;
68   box.h = h;
69   box.d = d;
70   box.srcx = srcx;
71   box.srcy = srcy;
72   box.srcz = srcz;
73
74   SVGA_DBG(DEBUG_DMA, "dma %s sid %p, face %u, (%u, %u, %u) - "
75            "(%u, %u, %u), %ubpp\n",
76            transfer == SVGA3D_WRITE_HOST_VRAM ? "to" : "from",
77            texture->handle,
78            st->slice,
79            x,
80            y,
81            z,
82            x + w,
83            y + h,
84            z + 1,
85            util_format_get_blocksize(texture->b.format) * 8 /
86            (util_format_get_blockwidth(texture->b.format)
87             * util_format_get_blockheight(texture->b.format)));
88
89   SVGA_RETRY(svga, SVGA3D_SurfaceDMA(svga->swc, st, transfer, &box, 1, flags));
90}
91
92
93static void
94svga_transfer_dma(struct svga_context *svga,
95                  struct svga_transfer *st,
96                  SVGA3dTransferType transfer,
97                  SVGA3dSurfaceDMAFlags flags)
98{
99   struct svga_texture *texture = svga_texture(st->base.resource);
100   struct svga_screen *screen = svga_screen(texture->b.screen);
101   struct svga_winsys_screen *sws = screen->sws;
102   struct pipe_fence_handle *fence = NULL;
103
104   assert(!st->use_direct_map);
105
106   if (transfer == SVGA3D_READ_HOST_VRAM) {
107      SVGA_DBG(DEBUG_PERF, "%s: readback transfer\n", __FUNCTION__);
108   }
109
110   /* Ensure any pending operations on host surfaces are queued on the command
111    * buffer first.
112    */
113   svga_surfaces_flush(svga);
114
115   if (!st->swbuf) {
116      /* Do the DMA transfer in a single go */
117      svga_transfer_dma_band(svga, st, transfer,
118                             st->box.x, st->box.y, st->box.z,
119                             st->box.w, st->box.h, st->box.d,
120                             0, 0, 0,
121                             flags);
122
123      if (transfer == SVGA3D_READ_HOST_VRAM) {
124         svga_context_flush(svga, &fence);
125         sws->fence_finish(sws, fence, PIPE_TIMEOUT_INFINITE, 0);
126         sws->fence_reference(sws, &fence, NULL);
127      }
128   }
129   else {
130      int y, h, srcy;
131      unsigned blockheight =
132         util_format_get_blockheight(st->base.resource->format);
133
134      h = st->hw_nblocksy * blockheight;
135      srcy = 0;
136
137      for (y = 0; y < st->box.h; y += h) {
138         unsigned offset, length;
139         void *hw, *sw;
140
141         if (y + h > st->box.h)
142            h = st->box.h - y;
143
144         /* Transfer band must be aligned to pixel block boundaries */
145         assert(y % blockheight == 0);
146         assert(h % blockheight == 0);
147
148         offset = y * st->base.stride / blockheight;
149         length = h * st->base.stride / blockheight;
150
151         sw = (uint8_t *) st->swbuf + offset;
152
153         if (transfer == SVGA3D_WRITE_HOST_VRAM) {
154            unsigned usage = PIPE_MAP_WRITE;
155
156            /* Wait for the previous DMAs to complete */
157            /* TODO: keep one DMA (at half the size) in the background */
158            if (y) {
159               svga_context_flush(svga, NULL);
160               usage |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
161            }
162
163            hw = sws->buffer_map(sws, st->hwbuf, usage);
164            assert(hw);
165            if (hw) {
166               memcpy(hw, sw, length);
167               sws->buffer_unmap(sws, st->hwbuf);
168            }
169         }
170
171         svga_transfer_dma_band(svga, st, transfer,
172                                st->box.x, y, st->box.z,
173                                st->box.w, h, st->box.d,
174                                0, srcy, 0, flags);
175
176         /*
177          * Prevent the texture contents to be discarded on the next band
178          * upload.
179          */
180         flags.discard = FALSE;
181
182         if (transfer == SVGA3D_READ_HOST_VRAM) {
183            svga_context_flush(svga, &fence);
184            sws->fence_finish(sws, fence, PIPE_TIMEOUT_INFINITE, 0);
185
186            hw = sws->buffer_map(sws, st->hwbuf, PIPE_MAP_READ);
187            assert(hw);
188            if (hw) {
189               memcpy(sw, hw, length);
190               sws->buffer_unmap(sws, st->hwbuf);
191            }
192         }
193      }
194   }
195}
196
197
198
199bool
200svga_resource_get_handle(struct pipe_screen *screen,
201                         struct pipe_context *context,
202                         struct pipe_resource *texture,
203                         struct winsys_handle *whandle,
204                         unsigned usage)
205{
206   struct svga_winsys_screen *sws = svga_winsys_screen(texture->screen);
207   unsigned stride;
208
209   if (texture->target == PIPE_BUFFER)
210      return false;
211
212   assert(svga_texture(texture)->key.cachable == 0);
213   svga_texture(texture)->key.cachable = 0;
214
215   stride = util_format_get_nblocksx(texture->format, texture->width0) *
216            util_format_get_blocksize(texture->format);
217
218   return sws->surface_get_handle(sws, svga_texture(texture)->handle,
219                                  stride, whandle);
220}
221
222
223/**
224 * Determine if we need to read back a texture image before mapping it.
225 */
226static inline boolean
227need_tex_readback(struct svga_transfer *st)
228{
229   if (st->base.usage & PIPE_MAP_READ)
230      return TRUE;
231
232   if ((st->base.usage & PIPE_MAP_WRITE) &&
233       ((st->base.usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE) == 0)) {
234      return svga_was_texture_rendered_to(svga_texture(st->base.resource),
235                                          st->slice, st->base.level);
236   }
237
238   return FALSE;
239}
240
241
/**
 * Queue a readback of one (slice, level) image of a guest-backed surface
 * on a VGPU9 host, bringing the guest copy up to date.
 */
static void
readback_image_vgpu9(struct svga_context *svga,
                   struct svga_winsys_surface *surf,
                   unsigned slice,
                   unsigned level)
{
   /* Retried if the command buffer is full. */
   SVGA_RETRY(svga, SVGA3D_ReadbackGBImage(svga->swc, surf, slice, level));
}
250
251
252static void
253readback_image_vgpu10(struct svga_context *svga,
254                    struct svga_winsys_surface *surf,
255                    unsigned slice,
256                    unsigned level,
257                    unsigned numMipLevels)
258{
259   unsigned subResource;
260
261   subResource = slice * numMipLevels + level;
262   SVGA_RETRY(svga, SVGA3D_vgpu10_ReadbackSubResource(svga->swc, surf,
263                                                      subResource));
264}
265
266
/**
 * Use DMA for the transfer request
 *
 * Stages the transfer through a GMR hardware buffer and, if a large
 * enough hardware buffer cannot be allocated, through an additional
 * malloc'd system-memory buffer (st->swbuf) copied in bands.
 * For read mappings the current texture contents are DMA'd into the
 * staging buffer first.
 *
 * \return pointer for CPU access to the staged data, or NULL on
 *         allocation failure
 */
static void *
svga_texture_transfer_map_dma(struct svga_context *svga,
                              struct svga_transfer *st)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   struct pipe_resource *texture = st->base.resource;
   unsigned nblocksx, nblocksy;
   unsigned d;
   unsigned usage = st->base.usage;

   /* we'll put the data into a tightly packed buffer */
   nblocksx = util_format_get_nblocksx(texture->format, st->box.w);
   nblocksy = util_format_get_nblocksy(texture->format, st->box.h);
   d = st->box.d;

   st->base.stride = nblocksx*util_format_get_blocksize(texture->format);
   st->base.layer_stride = st->base.stride * nblocksy;
   st->hw_nblocksy = nblocksy;

   /* Try a hardware buffer covering the whole box; on failure keep
    * halving the number of block rows per DMA band until one fits.
    */
   st->hwbuf = svga_winsys_buffer_create(svga, 1, 0,
                                         st->hw_nblocksy * st->base.stride * d);

   while (!st->hwbuf && (st->hw_nblocksy /= 2)) {
      st->hwbuf =
         svga_winsys_buffer_create(svga, 1, 0,
                                   st->hw_nblocksy * st->base.stride * d);
   }

   if (!st->hwbuf)
      return NULL;

   if (st->hw_nblocksy < nblocksy) {
      /* We couldn't allocate a hardware buffer big enough for the transfer,
       * so allocate regular malloc memory instead
       */
      if (0) {
         debug_printf("%s: failed to allocate %u KB of DMA, "
                      "splitting into %u x %u KB DMA transfers\n",
                      __FUNCTION__,
                      (nblocksy * st->base.stride + 1023) / 1024,
                      (nblocksy + st->hw_nblocksy - 1) / st->hw_nblocksy,
                      (st->hw_nblocksy * st->base.stride + 1023) / 1024);
      }

      st->swbuf = MALLOC(nblocksy * st->base.stride * d);
      if (!st->swbuf) {
         sws->buffer_destroy(sws, st->hwbuf);
         return NULL;
      }
   }

   if (usage & PIPE_MAP_READ) {
      /* Pull the current texture contents into the staging buffer. */
      SVGA3dSurfaceDMAFlags flags;
      memset(&flags, 0, sizeof flags);
      svga_transfer_dma(svga, st, SVGA3D_READ_HOST_VRAM, flags);
   }

   /* Hand back the system buffer when banding is in use, otherwise map
    * the hardware buffer directly.
    */
   if (st->swbuf) {
      return st->swbuf;
   }
   else {
      return sws->buffer_map(sws, st->hwbuf, usage);
   }
}
334
335
/**
 * Use direct map for the transfer request
 *
 * Maps the guest-backed surface storage directly.  If the host copy is
 * newer (see need_tex_readback()), the affected images are read back and
 * the context is flushed first.
 *
 * \return pointer adjusted to the requested slice/level/box, or NULL if
 *         the surface could not be mapped
 */
static void *
svga_texture_transfer_map_direct(struct svga_context *svga,
                                 struct svga_transfer *st)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   struct pipe_transfer *transfer = &st->base;
   struct pipe_resource *texture = transfer->resource;
   struct svga_texture *tex = svga_texture(texture);
   struct svga_winsys_surface *surf = tex->handle;
   unsigned level = st->base.level;
   unsigned w, h, nblocksx, nblocksy, i;
   unsigned usage = st->base.usage;

   if (need_tex_readback(st)) {
      svga_surfaces_flush(svga);

      /* Queue one readback per layer of the box, then flush so the data
       * lands in the guest backing store before we map it.
       */
      if (!svga->swc->force_coherent || tex->imported) {
         for (i = 0; i < st->box.d; i++) {
            if (svga_have_vgpu10(svga)) {
               readback_image_vgpu10(svga, surf, st->slice + i, level,
                                     tex->b.last_level + 1);
            } else {
               readback_image_vgpu9(svga, surf, st->slice + i, level);
            }
         }
         svga->hud.num_readbacks++;
         SVGA_STATS_COUNT_INC(sws, SVGA_STATS_COUNT_TEXREADBACK);

         svga_context_flush(svga, NULL);
      }
      /*
       * Note: if PIPE_MAP_DISCARD_WHOLE_RESOURCE were specified
       * we could potentially clear the flag for all faces/layers/mips.
       */
      svga_clear_texture_rendered_to(tex, st->slice, level);
   }
   else {
      assert(usage & PIPE_MAP_WRITE);
      if ((usage & PIPE_MAP_UNSYNCHRONIZED) == 0) {
         if (svga_is_texture_dirty(tex, st->slice, level)) {
            /*
             * do a surface flush if the subresource has been modified
             * in this command buffer.
             */
            svga_surfaces_flush(svga);
            if (!sws->surface_is_flushed(sws, surf)) {
               svga->hud.surface_write_flushes++;
               SVGA_STATS_COUNT_INC(sws, SVGA_STATS_COUNT_SURFACEWRITEFLUSH);
               svga_context_flush(svga, NULL);
            }
         }
      }
   }

   /* we'll directly access the guest-backed surface */
   w = u_minify(texture->width0, level);
   h = u_minify(texture->height0, level);
   nblocksx = util_format_get_nblocksx(texture->format, w);
   nblocksy = util_format_get_nblocksy(texture->format, h);
   st->hw_nblocksy = nblocksy;
   st->base.stride = nblocksx*util_format_get_blocksize(texture->format);
   st->base.layer_stride = st->base.stride * nblocksy;

   /*
    * Begin mapping code
    */
   {
      SVGA3dSize baseLevelSize;
      uint8_t *map;
      boolean retry, rebind;
      unsigned offset, mip_width, mip_height;
      struct svga_winsys_context *swc = svga->swc;

      if (swc->force_coherent) {
         usage |= PIPE_MAP_PERSISTENT | PIPE_MAP_COHERENT;
      }

      map = SVGA_TRY_MAP(svga->swc->surface_map
                         (svga->swc, surf, usage, &retry, &rebind), retry);

      if (map == NULL && retry) {
         /*
          * At this point, the svga_surfaces_flush() should already have
          * called in svga_texture_get_transfer().
          */
         svga->hud.surface_write_flushes++;
         svga_retry_enter(svga);
         svga_context_flush(svga, NULL);
         /* Retry the map once after flushing the command buffer. */
         map = svga->swc->surface_map(svga->swc, surf, usage, &retry, &rebind);
         svga_retry_exit(svga);
      }

      if (map && rebind) {
         enum pipe_error ret;

         /* The winsys asked us to re-issue the surface binding. */
         ret = SVGA3D_BindGBSurface(swc, surf);
         if (ret != PIPE_OK) {
            svga_context_flush(svga, NULL);
            ret = SVGA3D_BindGBSurface(swc, surf);
            assert(ret == PIPE_OK);
         }
         svga_context_flush(svga, NULL);
      }

      /*
       * Make sure we return NULL if the map fails
       */
      if (!map) {
         return NULL;
      }

      /**
       * Compute the offset to the specific texture slice in the buffer.
       */
      baseLevelSize.width = tex->b.width0;
      baseLevelSize.height = tex->b.height0;
      baseLevelSize.depth = tex->b.depth0;

      if ((tex->b.target == PIPE_TEXTURE_1D_ARRAY) ||
          (tex->b.target == PIPE_TEXTURE_2D_ARRAY) ||
          (tex->b.target == PIPE_TEXTURE_CUBE_ARRAY)) {
         /* For array textures the layer stride spans one layer's whole
          * mip chain.
          */
         st->base.layer_stride =
            svga3dsurface_get_image_offset(tex->key.format, baseLevelSize,
                                           tex->b.last_level + 1, 1, 0);
      }

      offset = svga3dsurface_get_image_offset(tex->key.format, baseLevelSize,
                                              tex->b.last_level + 1, /* numMips */
                                              st->slice, level);
      if (level > 0) {
         assert(offset > 0);
      }

      mip_width = u_minify(tex->b.width0, level);
      mip_height = u_minify(tex->b.height0, level);

      /* Advance to the (x, y, z) origin of the transfer box. */
      offset += svga3dsurface_get_pixel_offset(tex->key.format,
                                               mip_width, mip_height,
                                               st->box.x,
                                               st->box.y,
                                               st->box.z);

      return (void *) (map + offset);
   }
}
484
485
/**
 * Request a transfer map to the texture resource
 *
 * Chooses between three mapping strategies: DMA staging (non-GB or
 * read-capable paths), direct map of the guest-backed surface, or the
 * texture upload buffer, with fallbacks between them.
 *
 * \param level      mipmap level to map
 * \param usage      PIPE_MAP_x flags
 * \param box        region to map; for array/cube targets, z selects the
 *                   starting slice
 * \param ptransfer  returns the new transfer object on success
 * \return CPU pointer to the mapped region, or NULL on failure
 */
void *
svga_texture_transfer_map(struct pipe_context *pipe,
                          struct pipe_resource *texture,
                          unsigned level,
                          unsigned usage,
                          const struct pipe_box *box,
                          struct pipe_transfer **ptransfer)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_winsys_screen *sws = svga_screen(pipe->screen)->sws;
   struct svga_texture *tex = svga_texture(texture);
   struct svga_transfer *st;
   struct svga_winsys_surface *surf = tex->handle;
   boolean use_direct_map = svga_have_gb_objects(svga) &&
       (!svga_have_gb_dma(svga) || (usage & PIPE_MAP_WRITE));
   void *map = NULL;
   int64_t begin = svga_get_time(svga);

   SVGA_STATS_TIME_PUSH(sws, SVGA_STATS_TIME_TEXTRANSFERMAP);

   if (!surf)
      goto done;

   /* We can't map texture storage directly unless we have GB objects */
   if (usage & PIPE_MAP_DIRECTLY) {
      if (svga_have_gb_objects(svga))
         use_direct_map = TRUE;
      else
         goto done;
   }

   st = CALLOC_STRUCT(svga_transfer);
   if (!st)
      goto done;

   st->base.level = level;
   st->base.usage = usage;
   st->base.box = *box;

   /* The modified transfer map box with the array index removed from z.
    * The array index is specified in slice.
    */
   st->box.x = box->x;
   st->box.y = box->y;
   st->box.z = box->z;
   st->box.w = box->width;
   st->box.h = box->height;
   st->box.d = box->depth;

   switch (tex->b.target) {
   case PIPE_TEXTURE_CUBE:
      st->slice = st->base.box.z;
      st->box.z = 0;   /* so we don't apply double offsets below */
      break;
   case PIPE_TEXTURE_1D_ARRAY:
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_CUBE_ARRAY:
      st->slice = st->base.box.z;
      st->box.z = 0;   /* so we don't apply double offsets below */

      /* Force direct map for transfering multiple slices */
      if (st->base.box.depth > 1)
         use_direct_map = svga_have_gb_objects(svga);

      break;
   default:
      st->slice = 0;
      break;
   }

   /* Force direct map for multisample surface */
   if (texture->nr_samples > 1) {
      assert(svga_have_gb_objects(svga));
      assert(sws->have_sm4_1);
      use_direct_map = TRUE;
   }

   st->use_direct_map = use_direct_map;
   pipe_resource_reference(&st->base.resource, texture);

   /* If this is the first time mapping to the surface in this
    * command buffer and there is no pending primitives, clear
    * the dirty masks of this surface.
    */
   if (sws->surface_is_flushed(sws, surf) &&
       (svga_have_vgpu10(svga) ||
        !svga_hwtnl_has_pending_prim(svga->hwtnl))) {
      svga_clear_texture_dirty(tex);
   }

   if (!use_direct_map) {
      /* upload to the DMA buffer */
      map = svga_texture_transfer_map_dma(svga, st);
   }
   else {
      boolean can_use_upload = tex->can_use_upload &&
                               !(st->base.usage & PIPE_MAP_READ);
      boolean was_rendered_to =
         svga_was_texture_rendered_to(svga_texture(texture),
                                      st->slice, st->base.level);

      /* If the texture was already rendered to and upload buffer
       * is supported, then we will use upload buffer to
       * avoid the need to read back the texture content; otherwise,
       * we'll first try to map directly to the GB surface, if it is blocked,
       * then we'll try the upload buffer.
       */
      if (was_rendered_to && can_use_upload) {
         map = svga_texture_transfer_map_upload(svga, st);
      }
      else {
         unsigned orig_usage = st->base.usage;

         /* First try directly map to the GB surface */
         if (can_use_upload)
            st->base.usage |= PIPE_MAP_DONTBLOCK;
         map = svga_texture_transfer_map_direct(svga, st);
         st->base.usage = orig_usage;

         if (!map && can_use_upload) {
            /* if direct map with DONTBLOCK fails, then try upload to the
             * texture upload buffer.
             */
            map = svga_texture_transfer_map_upload(svga, st);
         }
      }

      /* If upload fails, then try direct map again without forcing it
       * to DONTBLOCK.
       */
      if (!map) {
         map = svga_texture_transfer_map_direct(svga, st);
      }
   }

   if (!map) {
      FREE(st);
   }
   else {
      *ptransfer = &st->base;
      svga->hud.num_textures_mapped++;
      if (usage & PIPE_MAP_WRITE) {
         /* record texture upload for HUD */
         svga->hud.num_bytes_uploaded +=
            st->base.layer_stride * st->box.d;

         /* mark this texture level as dirty */
         svga_set_texture_dirty(tex, st->slice, level);
      }
   }

done:
   svga->hud.map_buffer_time += (svga_get_time(svga) - begin);
   SVGA_STATS_TIME_POP(sws);
   (void) sws;

   return map;
}
647
648/**
649 * Unmap a GB texture surface.
650 */
651static void
652svga_texture_surface_unmap(struct svga_context *svga,
653                           struct pipe_transfer *transfer)
654{
655   struct svga_winsys_surface *surf = svga_texture(transfer->resource)->handle;
656   struct svga_winsys_context *swc = svga->swc;
657   boolean rebind;
658
659   assert(surf);
660
661   swc->surface_unmap(swc, surf, &rebind);
662   if (rebind) {
663      SVGA_RETRY(svga, SVGA3D_BindGBSurface(swc, surf));
664   }
665}
666
667
/**
 * Queue an update of one (slice, level) image region of a guest-backed
 * surface on a VGPU9 host, propagating guest writes to the host.
 */
static void
update_image_vgpu9(struct svga_context *svga,
                   struct svga_winsys_surface *surf,
                   const SVGA3dBox *box,
                   unsigned slice,
                   unsigned level)
{
   /* Retried if the command buffer is full. */
   SVGA_RETRY(svga, SVGA3D_UpdateGBImage(svga->swc, surf, box, slice, level));
}
677
678
679static void
680update_image_vgpu10(struct svga_context *svga,
681                    struct svga_winsys_surface *surf,
682                    const SVGA3dBox *box,
683                    unsigned slice,
684                    unsigned level,
685                    unsigned numMipLevels)
686{
687   unsigned subResource;
688
689   subResource = slice * numMipLevels + level;
690
691   SVGA_RETRY(svga, SVGA3D_vgpu10_UpdateSubResource(svga->swc, surf, box,
692                                                    subResource));
693}
694
695
/**
 * unmap DMA transfer request
 *
 * Unmaps the staging buffer and, for write mappings, DMAs the staged
 * data up to the host surface.  Frees the staging resources in all cases.
 */
static void
svga_texture_transfer_unmap_dma(struct svga_context *svga,
                                struct svga_transfer *st)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;

   /* The hardware buffer must be unmapped before it can be DMA'd. */
   if (!st->swbuf)
      sws->buffer_unmap(sws, st->hwbuf);

   if (st->base.usage & PIPE_MAP_WRITE) {
      /* Use DMA to transfer texture data */
      SVGA3dSurfaceDMAFlags flags;
      struct pipe_resource *texture = st->base.resource;
      struct svga_texture *tex = svga_texture(texture);


      /* Translate the relevant PIPE_MAP flags to DMA flags. */
      memset(&flags, 0, sizeof flags);
      if (st->base.usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE) {
         flags.discard = TRUE;
      }
      if (st->base.usage & PIPE_MAP_UNSYNCHRONIZED) {
         flags.unsynchronized = TRUE;
      }

      svga_transfer_dma(svga, st, SVGA3D_WRITE_HOST_VRAM, flags);
      svga_set_texture_rendered_to(tex, st->slice, st->base.level);
   }

   /* FREE() tolerates NULL, so no need to test st->swbuf first. */
   FREE(st->swbuf);
   sws->buffer_destroy(sws, st->hwbuf);
}
730
731
/**
 * unmap direct map transfer request
 *
 * After unmapping the guest-backed surface, issues UpdateGBImage /
 * UpdateSubResource commands for write mappings so the host picks up
 * the modified region.
 */
static void
svga_texture_transfer_unmap_direct(struct svga_context *svga,
                                   struct svga_transfer *st)
{
   struct pipe_transfer *transfer = &st->base;
   struct svga_texture *tex = svga_texture(transfer->resource);

   svga_texture_surface_unmap(svga, transfer);

   /* Now send an update command to update the content in the backend. */
   if (st->base.usage & PIPE_MAP_WRITE) {
      struct svga_winsys_surface *surf = tex->handle;

      assert(svga_have_gb_objects(svga));

      /* update the effected region */
      SVGA3dBox box = st->box;
      unsigned nlayers;

      /* For array targets, box.d counted layers; update each layer's
       * image with a depth-1 box instead.
       */
      switch (tex->b.target) {
      case PIPE_TEXTURE_2D_ARRAY:
      case PIPE_TEXTURE_CUBE_ARRAY:
      case PIPE_TEXTURE_1D_ARRAY:
         nlayers = box.d;
         box.d = 1;
         break;
      default:
         nlayers = 1;
         break;
      }


      if (0)
         debug_printf("%s %d, %d, %d  %d x %d x %d\n",
                      __FUNCTION__,
                      box.x, box.y, box.z,
                      box.w, box.h, box.d);

      if (!svga->swc->force_coherent || tex->imported) {
         if (svga_have_vgpu10(svga)) {
            unsigned i;

            for (i = 0; i < nlayers; i++) {
               update_image_vgpu10(svga, surf, &box,
                                   st->slice + i, transfer->level,
                                   tex->b.last_level + 1);
            }
         } else {
            assert(nlayers == 1);
            update_image_vgpu9(svga, surf, &box, st->slice,
                               transfer->level);
         }
      }
   }
}
790
791
/**
 * Unmap a texture transfer, dispatching to the DMA, upload-buffer or
 * direct-map unmap path, then releases the transfer object.
 */
void
svga_texture_transfer_unmap(struct pipe_context *pipe,
                            struct pipe_transfer *transfer)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_screen *ss = svga_screen(pipe->screen);
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_transfer *st = svga_transfer(transfer);
   struct svga_texture *tex = svga_texture(transfer->resource);

   SVGA_STATS_TIME_PUSH(sws, SVGA_STATS_TIME_TEXTRANSFERUNMAP);

   /* Dispatch to the unmap path matching how the map was made. */
   if (!st->use_direct_map) {
      svga_texture_transfer_unmap_dma(svga, st);
   }
   else if (st->upload.buf) {
      svga_texture_transfer_unmap_upload(svga, st);
   }
   else {
      svga_texture_transfer_unmap_direct(svga, st);
   }

   if (st->base.usage & PIPE_MAP_WRITE) {
      svga->hud.num_resource_updates++;

      /* Mark the texture level as dirty */
      ss->texture_timestamp++;
      svga_age_texture_view(tex, transfer->level);
      if (transfer->resource->target == PIPE_TEXTURE_CUBE)
         svga_define_texture_level(tex, st->slice, transfer->level);
      else
         svga_define_texture_level(tex, 0, transfer->level);
   }

   pipe_resource_reference(&st->base.resource, NULL);
   FREE(st);
   SVGA_STATS_TIME_POP(sws);
   /* sws is only used by the stats macros in some build configs. */
   (void) sws;
}
831
832
833/**
834 * Does format store depth values?
835 */
836static inline boolean
837format_has_depth(enum pipe_format format)
838{
839   const struct util_format_description *desc = util_format_description(format);
840   return util_format_has_depth(desc);
841}
842
843struct pipe_resource *
844svga_texture_create(struct pipe_screen *screen,
845                    const struct pipe_resource *template)
846{
847   struct svga_screen *svgascreen = svga_screen(screen);
848   struct svga_texture *tex;
849   unsigned bindings = template->bind;
850
851   SVGA_STATS_TIME_PUSH(svgascreen->sws,
852                        SVGA_STATS_TIME_CREATETEXTURE);
853
854   assert(template->last_level < SVGA_MAX_TEXTURE_LEVELS);
855   if (template->last_level >= SVGA_MAX_TEXTURE_LEVELS) {
856      goto fail_notex;
857   }
858
859   /* Verify the number of mipmap levels isn't impossibly large.  For example,
860    * if the base 2D image is 16x16, we can't have 8 mipmap levels.
861    * the gallium frontend should never ask us to create a resource with invalid
862    * parameters.
863    */
864   {
865      unsigned max_dim = template->width0;
866
867      switch (template->target) {
868      case PIPE_TEXTURE_1D:
869      case PIPE_TEXTURE_1D_ARRAY:
870         // nothing
871         break;
872      case PIPE_TEXTURE_2D:
873      case PIPE_TEXTURE_CUBE:
874      case PIPE_TEXTURE_CUBE_ARRAY:
875      case PIPE_TEXTURE_2D_ARRAY:
876         max_dim = MAX2(max_dim, template->height0);
877         break;
878      case PIPE_TEXTURE_3D:
879         max_dim = MAX3(max_dim, template->height0, template->depth0);
880         break;
881      case PIPE_TEXTURE_RECT:
882      case PIPE_BUFFER:
883         assert(template->last_level == 0);
884         /* the assertion below should always pass */
885         break;
886      default:
887         debug_printf("Unexpected texture target type\n");
888      }
889      assert(1 << template->last_level <= max_dim);
890   }
891
892   tex = CALLOC_STRUCT(svga_texture);
893   if (!tex) {
894      goto fail_notex;
895   }
896
897   tex->defined = CALLOC(template->depth0 * template->array_size,
898                         sizeof(tex->defined[0]));
899   if (!tex->defined) {
900      FREE(tex);
901      goto fail_notex;
902   }
903
904   tex->rendered_to = CALLOC(template->depth0 * template->array_size,
905                             sizeof(tex->rendered_to[0]));
906   if (!tex->rendered_to) {
907      goto fail;
908   }
909
910   tex->dirty = CALLOC(template->depth0 * template->array_size,
911                             sizeof(tex->dirty[0]));
912   if (!tex->dirty) {
913      goto fail;
914   }
915
916   tex->b = *template;
917   pipe_reference_init(&tex->b.reference, 1);
918   tex->b.screen = screen;
919
920   tex->key.flags = 0;
921   tex->key.size.width = template->width0;
922   tex->key.size.height = template->height0;
923   tex->key.size.depth = template->depth0;
924   tex->key.arraySize = 1;
925   tex->key.numFaces = 1;
926
927   /* nr_samples=1 must be treated as a non-multisample texture */
928   if (tex->b.nr_samples == 1) {
929      tex->b.nr_samples = 0;
930   }
931   else if (tex->b.nr_samples > 1) {
932      assert(svgascreen->sws->have_sm4_1);
933      tex->key.flags |= SVGA3D_SURFACE_MULTISAMPLE;
934   }
935
936   tex->key.sampleCount = tex->b.nr_samples;
937
938   if (svgascreen->sws->have_vgpu10) {
939      switch (template->target) {
940      case PIPE_TEXTURE_1D:
941         tex->key.flags |= SVGA3D_SURFACE_1D;
942         break;
943      case PIPE_TEXTURE_1D_ARRAY:
944         tex->key.flags |= SVGA3D_SURFACE_1D;
945         FALLTHROUGH;
946      case PIPE_TEXTURE_2D_ARRAY:
947         tex->key.flags |= SVGA3D_SURFACE_ARRAY;
948         tex->key.arraySize = template->array_size;
949         break;
950      case PIPE_TEXTURE_3D:
951         tex->key.flags |= SVGA3D_SURFACE_VOLUME;
952         break;
953      case PIPE_TEXTURE_CUBE:
954         tex->key.flags |= (SVGA3D_SURFACE_CUBEMAP | SVGA3D_SURFACE_ARRAY);
955         tex->key.numFaces = 6;
956         break;
957      case PIPE_TEXTURE_CUBE_ARRAY:
958         assert(svgascreen->sws->have_sm4_1);
959         tex->key.flags |= (SVGA3D_SURFACE_CUBEMAP | SVGA3D_SURFACE_ARRAY);
960         tex->key.numFaces = 1;  // arraySize already includes the 6 faces
961         tex->key.arraySize = template->array_size;
962         break;
963      default:
964         break;
965      }
966   }
967   else {
968      switch (template->target) {
969      case PIPE_TEXTURE_3D:
970         tex->key.flags |= SVGA3D_SURFACE_VOLUME;
971         break;
972      case PIPE_TEXTURE_CUBE:
973         tex->key.flags |= SVGA3D_SURFACE_CUBEMAP;
974         tex->key.numFaces = 6;
975         break;
976      default:
977         break;
978      }
979   }
980
981   tex->key.cachable = 1;
982
983   if ((bindings & (PIPE_BIND_RENDER_TARGET | PIPE_BIND_DEPTH_STENCIL)) &&
984       !(bindings & PIPE_BIND_SAMPLER_VIEW)) {
985      /* Also check if the format can be sampled from */
986      if (screen->is_format_supported(screen, template->format,
987                                      template->target,
988                                      template->nr_samples,
989                                      template->nr_storage_samples,
990                                      PIPE_BIND_SAMPLER_VIEW)) {
991         bindings |= PIPE_BIND_SAMPLER_VIEW;
992      }
993   }
994
995   if (bindings & PIPE_BIND_SAMPLER_VIEW) {
996      tex->key.flags |= SVGA3D_SURFACE_HINT_TEXTURE;
997      tex->key.flags |= SVGA3D_SURFACE_BIND_SHADER_RESOURCE;
998
999      if (!(bindings & PIPE_BIND_RENDER_TARGET)) {
1000         /* Also check if the format is color renderable */
1001         if (screen->is_format_supported(screen, template->format,
1002                                         template->target,
1003                                         template->nr_samples,
1004                                         template->nr_storage_samples,
1005                                         PIPE_BIND_RENDER_TARGET)) {
1006            bindings |= PIPE_BIND_RENDER_TARGET;
1007         }
1008      }
1009
1010      if (!(bindings & PIPE_BIND_DEPTH_STENCIL)) {
1011         /* Also check if the format is depth/stencil renderable */
1012         if (screen->is_format_supported(screen, template->format,
1013                                         template->target,
1014                                         template->nr_samples,
1015                                         template->nr_storage_samples,
1016                                         PIPE_BIND_DEPTH_STENCIL)) {
1017            bindings |= PIPE_BIND_DEPTH_STENCIL;
1018         }
1019      }
1020   }
1021
1022   if (bindings & PIPE_BIND_DISPLAY_TARGET) {
1023      tex->key.cachable = 0;
1024   }
1025
1026   if (bindings & PIPE_BIND_SHARED) {
1027      tex->key.cachable = 0;
1028   }
1029
1030   if (bindings & (PIPE_BIND_SCANOUT | PIPE_BIND_CURSOR)) {
1031      tex->key.scanout = 1;
1032      tex->key.cachable = 0;
1033   }
1034
1035   /*
1036    * Note: Previously we never passed the
1037    * SVGA3D_SURFACE_HINT_RENDERTARGET hint. Mesa cannot
1038    * know beforehand whether a texture will be used as a rendertarget or not
1039    * and it always requests PIPE_BIND_RENDER_TARGET, therefore
1040    * passing the SVGA3D_SURFACE_HINT_RENDERTARGET here defeats its purpose.
1041    *
 * However, this was changed since other gallium frontends
 * (XA for example) use it accurately, and certain device versions
 * rely on it in certain situations to render correctly.
1045    */
1046   if ((bindings & PIPE_BIND_RENDER_TARGET) &&
1047       !util_format_is_s3tc(template->format)) {
1048      tex->key.flags |= SVGA3D_SURFACE_HINT_RENDERTARGET;
1049      tex->key.flags |= SVGA3D_SURFACE_BIND_RENDER_TARGET;
1050   }
1051
1052   if (bindings & PIPE_BIND_DEPTH_STENCIL) {
1053      tex->key.flags |= SVGA3D_SURFACE_HINT_DEPTHSTENCIL;
1054      tex->key.flags |= SVGA3D_SURFACE_BIND_DEPTH_STENCIL;
1055   }
1056
1057   tex->key.numMipLevels = template->last_level + 1;
1058
1059   tex->key.format = svga_translate_format(svgascreen, template->format,
1060                                           bindings);
1061   if (tex->key.format == SVGA3D_FORMAT_INVALID) {
1062      goto fail;
1063   }
1064
1065   /* Use typeless formats for sRGB and depth resources.  Typeless
1066    * formats can be reinterpreted as other formats.  For example,
1067    * SVGA3D_R8G8B8A8_UNORM_TYPELESS can be interpreted as
1068    * SVGA3D_R8G8B8A8_UNORM_SRGB or SVGA3D_R8G8B8A8_UNORM.
1069    */
1070   if (svgascreen->sws->have_vgpu10 &&
1071       (util_format_is_srgb(template->format) ||
1072        format_has_depth(template->format))) {
1073      SVGA3dSurfaceFormat typeless = svga_typeless_format(tex->key.format);
1074      if (0) {
1075         debug_printf("Convert resource type %s -> %s (bind 0x%x)\n",
1076                      svga_format_name(tex->key.format),
1077                      svga_format_name(typeless),
1078                      bindings);
1079      }
1080
1081      if (svga_format_is_uncompressed_snorm(tex->key.format)) {
1082         /* We can't normally render to snorm surfaces, but once we
1083          * substitute a typeless format, we can if the rendertarget view
1084          * is unorm.  This can happen with GL_ARB_copy_image.
1085          */
1086         tex->key.flags |= SVGA3D_SURFACE_HINT_RENDERTARGET;
1087         tex->key.flags |= SVGA3D_SURFACE_BIND_RENDER_TARGET;
1088      }
1089
1090      tex->key.format = typeless;
1091   }
1092
1093   SVGA_DBG(DEBUG_DMA, "surface_create for texture\n");
1094   tex->handle = svga_screen_surface_create(svgascreen, bindings,
1095                                            tex->b.usage,
1096                                            &tex->validated, &tex->key);
1097   if (!tex->handle) {
1098      goto fail;
1099   }
1100
1101   SVGA_DBG(DEBUG_DMA, "  --> got sid %p (texture)\n", tex->handle);
1102
1103   debug_reference(&tex->b.reference,
1104                   (debug_reference_descriptor)debug_describe_resource, 0);
1105
1106   tex->size = util_resource_size(template);
1107
1108   /* Determine if texture upload buffer can be used to upload this texture */
1109   tex->can_use_upload = svga_texture_transfer_map_can_upload(svgascreen,
1110                                                              &tex->b);
1111
1112   /* Initialize the backing resource cache */
1113   tex->backed_handle = NULL;
1114
1115   svgascreen->hud.total_resource_bytes += tex->size;
1116   svgascreen->hud.num_resources++;
1117
1118   SVGA_STATS_TIME_POP(svgascreen->sws);
1119
1120   return &tex->b;
1121
1122fail:
1123   if (tex->dirty)
1124      FREE(tex->dirty);
1125   if (tex->rendered_to)
1126      FREE(tex->rendered_to);
1127   if (tex->defined)
1128      FREE(tex->defined);
1129   FREE(tex);
1130fail_notex:
1131   SVGA_STATS_TIME_POP(svgascreen->sws);
1132   return NULL;
1133}
1134
1135
/**
 * Create a pipe_resource wrapping an existing winsys surface imported
 * through a winsys_handle (e.g. a shared or scanout buffer).
 *
 * Only single-mip-level 2D/RECT surfaces with depth 1 are supported.
 * On success the returned resource owns the surface reference; on any
 * failure the surface reference is released and NULL is returned.
 */
struct pipe_resource *
svga_texture_from_handle(struct pipe_screen *screen,
                         const struct pipe_resource *template,
                         struct winsys_handle *whandle)
{
   struct svga_winsys_screen *sws = svga_winsys_screen(screen);
   struct svga_screen *ss = svga_screen(screen);
   struct svga_winsys_surface *srf;
   struct svga_texture *tex;
   enum SVGA3dSurfaceFormat format = 0;
   assert(screen);

   /* Only supports one type */
   if ((template->target != PIPE_TEXTURE_2D &&
       template->target != PIPE_TEXTURE_RECT) ||
       template->last_level != 0 ||
       template->depth0 != 1) {
      return NULL;
   }

   /* Look up the winsys surface; this also reports its device format. */
   srf = sws->surface_from_handle(sws, whandle, &format);

   if (!srf)
      return NULL;

   /* The imported surface's device format must be compatible with the
    * gallium format and bind flags requested in the template.
    */
   if (!svga_format_is_shareable(ss, template->format, format,
                                 template->bind, true))
      goto out_unref;

   tex = CALLOC_STRUCT(svga_texture);
   if (!tex)
      goto out_unref;

   /* Per-slice "surface defined" flags (single mip level, depth0 == 1). */
   tex->defined = CALLOC(template->depth0 * template->array_size,
                         sizeof(tex->defined[0]));
   if (!tex->defined)
      goto out_no_defined;

   tex->b = *template;
   pipe_reference_init(&tex->b.reference, 1);
   tex->b.screen = screen;

   SVGA_DBG(DEBUG_DMA, "wrap surface sid %p\n", srf);

   /* Imported surfaces must never be recycled through the surface cache. */
   tex->key.cachable = 0;
   tex->key.format = format;
   tex->handle = srf;

   /* NOTE(review): sized for a single layer, unlike 'defined' above which
    * is sized by depth0 * array_size — presumably imported surfaces always
    * have array_size == 1; confirm against callers.
    */
   tex->rendered_to = CALLOC(1, sizeof(tex->rendered_to[0]));
   if (!tex->rendered_to)
      goto out_no_rendered_to;

   tex->dirty = CALLOC(1, sizeof(tex->dirty[0]));
   if (!tex->dirty)
      goto out_no_dirty;

   tex->imported = TRUE;

   ss->hud.num_resources++;

   return &tex->b;

   /* Error cleanup: unwind in reverse order of acquisition. */
out_no_dirty:
   FREE(tex->rendered_to);
out_no_rendered_to:
   FREE(tex->defined);
out_no_defined:
   FREE(tex);
out_unref:
   sws->surface_reference(sws, &srf, NULL);
   return NULL;
}
1208
/**
 * Generate mipmap levels (base_level+1 .. last_level) of a texture on the
 * host via the vgpu10 GenMips command.
 *
 * \return true on success, false if the hw path cannot be used; the caller
 *         is expected to fall back to the utility mipmap generation path.
 */
bool
svga_texture_generate_mipmap(struct pipe_context *pipe,
                             struct pipe_resource *pt,
                             enum pipe_format format,
                             unsigned base_level,
                             unsigned last_level,
                             unsigned first_layer,
                             unsigned last_layer)
{
   struct pipe_sampler_view templ, *psv;
   struct svga_pipe_sampler_view *sv;
   struct svga_context *svga = svga_context(pipe);
   struct svga_texture *tex = svga_texture(pt);

   /* GenMips is a vgpu10-only command. */
   assert(svga_have_vgpu10(svga));

   /* Only support 2D texture for now */
   if (pt->target != PIPE_TEXTURE_2D)
      return false;

   /* Fallback to the mipmap generation utility for those formats that
    * do not support hw generate mipmap
    */
   if (!svga_format_support_gen_mips(format))
      return false;

   /* Make sure the texture surface was created with
    * SVGA3D_SURFACE_BIND_RENDER_TARGET
    */
   if (!tex->handle || !(tex->key.flags & SVGA3D_SURFACE_BIND_RENDER_TARGET))
      return false;

   /* Build a sampler view covering the requested mip range / layer range.
    * NOTE(review): templ is only partially initialized here; presumably
    * create_sampler_view ignores or defaults the remaining fields — confirm.
    */
   templ.format = format;
   templ.u.tex.first_layer = first_layer;
   templ.u.tex.last_layer = last_layer;
   templ.u.tex.first_level = base_level;
   templ.u.tex.last_level = last_level;

   psv = pipe->create_sampler_view(pipe, pt, &templ);
   if (psv == NULL)
      return false;

   sv = svga_pipe_sampler_view(psv);
   /* Ensure the host shader-resource view exists (retried on command
    * buffer exhaustion by SVGA_RETRY).
    */
   SVGA_RETRY(svga, svga_validate_pipe_sampler_view(svga, sv));

   SVGA_RETRY(svga, SVGA3D_vgpu10_GenMips(svga->swc, sv->id, tex->handle));
   pipe_sampler_view_reference(&psv, NULL);

   /* HUD statistics */
   svga->hud.num_generate_mipmap++;

   return true;
}
1261
1262
1263/* texture upload buffer default size in bytes */
1264#define TEX_UPLOAD_DEFAULT_SIZE (1024 * 1024)
1265
1266/**
1267 * Create a texture upload buffer
1268 */
1269boolean
1270svga_texture_transfer_map_upload_create(struct svga_context *svga)
1271{
1272   svga->tex_upload = u_upload_create(&svga->pipe, TEX_UPLOAD_DEFAULT_SIZE,
1273                                      PIPE_BIND_CUSTOM, PIPE_USAGE_STAGING, 0);
1274   if (svga->tex_upload)
1275      u_upload_disable_persistent(svga->tex_upload);
1276
1277   return svga->tex_upload != NULL;
1278}
1279
1280
1281/**
1282 * Destroy the texture upload buffer
1283 */
void
svga_texture_transfer_map_upload_destroy(struct svga_context *svga)
{
   /* NOTE(review): no NULL check before u_upload_destroy — presumably this
    * is only called after a successful _upload_create(); confirm callers.
    */
   u_upload_destroy(svga->tex_upload);
}
1289
1290
1291/**
1292 * Returns true if this transfer map request can use the upload buffer.
1293 */
1294boolean
1295svga_texture_transfer_map_can_upload(const struct svga_screen *svgascreen,
1296                                     const struct pipe_resource *texture)
1297{
1298   if (svgascreen->sws->have_transfer_from_buffer_cmd == FALSE)
1299      return FALSE;
1300
1301   /* TransferFromBuffer command is not well supported with multi-samples surface */
1302   if (texture->nr_samples > 1)
1303      return FALSE;
1304
1305   if (util_format_is_compressed(texture->format)) {
1306      /* XXX Need to take a closer look to see why texture upload
1307       * with 3D texture with compressed format fails
1308       */
1309      if (texture->target == PIPE_TEXTURE_3D)
1310          return FALSE;
1311   }
1312   else if (texture->format == PIPE_FORMAT_R9G9B9E5_FLOAT) {
1313      return FALSE;
1314   }
1315
1316   return TRUE;
1317}
1318
1319
1320/**
1321 * Use upload buffer for the transfer map request.
1322 */
void *
svga_texture_transfer_map_upload(struct svga_context *svga,
                                 struct svga_transfer *st)
{
   struct pipe_resource *texture = st->base.resource;
   struct pipe_resource *tex_buffer = NULL;
   void *tex_map;
   unsigned nblocksx, nblocksy;
   unsigned offset;
   unsigned upload_size;

   assert(svga->tex_upload);

   /* Record the destination box for the later TransferFromBuffer command. */
   st->upload.box.x = st->base.box.x;
   st->upload.box.y = st->base.box.y;
   st->upload.box.z = st->base.box.z;
   st->upload.box.w = st->base.box.width;
   st->upload.box.h = st->base.box.height;
   st->upload.box.d = st->base.box.depth;
   st->upload.nlayers = 1;

   /* For array-like targets, gallium encodes the layer range in box.z/depth;
    * convert that to a per-layer box plus a layer count, since the device
    * addresses layers through the subresource index instead.
    */
   switch (texture->target) {
   case PIPE_TEXTURE_CUBE:
      st->upload.box.z = 0;
      break;
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_CUBE_ARRAY:
      st->upload.nlayers = st->base.box.depth;
      st->upload.box.z = 0;
      st->upload.box.d = 1;
      break;
   case PIPE_TEXTURE_1D_ARRAY:
      st->upload.nlayers = st->base.box.depth;
      st->upload.box.y = st->upload.box.z = 0;
      st->upload.box.d = 1;
      break;
   default:
      break;
   }

   /* Compute strides in block units (handles compressed formats). */
   nblocksx = util_format_get_nblocksx(texture->format, st->base.box.width);
   nblocksy = util_format_get_nblocksy(texture->format, st->base.box.height);

   st->base.stride = nblocksx * util_format_get_blocksize(texture->format);
   st->base.layer_stride = st->base.stride * nblocksy;

   /* In order to use the TransferFromBuffer command to update the
    * texture content from the buffer, the layer stride for a multi-layers
    * surface needs to be in multiples of 16 bytes.
    */
   if (st->upload.nlayers > 1 && st->base.layer_stride & 15)
      return NULL;

   upload_size = st->base.layer_stride * st->base.box.depth;
   upload_size = align(upload_size, 16);

#ifdef DEBUG
   if (util_format_is_compressed(texture->format)) {
      struct svga_texture *tex = svga_texture(texture);
      unsigned blockw, blockh, bytesPerBlock;

      svga_format_size(tex->key.format, &blockw, &blockh, &bytesPerBlock);

      /* dest box must start on block boundary */
      assert((st->base.box.x % blockw) == 0);
      assert((st->base.box.y % blockh) == 0);
   }
#endif

   /* If the upload size exceeds the default buffer size, the
    * upload buffer manager code will try to allocate a new buffer
    * with the new buffer size.
    */
   u_upload_alloc(svga->tex_upload, 0, upload_size, 16,
                  &offset, &tex_buffer, &tex_map);

   if (!tex_map) {
      /* Allocation failed; caller falls back to another transfer path. */
      return NULL;
   }

   /* Stash the staging buffer/mapping; consumed at unmap time by
    * svga_texture_transfer_unmap_upload().
    */
   st->upload.buf = tex_buffer;
   st->upload.map = tex_map;
   st->upload.offset = offset;

   return tex_map;
}
1409
1410
1411/**
1412 * Unmap upload map transfer request
1413 */
void
svga_texture_transfer_unmap_upload(struct svga_context *svga,
                                   struct svga_transfer *st)
{
   struct svga_winsys_surface *srcsurf;
   struct svga_winsys_surface *dstsurf;
   struct pipe_resource *texture = st->base.resource;
   struct svga_texture *tex = svga_texture(texture);
   unsigned subResource;
   unsigned numMipLevels;
   unsigned i, layer;
   unsigned offset = st->upload.offset;

   assert(svga->tex_upload);
   assert(st->upload.buf);

   /* unmap the texture upload buffer */
   u_upload_unmap(svga->tex_upload);

   /* Get the device handle for the staging buffer (source) and the
    * destination texture surface.
    */
   srcsurf = svga_buffer_handle(svga, st->upload.buf, 0);
   dstsurf = svga_texture(texture)->handle;
   assert(dstsurf);

   numMipLevels = texture->last_level + 1;

   /* Issue one TransferFromBuffer per layer; subresource index is
    * layer * numMipLevels + level, matching the device's layout.
    */
   for (i = 0, layer = st->slice; i < st->upload.nlayers; i++, layer++) {
      subResource = layer * numMipLevels + st->base.level;

      /* send a transferFromBuffer command to update the host texture surface */
      assert((offset & 15) == 0);

      SVGA_RETRY(svga, SVGA3D_vgpu10_TransferFromBuffer(svga->swc, srcsurf,
                                                        offset,
                                                        st->base.stride,
                                                        st->base.layer_stride,
                                                        dstsurf, subResource,
                                                        &st->upload.box));
      /* Each layer's data is layer_stride bytes apart in the staging buffer. */
      offset += st->base.layer_stride;

      /* Set rendered-to flag */
      svga_set_texture_rendered_to(tex, layer, st->base.level);
   }

   /* Release our reference to the staging buffer. */
   pipe_resource_reference(&st->upload.buf, NULL);
}
1459
1460/**
1461 * Does the device format backing this surface have an
1462 * alpha channel?
1463 *
1464 * \param texture[in]  The texture whose format we're querying
1465 * \return TRUE if the format has an alpha channel, FALSE otherwise
1466 *
1467 * For locally created textures, the device (svga) format is typically
1468 * identical to svga_format(texture->format), and we can use the gallium
1469 * format tests to determine whether the device format has an alpha channel
1470 * or not. However, for textures backed by imported svga surfaces that is
1471 * not always true, and we have to look at the SVGA3D utilities.
1472 */
1473boolean
1474svga_texture_device_format_has_alpha(struct pipe_resource *texture)
1475{
1476   /* the svga_texture() call below is invalid for PIPE_BUFFER resources */
1477   assert(texture->target != PIPE_BUFFER);
1478
1479   enum svga3d_block_desc block_desc =
1480      svga3dsurface_get_desc(svga_texture(texture)->key.format)->block_desc;
1481
1482   return !!(block_desc & SVGA3DBLOCKDESC_ALPHA);
1483}
1484