1/**********************************************************
2 * Copyright 2008-2009 VMware, Inc.  All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26#include "svga3d_reg.h"
27#include "svga3d_surfacedefs.h"
28
29#include "pipe/p_state.h"
30#include "pipe/p_defines.h"
31#include "os/os_thread.h"
32#include "util/u_format.h"
33#include "util/u_inlines.h"
34#include "util/u_math.h"
35#include "util/u_memory.h"
36#include "util/u_resource.h"
37#include "util/u_upload_mgr.h"
38
39#include "svga_cmd.h"
40#include "svga_format.h"
41#include "svga_screen.h"
42#include "svga_context.h"
43#include "svga_resource_texture.h"
44#include "svga_resource_buffer.h"
45#include "svga_sampler_view.h"
46#include "svga_winsys.h"
47#include "svga_debug.h"
48
49
50static void
51svga_transfer_dma_band(struct svga_context *svga,
52                       struct svga_transfer *st,
53                       SVGA3dTransferType transfer,
54                       unsigned x, unsigned y, unsigned z,
55                       unsigned w, unsigned h, unsigned d,
56                       unsigned srcx, unsigned srcy, unsigned srcz,
57                       SVGA3dSurfaceDMAFlags flags)
58{
59   struct svga_texture *texture = svga_texture(st->base.resource);
60   SVGA3dCopyBox box;
61   enum pipe_error ret;
62
63   assert(!st->use_direct_map);
64
65   box.x = x;
66   box.y = y;
67   box.z = z;
68   box.w = w;
69   box.h = h;
70   box.d = d;
71   box.srcx = srcx;
72   box.srcy = srcy;
73   box.srcz = srcz;
74
75   SVGA_DBG(DEBUG_DMA, "dma %s sid %p, face %u, (%u, %u, %u) - "
76            "(%u, %u, %u), %ubpp\n",
77            transfer == SVGA3D_WRITE_HOST_VRAM ? "to" : "from",
78            texture->handle,
79            st->slice,
80            x,
81            y,
82            z,
83            x + w,
84            y + h,
85            z + 1,
86            util_format_get_blocksize(texture->b.b.format) * 8 /
87            (util_format_get_blockwidth(texture->b.b.format)
88             * util_format_get_blockheight(texture->b.b.format)));
89
90   ret = SVGA3D_SurfaceDMA(svga->swc, st, transfer, &box, 1, flags);
91   if (ret != PIPE_OK) {
92      svga_context_flush(svga, NULL);
93      ret = SVGA3D_SurfaceDMA(svga->swc, st, transfer, &box, 1, flags);
94      assert(ret == PIPE_OK);
95   }
96}
97
98
/**
 * Perform the DMA transfer (upload or readback) described by the
 * transfer object 'st'.
 *
 * If no software staging buffer is needed (st->swbuf == NULL), the whole
 * region is transferred in one DMA.  Otherwise the region is transferred
 * in horizontal bands of st->hw_nblocksy block rows, staged through the
 * smaller hardware buffer, with st->swbuf holding the full-size data.
 *
 * Readbacks block: the context is flushed and the fence waited upon so
 * the data is available in the buffer on return.
 */
static void
svga_transfer_dma(struct svga_context *svga,
                  struct svga_transfer *st,
                  SVGA3dTransferType transfer,
                  SVGA3dSurfaceDMAFlags flags)
{
   struct svga_texture *texture = svga_texture(st->base.resource);
   struct svga_screen *screen = svga_screen(texture->b.b.screen);
   struct svga_winsys_screen *sws = screen->sws;
   struct pipe_fence_handle *fence = NULL;

   assert(!st->use_direct_map);

   if (transfer == SVGA3D_READ_HOST_VRAM) {
      SVGA_DBG(DEBUG_PERF, "%s: readback transfer\n", __FUNCTION__);
   }

   /* Ensure any pending operations on host surfaces are queued on the command
    * buffer first.
    */
   svga_surfaces_flush(svga);

   if (!st->swbuf) {
      /* Do the DMA transfer in a single go */
      svga_transfer_dma_band(svga, st, transfer,
                             st->box.x, st->box.y, st->box.z,
                             st->box.w, st->box.h, st->box.d,
                             0, 0, 0,
                             flags);

      if (transfer == SVGA3D_READ_HOST_VRAM) {
         /* Block until the readback data has landed in the buffer */
         svga_context_flush(svga, &fence);
         sws->fence_finish(sws, fence, PIPE_TIMEOUT_INFINITE, 0);
         sws->fence_reference(sws, &fence, NULL);
      }
   }
   else {
      /* Banded transfer through the (smaller) hardware buffer */
      int y, h, srcy;
      unsigned blockheight =
         util_format_get_blockheight(st->base.resource->format);

      /* Band height in pixels (hw_nblocksy counts block rows) */
      h = st->hw_nblocksy * blockheight;
      srcy = 0;

      for (y = 0; y < st->box.h; y += h) {
         unsigned offset, length;
         void *hw, *sw;

         /* Clamp the last band to the remaining height */
         if (y + h > st->box.h)
            h = st->box.h - y;

         /* Transfer band must be aligned to pixel block boundaries */
         assert(y % blockheight == 0);
         assert(h % blockheight == 0);

         /* Byte offset/length of this band within the staging buffer */
         offset = y * st->base.stride / blockheight;
         length = h * st->base.stride / blockheight;

         sw = (uint8_t *) st->swbuf + offset;

         if (transfer == SVGA3D_WRITE_HOST_VRAM) {
            unsigned usage = PIPE_TRANSFER_WRITE;

            /* Wait for the previous DMAs to complete */
            /* TODO: keep one DMA (at half the size) in the background */
            if (y) {
               svga_context_flush(svga, NULL);
               usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
            }

            /* Copy this band's data into the hardware buffer */
            hw = sws->buffer_map(sws, st->hwbuf, usage);
            assert(hw);
            if (hw) {
               memcpy(hw, sw, length);
               sws->buffer_unmap(sws, st->hwbuf);
            }
         }

         svga_transfer_dma_band(svga, st, transfer,
                                st->box.x, y, st->box.z,
                                st->box.w, h, st->box.d,
                                0, srcy, 0, flags);

         /*
          * Prevent the texture contents to be discarded on the next band
          * upload.
          */
         flags.discard = FALSE;

         if (transfer == SVGA3D_READ_HOST_VRAM) {
            /* Wait for the band's readback, then copy it out to swbuf */
            svga_context_flush(svga, &fence);
            sws->fence_finish(sws, fence, PIPE_TIMEOUT_INFINITE, 0);

            hw = sws->buffer_map(sws, st->hwbuf, PIPE_TRANSFER_READ);
            assert(hw);
            if (hw) {
               memcpy(sw, hw, length);
               sws->buffer_unmap(sws, st->hwbuf);
            }
         }
      }
   }
}
202
203
204
205static boolean
206svga_texture_get_handle(struct pipe_screen *screen,
207                        struct pipe_resource *texture,
208                        struct winsys_handle *whandle)
209{
210   struct svga_winsys_screen *sws = svga_winsys_screen(texture->screen);
211   unsigned stride;
212
213   assert(svga_texture(texture)->key.cachable == 0);
214   svga_texture(texture)->key.cachable = 0;
215
216   stride = util_format_get_nblocksx(texture->format, texture->width0) *
217            util_format_get_blocksize(texture->format);
218
219   return sws->surface_get_handle(sws, svga_texture(texture)->handle,
220                                  stride, whandle);
221}
222
223
/**
 * Destroy an SVGA texture: release the cached sampler view, hand the
 * host surface(s) back to the screen (which may cache them), free the
 * per-slice tracking arrays, and update the HUD resource accounting.
 */
static void
svga_texture_destroy(struct pipe_screen *screen,
                     struct pipe_resource *pt)
{
   struct svga_screen *ss = svga_screen(screen);
   struct svga_texture *tex = svga_texture(pt);

   /* Invalidate any sampler views still referencing this texture */
   ss->texture_timestamp++;

   svga_sampler_view_reference(&tex->cached_view, NULL);

   /*
     DBG("%s deleting %p\n", __FUNCTION__, (void *) tex);
   */
   SVGA_DBG(DEBUG_DMA, "unref sid %p (texture)\n", tex->handle);
   svga_screen_surface_destroy(ss, &tex->key, &tex->handle);

   /* Destroy the backed surface handle if exists */
   if (tex->backed_handle)
      svga_screen_surface_destroy(ss, &tex->backed_key, &tex->backed_handle);

   ss->hud.total_resource_bytes -= tex->size;

   /* Per-slice/level state arrays allocated in svga_texture_create() */
   FREE(tex->defined);
   FREE(tex->rendered_to);
   FREE(tex->dirty);
   FREE(tex);

   assert(ss->hud.num_resources > 0);
   if (ss->hud.num_resources > 0)
      ss->hud.num_resources--;
}
256
257
258/**
259 * Determine if we need to read back a texture image before mapping it.
260 */
261static inline boolean
262need_tex_readback(struct svga_transfer *st)
263{
264   if (st->base.usage & PIPE_TRANSFER_READ)
265      return TRUE;
266
267   if ((st->base.usage & PIPE_TRANSFER_WRITE) &&
268       ((st->base.usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) == 0)) {
269      return svga_was_texture_rendered_to(svga_texture(st->base.resource),
270                                          st->slice, st->base.level);
271   }
272
273   return FALSE;
274}
275
276
277static enum pipe_error
278readback_image_vgpu9(struct svga_context *svga,
279                   struct svga_winsys_surface *surf,
280                   unsigned slice,
281                   unsigned level)
282{
283   enum pipe_error ret;
284
285   ret = SVGA3D_ReadbackGBImage(svga->swc, surf, slice, level);
286   if (ret != PIPE_OK) {
287      svga_context_flush(svga, NULL);
288      ret = SVGA3D_ReadbackGBImage(svga->swc, surf, slice, level);
289   }
290   return ret;
291}
292
293
294static enum pipe_error
295readback_image_vgpu10(struct svga_context *svga,
296                    struct svga_winsys_surface *surf,
297                    unsigned slice,
298                    unsigned level,
299                    unsigned numMipLevels)
300{
301   enum pipe_error ret;
302   unsigned subResource;
303
304   subResource = slice * numMipLevels + level;
305   ret = SVGA3D_vgpu10_ReadbackSubResource(svga->swc, surf, subResource);
306   if (ret != PIPE_OK) {
307      svga_context_flush(svga, NULL);
308      ret = SVGA3D_vgpu10_ReadbackSubResource(svga->swc, surf, subResource);
309   }
310   return ret;
311}
312
313
314/**
315 * Use DMA for the transfer request
316 */
/**
 * Use DMA for the transfer request.
 *
 * Allocates a tightly packed hardware buffer for the mapped box.  If a
 * full-size buffer cannot be allocated, the buffer is repeatedly halved
 * (hw_nblocksy /= 2) and a malloc'ed staging buffer (st->swbuf) is used
 * for the full data, to be DMA'd in bands by svga_transfer_dma().
 *
 * Returns the pointer the caller should write/read, or NULL on failure.
 * Ownership: st->hwbuf (and st->swbuf, if any) are released in
 * svga_texture_transfer_unmap_dma().
 */
static void *
svga_texture_transfer_map_dma(struct svga_context *svga,
                              struct svga_transfer *st)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   struct pipe_resource *texture = st->base.resource;
   unsigned nblocksx, nblocksy;
   unsigned d;
   unsigned usage = st->base.usage;

   /* we'll put the data into a tightly packed buffer */
   nblocksx = util_format_get_nblocksx(texture->format, st->box.w);
   nblocksy = util_format_get_nblocksy(texture->format, st->box.h);
   d = st->box.d;

   st->base.stride = nblocksx*util_format_get_blocksize(texture->format);
   st->base.layer_stride = st->base.stride * nblocksy;
   st->hw_nblocksy = nblocksy;

   st->hwbuf = svga_winsys_buffer_create(svga, 1, 0,
                                         st->hw_nblocksy * st->base.stride * d);

   /* If allocation failed, keep halving the band height until a buffer
    * of that size can be created (or we run out of rows).
    */
   while (!st->hwbuf && (st->hw_nblocksy /= 2)) {
      st->hwbuf =
         svga_winsys_buffer_create(svga, 1, 0,
                                   st->hw_nblocksy * st->base.stride * d);
   }

   if (!st->hwbuf)
      return NULL;

   if (st->hw_nblocksy < nblocksy) {
      /* We couldn't allocate a hardware buffer big enough for the transfer,
       * so allocate regular malloc memory instead
       */
      if (0) {
         debug_printf("%s: failed to allocate %u KB of DMA, "
                      "splitting into %u x %u KB DMA transfers\n",
                      __FUNCTION__,
                      (nblocksy * st->base.stride + 1023) / 1024,
                      (nblocksy + st->hw_nblocksy - 1) / st->hw_nblocksy,
                      (st->hw_nblocksy * st->base.stride + 1023) / 1024);
      }

      st->swbuf = MALLOC(nblocksy * st->base.stride * d);
      if (!st->swbuf) {
         sws->buffer_destroy(sws, st->hwbuf);
         return NULL;
      }
   }

   /* For reads, pull the current texture contents down now (blocks) */
   if (usage & PIPE_TRANSFER_READ) {
      SVGA3dSurfaceDMAFlags flags;
      memset(&flags, 0, sizeof flags);
      svga_transfer_dma(svga, st, SVGA3D_READ_HOST_VRAM, flags);
   }

   if (st->swbuf) {
      return st->swbuf;
   }
   else {
      return sws->buffer_map(sws, st->hwbuf, usage);
   }
}
381
382
383/**
384 * Use direct map for the transfer request
385 */
386static void *
387svga_texture_transfer_map_direct(struct svga_context *svga,
388                                 struct svga_transfer *st)
389{
390   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
391   struct pipe_transfer *transfer = &st->base;
392   struct pipe_resource *texture = transfer->resource;
393   struct svga_texture *tex = svga_texture(texture);
394   struct svga_winsys_surface *surf = tex->handle;
395   unsigned level = st->base.level;
396   unsigned w, h, nblocksx, nblocksy, i;
397   unsigned usage = st->base.usage;
398
399   if (need_tex_readback(st)) {
400      enum pipe_error ret;
401
402      svga_surfaces_flush(svga);
403
404      for (i = 0; i < st->box.d; i++) {
405         if (svga_have_vgpu10(svga)) {
406            ret = readback_image_vgpu10(svga, surf, st->slice + i, level,
407                                        tex->b.b.last_level + 1);
408         } else {
409            ret = readback_image_vgpu9(svga, surf, st->slice + i, level);
410         }
411      }
412      svga->hud.num_readbacks++;
413      SVGA_STATS_COUNT_INC(sws, SVGA_STATS_COUNT_TEXREADBACK);
414
415      assert(ret == PIPE_OK);
416      (void) ret;
417
418      svga_context_flush(svga, NULL);
419      /*
420       * Note: if PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE were specified
421       * we could potentially clear the flag for all faces/layers/mips.
422       */
423      svga_clear_texture_rendered_to(tex, st->slice, level);
424   }
425   else {
426      assert(usage & PIPE_TRANSFER_WRITE);
427      if ((usage & PIPE_TRANSFER_UNSYNCHRONIZED) == 0) {
428         if (svga_is_texture_dirty(tex, st->slice, level)) {
429            /*
430             * do a surface flush if the subresource has been modified
431             * in this command buffer.
432             */
433            svga_surfaces_flush(svga);
434            if (!sws->surface_is_flushed(sws, surf)) {
435               svga->hud.surface_write_flushes++;
436               SVGA_STATS_COUNT_INC(sws, SVGA_STATS_COUNT_SURFACEWRITEFLUSH);
437               svga_context_flush(svga, NULL);
438            }
439         }
440      }
441   }
442
443   /* we'll directly access the guest-backed surface */
444   w = u_minify(texture->width0, level);
445   h = u_minify(texture->height0, level);
446   nblocksx = util_format_get_nblocksx(texture->format, w);
447   nblocksy = util_format_get_nblocksy(texture->format, h);
448   st->hw_nblocksy = nblocksy;
449   st->base.stride = nblocksx*util_format_get_blocksize(texture->format);
450   st->base.layer_stride = st->base.stride * nblocksy;
451
452   /*
453    * Begin mapping code
454    */
455   {
456      SVGA3dSize baseLevelSize;
457      uint8_t *map;
458      boolean retry;
459      unsigned offset, mip_width, mip_height;
460
461      map = svga->swc->surface_map(svga->swc, surf, usage, &retry);
462      if (map == NULL && retry) {
463         /*
464          * At this point, the svga_surfaces_flush() should already have
465          * called in svga_texture_get_transfer().
466          */
467         svga->hud.surface_write_flushes++;
468         svga_context_flush(svga, NULL);
469         map = svga->swc->surface_map(svga->swc, surf, usage, &retry);
470      }
471
472      /*
473       * Make sure we return NULL if the map fails
474       */
475      if (!map) {
476         return NULL;
477      }
478
479      /**
480       * Compute the offset to the specific texture slice in the buffer.
481       */
482      baseLevelSize.width = tex->b.b.width0;
483      baseLevelSize.height = tex->b.b.height0;
484      baseLevelSize.depth = tex->b.b.depth0;
485
486      if ((tex->b.b.target == PIPE_TEXTURE_1D_ARRAY) ||
487          (tex->b.b.target == PIPE_TEXTURE_2D_ARRAY) ||
488          (tex->b.b.target == PIPE_TEXTURE_CUBE_ARRAY)) {
489         st->base.layer_stride =
490            svga3dsurface_get_image_offset(tex->key.format, baseLevelSize,
491                                           tex->b.b.last_level + 1, 1, 0);
492      }
493
494      offset = svga3dsurface_get_image_offset(tex->key.format, baseLevelSize,
495                                              tex->b.b.last_level + 1, /* numMips */
496                                              st->slice, level);
497      if (level > 0) {
498         assert(offset > 0);
499      }
500
501      mip_width = u_minify(tex->b.b.width0, level);
502      mip_height = u_minify(tex->b.b.height0, level);
503
504      offset += svga3dsurface_get_pixel_offset(tex->key.format,
505                                               mip_width, mip_height,
506                                               st->box.x,
507                                               st->box.y,
508                                               st->box.z);
509
510      return (void *) (map + offset);
511   }
512}
513
514
515/**
516 * Request a transfer map to the texture resource
517 */
/**
 * Request a transfer map to the texture resource.
 *
 * Chooses between three mapping strategies:
 *  - DMA through a hardware buffer (non-GB objects, or GB with DMA),
 *  - direct map of the guest-backed surface,
 *  - staging through the texture upload buffer (write-only GB path).
 * On success *ptransfer receives the new transfer and the mapped
 * pointer is returned; on failure NULL is returned.
 */
static void *
svga_texture_transfer_map(struct pipe_context *pipe,
                          struct pipe_resource *texture,
                          unsigned level,
                          unsigned usage,
                          const struct pipe_box *box,
                          struct pipe_transfer **ptransfer)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_winsys_screen *sws = svga_screen(pipe->screen)->sws;
   struct svga_texture *tex = svga_texture(texture);
   struct svga_transfer *st;
   struct svga_winsys_surface *surf = tex->handle;
   boolean use_direct_map = svga_have_gb_objects(svga) &&
                            !svga_have_gb_dma(svga);
   void *map = NULL;
   int64_t begin = svga_get_time(svga);

   SVGA_STATS_TIME_PUSH(sws, SVGA_STATS_TIME_TEXTRANSFERMAP);

   if (!surf)
      goto done;

   /* We can't map texture storage directly unless we have GB objects */
   if (usage & PIPE_TRANSFER_MAP_DIRECTLY) {
      if (svga_have_gb_objects(svga))
         use_direct_map = TRUE;
      else
         goto done;
   }

   st = CALLOC_STRUCT(svga_transfer);
   if (!st)
      goto done;

   st->base.level = level;
   st->base.usage = usage;
   st->base.box = *box;

   /* The modified transfer map box with the array index removed from z.
    * The array index is specified in slice.
    */
   st->box.x = box->x;
   st->box.y = box->y;
   st->box.z = box->z;
   st->box.w = box->width;
   st->box.h = box->height;
   st->box.d = box->depth;

   switch (tex->b.b.target) {
   case PIPE_TEXTURE_CUBE:
      st->slice = st->base.box.z;
      st->box.z = 0;   /* so we don't apply double offsets below */
      break;
   case PIPE_TEXTURE_1D_ARRAY:
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_CUBE_ARRAY:
      st->slice = st->base.box.z;
      st->box.z = 0;   /* so we don't apply double offsets below */

      /* Force direct map for transfering multiple slices */
      if (st->base.box.depth > 1)
         use_direct_map = svga_have_gb_objects(svga);

      break;
   default:
      st->slice = 0;
      break;
   }

   /* Force direct map for multisample surface */
   if (texture->nr_samples > 1) {
      assert(svga_have_gb_objects(svga));
      assert(sws->have_sm4_1);
      use_direct_map = TRUE;
   }

   st->use_direct_map = use_direct_map;
   pipe_resource_reference(&st->base.resource, texture);

   /* If this is the first time mapping to the surface in this
    * command buffer, clear the dirty masks of this surface.
    */
   if (sws->surface_is_flushed(sws, surf)) {
      svga_clear_texture_dirty(tex);
   }

   if (!use_direct_map) {
      /* upload to the DMA buffer */
      map = svga_texture_transfer_map_dma(svga, st);
   }
   else {
      /* Upload buffer only helps for writes (no readback needed) */
      boolean can_use_upload = tex->can_use_upload &&
                               !(st->base.usage & PIPE_TRANSFER_READ);
      boolean was_rendered_to =
         svga_was_texture_rendered_to(svga_texture(texture),
                                      st->slice, st->base.level);

      /* If the texture was already rendered to and upload buffer
       * is supported, then we will use upload buffer to
       * avoid the need to read back the texture content; otherwise,
       * we'll first try to map directly to the GB surface, if it is blocked,
       * then we'll try the upload buffer.
       */
      if (was_rendered_to && can_use_upload) {
         map = svga_texture_transfer_map_upload(svga, st);
      }
      else {
         unsigned orig_usage = st->base.usage;

         /* First try directly map to the GB surface */
         if (can_use_upload)
            st->base.usage |= PIPE_TRANSFER_DONTBLOCK;
         map = svga_texture_transfer_map_direct(svga, st);
         st->base.usage = orig_usage;

         if (!map && can_use_upload) {
            /* if direct map with DONTBLOCK fails, then try upload to the
             * texture upload buffer.
             */
            map = svga_texture_transfer_map_upload(svga, st);
         }
      }

      /* If upload fails, then try direct map again without forcing it
       * to DONTBLOCK.
       */
      if (!map) {
         map = svga_texture_transfer_map_direct(svga, st);
      }
   }

   if (!map) {
      FREE(st);
   }
   else {
      *ptransfer = &st->base;
      svga->hud.num_textures_mapped++;
      if (usage & PIPE_TRANSFER_WRITE) {
         /* record texture upload for HUD */
         svga->hud.num_bytes_uploaded +=
            st->base.layer_stride * st->box.d;

         /* mark this texture level as dirty */
         svga_set_texture_dirty(tex, st->slice, level);
      }
   }

done:
   svga->hud.map_buffer_time += (svga_get_time(svga) - begin);
   SVGA_STATS_TIME_POP(sws);
   (void) sws;

   return map;
}
673
674/**
675 * Unmap a GB texture surface.
676 */
677static void
678svga_texture_surface_unmap(struct svga_context *svga,
679                           struct pipe_transfer *transfer)
680{
681   struct svga_winsys_surface *surf = svga_texture(transfer->resource)->handle;
682   struct svga_winsys_context *swc = svga->swc;
683   boolean rebind;
684
685   assert(surf);
686
687   swc->surface_unmap(swc, surf, &rebind);
688   if (rebind) {
689      enum pipe_error ret;
690      ret = SVGA3D_BindGBSurface(swc, surf);
691      if (ret != PIPE_OK) {
692         /* flush and retry */
693         svga_context_flush(svga, NULL);
694         ret = SVGA3D_BindGBSurface(swc, surf);
695         assert(ret == PIPE_OK);
696      }
697   }
698}
699
700
701static enum pipe_error
702update_image_vgpu9(struct svga_context *svga,
703                   struct svga_winsys_surface *surf,
704                   const SVGA3dBox *box,
705                   unsigned slice,
706                   unsigned level)
707{
708   enum pipe_error ret;
709
710   ret = SVGA3D_UpdateGBImage(svga->swc, surf, box, slice, level);
711   if (ret != PIPE_OK) {
712      svga_context_flush(svga, NULL);
713      ret = SVGA3D_UpdateGBImage(svga->swc, surf, box, slice, level);
714   }
715   return ret;
716}
717
718
719static enum pipe_error
720update_image_vgpu10(struct svga_context *svga,
721                    struct svga_winsys_surface *surf,
722                    const SVGA3dBox *box,
723                    unsigned slice,
724                    unsigned level,
725                    unsigned numMipLevels)
726{
727   enum pipe_error ret;
728   unsigned subResource;
729
730   subResource = slice * numMipLevels + level;
731
732   ret = SVGA3D_vgpu10_UpdateSubResource(svga->swc, surf, box, subResource);
733   if (ret != PIPE_OK) {
734      svga_context_flush(svga, NULL);
735      ret = SVGA3D_vgpu10_UpdateSubResource(svga->swc, surf, box, subResource);
736   }
737   return ret;
738}
739
740
741/**
742 * unmap DMA transfer request
743 */
/**
 * Unmap a DMA transfer request: unmap the hardware buffer, and for
 * write mappings issue the DMA upload to host VRAM, honoring the
 * discard/unsynchronized usage flags.  Frees the staging buffers
 * allocated by svga_texture_transfer_map_dma().
 */
static void
svga_texture_transfer_unmap_dma(struct svga_context *svga,
                                struct svga_transfer *st)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;

   /* In the swbuf (banded) path the hwbuf is not mapped at this point */
   if (st->hwbuf)
      sws->buffer_unmap(sws, st->hwbuf);

   if (st->base.usage & PIPE_TRANSFER_WRITE) {
      /* Use DMA to transfer texture data */
      SVGA3dSurfaceDMAFlags flags;

      memset(&flags, 0, sizeof flags);
      if (st->base.usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
         flags.discard = TRUE;
      }
      if (st->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
         flags.unsynchronized = TRUE;
      }

      svga_transfer_dma(svga, st, SVGA3D_WRITE_HOST_VRAM, flags);
   }

   /* FREE(NULL) is a no-op, so swbuf may legitimately be NULL here */
   FREE(st->swbuf);
   sws->buffer_destroy(sws, st->hwbuf);
}
771
772
773/**
774 * unmap direct map transfer request
775 */
/**
 * Unmap a direct map transfer request: unmap the GB surface and, for
 * write mappings, send update commands so the host sees the new
 * contents.  Array textures are updated one layer at a time (VGPU10
 * only; VGPU9 has no array support).
 */
static void
svga_texture_transfer_unmap_direct(struct svga_context *svga,
                                   struct svga_transfer *st)
{
   struct pipe_transfer *transfer = &st->base;
   struct svga_texture *tex = svga_texture(transfer->resource);

   svga_texture_surface_unmap(svga, transfer);

   /* Now send an update command to update the content in the backend. */
   if (st->base.usage & PIPE_TRANSFER_WRITE) {
      struct svga_winsys_surface *surf = tex->handle;
      enum pipe_error ret;

      assert(svga_have_gb_objects(svga));

      /* update the effected region */
      SVGA3dBox box = st->box;
      unsigned nlayers;

      switch (tex->b.b.target) {
      case PIPE_TEXTURE_2D_ARRAY:
      case PIPE_TEXTURE_CUBE_ARRAY:
      case PIPE_TEXTURE_1D_ARRAY:
         /* box.d holds the layer count; each layer updated separately */
         nlayers = box.d;
         box.d = 1;
         break;
      default:
         nlayers = 1;
         break;
      }


      if (0)
         debug_printf("%s %d, %d, %d  %d x %d x %d\n",
                      __FUNCTION__,
                      box.x, box.y, box.z,
                      box.w, box.h, box.d);

      if (svga_have_vgpu10(svga)) {
         unsigned i;

         for (i = 0; i < nlayers; i++) {
            ret = update_image_vgpu10(svga, surf, &box,
                                      st->slice + i, transfer->level,
                                      tex->b.b.last_level + 1);
            assert(ret == PIPE_OK);
         }
      } else {
         assert(nlayers == 1);
         ret = update_image_vgpu9(svga, surf, &box, st->slice, transfer->level);
         assert(ret == PIPE_OK);
      }
      (void) ret;
   }
}
832
833
/**
 * Gallium transfer_unmap entry point: dispatch to the DMA, upload, or
 * direct-map unmap path matching how the transfer was mapped, then
 * update dirty/timestamp bookkeeping for write mappings and release
 * the transfer object.
 */
static void
svga_texture_transfer_unmap(struct pipe_context *pipe,
                            struct pipe_transfer *transfer)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_screen *ss = svga_screen(pipe->screen);
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_transfer *st = svga_transfer(transfer);
   struct svga_texture *tex = svga_texture(transfer->resource);

   SVGA_STATS_TIME_PUSH(sws, SVGA_STATS_TIME_TEXTRANSFERUNMAP);

   if (!st->use_direct_map) {
      svga_texture_transfer_unmap_dma(svga, st);
   }
   else if (st->upload.buf) {
      svga_texture_transfer_unmap_upload(svga, st);
   }
   else {
      svga_texture_transfer_unmap_direct(svga, st);
   }

   if (st->base.usage & PIPE_TRANSFER_WRITE) {
      svga->hud.num_resource_updates++;

      /* Mark the texture level as dirty */
      ss->texture_timestamp++;
      svga_age_texture_view(tex, transfer->level);
      /* Cube maps track definedness per face (slice); others use slice 0 */
      if (transfer->resource->target == PIPE_TEXTURE_CUBE)
         svga_define_texture_level(tex, st->slice, transfer->level);
      else
         svga_define_texture_level(tex, 0, transfer->level);
   }

   pipe_resource_reference(&st->base.resource, NULL);
   FREE(st);
   SVGA_STATS_TIME_POP(sws);
   (void) sws;
}
873
874
875/**
876 * Does format store depth values?
877 */
878static inline boolean
879format_has_depth(enum pipe_format format)
880{
881   const struct util_format_description *desc = util_format_description(format);
882   return util_format_has_depth(desc);
883}
884
885
/** u_resource_vtbl entry points for SVGA textures (see u_resource.h) */
struct u_resource_vtbl svga_texture_vtbl =
{
   svga_texture_get_handle,	      /* get_handle */
   svga_texture_destroy,	      /* resource_destroy */
   svga_texture_transfer_map,	      /* transfer_map */
   u_default_transfer_flush_region,   /* transfer_flush_region */
   svga_texture_transfer_unmap,	      /* transfer_unmap */
};
894
895
896struct pipe_resource *
897svga_texture_create(struct pipe_screen *screen,
898                    const struct pipe_resource *template)
899{
900   struct svga_screen *svgascreen = svga_screen(screen);
901   struct svga_texture *tex;
902   unsigned bindings = template->bind;
903
904   SVGA_STATS_TIME_PUSH(svgascreen->sws,
905                        SVGA_STATS_TIME_CREATETEXTURE);
906
907   assert(template->last_level < SVGA_MAX_TEXTURE_LEVELS);
908   if (template->last_level >= SVGA_MAX_TEXTURE_LEVELS) {
909      goto fail_notex;
910   }
911
912   /* Verify the number of mipmap levels isn't impossibly large.  For example,
913    * if the base 2D image is 16x16, we can't have 8 mipmap levels.
914    * The state tracker should never ask us to create a resource with invalid
915    * parameters.
916    */
917   {
918      unsigned max_dim = template->width0;
919
920      switch (template->target) {
921      case PIPE_TEXTURE_1D:
922      case PIPE_TEXTURE_1D_ARRAY:
923         // nothing
924         break;
925      case PIPE_TEXTURE_2D:
926      case PIPE_TEXTURE_CUBE:
927      case PIPE_TEXTURE_CUBE_ARRAY:
928      case PIPE_TEXTURE_2D_ARRAY:
929         max_dim = MAX2(max_dim, template->height0);
930         break;
931      case PIPE_TEXTURE_3D:
932         max_dim = MAX3(max_dim, template->height0, template->depth0);
933         break;
934      case PIPE_TEXTURE_RECT:
935      case PIPE_BUFFER:
936         assert(template->last_level == 0);
937         /* the assertion below should always pass */
938         break;
939      default:
940         debug_printf("Unexpected texture target type\n");
941      }
942      assert(1 << template->last_level <= max_dim);
943   }
944
945   tex = CALLOC_STRUCT(svga_texture);
946   if (!tex) {
947      goto fail_notex;
948   }
949
950   tex->defined = CALLOC(template->depth0 * template->array_size,
951                         sizeof(tex->defined[0]));
952   if (!tex->defined) {
953      FREE(tex);
954      goto fail_notex;
955   }
956
957   tex->rendered_to = CALLOC(template->depth0 * template->array_size,
958                             sizeof(tex->rendered_to[0]));
959   if (!tex->rendered_to) {
960      goto fail;
961   }
962
963   tex->dirty = CALLOC(template->depth0 * template->array_size,
964                             sizeof(tex->dirty[0]));
965   if (!tex->dirty) {
966      goto fail;
967   }
968
969   tex->b.b = *template;
970   tex->b.vtbl = &svga_texture_vtbl;
971   pipe_reference_init(&tex->b.b.reference, 1);
972   tex->b.b.screen = screen;
973
974   tex->key.flags = 0;
975   tex->key.size.width = template->width0;
976   tex->key.size.height = template->height0;
977   tex->key.size.depth = template->depth0;
978   tex->key.arraySize = 1;
979   tex->key.numFaces = 1;
980
981   /* nr_samples=1 must be treated as a non-multisample texture */
982   if (tex->b.b.nr_samples == 1) {
983      tex->b.b.nr_samples = 0;
984   }
985   else if (tex->b.b.nr_samples > 1) {
986      assert(svgascreen->sws->have_sm4_1);
987      tex->key.flags |= SVGA3D_SURFACE_MULTISAMPLE;
988   }
989
990   tex->key.sampleCount = tex->b.b.nr_samples;
991
992   if (svgascreen->sws->have_vgpu10) {
993      switch (template->target) {
994      case PIPE_TEXTURE_1D:
995         tex->key.flags |= SVGA3D_SURFACE_1D;
996         break;
997      case PIPE_TEXTURE_1D_ARRAY:
998         tex->key.flags |= SVGA3D_SURFACE_1D;
999         /* fall-through */
1000      case PIPE_TEXTURE_2D_ARRAY:
1001         tex->key.flags |= SVGA3D_SURFACE_ARRAY;
1002         tex->key.arraySize = template->array_size;
1003         break;
1004      case PIPE_TEXTURE_3D:
1005         tex->key.flags |= SVGA3D_SURFACE_VOLUME;
1006         break;
1007      case PIPE_TEXTURE_CUBE:
1008         tex->key.flags |= (SVGA3D_SURFACE_CUBEMAP | SVGA3D_SURFACE_ARRAY);
1009         tex->key.numFaces = 6;
1010         break;
1011      case PIPE_TEXTURE_CUBE_ARRAY:
1012         assert(svgascreen->sws->have_sm4_1);
1013         tex->key.flags |= (SVGA3D_SURFACE_CUBEMAP | SVGA3D_SURFACE_ARRAY);
1014         tex->key.numFaces = 1;  // arraySize already includes the 6 faces
1015         tex->key.arraySize = template->array_size;
1016         break;
1017      default:
1018         break;
1019      }
1020   }
1021   else {
1022      switch (template->target) {
1023      case PIPE_TEXTURE_3D:
1024         tex->key.flags |= SVGA3D_SURFACE_VOLUME;
1025         break;
1026      case PIPE_TEXTURE_CUBE:
1027         tex->key.flags |= SVGA3D_SURFACE_CUBEMAP;
1028         tex->key.numFaces = 6;
1029         break;
1030      default:
1031         break;
1032      }
1033   }
1034
1035   tex->key.cachable = 1;
1036
1037   if ((bindings & (PIPE_BIND_RENDER_TARGET | PIPE_BIND_DEPTH_STENCIL)) &&
1038       !(bindings & PIPE_BIND_SAMPLER_VIEW)) {
1039      /* Also check if the format can be sampled from */
1040      if (screen->is_format_supported(screen, template->format,
1041                                      template->target,
1042                                      template->nr_samples,
1043                                      template->nr_storage_samples,
1044                                      PIPE_BIND_SAMPLER_VIEW)) {
1045         bindings |= PIPE_BIND_SAMPLER_VIEW;
1046      }
1047   }
1048
1049   if (bindings & PIPE_BIND_SAMPLER_VIEW) {
1050      tex->key.flags |= SVGA3D_SURFACE_HINT_TEXTURE;
1051      tex->key.flags |= SVGA3D_SURFACE_BIND_SHADER_RESOURCE;
1052
1053      if (!(bindings & PIPE_BIND_RENDER_TARGET)) {
1054         /* Also check if the format is color renderable */
1055         if (screen->is_format_supported(screen, template->format,
1056                                         template->target,
1057                                         template->nr_samples,
1058                                         template->nr_storage_samples,
1059                                         PIPE_BIND_RENDER_TARGET)) {
1060            bindings |= PIPE_BIND_RENDER_TARGET;
1061         }
1062      }
1063
1064      if (!(bindings & PIPE_BIND_DEPTH_STENCIL)) {
1065         /* Also check if the format is depth/stencil renderable */
1066         if (screen->is_format_supported(screen, template->format,
1067                                         template->target,
1068                                         template->nr_samples,
1069                                         template->nr_storage_samples,
1070                                         PIPE_BIND_DEPTH_STENCIL)) {
1071            bindings |= PIPE_BIND_DEPTH_STENCIL;
1072         }
1073      }
1074   }
1075
1076   if (bindings & PIPE_BIND_DISPLAY_TARGET) {
1077      tex->key.cachable = 0;
1078   }
1079
1080   if (bindings & PIPE_BIND_SHARED) {
1081      tex->key.cachable = 0;
1082   }
1083
1084   if (bindings & (PIPE_BIND_SCANOUT | PIPE_BIND_CURSOR)) {
1085      tex->key.scanout = 1;
1086      tex->key.cachable = 0;
1087   }
1088
1089   /*
1090    * Note: Previously we never passed the
1091    * SVGA3D_SURFACE_HINT_RENDERTARGET hint. Mesa cannot
1092    * know beforehand whether a texture will be used as a rendertarget or not
1093    * and it always requests PIPE_BIND_RENDER_TARGET, therefore
1094    * passing the SVGA3D_SURFACE_HINT_RENDERTARGET here defeats its purpose.
1095    *
    * However, this was changed since other state trackers
    * (XA for example) use it accurately and certain device versions
    * rely on it in certain situations to render correctly.
1099    */
1100   if ((bindings & PIPE_BIND_RENDER_TARGET) &&
1101       !util_format_is_s3tc(template->format)) {
1102      tex->key.flags |= SVGA3D_SURFACE_HINT_RENDERTARGET;
1103      tex->key.flags |= SVGA3D_SURFACE_BIND_RENDER_TARGET;
1104   }
1105
1106   if (bindings & PIPE_BIND_DEPTH_STENCIL) {
1107      tex->key.flags |= SVGA3D_SURFACE_HINT_DEPTHSTENCIL;
1108      tex->key.flags |= SVGA3D_SURFACE_BIND_DEPTH_STENCIL;
1109   }
1110
1111   tex->key.numMipLevels = template->last_level + 1;
1112
1113   tex->key.format = svga_translate_format(svgascreen, template->format,
1114                                           bindings);
1115   if (tex->key.format == SVGA3D_FORMAT_INVALID) {
1116      goto fail;
1117   }
1118
1119   /* Use typeless formats for sRGB and depth resources.  Typeless
1120    * formats can be reinterpreted as other formats.  For example,
1121    * SVGA3D_R8G8B8A8_UNORM_TYPELESS can be interpreted as
1122    * SVGA3D_R8G8B8A8_UNORM_SRGB or SVGA3D_R8G8B8A8_UNORM.
1123    */
1124   if (svgascreen->sws->have_vgpu10 &&
1125       (util_format_is_srgb(template->format) ||
1126        format_has_depth(template->format))) {
1127      SVGA3dSurfaceFormat typeless = svga_typeless_format(tex->key.format);
1128      if (0) {
1129         debug_printf("Convert resource type %s -> %s (bind 0x%x)\n",
1130                      svga_format_name(tex->key.format),
1131                      svga_format_name(typeless),
1132                      bindings);
1133      }
1134
1135      if (svga_format_is_uncompressed_snorm(tex->key.format)) {
1136         /* We can't normally render to snorm surfaces, but once we
1137          * substitute a typeless format, we can if the rendertarget view
1138          * is unorm.  This can happen with GL_ARB_copy_image.
1139          */
1140         tex->key.flags |= SVGA3D_SURFACE_HINT_RENDERTARGET;
1141         tex->key.flags |= SVGA3D_SURFACE_BIND_RENDER_TARGET;
1142      }
1143
1144      tex->key.format = typeless;
1145   }
1146
1147   SVGA_DBG(DEBUG_DMA, "surface_create for texture\n", tex->handle);
1148   tex->handle = svga_screen_surface_create(svgascreen, bindings,
1149                                            tex->b.b.usage,
1150                                            &tex->validated, &tex->key);
1151   if (!tex->handle) {
1152      goto fail;
1153   }
1154
1155   SVGA_DBG(DEBUG_DMA, "  --> got sid %p (texture)\n", tex->handle);
1156
1157   debug_reference(&tex->b.b.reference,
1158                   (debug_reference_descriptor)debug_describe_resource, 0);
1159
1160   tex->size = util_resource_size(template);
1161
1162   /* Determine if texture upload buffer can be used to upload this texture */
1163   tex->can_use_upload = svga_texture_transfer_map_can_upload(svgascreen,
1164                                                              &tex->b.b);
1165
1166   /* Initialize the backing resource cache */
1167   tex->backed_handle = NULL;
1168
1169   svgascreen->hud.total_resource_bytes += tex->size;
1170   svgascreen->hud.num_resources++;
1171
1172   SVGA_STATS_TIME_POP(svgascreen->sws);
1173
1174   return &tex->b.b;
1175
1176fail:
1177   if (tex->dirty)
1178      FREE(tex->dirty);
1179   if (tex->rendered_to)
1180      FREE(tex->rendered_to);
1181   if (tex->defined)
1182      FREE(tex->defined);
1183   FREE(tex);
1184fail_notex:
1185   SVGA_STATS_TIME_POP(svgascreen->sws);
1186   return NULL;
1187}
1188
1189
1190struct pipe_resource *
1191svga_texture_from_handle(struct pipe_screen *screen,
1192                         const struct pipe_resource *template,
1193                         struct winsys_handle *whandle)
1194{
1195   struct svga_winsys_screen *sws = svga_winsys_screen(screen);
1196   struct svga_screen *ss = svga_screen(screen);
1197   struct svga_winsys_surface *srf;
1198   struct svga_texture *tex;
1199   enum SVGA3dSurfaceFormat format = 0;
1200   assert(screen);
1201
1202   /* Only supports one type */
1203   if ((template->target != PIPE_TEXTURE_2D &&
1204       template->target != PIPE_TEXTURE_RECT) ||
1205       template->last_level != 0 ||
1206       template->depth0 != 1) {
1207      return NULL;
1208   }
1209
1210   srf = sws->surface_from_handle(sws, whandle, &format);
1211
1212   if (!srf)
1213      return NULL;
1214
1215   if (!svga_format_is_shareable(ss, template->format, format,
1216                                 template->bind, true))
1217      goto out_unref;
1218
1219   tex = CALLOC_STRUCT(svga_texture);
1220   if (!tex)
1221      goto out_unref;
1222
1223   tex->defined = CALLOC(template->depth0 * template->array_size,
1224                         sizeof(tex->defined[0]));
1225   if (!tex->defined)
1226      goto out_no_defined;
1227
1228   tex->b.b = *template;
1229   tex->b.vtbl = &svga_texture_vtbl;
1230   pipe_reference_init(&tex->b.b.reference, 1);
1231   tex->b.b.screen = screen;
1232
1233   SVGA_DBG(DEBUG_DMA, "wrap surface sid %p\n", srf);
1234
1235   tex->key.cachable = 0;
1236   tex->key.format = format;
1237   tex->handle = srf;
1238
1239   tex->rendered_to = CALLOC(1, sizeof(tex->rendered_to[0]));
1240   if (!tex->rendered_to)
1241      goto out_no_rendered_to;
1242
1243   tex->dirty = CALLOC(1, sizeof(tex->dirty[0]));
1244   if (!tex->dirty)
1245      goto out_no_dirty;
1246
1247   tex->imported = TRUE;
1248
1249   ss->hud.num_resources++;
1250
1251   return &tex->b.b;
1252
1253out_no_dirty:
1254   FREE(tex->rendered_to);
1255out_no_rendered_to:
1256   FREE(tex->defined);
1257out_no_defined:
1258   FREE(tex);
1259out_unref:
1260   sws->surface_reference(sws, &srf, NULL);
1261   return NULL;
1262}
1263
1264boolean
1265svga_texture_generate_mipmap(struct pipe_context *pipe,
1266                             struct pipe_resource *pt,
1267                             enum pipe_format format,
1268                             unsigned base_level,
1269                             unsigned last_level,
1270                             unsigned first_layer,
1271                             unsigned last_layer)
1272{
1273   struct pipe_sampler_view templ, *psv;
1274   struct svga_pipe_sampler_view *sv;
1275   struct svga_context *svga = svga_context(pipe);
1276   struct svga_texture *tex = svga_texture(pt);
1277   enum pipe_error ret;
1278
1279   assert(svga_have_vgpu10(svga));
1280
1281   /* Only support 2D texture for now */
1282   if (pt->target != PIPE_TEXTURE_2D)
1283      return FALSE;
1284
1285   /* Fallback to the mipmap generation utility for those formats that
1286    * do not support hw generate mipmap
1287    */
1288   if (!svga_format_support_gen_mips(format))
1289      return FALSE;
1290
1291   /* Make sure the texture surface was created with
1292    * SVGA3D_SURFACE_BIND_RENDER_TARGET
1293    */
1294   if (!tex->handle || !(tex->key.flags & SVGA3D_SURFACE_BIND_RENDER_TARGET))
1295      return FALSE;
1296
1297   templ.format = format;
1298   templ.u.tex.first_layer = first_layer;
1299   templ.u.tex.last_layer = last_layer;
1300   templ.u.tex.first_level = base_level;
1301   templ.u.tex.last_level = last_level;
1302
1303   psv = pipe->create_sampler_view(pipe, pt, &templ);
1304   if (psv == NULL)
1305      return FALSE;
1306
1307   sv = svga_pipe_sampler_view(psv);
1308   ret = svga_validate_pipe_sampler_view(svga, sv);
1309   if (ret != PIPE_OK) {
1310      svga_context_flush(svga, NULL);
1311      ret = svga_validate_pipe_sampler_view(svga, sv);
1312      assert(ret == PIPE_OK);
1313   }
1314
1315   ret = SVGA3D_vgpu10_GenMips(svga->swc, sv->id, tex->handle);
1316   if (ret != PIPE_OK) {
1317      svga_context_flush(svga, NULL);
1318      ret = SVGA3D_vgpu10_GenMips(svga->swc, sv->id, tex->handle);
1319   }
1320   pipe_sampler_view_reference(&psv, NULL);
1321
1322   svga->hud.num_generate_mipmap++;
1323
1324   return TRUE;
1325}
1326
1327
1328/* texture upload buffer default size in bytes */
1329#define TEX_UPLOAD_DEFAULT_SIZE (1024 * 1024)
1330
1331/**
1332 * Create a texture upload buffer
1333 */
1334boolean
1335svga_texture_transfer_map_upload_create(struct svga_context *svga)
1336{
1337   svga->tex_upload = u_upload_create(&svga->pipe, TEX_UPLOAD_DEFAULT_SIZE,
1338                                      PIPE_BIND_CUSTOM, PIPE_USAGE_STAGING, 0);
1339   return svga->tex_upload != NULL;
1340}
1341
1342
1343/**
1344 * Destroy the texture upload buffer
1345 */
1346void
1347svga_texture_transfer_map_upload_destroy(struct svga_context *svga)
1348{
1349   u_upload_destroy(svga->tex_upload);
1350}
1351
1352
1353/**
1354 * Returns true if this transfer map request can use the upload buffer.
1355 */
1356boolean
1357svga_texture_transfer_map_can_upload(const struct svga_screen *svgascreen,
1358                                     const struct pipe_resource *texture)
1359{
1360   if (svgascreen->sws->have_transfer_from_buffer_cmd == FALSE)
1361      return FALSE;
1362
1363   /* TransferFromBuffer command is not well supported with multi-samples surface */
1364   if (texture->nr_samples > 1)
1365      return FALSE;
1366
1367   if (util_format_is_compressed(texture->format)) {
1368      /* XXX Need to take a closer look to see why texture upload
1369       * with 3D texture with compressed format fails
1370       */
1371      if (texture->target == PIPE_TEXTURE_3D)
1372          return FALSE;
1373   }
1374   else if (texture->format == PIPE_FORMAT_R9G9B9E5_FLOAT) {
1375      return FALSE;
1376   }
1377
1378   return TRUE;
1379}
1380
1381
1382/**
1383 * Use upload buffer for the transfer map request.
1384 */
1385void *
1386svga_texture_transfer_map_upload(struct svga_context *svga,
1387                                 struct svga_transfer *st)
1388{
1389   struct pipe_resource *texture = st->base.resource;
1390   struct pipe_resource *tex_buffer = NULL;
1391   void *tex_map;
1392   unsigned nblocksx, nblocksy;
1393   unsigned offset;
1394   unsigned upload_size;
1395
1396   assert(svga->tex_upload);
1397
1398   st->upload.box.x = st->base.box.x;
1399   st->upload.box.y = st->base.box.y;
1400   st->upload.box.z = st->base.box.z;
1401   st->upload.box.w = st->base.box.width;
1402   st->upload.box.h = st->base.box.height;
1403   st->upload.box.d = st->base.box.depth;
1404   st->upload.nlayers = 1;
1405
1406   switch (texture->target) {
1407   case PIPE_TEXTURE_CUBE:
1408      st->upload.box.z = 0;
1409      break;
1410   case PIPE_TEXTURE_2D_ARRAY:
1411   case PIPE_TEXTURE_CUBE_ARRAY:
1412      st->upload.nlayers = st->base.box.depth;
1413      st->upload.box.z = 0;
1414      st->upload.box.d = 1;
1415      break;
1416   case PIPE_TEXTURE_1D_ARRAY:
1417      st->upload.nlayers = st->base.box.depth;
1418      st->upload.box.y = st->upload.box.z = 0;
1419      st->upload.box.d = 1;
1420      break;
1421   default:
1422      break;
1423   }
1424
1425   nblocksx = util_format_get_nblocksx(texture->format, st->base.box.width);
1426   nblocksy = util_format_get_nblocksy(texture->format, st->base.box.height);
1427
1428   st->base.stride = nblocksx * util_format_get_blocksize(texture->format);
1429   st->base.layer_stride = st->base.stride * nblocksy;
1430
1431   /* In order to use the TransferFromBuffer command to update the
1432    * texture content from the buffer, the layer stride for a multi-layers
1433    * surface needs to be in multiples of 16 bytes.
1434    */
1435   if (st->upload.nlayers > 1 && st->base.layer_stride & 15)
1436      return NULL;
1437
1438   upload_size = st->base.layer_stride * st->base.box.depth;
1439   upload_size = align(upload_size, 16);
1440
1441#ifdef DEBUG
1442   if (util_format_is_compressed(texture->format)) {
1443      struct svga_texture *tex = svga_texture(texture);
1444      unsigned blockw, blockh, bytesPerBlock;
1445
1446      svga_format_size(tex->key.format, &blockw, &blockh, &bytesPerBlock);
1447
1448      /* dest box must start on block boundary */
1449      assert((st->base.box.x % blockw) == 0);
1450      assert((st->base.box.y % blockh) == 0);
1451   }
1452#endif
1453
1454   /* If the upload size exceeds the default buffer size, the
1455    * upload buffer manager code will try to allocate a new buffer
1456    * with the new buffer size.
1457    */
1458   u_upload_alloc(svga->tex_upload, 0, upload_size, 16,
1459                  &offset, &tex_buffer, &tex_map);
1460
1461   if (!tex_map) {
1462      return NULL;
1463   }
1464
1465   st->upload.buf = tex_buffer;
1466   st->upload.map = tex_map;
1467   st->upload.offset = offset;
1468
1469   return tex_map;
1470}
1471
1472
1473/**
1474 * Unmap upload map transfer request
1475 */
1476void
1477svga_texture_transfer_unmap_upload(struct svga_context *svga,
1478                                   struct svga_transfer *st)
1479{
1480   struct svga_winsys_surface *srcsurf;
1481   struct svga_winsys_surface *dstsurf;
1482   struct pipe_resource *texture = st->base.resource;
1483   struct svga_texture *tex = svga_texture(texture);
1484   enum pipe_error ret;
1485   unsigned subResource;
1486   unsigned numMipLevels;
1487   unsigned i, layer;
1488   unsigned offset = st->upload.offset;
1489
1490   assert(svga->tex_upload);
1491   assert(st->upload.buf);
1492
1493   /* unmap the texture upload buffer */
1494   u_upload_unmap(svga->tex_upload);
1495
1496   srcsurf = svga_buffer_handle(svga, st->upload.buf, 0);
1497   dstsurf = svga_texture(texture)->handle;
1498   assert(dstsurf);
1499
1500   numMipLevels = texture->last_level + 1;
1501
1502   for (i = 0, layer = st->slice; i < st->upload.nlayers; i++, layer++) {
1503      subResource = layer * numMipLevels + st->base.level;
1504
1505      /* send a transferFromBuffer command to update the host texture surface */
1506      assert((offset & 15) == 0);
1507
1508      ret = SVGA3D_vgpu10_TransferFromBuffer(svga->swc, srcsurf,
1509                                             offset,
1510                                             st->base.stride,
1511                                             st->base.layer_stride,
1512                                             dstsurf, subResource,
1513                                             &st->upload.box);
1514      if (ret != PIPE_OK) {
1515         svga_context_flush(svga, NULL);
1516         ret = SVGA3D_vgpu10_TransferFromBuffer(svga->swc, srcsurf,
1517                                                offset,
1518                                                st->base.stride,
1519                                                st->base.layer_stride,
1520                                                dstsurf, subResource,
1521                                                &st->upload.box);
1522         assert(ret == PIPE_OK);
1523      }
1524      offset += st->base.layer_stride;
1525
1526      /* Set rendered-to flag */
1527      svga_set_texture_rendered_to(tex, layer, st->base.level);
1528   }
1529
1530   pipe_resource_reference(&st->upload.buf, NULL);
1531}
1532
1533/**
1534 * Does the device format backing this surface have an
1535 * alpha channel?
1536 *
1537 * \param texture[in]  The texture whose format we're querying
1538 * \return TRUE if the format has an alpha channel, FALSE otherwise
1539 *
1540 * For locally created textures, the device (svga) format is typically
1541 * identical to svga_format(texture->format), and we can use the gallium
1542 * format tests to determine whether the device format has an alpha channel
1543 * or not. However, for textures backed by imported svga surfaces that is
1544 * not always true, and we have to look at the SVGA3D utilities.
1545 */
1546boolean
1547svga_texture_device_format_has_alpha(struct pipe_resource *texture)
1548{
1549   /* the svga_texture() call below is invalid for PIPE_BUFFER resources */
1550   assert(texture->target != PIPE_BUFFER);
1551
1552   enum svga3d_block_desc block_desc =
1553      svga3dsurface_get_desc(svga_texture(texture)->key.format)->block_desc;
1554
1555   return !!(block_desc & SVGA3DBLOCKDESC_ALPHA);
1556}
1557