1/**********************************************************
2 * Copyright 2008-2009 VMware, Inc.  All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26#include "svga_cmd.h"
27
28#include "pipe/p_state.h"
29#include "pipe/p_defines.h"
30#include "util/u_inlines.h"
31#include "os/os_thread.h"
32#include "util/u_math.h"
33#include "util/u_memory.h"
34#include "util/u_resource.h"
35
36#include "svga_context.h"
37#include "svga_screen.h"
38#include "svga_resource_buffer.h"
39#include "svga_resource_buffer_upload.h"
40#include "svga_resource_texture.h"
41#include "svga_sampler_view.h"
42#include "svga_winsys.h"
43#include "svga_debug.h"
44
45
46/**
47 * Determine what buffers eventually need hardware backing.
48 *
49 * Vertex- and index buffers need hardware backing.  Constant buffers
50 * do on vgpu10. Staging texture-upload buffers do when they are
51 * supported.
52 */
53static inline boolean
54svga_buffer_needs_hw_storage(const struct svga_screen *ss,
55                             const struct pipe_resource *template)
56{
57   unsigned bind_mask = (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER |
58                         PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_STREAM_OUTPUT |
59                         PIPE_BIND_SHADER_BUFFER | PIPE_BIND_COMMAND_ARGS_BUFFER);
60
61   if (ss->sws->have_vgpu10) {
62      /*
63       * Driver-created upload const0- and staging texture upload buffers
64       * tagged with PIPE_BIND_CUSTOM
65       */
66      bind_mask |= PIPE_BIND_CUSTOM;
67      /**
68       * Uniform buffer objects.
69       * Don't create hardware storage for state-tracker constant buffers,
70       * because we frequently map them for reading and writing, and
71       * the length of those buffers are always small, so it is better
72       * to just use system memory.
73       */
74   }
75
76   if (template->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
77      return TRUE;
78
79   return !!(template->bind & bind_mask);
80}
81
82/**
83 * Create a buffer transfer.
84 *
85 * Unlike texture DMAs (which are written immediately to the command buffer and
86 * therefore inherently serialized with other context operations), for buffers
87 * we try to coalesce multiple range mappings (i.e, multiple calls to this
88 * function) into a single DMA command, for better efficiency in command
89 * processing.  This means we need to exercise extra care here to ensure that
90 * the end result is exactly the same as if one DMA was used for every mapped
91 * range.
92 */
93void *
94svga_buffer_transfer_map(struct pipe_context *pipe,
95                         struct pipe_resource *resource,
96                         unsigned level,
97                         unsigned usage,
98                         const struct pipe_box *box,
99                         struct pipe_transfer **ptransfer)
100{
101   struct svga_context *svga = svga_context(pipe);
102   struct svga_screen *ss = svga_screen(pipe->screen);
103   struct svga_buffer *sbuf = svga_buffer(resource);
104   struct pipe_transfer *transfer;
105   uint8_t *map = NULL;
106   int64_t begin = svga_get_time(svga);
107
108   SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_BUFFERTRANSFERMAP);
109
110   assert(box->y == 0);
111   assert(box->z == 0);
112   assert(box->height == 1);
113   assert(box->depth == 1);
114
115   transfer = MALLOC_STRUCT(pipe_transfer);
116   if (!transfer) {
117      goto done;
118   }
119
120   transfer->resource = resource;
121   transfer->level = level;
122   transfer->usage = usage;
123   transfer->box = *box;
124   transfer->stride = 0;
125   transfer->layer_stride = 0;
126
127   if (usage & PIPE_MAP_WRITE) {
128      /* If we write to the buffer for any reason, free any saved translated
129       * vertices.
130       */
131      pipe_resource_reference(&sbuf->translated_indices.buffer, NULL);
132   }
133
134   if ((usage & PIPE_MAP_READ) && sbuf->dirty &&
135       !sbuf->key.coherent && !svga->swc->force_coherent) {
136
137      /* Host-side buffers can only be dirtied with vgpu10 features
138       * (streamout and buffer copy).
139       */
140      assert(svga_have_vgpu10(svga));
141
142      if (!sbuf->user) {
143         (void) svga_buffer_handle(svga, resource, sbuf->bind_flags);
144      }
145
146      if (sbuf->dma.pending) {
147         svga_buffer_upload_flush(svga, sbuf);
148         svga_context_finish(svga);
149      }
150
151      assert(sbuf->handle);
152
153      SVGA_RETRY(svga, SVGA3D_vgpu10_ReadbackSubResource(svga->swc,
154                                                         sbuf->handle, 0));
155      svga->hud.num_readbacks++;
156
157      svga_context_finish(svga);
158
159      sbuf->dirty = FALSE;
160   }
161
162   if (usage & PIPE_MAP_WRITE) {
163      if ((usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE) &&
164          !(resource->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)) {
165         /*
166          * Flush any pending primitives, finish writing any pending DMA
167          * commands, and tell the host to discard the buffer contents on
168          * the next DMA operation.
169          */
170
171         svga_hwtnl_flush_buffer(svga, resource);
172
173         if (sbuf->dma.pending) {
174            svga_buffer_upload_flush(svga, sbuf);
175
176            /*
177             * Instead of flushing the context command buffer, simply discard
178             * the current hwbuf, and start a new one.
179             * With GB objects, the map operation takes care of this
180             * if passed the PIPE_MAP_DISCARD_WHOLE_RESOURCE flag,
181             * and the old backing store is busy.
182             */
183
184            if (!svga_have_gb_objects(svga))
185               svga_buffer_destroy_hw_storage(ss, sbuf);
186         }
187
188         sbuf->map.num_ranges = 0;
189         sbuf->dma.flags.discard = TRUE;
190      }
191
192      if (usage & PIPE_MAP_UNSYNCHRONIZED) {
193         if (!sbuf->map.num_ranges) {
194            /*
195             * No pending ranges to upload so far, so we can tell the host to
196             * not synchronize on the next DMA command.
197             */
198
199            sbuf->dma.flags.unsynchronized = TRUE;
200         }
201      } else {
202         /*
203          * Synchronizing, so flush any pending primitives, finish writing any
204          * pending DMA command, and ensure the next DMA will be done in order.
205          */
206
207         svga_hwtnl_flush_buffer(svga, resource);
208
209         if (sbuf->dma.pending) {
210            svga_buffer_upload_flush(svga, sbuf);
211
212            if (svga_buffer_has_hw_storage(sbuf)) {
213               /*
214                * We have a pending DMA upload from a hardware buffer, therefore
215                * we need to ensure that the host finishes processing that DMA
216                * command before the gallium frontend can start overwriting the
217                * hardware buffer.
218                *
219                * XXX: This could be avoided by tying the hardware buffer to
220                * the transfer (just as done with textures), which would allow
221                * overlapping DMAs commands to be queued on the same context
222                * buffer. However, due to the likelihood of software vertex
223                * processing, it is more convenient to hold on to the hardware
224                * buffer, allowing to quickly access the contents from the CPU
225                * without having to do a DMA download from the host.
226                */
227
228               if (usage & PIPE_MAP_DONTBLOCK) {
229                  /*
230                   * Flushing the command buffer here will most likely cause
231                   * the map of the hwbuf below to block, so preemptively
232                   * return NULL here if DONTBLOCK is set to prevent unnecessary
233                   * command buffer flushes.
234                   */
235
236                  FREE(transfer);
237                  goto done;
238               }
239
240               svga_context_flush(svga, NULL);
241            }
242         }
243
244         sbuf->dma.flags.unsynchronized = FALSE;
245      }
246   }
247
248   if (!sbuf->swbuf && !svga_buffer_has_hw_storage(sbuf)) {
249      if (svga_buffer_create_hw_storage(ss, sbuf, sbuf->bind_flags) != PIPE_OK) {
250         /*
251          * We can't create a hardware buffer big enough, so create a malloc
252          * buffer instead.
253          */
254         if (0) {
255            debug_printf("%s: failed to allocate %u KB of DMA, "
256                         "splitting DMA transfers\n",
257                         __FUNCTION__,
258                         (sbuf->b.width0 + 1023)/1024);
259         }
260
261         sbuf->swbuf = align_malloc(sbuf->b.width0, 16);
262         if (!sbuf->swbuf) {
263            FREE(transfer);
264            goto done;
265         }
266      }
267   }
268
269   if (sbuf->swbuf) {
270      /* User/malloc buffer */
271      map = sbuf->swbuf;
272   }
273   else if (svga_buffer_has_hw_storage(sbuf)) {
274      boolean retry;
275
276      map = SVGA_TRY_MAP(svga_buffer_hw_storage_map
277                         (svga, sbuf, transfer->usage, &retry), retry);
278      if (map == NULL && retry) {
279         /*
280          * At this point, svga_buffer_get_transfer() has already
281          * hit the DISCARD_WHOLE_RESOURCE path and flushed HWTNL
282          * for this buffer.
283          */
284         svga_retry_enter(svga);
285         svga_context_flush(svga, NULL);
286         map = svga_buffer_hw_storage_map(svga, sbuf, transfer->usage, &retry);
287         svga_retry_exit(svga);
288      }
289   }
290   else {
291      map = NULL;
292   }
293
294   if (map) {
295      ++sbuf->map.count;
296      map += transfer->box.x;
297      *ptransfer = transfer;
298   } else {
299      FREE(transfer);
300   }
301
302   svga->hud.map_buffer_time += (svga_get_time(svga) - begin);
303
304done:
305   SVGA_STATS_TIME_POP(svga_sws(svga));
306   return map;
307}
308
309
310void
311svga_buffer_transfer_flush_region(struct pipe_context *pipe,
312                                  struct pipe_transfer *transfer,
313                                  const struct pipe_box *box)
314{
315   struct svga_screen *ss = svga_screen(pipe->screen);
316   struct svga_buffer *sbuf = svga_buffer(transfer->resource);
317   struct svga_context *svga = svga_context(pipe);
318   unsigned offset = transfer->box.x + box->x;
319   unsigned length = box->width;
320
321   assert(transfer->usage & PIPE_MAP_WRITE);
322   assert(transfer->usage & PIPE_MAP_FLUSH_EXPLICIT);
323
324   if (!(svga->swc->force_coherent || sbuf->key.coherent) || sbuf->swbuf) {
325      mtx_lock(&ss->swc_mutex);
326      svga_buffer_add_range(sbuf, offset, offset + length);
327      mtx_unlock(&ss->swc_mutex);
328   }
329}
330
331
332void
333svga_buffer_transfer_unmap(struct pipe_context *pipe,
334                           struct pipe_transfer *transfer)
335{
336   struct svga_screen *ss = svga_screen(pipe->screen);
337   struct svga_context *svga = svga_context(pipe);
338   struct svga_buffer *sbuf = svga_buffer(transfer->resource);
339
340   SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_BUFFERTRANSFERUNMAP);
341
342   mtx_lock(&ss->swc_mutex);
343
344   assert(sbuf->map.count);
345   if (sbuf->map.count) {
346      --sbuf->map.count;
347   }
348
349   if (svga_buffer_has_hw_storage(sbuf)) {
350
351      /* Note: we may wind up flushing here and unmapping other buffers
352       * which leads to recursively locking ss->swc_mutex.
353       */
354      svga_buffer_hw_storage_unmap(svga, sbuf);
355   }
356
357   if (transfer->usage & PIPE_MAP_WRITE) {
358      if (!(transfer->usage & PIPE_MAP_FLUSH_EXPLICIT)) {
359         /*
360          * Mapped range not flushed explicitly, so flush the whole buffer,
361          * and tell the host to discard the contents when processing the DMA
362          * command.
363          */
364
365         SVGA_DBG(DEBUG_DMA, "flushing the whole buffer\n");
366
367         sbuf->dma.flags.discard = TRUE;
368
369         if (!(svga->swc->force_coherent || sbuf->key.coherent) || sbuf->swbuf)
370            svga_buffer_add_range(sbuf, 0, sbuf->b.width0);
371      }
372
373      if (sbuf->swbuf &&
374          (!sbuf->bind_flags || (sbuf->bind_flags & PIPE_BIND_CONSTANT_BUFFER))) {
375         /*
376          * Since the constant buffer is in system buffer, we need
377          * to set the constant buffer dirty bits, so that the context
378          * can update the changes in the device.
379          * According to the GL spec, buffer bound to other contexts will
380          * have to be explicitly rebound by the user to have the changes take
381          * into effect.
382          */
383         svga->dirty |= SVGA_NEW_CONST_BUFFER;
384      }
385   }
386
387   mtx_unlock(&ss->swc_mutex);
388   FREE(transfer);
389   SVGA_STATS_TIME_POP(svga_sws(svga));
390}
391
392
393void
394svga_resource_destroy(struct pipe_screen *screen,
395                      struct pipe_resource *buf)
396{
397   if (buf->target == PIPE_BUFFER) {
398      struct svga_screen *ss = svga_screen(screen);
399      struct svga_buffer *sbuf = svga_buffer(buf);
400
401      assert(!p_atomic_read(&buf->reference.count));
402
403      assert(!sbuf->dma.pending);
404
405      if (sbuf->handle)
406         svga_buffer_destroy_host_surface(ss, sbuf);
407
408      if (sbuf->uploaded.buffer)
409         pipe_resource_reference(&sbuf->uploaded.buffer, NULL);
410
411      if (sbuf->hwbuf)
412         svga_buffer_destroy_hw_storage(ss, sbuf);
413
414      if (sbuf->swbuf && !sbuf->user)
415         align_free(sbuf->swbuf);
416
417      pipe_resource_reference(&sbuf->translated_indices.buffer, NULL);
418
419      ss->hud.total_resource_bytes -= sbuf->size;
420      assert(ss->hud.num_resources > 0);
421      if (ss->hud.num_resources > 0)
422         ss->hud.num_resources--;
423
424      FREE(sbuf);
425   } else {
426      struct svga_screen *ss = svga_screen(screen);
427      struct svga_texture *tex = svga_texture(buf);
428
429      ss->texture_timestamp++;
430
431      svga_sampler_view_reference(&tex->cached_view, NULL);
432
433      /*
434        DBG("%s deleting %p\n", __FUNCTION__, (void *) tex);
435      */
436      SVGA_DBG(DEBUG_DMA, "unref sid %p (texture)\n", tex->handle);
437      svga_screen_surface_destroy(ss, &tex->key, &tex->handle);
438
439      /* Destroy the backed surface handle if exists */
440      if (tex->backed_handle)
441         svga_screen_surface_destroy(ss, &tex->backed_key, &tex->backed_handle);
442
443      ss->hud.total_resource_bytes -= tex->size;
444
445      FREE(tex->defined);
446      FREE(tex->rendered_to);
447      FREE(tex->dirty);
448      FREE(tex);
449
450      assert(ss->hud.num_resources > 0);
451      if (ss->hud.num_resources > 0)
452         ss->hud.num_resources--;
453   }
454}
455
456struct pipe_resource *
457svga_buffer_create(struct pipe_screen *screen,
458                   const struct pipe_resource *template)
459{
460   struct svga_screen *ss = svga_screen(screen);
461   struct svga_buffer *sbuf;
462   unsigned bind_flags;
463
464   SVGA_STATS_TIME_PUSH(ss->sws, SVGA_STATS_TIME_CREATEBUFFER);
465
466   sbuf = CALLOC_STRUCT(svga_buffer);
467   if (!sbuf)
468      goto error1;
469
470   sbuf->b = *template;
471   pipe_reference_init(&sbuf->b.reference, 1);
472   sbuf->b.screen = screen;
473   bind_flags = template->bind & ~PIPE_BIND_CUSTOM;
474
475   list_inithead(&sbuf->surfaces);
476
477   if (bind_flags & PIPE_BIND_CONSTANT_BUFFER) {
478      /* Constant buffers can only have the PIPE_BIND_CONSTANT_BUFFER
479       * flag set.
480       */
481      if (ss->sws->have_vgpu10) {
482         bind_flags = PIPE_BIND_CONSTANT_BUFFER;
483      }
484   }
485
486   /* Although svga device only requires constant buffer size to be
487    * in multiples of 16, in order to allow bind_flags promotion,
488    * we are mandating all buffer size to be in multiples of 16.
489    */
490   sbuf->b.width0 = align(sbuf->b.width0, 16);
491
492   if (svga_buffer_needs_hw_storage(ss, template)) {
493
494      /* If the buffer is not used for constant buffer, set
495       * the vertex/index bind flags as well so that the buffer will be
496       * accepted for those uses.
497       * Note that the PIPE_BIND_ flags we get from the gallium frontend are
498       * just a hint about how the buffer may be used.  And OpenGL buffer
499       * object may be used for many different things.
500       * Also note that we do not unconditionally set the streamout
501       * bind flag since streamout buffer is an output buffer and
502       * might have performance implication.
503       */
504      if (!(template->bind & PIPE_BIND_CONSTANT_BUFFER) &&
505          !(template->bind & PIPE_BIND_CUSTOM)) {
506         /* Not a constant- or staging buffer.
507          * The buffer may be used for vertex data or indexes.
508          */
509         bind_flags |= (PIPE_BIND_VERTEX_BUFFER |
510                        PIPE_BIND_INDEX_BUFFER);
511
512         /* It may be used for shader resource as well. */
513         bind_flags |= PIPE_BIND_SAMPLER_VIEW;
514      }
515
516      if (svga_buffer_create_host_surface(ss, sbuf, bind_flags) != PIPE_OK)
517         goto error2;
518   }
519   else {
520      sbuf->swbuf = align_malloc(sbuf->b.width0, 64);
521      if (!sbuf->swbuf)
522         goto error2;
523
524      /* Since constant buffer is usually small, it is much cheaper to
525       * use system memory for the data just as it is being done for
526       * the default constant buffer.
527       */
528      if ((bind_flags & PIPE_BIND_CONSTANT_BUFFER) || !bind_flags)
529         sbuf->use_swbuf = TRUE;
530   }
531
532   debug_reference(&sbuf->b.reference,
533                   (debug_reference_descriptor)debug_describe_resource, 0);
534
535   sbuf->bind_flags = bind_flags;
536   sbuf->size = util_resource_size(&sbuf->b);
537   ss->hud.total_resource_bytes += sbuf->size;
538
539   ss->hud.num_resources++;
540   SVGA_STATS_TIME_POP(ss->sws);
541
542   return &sbuf->b;
543
544error2:
545   FREE(sbuf);
546error1:
547   SVGA_STATS_TIME_POP(ss->sws);
548   return NULL;
549}
550
551
552struct pipe_resource *
553svga_user_buffer_create(struct pipe_screen *screen,
554                        void *ptr,
555                        unsigned bytes,
556                        unsigned bind)
557{
558   struct svga_buffer *sbuf;
559   struct svga_screen *ss = svga_screen(screen);
560
561   sbuf = CALLOC_STRUCT(svga_buffer);
562   if (!sbuf)
563      goto no_sbuf;
564
565   pipe_reference_init(&sbuf->b.reference, 1);
566   sbuf->b.screen = screen;
567   sbuf->b.format = PIPE_FORMAT_R8_UNORM; /* ?? */
568   sbuf->b.usage = PIPE_USAGE_IMMUTABLE;
569   sbuf->b.bind = bind;
570   sbuf->b.width0 = bytes;
571   sbuf->b.height0 = 1;
572   sbuf->b.depth0 = 1;
573   sbuf->b.array_size = 1;
574
575   sbuf->bind_flags = bind;
576   sbuf->swbuf = ptr;
577   sbuf->user = TRUE;
578
579   debug_reference(&sbuf->b.reference,
580                   (debug_reference_descriptor)debug_describe_resource, 0);
581
582   ss->hud.num_resources++;
583
584   return &sbuf->b;
585
586no_sbuf:
587   return NULL;
588}
589