/**********************************************************
 * Copyright 2009-2015 VMware, Inc.  All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/


#include "svga_cmd.h"

#include "util/u_debug.h"
#include "util/u_memory.h"
#include "util/u_debug_stack.h"
#include "util/u_debug_flush.h"
#include "util/u_hash_table.h"
#include "pipebuffer/pb_buffer.h"
#include "pipebuffer/pb_validate.h"

#include "svga_winsys.h"
#include "vmw_context.h"
#include "vmw_screen.h"
#include "vmw_buffer.h"
#include "vmw_surface.h"
#include "vmw_fence.h"
#include "vmw_shader.h"
#include "vmw_query.h"

#define VMW_COMMAND_SIZE (64*1024)
#define VMW_SURFACE_RELOCS (1024)
#define VMW_SHADER_RELOCS (1024)
#define VMW_REGION_RELOCS (512)

#define VMW_MUST_FLUSH_STACK 8

/*
 * A factor applied to the maximum mob memory size to determine
 * the optimal time to preemptively flush the command buffer.
 * The constant is based on some performance trials with SpecViewperf.
 */
#define VMW_MAX_MOB_MEM_FACTOR  2

/*
 * A factor applied to the maximum surface memory size to determine
 * the optimal time to preemptively flush the command buffer.
 * The constant is based on some performance trials with SpecViewperf.
 */
#define VMW_MAX_SURF_MEM_FACTOR 2


struct vmw_buffer_relocation
{
   struct pb_buffer *buffer;
   boolean is_mob;
   uint32 offset;

   union {
      struct {
         struct SVGAGuestPtr *where;
      } region;
      struct {
         SVGAMobId *id;
         uint32 *offset_into_mob;
      } mob;
   };
};

struct vmw_ctx_validate_item {
   union {
      struct vmw_svga_winsys_surface *vsurf;
      struct vmw_svga_winsys_shader *vshader;
   };
   boolean referenced;
};

struct vmw_svga_winsys_context
{
   struct svga_winsys_context base;

   struct vmw_winsys_screen *vws;
   struct util_hash_table *hash;

#ifdef DEBUG
   boolean must_flush;
   struct debug_stack_frame must_flush_stack[VMW_MUST_FLUSH_STACK];
   struct debug_flush_ctx *fctx;
#endif

   struct {
      uint8_t buffer[VMW_COMMAND_SIZE];
      uint32_t size;
      uint32_t used;
      uint32_t reserved;
   } command;

   struct {
      struct vmw_ctx_validate_item items[VMW_SURFACE_RELOCS];
      uint32_t size;
      uint32_t used;
      uint32_t staged;
      uint32_t reserved;
   } surface;

   struct {
      struct vmw_buffer_relocation relocs[VMW_REGION_RELOCS];
      uint32_t size;
      uint32_t used;
      uint32_t staged;
      uint32_t reserved;
   } region;

   struct {
      struct vmw_ctx_validate_item items[VMW_SHADER_RELOCS];
      uint32_t size;
      uint32_t used;
      uint32_t staged;
      uint32_t reserved;
   } shader;

   struct pb_validate *validate;

   /**
    * The amount of surface, GMR or MOB memory that is referred to by the
    * commands currently batched in the context command buffer.
    */
   uint64_t seen_surfaces;
   uint64_t seen_regions;
   uint64_t seen_mobs;

   /**
    * Whether this context should fail to reserve more commands, not because it
    * ran out of command space, but because a substantial amount of GMR memory
    * is referenced.
    */
   boolean preemptive_flush;
};


static inline struct vmw_svga_winsys_context *
vmw_svga_winsys_context(struct svga_winsys_context *swc)
{
   assert(swc);
   return (struct vmw_svga_winsys_context *)swc;
}


static inline enum pb_usage_flags
vmw_translate_to_pb_flags(unsigned flags)
{
   enum pb_usage_flags f = 0;
   if (flags & SVGA_RELOC_READ)
      f |= PB_USAGE_GPU_READ;

   if (flags & SVGA_RELOC_WRITE)
      f |= PB_USAGE_GPU_WRITE;

   return f;
}

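/**
 * vmw_swc_flush - The winsys flush callback
 *
 * @swc:    The winsys context.
 * @pfence: If non-NULL, returns a reference to the fence of this submission.
 *
 * Validates all buffers on the validation list, patches the staged region
 * and MOB relocations with the final addresses, submits the command buffer
 * to the kernel and fences the validated buffers. Finally, the command
 * buffer and all relocation lists are reset and the preemptive-flush
 * bookkeeping is cleared.
 */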
static enum pipe_error
vmw_swc_flush(struct svga_winsys_context *swc,
              struct pipe_fence_handle **pfence)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_winsys_screen *vws = vswc->vws;
   struct pipe_fence_handle *fence = NULL;
   unsigned i;
   enum pipe_error ret;

   /*
    * If we hit a retry, lock the mutex and retry immediately.
    * If we then still hit a retry, sleep until another thread
    * wakes us up after it has released its buffers from the
    * validate list.
    *
    * If we hit another error condition, we still need to broadcast since
    * pb_validate_validate releases validated buffers in its error path.
    */

   ret = pb_validate_validate(vswc->validate);
   if (ret != PIPE_OK) {
      mtx_lock(&vws->cs_mutex);
      while (ret == PIPE_ERROR_RETRY) {
         ret = pb_validate_validate(vswc->validate);
         if (ret == PIPE_ERROR_RETRY) {
            cnd_wait(&vws->cs_cond, &vws->cs_mutex);
         }
      }
      if (ret != PIPE_OK) {
         cnd_broadcast(&vws->cs_cond);
      }
      mtx_unlock(&vws->cs_mutex);
   }

   assert(ret == PIPE_OK);
   if(ret == PIPE_OK) {

      /* Apply relocations */
      for(i = 0; i < vswc->region.used; ++i) {
         struct vmw_buffer_relocation *reloc = &vswc->region.relocs[i];
         struct SVGAGuestPtr ptr;

         if(!vmw_gmr_bufmgr_region_ptr(reloc->buffer, &ptr))
            assert(0);

         ptr.offset += reloc->offset;

         if (reloc->is_mob) {
            if (reloc->mob.id)
               *reloc->mob.id = ptr.gmrId;
            if (reloc->mob.offset_into_mob)
               *reloc->mob.offset_into_mob = ptr.offset;
            else {
               assert(ptr.offset == 0);
            }
         } else
            *reloc->region.where = ptr;
      }

      if (vswc->command.used || pfence != NULL)
         vmw_ioctl_command(vws,
                           vswc->base.cid,
                           0,
                           vswc->command.buffer,
                           vswc->command.used,
                           &fence,
                           vswc->base.imported_fence_fd,
                           vswc->base.hints);

      pb_validate_fence(vswc->validate, fence);
      mtx_lock(&vws->cs_mutex);
      cnd_broadcast(&vws->cs_cond);
      mtx_unlock(&vws->cs_mutex);
   }

   vswc->command.used = 0;
   vswc->command.reserved = 0;

   for(i = 0; i < vswc->surface.used + vswc->surface.staged; ++i) {
      struct vmw_ctx_validate_item *isurf = &vswc->surface.items[i];
      if (isurf->referenced)
         p_atomic_dec(&isurf->vsurf->validated);
      vmw_svga_winsys_surface_reference(&isurf->vsurf, NULL);
   }

   util_hash_table_clear(vswc->hash);
   vswc->surface.used = 0;
   vswc->surface.reserved = 0;

   for(i = 0; i < vswc->shader.used + vswc->shader.staged; ++i) {
      struct vmw_ctx_validate_item *ishader = &vswc->shader.items[i];
      if (ishader->referenced)
         p_atomic_dec(&ishader->vshader->validated);
      vmw_svga_winsys_shader_reference(&ishader->vshader, NULL);
   }

   vswc->shader.used = 0;
   vswc->shader.reserved = 0;

   vswc->region.used = 0;
   vswc->region.reserved = 0;

#ifdef DEBUG
   vswc->must_flush = FALSE;
   debug_flush_flush(vswc->fctx);
#endif
   swc->hints &= ~SVGA_HINT_FLAG_CAN_PRE_FLUSH;
   swc->hints &= ~SVGA_HINT_FLAG_EXPORT_FENCE_FD;
   vswc->preemptive_flush = FALSE;
   vswc->seen_surfaces = 0;
   vswc->seen_regions = 0;
   vswc->seen_mobs = 0;

   if (vswc->base.imported_fence_fd != -1) {
      close(vswc->base.imported_fence_fd);
      vswc->base.imported_fence_fd = -1;
   }

   if(pfence)
      vmw_fence_reference(vswc->vws, pfence, fence);

   vmw_fence_reference(vswc->vws, &fence, NULL);

   return ret;
}


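/**
 * vmw_swc_reserve - The winsys reserve callback
 *
 * @swc:       The winsys context.
 * @nr_bytes:  Number of command bytes to reserve.
 * @nr_relocs: Number of relocations of each type to reserve room for.
 *
 * Returns a pointer into the command buffer where the caller may write
 * nr_bytes of command data, or NULL if the command buffer or one of the
 * relocation lists would overflow, or if a preemptive flush has been
 * scheduled, in which case the caller must flush first.
 */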
static void *
vmw_swc_reserve(struct svga_winsys_context *swc,
                uint32_t nr_bytes, uint32_t nr_relocs )
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);

#ifdef DEBUG
   /* Check if somebody forgot to check the previous failure */
   if(vswc->must_flush) {
      debug_printf("Forgot to flush:\n");
      debug_backtrace_dump(vswc->must_flush_stack, VMW_MUST_FLUSH_STACK);
      assert(!vswc->must_flush);
   }
   debug_flush_might_flush(vswc->fctx);
#endif

   assert(nr_bytes <= vswc->command.size);
   if(nr_bytes > vswc->command.size)
      return NULL;

   if(vswc->preemptive_flush ||
      vswc->command.used + nr_bytes > vswc->command.size ||
      vswc->surface.used + nr_relocs > vswc->surface.size ||
      vswc->shader.used + nr_relocs > vswc->shader.size ||
      vswc->region.used + nr_relocs > vswc->region.size) {
#ifdef DEBUG
      vswc->must_flush = TRUE;
      debug_backtrace_capture(vswc->must_flush_stack, 1,
                              VMW_MUST_FLUSH_STACK);
#endif
      return NULL;
   }

   assert(vswc->command.used + nr_bytes <= vswc->command.size);
   assert(vswc->surface.used + nr_relocs <= vswc->surface.size);
   assert(vswc->shader.used + nr_relocs <= vswc->shader.size);
   assert(vswc->region.used + nr_relocs <= vswc->region.size);

   vswc->command.reserved = nr_bytes;
   vswc->surface.reserved = nr_relocs;
   vswc->surface.staged = 0;
   vswc->shader.reserved = nr_relocs;
   vswc->shader.staged = 0;
   vswc->region.reserved = nr_relocs;
   vswc->region.staged = 0;

   return vswc->command.buffer + vswc->command.used;
}

static unsigned
vmw_swc_get_command_buffer_size(struct svga_winsys_context *swc)
{
   const struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   return vswc->command.used;
}

static void
vmw_swc_context_relocation(struct svga_winsys_context *swc,
                           uint32 *cid)
{
   *cid = swc->cid;
}

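/**
 * vmw_swc_add_validate_buffer - Add a buffer to the context validation list
 *
 * @vswc:   The context.
 * @pb_buf: The buffer to add.
 * @flags:  SVGA relocation flags, translated to pipebuffer usage flags.
 *
 * Adds the buffer to the pb_validate list unless the context hash table
 * shows that it is already present. Returns TRUE if the buffer was newly
 * added, FALSE if it was already on the list.
 */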
static boolean
vmw_swc_add_validate_buffer(struct vmw_svga_winsys_context *vswc,
                            struct pb_buffer *pb_buf,
                            unsigned flags)
{
   enum pipe_error ret;
   unsigned translated_flags;

   /*
    * TODO: Update pb_validate to provide similar functionality
    * (check whether the buffer is already present before adding it)
    */
   if (util_hash_table_get(vswc->hash, pb_buf) != pb_buf) {
      translated_flags = vmw_translate_to_pb_flags(flags);
      ret = pb_validate_add_buffer(vswc->validate, pb_buf, translated_flags);
      /* TODO: Update pipebuffer to reserve buffers and not fail here */
      assert(ret == PIPE_OK);
      (void)ret;
      (void)util_hash_table_set(vswc->hash, pb_buf, pb_buf);
      return TRUE;
   }

   return FALSE;
}

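/**
 * vmw_swc_region_relocation - The winsys region_relocation callback
 *
 * @swc:    The winsys context.
 * @where:  Location in the command buffer to be patched with the GMR pointer.
 * @buffer: The buffer the command refers to.
 * @offset: Offset into the buffer.
 * @flags:  SVGA relocation flags.
 *
 * Stages a region relocation to be patched at flush time and adds the
 * backing buffer to the validation list. If enough GMR memory is
 * referenced by the batched commands, a preemptive flush is scheduled.
 */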
static void
vmw_swc_region_relocation(struct svga_winsys_context *swc,
                          struct SVGAGuestPtr *where,
                          struct svga_winsys_buffer *buffer,
                          uint32 offset,
                          unsigned flags)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_buffer_relocation *reloc;

   assert(vswc->region.staged < vswc->region.reserved);

   reloc = &vswc->region.relocs[vswc->region.used + vswc->region.staged];
   reloc->region.where = where;

   /*
    * pb_validate holds a refcount to the buffer, so no need to
    * refcount it again in the relocation.
    */
   reloc->buffer = vmw_pb_buffer(buffer);
   reloc->offset = offset;
   reloc->is_mob = FALSE;
   ++vswc->region.staged;

   if (vmw_swc_add_validate_buffer(vswc, reloc->buffer, flags)) {
      vswc->seen_regions += reloc->buffer->size;
      if ((swc->hints & SVGA_HINT_FLAG_CAN_PRE_FLUSH) &&
          vswc->seen_regions >= VMW_GMR_POOL_SIZE/5)
         vswc->preemptive_flush = TRUE;
   }

#ifdef DEBUG
   if (!(flags & SVGA_RELOC_INTERNAL))
      debug_flush_cb_reference(vswc->fctx, vmw_debug_flush_buf(buffer));
#endif
}

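/**
 * vmw_swc_mob_relocation - The winsys mob_relocation callback
 *
 * @swc:             The winsys context.
 * @id:              Location to be patched with the MOB id, or NULL.
 * @offset_into_mob: Location to be patched with the offset, or NULL.
 * @buffer:          The buffer backing the MOB.
 * @offset:          Offset into the buffer.
 * @flags:           SVGA relocation flags.
 *
 * Stages a MOB relocation to be patched at flush time and adds the
 * backing buffer to the validation list. If enough MOB memory is
 * referenced by the batched commands, a preemptive flush is scheduled.
 */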
static void
vmw_swc_mob_relocation(struct svga_winsys_context *swc,
                       SVGAMobId *id,
                       uint32 *offset_into_mob,
                       struct svga_winsys_buffer *buffer,
                       uint32 offset,
                       unsigned flags)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_buffer_relocation *reloc;
   struct pb_buffer *pb_buffer = vmw_pb_buffer(buffer);

   if (id) {
      assert(vswc->region.staged < vswc->region.reserved);

      reloc = &vswc->region.relocs[vswc->region.used + vswc->region.staged];
      reloc->mob.id = id;
      reloc->mob.offset_into_mob = offset_into_mob;

      /*
       * pb_validate holds a refcount to the buffer, so no need to
       * refcount it again in the relocation.
       */
      reloc->buffer = pb_buffer;
      reloc->offset = offset;
      reloc->is_mob = TRUE;
      ++vswc->region.staged;
   }

   if (vmw_swc_add_validate_buffer(vswc, pb_buffer, flags)) {
      vswc->seen_mobs += pb_buffer->size;

      if ((swc->hints & SVGA_HINT_FLAG_CAN_PRE_FLUSH) &&
          vswc->seen_mobs >=
            vswc->vws->ioctl.max_mob_memory / VMW_MAX_MOB_MEM_FACTOR)
         vswc->preemptive_flush = TRUE;
   }

#ifdef DEBUG
   if (!(flags & SVGA_RELOC_INTERNAL))
      debug_flush_cb_reference(vswc->fctx, vmw_debug_flush_buf(buffer));
#endif
}


/**
 * vmw_swc_surface_clear_reference - Clear referenced info for a surface
 *
 * @swc:   Pointer to an svga_winsys_context
 * @vsurf: Pointer to a vmw_svga_winsys_surface, the referenced info of which
 *         we want to clear
 *
 * This is primarily used by a discard surface map to indicate that the
 * surface data is no longer referenced by a draw call, and mapping it
 * should therefore no longer cause a flush.
 */
void
vmw_swc_surface_clear_reference(struct svga_winsys_context *swc,
                                struct vmw_svga_winsys_surface *vsurf)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_ctx_validate_item *isrf =
      util_hash_table_get(vswc->hash, vsurf);

   if (isrf && isrf->referenced) {
      isrf->referenced = FALSE;
      p_atomic_dec(&vsurf->validated);
   }
}

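/**
 * vmw_swc_surface_only_relocation - Relocate a surface without its backup
 *
 * @swc:   The winsys context.
 * @where: Location to write the surface id to, or NULL.
 * @vsurf: The surface.
 * @flags: SVGA relocation flags.
 *
 * Adds the surface to the validation item list, using the hash table to
 * avoid duplicate entries, and bumps its validated count for non-internal
 * relocations. If enough surface memory is referenced by the batched
 * commands, a preemptive flush is scheduled.
 */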
static void
vmw_swc_surface_only_relocation(struct svga_winsys_context *swc,
                                uint32 *where,
                                struct vmw_svga_winsys_surface *vsurf,
                                unsigned flags)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_ctx_validate_item *isrf;

   assert(vswc->surface.staged < vswc->surface.reserved);
   isrf = util_hash_table_get(vswc->hash, vsurf);

   if (isrf == NULL) {
      isrf = &vswc->surface.items[vswc->surface.used + vswc->surface.staged];
      vmw_svga_winsys_surface_reference(&isrf->vsurf, vsurf);
      isrf->referenced = FALSE;
      /*
       * Note that a failure here may just fall back to unhashed behavior
       * and potentially cause unnecessary flushing, so ignore the
       * return code.
       */
      (void) util_hash_table_set(vswc->hash, vsurf, isrf);
      ++vswc->surface.staged;

      vswc->seen_surfaces += vsurf->size;
      if ((swc->hints & SVGA_HINT_FLAG_CAN_PRE_FLUSH) &&
          vswc->seen_surfaces >=
            vswc->vws->ioctl.max_surface_memory / VMW_MAX_SURF_MEM_FACTOR)
         vswc->preemptive_flush = TRUE;
   }

   if (!(flags & SVGA_RELOC_INTERNAL) && !isrf->referenced) {
      isrf->referenced = TRUE;
      p_atomic_inc(&vsurf->validated);
   }

   if (where)
      *where = vsurf->sid;
}

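/**
 * vmw_swc_surface_relocation - The winsys surface_relocation callback
 *
 * @swc:     The winsys context.
 * @where:   Location to write the surface id to, or NULL.
 * @mobid:   Location to be patched with the backup MOB id, or NULL.
 * @surface: The surface, or NULL to write SVGA3D_INVALID_ID.
 * @flags:   SVGA relocation flags.
 *
 * Relocates the surface id and, for guest-backed surfaces with a backup
 * buffer, also emits a mob relocation for that buffer so that it ends up
 * validated and fenced together with the command buffer.
 */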
static void
vmw_swc_surface_relocation(struct svga_winsys_context *swc,
                           uint32 *where,
                           uint32 *mobid,
                           struct svga_winsys_surface *surface,
                           unsigned flags)
{
   struct vmw_svga_winsys_surface *vsurf;

   assert(swc->have_gb_objects || mobid == NULL);

   if (!surface) {
      *where = SVGA3D_INVALID_ID;
      if (mobid)
         *mobid = SVGA3D_INVALID_ID;
      return;
   }

   vsurf = vmw_svga_winsys_surface(surface);
   vmw_swc_surface_only_relocation(swc, where, vsurf, flags);

   if (swc->have_gb_objects && vsurf->buf != NULL) {

      /*
       * Make sure backup buffer ends up fenced.
       */

      mtx_lock(&vsurf->mutex);
      assert(vsurf->buf != NULL);

      vmw_swc_mob_relocation(swc, mobid, NULL, (struct svga_winsys_buffer *)
                             vsurf->buf, 0, flags);
      mtx_unlock(&vsurf->mutex);
   }
}

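/**
 * vmw_swc_shader_relocation - The winsys shader_relocation callback
 *
 * @swc:    The winsys context.
 * @shid:   Location to write the shader id to, or NULL.
 * @mobid:  Location to be patched with the MOB id of the shader buffer.
 * @offset: Location to be patched with the offset into the MOB.
 * @shader: The guest-backed shader, or NULL to write SVGA3D_INVALID_ID.
 * @flags:  SVGA relocation flags.
 *
 * Writes the shader id. On non-vgpu10 contexts the shader is also added
 * to the validation item list, and if the shader has a backing buffer a
 * mob relocation is emitted for it.
 */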
static void
vmw_swc_shader_relocation(struct svga_winsys_context *swc,
                          uint32 *shid,
                          uint32 *mobid,
                          uint32 *offset,
                          struct svga_winsys_gb_shader *shader,
                          unsigned flags)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_winsys_screen *vws = vswc->vws;
   struct vmw_svga_winsys_shader *vshader;
   struct vmw_ctx_validate_item *ishader;

   if(!shader) {
      *shid = SVGA3D_INVALID_ID;
      return;
   }

   vshader = vmw_svga_winsys_shader(shader);

   if (!vws->base.have_vgpu10) {
      assert(vswc->shader.staged < vswc->shader.reserved);
      ishader = util_hash_table_get(vswc->hash, vshader);

      if (ishader == NULL) {
         ishader = &vswc->shader.items[vswc->shader.used + vswc->shader.staged];
         vmw_svga_winsys_shader_reference(&ishader->vshader, vshader);
         ishader->referenced = FALSE;
         /*
          * Note that a failure here may just fall back to unhashed behavior
          * and potentially cause unnecessary flushing, so ignore the
          * return code.
          */
         (void) util_hash_table_set(vswc->hash, vshader, ishader);
         ++vswc->shader.staged;
      }

      if (!ishader->referenced) {
         ishader->referenced = TRUE;
         p_atomic_inc(&vshader->validated);
      }
   }

   if (shid)
      *shid = vshader->shid;

   if (vshader->buf)
      vmw_swc_mob_relocation(swc, mobid, offset, vshader->buf,
                             0, SVGA_RELOC_READ);
}

static void
vmw_swc_query_relocation(struct svga_winsys_context *swc,
                         SVGAMobId *id,
                         struct svga_winsys_gb_query *query)
{
   /* Queries are backed by one big MOB */
   vmw_swc_mob_relocation(swc, id, NULL, query->buf, 0,
                          SVGA_RELOC_READ | SVGA_RELOC_WRITE);
}

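/**
 * vmw_swc_commit - The winsys commit callback
 *
 * @swc: The winsys context.
 *
 * Commits the currently reserved command bytes and staged relocations by
 * adding them to the used counts, and resets the reserved and staged
 * counters for the next reserve / commit cycle.
 */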
static void
vmw_swc_commit(struct svga_winsys_context *swc)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);

   assert(vswc->command.used + vswc->command.reserved <= vswc->command.size);
   vswc->command.used += vswc->command.reserved;
   vswc->command.reserved = 0;

   assert(vswc->surface.staged <= vswc->surface.reserved);
   assert(vswc->surface.used + vswc->surface.staged <= vswc->surface.size);
   vswc->surface.used += vswc->surface.staged;
   vswc->surface.staged = 0;
   vswc->surface.reserved = 0;

   assert(vswc->shader.staged <= vswc->shader.reserved);
   assert(vswc->shader.used + vswc->shader.staged <= vswc->shader.size);
   vswc->shader.used += vswc->shader.staged;
   vswc->shader.staged = 0;
   vswc->shader.reserved = 0;

   assert(vswc->region.staged <= vswc->region.reserved);
   assert(vswc->region.used + vswc->region.staged <= vswc->region.size);
   vswc->region.used += vswc->region.staged;
   vswc->region.staged = 0;
   vswc->region.reserved = 0;
}


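/**
 * vmw_swc_destroy - The winsys context destroy callback
 *
 * @swc: The winsys context.
 *
 * Drops all validation item references, destroys the buffer hash table,
 * the validation list and the kernel context, and frees the context
 * structure.
 */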
static void
vmw_swc_destroy(struct svga_winsys_context *swc)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   unsigned i;

   for(i = 0; i < vswc->surface.used; ++i) {
      struct vmw_ctx_validate_item *isurf = &vswc->surface.items[i];
      if (isurf->referenced)
         p_atomic_dec(&isurf->vsurf->validated);
      vmw_svga_winsys_surface_reference(&isurf->vsurf, NULL);
   }

   for(i = 0; i < vswc->shader.used; ++i) {
      struct vmw_ctx_validate_item *ishader = &vswc->shader.items[i];
      if (ishader->referenced)
         p_atomic_dec(&ishader->vshader->validated);
      vmw_svga_winsys_shader_reference(&ishader->vshader, NULL);
   }

   util_hash_table_destroy(vswc->hash);
   pb_validate_destroy(vswc->validate);
   vmw_ioctl_context_destroy(vswc->vws, swc->cid);
#ifdef DEBUG
   debug_flush_ctx_destroy(vswc->fctx);
#endif
   FREE(vswc);
}

static unsigned vmw_hash_ptr(void *p)
{
   return (unsigned)(unsigned long)p;
}

static int vmw_ptr_compare(void *key1, void *key2)
{
   return (key1 == key2) ? 0 : 1;
}


/**
 * vmw_svga_winsys_vgpu10_shader_create - The winsys shader_create callback
 *
 * @swc: The winsys context.
 * @shaderId: Previously allocated shader id.
 * @shaderType: The shader type.
 * @bytecode: The shader bytecode.
 * @bytecodeLen: The length of the bytecode.
 *
 * Creates an svga_winsys_gb_shader structure, allocates a buffer for the
 * shader code and copies the shader code into the buffer. Shader
 * resource creation is not done.
 */
static struct svga_winsys_gb_shader *
vmw_svga_winsys_vgpu10_shader_create(struct svga_winsys_context *swc,
                                     uint32 shaderId,
                                     SVGA3dShaderType shaderType,
                                     const uint32 *bytecode,
                                     uint32 bytecodeLen)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_svga_winsys_shader *shader;
   struct svga_winsys_gb_shader *gb_shader =
      vmw_svga_winsys_shader_create(&vswc->vws->base, shaderType, bytecode,
                                    bytecodeLen);
   if (!gb_shader)
      return NULL;

   shader = vmw_svga_winsys_shader(gb_shader);
   shader->shid = shaderId;

   return gb_shader;
}

/**
 * vmw_svga_winsys_vgpu10_shader_destroy - The winsys shader_destroy callback.
 *
 * @swc: The winsys context.
 * @shader: A shader structure previously allocated by shader_create.
 *
 * Frees the shader structure and the buffer holding the shader code.
 */
static void
vmw_svga_winsys_vgpu10_shader_destroy(struct svga_winsys_context *swc,
                                      struct svga_winsys_gb_shader *shader)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);

   vmw_svga_winsys_shader_destroy(&vswc->vws->base, shader);
}

/**
 * vmw_svga_winsys_resource_rebind - The winsys resource_rebind callback
 *
 * @swc: The winsys context.
 * @surface: The surface to be referenced.
 * @shader: The shader to be referenced.
 * @flags: Relocation flags.
 *
 * This callback is needed because shader backing buffers are sub-allocated, and
 * hence the kernel fencing is not sufficient. The buffers need to be put on
 * the context's validation list and fenced after command submission to avoid
 * reuse of busy shader buffers. In addition, surfaces need to be put on the
 * validation list in order for the driver to regard them as referenced
 * by the command stream.
 */
static enum pipe_error
vmw_svga_winsys_resource_rebind(struct svga_winsys_context *swc,
                                struct svga_winsys_surface *surface,
                                struct svga_winsys_gb_shader *shader,
                                unsigned flags)
{
   /**
    * Need to reserve one validation item for either the surface or
    * the shader.
    */
   if (!vmw_swc_reserve(swc, 0, 1))
      return PIPE_ERROR_OUT_OF_MEMORY;

   if (surface)
      vmw_swc_surface_relocation(swc, NULL, NULL, surface, flags);
   else if (shader)
      vmw_swc_shader_relocation(swc, NULL, NULL, NULL, shader, flags);

   vmw_swc_commit(swc);

   return PIPE_OK;
}

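/**
 * vmw_svga_winsys_context_create - Create an svga_winsys_context
 *
 * @sws: The winsys screen.
 *
 * Allocates and initializes a vmw_svga_winsys_context, hooks up the
 * winsys callbacks, and creates the kernel context (an extended context
 * when vgpu10 is available), the pb_validate object and the buffer hash
 * table. Returns NULL on failure.
 */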
struct svga_winsys_context *
vmw_svga_winsys_context_create(struct svga_winsys_screen *sws)
{
   struct vmw_winsys_screen *vws = vmw_winsys_screen(sws);
   struct vmw_svga_winsys_context *vswc;

   vswc = CALLOC_STRUCT(vmw_svga_winsys_context);
   if(!vswc)
      return NULL;

   vswc->base.destroy = vmw_swc_destroy;
   vswc->base.reserve = vmw_swc_reserve;
   vswc->base.get_command_buffer_size = vmw_swc_get_command_buffer_size;
   vswc->base.surface_relocation = vmw_swc_surface_relocation;
   vswc->base.region_relocation = vmw_swc_region_relocation;
   vswc->base.mob_relocation = vmw_swc_mob_relocation;
   vswc->base.query_relocation = vmw_swc_query_relocation;
   vswc->base.query_bind = vmw_swc_query_bind;
   vswc->base.context_relocation = vmw_swc_context_relocation;
   vswc->base.shader_relocation = vmw_swc_shader_relocation;
   vswc->base.commit = vmw_swc_commit;
   vswc->base.flush = vmw_swc_flush;
   vswc->base.surface_map = vmw_svga_winsys_surface_map;
   vswc->base.surface_unmap = vmw_svga_winsys_surface_unmap;
   vswc->base.surface_invalidate = vmw_svga_winsys_surface_invalidate;

   vswc->base.shader_create = vmw_svga_winsys_vgpu10_shader_create;
   vswc->base.shader_destroy = vmw_svga_winsys_vgpu10_shader_destroy;

   vswc->base.resource_rebind = vmw_svga_winsys_resource_rebind;

   if (sws->have_vgpu10)
      vswc->base.cid = vmw_ioctl_extended_context_create(vws, sws->have_vgpu10);
   else
      vswc->base.cid = vmw_ioctl_context_create(vws);

   if (vswc->base.cid == -1)
      goto out_no_context;

   vswc->base.imported_fence_fd = -1;

   vswc->base.have_gb_objects = sws->have_gb_objects;

   vswc->vws = vws;

   vswc->command.size = VMW_COMMAND_SIZE;
   vswc->surface.size = VMW_SURFACE_RELOCS;
   vswc->shader.size = VMW_SHADER_RELOCS;
   vswc->region.size = VMW_REGION_RELOCS;

   vswc->validate = pb_validate_create();
   if(!vswc->validate)
      goto out_no_validate;

   vswc->hash = util_hash_table_create(vmw_hash_ptr, vmw_ptr_compare);
   if (!vswc->hash)
      goto out_no_hash;

#ifdef DEBUG
   vswc->fctx = debug_flush_ctx_create(TRUE, VMW_DEBUG_FLUSH_STACK);
#endif

   return &vswc->base;

out_no_hash:
   pb_validate_destroy(vswc->validate);
out_no_validate:
   vmw_ioctl_context_destroy(vws, vswc->base.cid);
out_no_context:
   FREE(vswc);
   return NULL;
}