1/**********************************************************
2 * Copyright 2008-2012 VMware, Inc.  All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26#include "util/u_bitmask.h"
27#include "util/u_memory.h"
28#include "util/format/u_format.h"
29#include "svga_context.h"
30#include "svga_cmd.h"
31#include "svga_format.h"
32#include "svga_shader.h"
33#include "svga_resource_texture.h"
34#include "VGPU10ShaderTokens.h"
35
36
37/**
38 * This bit isn't really used anywhere.  It only serves to help
39 * generate a unique "signature" for the vertex shader output bitmask.
40 * Shader input/output signatures are used to resolve shader linking
41 * issues.
42 */
43#define FOG_GENERIC_BIT (((uint64_t) 1) << 63)
44
45
46/**
47 * Use the shader info to generate a bitmask indicating which generic
48 * inputs are used by the shader.  A set bit indicates that GENERIC[i]
49 * is used.
50 */
51uint64_t
52svga_get_generic_inputs_mask(const struct tgsi_shader_info *info)
53{
54   unsigned i;
55   uint64_t mask = 0x0;
56
57   for (i = 0; i < info->num_inputs; i++) {
58      if (info->input_semantic_name[i] == TGSI_SEMANTIC_GENERIC) {
59         unsigned j = info->input_semantic_index[i];
60         assert(j < sizeof(mask) * 8);
61         mask |= ((uint64_t) 1) << j;
62      }
63   }
64
65   return mask;
66}
67
68
69/**
70 * Scan shader info to return a bitmask of written outputs.
71 */
72uint64_t
73svga_get_generic_outputs_mask(const struct tgsi_shader_info *info)
74{
75   unsigned i;
76   uint64_t mask = 0x0;
77
78   for (i = 0; i < info->num_outputs; i++) {
79      switch (info->output_semantic_name[i]) {
80      case TGSI_SEMANTIC_GENERIC:
81         {
82            unsigned j = info->output_semantic_index[i];
83            assert(j < sizeof(mask) * 8);
84            mask |= ((uint64_t) 1) << j;
85         }
86         break;
87      case TGSI_SEMANTIC_FOG:
88         mask |= FOG_GENERIC_BIT;
89         break;
90      }
91   }
92
93   return mask;
94}
95
96
97
98/**
99 * Given a mask of used generic variables (as returned by the above functions)
100 * fill in a table which maps those indexes to small integers.
101 * This table is used by the remap_generic_index() function in
102 * svga_tgsi_decl_sm30.c
103 * Example: if generics_mask = binary(1010) it means that GENERIC[1] and
104 * GENERIC[3] are used.  The remap_table will contain:
105 *   table[1] = 0;
106 *   table[3] = 1;
107 * The remaining table entries will be filled in with the next unused
108 * generic index (in this example, 2).
109 */
110void
111svga_remap_generics(uint64_t generics_mask,
112                    int8_t remap_table[MAX_GENERIC_VARYING])
113{
114   /* Note texcoord[0] is reserved so start at 1 */
115   unsigned count = 1, i;
116
117   for (i = 0; i < MAX_GENERIC_VARYING; i++) {
118      remap_table[i] = -1;
119   }
120
121   /* for each bit set in generic_mask */
122   while (generics_mask) {
123      unsigned index = ffsll(generics_mask) - 1;
124      remap_table[index] = count++;
125      generics_mask &= ~((uint64_t) 1 << index);
126   }
127}
128
129
130/**
131 * Use the generic remap table to map a TGSI generic varying variable
132 * index to a small integer.  If the remapping table doesn't have a
133 * valid value for the given index (the table entry is -1) it means
134 * the fragment shader doesn't use that VS output.  Just allocate
135 * the next free value in that case.  Alternately, we could cull
136 * VS instructions that write to register, or replace the register
137 * with a dummy temp register.
138 * XXX TODO: we should do one of the later as it would save precious
139 * texcoord registers.
140 */
141int
142svga_remap_generic_index(int8_t remap_table[MAX_GENERIC_VARYING],
143                         int generic_index)
144{
145   assert(generic_index < MAX_GENERIC_VARYING);
146
147   if (generic_index >= MAX_GENERIC_VARYING) {
148      /* just don't return a random/garbage value */
149      generic_index = MAX_GENERIC_VARYING - 1;
150   }
151
152   if (remap_table[generic_index] == -1) {
153      /* This is a VS output that has no matching PS input.  Find a
154       * free index.
155       */
156      int i, max = 0;
157      for (i = 0; i < MAX_GENERIC_VARYING; i++) {
158         max = MAX2(max, remap_table[i]);
159      }
160      remap_table[generic_index] = max + 1;
161   }
162
163   return remap_table[generic_index];
164}
165
/* Identity swizzle table (indexed by the view's pipe_swizzle): each
 * selector maps to itself, i.e. no channel fixup is applied.
 */
static const enum pipe_swizzle copy_alpha[PIPE_SWIZZLE_MAX] = {
   PIPE_SWIZZLE_X,
   PIPE_SWIZZLE_Y,
   PIPE_SWIZZLE_Z,
   PIPE_SWIZZLE_W,
   PIPE_SWIZZLE_0,
   PIPE_SWIZZLE_1,
   PIPE_SWIZZLE_NONE
};
175
/* Like copy_alpha, except a request for the W (alpha) channel yields the
 * constant 1 instead.
 */
static const enum pipe_swizzle set_alpha[PIPE_SWIZZLE_MAX] = {
   PIPE_SWIZZLE_X,
   PIPE_SWIZZLE_Y,
   PIPE_SWIZZLE_Z,
   PIPE_SWIZZLE_1,
   PIPE_SWIZZLE_0,
   PIPE_SWIZZLE_1,
   PIPE_SWIZZLE_NONE
};
185
/* Requests for X/Y/Z yield constant 0 and a request for W yields the X
 * channel — i.e. the sampled value appears as (0, 0, 0, X).
 */
static const enum pipe_swizzle set_000X[PIPE_SWIZZLE_MAX] = {
   PIPE_SWIZZLE_0,
   PIPE_SWIZZLE_0,
   PIPE_SWIZZLE_0,
   PIPE_SWIZZLE_X,
   PIPE_SWIZZLE_0,
   PIPE_SWIZZLE_1,
   PIPE_SWIZZLE_NONE
};
195
/* Any channel request (X/Y/Z/W) yields the X channel — replicates X into
 * all four components: (X, X, X, X).
 */
static const enum pipe_swizzle set_XXXX[PIPE_SWIZZLE_MAX] = {
   PIPE_SWIZZLE_X,
   PIPE_SWIZZLE_X,
   PIPE_SWIZZLE_X,
   PIPE_SWIZZLE_X,
   PIPE_SWIZZLE_0,
   PIPE_SWIZZLE_1,
   PIPE_SWIZZLE_NONE
};
205
/* X/Y/Z requests yield the X channel; a W request yields constant 1 —
 * i.e. (X, X, X, 1).
 */
static const enum pipe_swizzle set_XXX1[PIPE_SWIZZLE_MAX] = {
   PIPE_SWIZZLE_X,
   PIPE_SWIZZLE_X,
   PIPE_SWIZZLE_X,
   PIPE_SWIZZLE_1,
   PIPE_SWIZZLE_0,
   PIPE_SWIZZLE_1,
   PIPE_SWIZZLE_NONE
};
215
/* X/Y/Z requests yield the X channel; a W request yields the Y channel —
 * i.e. (X, X, X, Y).
 */
static const enum pipe_swizzle set_XXXY[PIPE_SWIZZLE_MAX] = {
   PIPE_SWIZZLE_X,
   PIPE_SWIZZLE_X,
   PIPE_SWIZZLE_X,
   PIPE_SWIZZLE_Y,
   PIPE_SWIZZLE_0,
   PIPE_SWIZZLE_1,
   PIPE_SWIZZLE_NONE
};
225
226
227static VGPU10_RESOURCE_RETURN_TYPE
228vgpu10_return_type(enum pipe_format format)
229{
230   if (util_format_is_unorm(format))
231      return VGPU10_RETURN_TYPE_UNORM;
232   else if (util_format_is_snorm(format))
233      return VGPU10_RETURN_TYPE_SNORM;
234   else if (util_format_is_pure_uint(format))
235      return VGPU10_RETURN_TYPE_UINT;
236   else if (util_format_is_pure_sint(format))
237      return VGPU10_RETURN_TYPE_SINT;
238   else if (util_format_is_float(format))
239      return VGPU10_RETURN_TYPE_FLOAT;
240   else
241      return VGPU10_RETURN_TYPE_MAX;
242}
243
244
245/**
246 * Initialize the shader-neutral fields of svga_compile_key from context
247 * state.  This is basically the texture-related state.
248 */
249void
250svga_init_shader_key_common(const struct svga_context *svga,
251                            enum pipe_shader_type shader_type,
252                            const struct svga_shader *shader,
253                            struct svga_compile_key *key)
254{
255   unsigned i, idx = 0;
256
257   assert(shader_type < ARRAY_SIZE(svga->curr.num_sampler_views));
258
259   /* In case the number of samplers and sampler_views doesn't match,
260    * loop over the lower of the two counts.
261    */
262   key->num_textures = MAX2(svga->curr.num_sampler_views[shader_type],
263                            svga->curr.num_samplers[shader_type]);
264
265   for (i = 0; i < key->num_textures; i++) {
266      struct pipe_sampler_view *view = svga->curr.sampler_views[shader_type][i];
267      const struct svga_sampler_state
268         *sampler = svga->curr.sampler[shader_type][i];
269
270      if (view) {
271         assert(view->texture);
272         assert(view->texture->target < (1 << 4)); /* texture_target:4 */
273
274         enum pipe_texture_target target = view->target;
275
276	 key->tex[i].target = target;
277	 key->tex[i].sampler_return_type = vgpu10_return_type(view->format);
278	 key->tex[i].sampler_view = 1;
279
280
281         /* 1D/2D array textures with one slice and cube map array textures
282          * with one cube are treated as non-arrays by the SVGA3D device.
283          * Set the is_array flag only if we know that we have more than 1
284          * element.  This will be used to select shader instruction/resource
285          * types during shader translation.
286          */
287         switch (view->texture->target) {
288         case PIPE_TEXTURE_1D_ARRAY:
289         case PIPE_TEXTURE_2D_ARRAY:
290            key->tex[i].is_array = view->texture->array_size > 1;
291            break;
292         case PIPE_TEXTURE_CUBE_ARRAY:
293            key->tex[i].is_array = view->texture->array_size > 6;
294            break;
295         default:
296            ; /* nothing / silence compiler warning */
297         }
298
299         assert(view->texture->nr_samples < (1 << 5)); /* 5-bit field */
300         key->tex[i].num_samples = view->texture->nr_samples;
301
302         const enum pipe_swizzle *swizzle_tab;
303         if (view->texture->target == PIPE_BUFFER) {
304            SVGA3dSurfaceFormat svga_format;
305            unsigned tf_flags;
306
307            /* Apply any special swizzle mask for the view format if needed */
308
309            svga_translate_texture_buffer_view_format(view->format,
310                                                      &svga_format, &tf_flags);
311            if (tf_flags & TF_000X)
312               swizzle_tab = set_000X;
313            else if (tf_flags & TF_XXXX)
314               swizzle_tab = set_XXXX;
315            else if (tf_flags & TF_XXX1)
316               swizzle_tab = set_XXX1;
317            else if (tf_flags & TF_XXXY)
318               swizzle_tab = set_XXXY;
319            else
320               swizzle_tab = copy_alpha;
321         }
322         else {
323            /* If we have a non-alpha view into an svga3d surface with an
324             * alpha channel, then explicitly set the alpha channel to 1
325             * when sampling. Note that we need to check the
326             * actual device format to cover also imported surface cases.
327             */
328            swizzle_tab =
329               (!util_format_has_alpha(view->format) &&
330                svga_texture_device_format_has_alpha(view->texture)) ?
331                set_alpha : copy_alpha;
332
333            if (view->texture->format == PIPE_FORMAT_DXT1_RGB ||
334                view->texture->format == PIPE_FORMAT_DXT1_SRGB)
335               swizzle_tab = set_alpha;
336
337            /* Save the compare function as we need to handle
338             * depth compare in the shader.
339             */
340            key->tex[i].compare_mode = sampler->compare_mode;
341            key->tex[i].compare_func = sampler->compare_func;
342         }
343
344         key->tex[i].swizzle_r = swizzle_tab[view->swizzle_r];
345         key->tex[i].swizzle_g = swizzle_tab[view->swizzle_g];
346         key->tex[i].swizzle_b = swizzle_tab[view->swizzle_b];
347         key->tex[i].swizzle_a = swizzle_tab[view->swizzle_a];
348      }
349      else {
350	 key->tex[i].sampler_view = 0;
351      }
352
353      if (sampler) {
354         if (!sampler->normalized_coords) {
355            if (view) {
356               assert(idx < (1 << 5));  /* width_height_idx:5 bitfield */
357               key->tex[i].width_height_idx = idx++;
358	    }
359            key->tex[i].unnormalized = TRUE;
360            ++key->num_unnormalized_coords;
361
362            if (sampler->magfilter == SVGA3D_TEX_FILTER_NEAREST ||
363                sampler->minfilter == SVGA3D_TEX_FILTER_NEAREST) {
364                key->tex[i].texel_bias = TRUE;
365            }
366         }
367      }
368   }
369
370   key->clamp_vertex_color = svga->curr.rast ?
371                             svga->curr.rast->templ.clamp_vertex_color : 0;
372}
373
374
375/** Search for a compiled shader variant with the same compile key */
376struct svga_shader_variant *
377svga_search_shader_key(const struct svga_shader *shader,
378                       const struct svga_compile_key *key)
379{
380   struct svga_shader_variant *variant = shader->variants;
381
382   assert(key);
383
384   for ( ; variant; variant = variant->next) {
385      if (svga_compile_keys_equal(key, &variant->key))
386         return variant;
387   }
388   return NULL;
389}
390
391/** Search for a shader with the same token key */
392struct svga_shader *
393svga_search_shader_token_key(struct svga_shader *pshader,
394                             const struct svga_token_key *key)
395{
396   struct svga_shader *shader = pshader;
397
398   assert(key);
399
400   for ( ; shader; shader = shader->next) {
401      if (memcmp(key, &shader->token_key, sizeof(struct svga_token_key)) == 0)
402         return shader;
403   }
404   return NULL;
405}
406
407/**
408 * Helper function to define a gb shader for non-vgpu10 device
409 */
410static enum pipe_error
411define_gb_shader_vgpu9(struct svga_context *svga,
412                       struct svga_shader_variant *variant,
413                       unsigned codeLen)
414{
415   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
416   enum pipe_error ret;
417
418   /**
419    * Create gb memory for the shader and upload the shader code.
420    * Kernel module will allocate an id for the shader and issue
421    * the DefineGBShader command.
422    */
423   variant->gb_shader = sws->shader_create(sws, variant->type,
424                                           variant->tokens, codeLen);
425
426   svga->hud.shader_mem_used += codeLen;
427
428   if (!variant->gb_shader)
429      return PIPE_ERROR_OUT_OF_MEMORY;
430
431   ret = SVGA3D_BindGBShader(svga->swc, variant->gb_shader);
432
433   return ret;
434}
435
436/**
437 * Helper function to define a gb shader for vgpu10 device
438 */
439static enum pipe_error
440define_gb_shader_vgpu10(struct svga_context *svga,
441                        struct svga_shader_variant *variant,
442                        unsigned codeLen)
443{
444   struct svga_winsys_context *swc = svga->swc;
445   enum pipe_error ret;
446   unsigned len = codeLen + variant->signatureLen;
447
448   /**
449    * Shaders in VGPU10 enabled device reside in the device COTable.
450    * SVGA driver will allocate an integer ID for the shader and
451    * issue DXDefineShader and DXBindShader commands.
452    */
453   variant->id = util_bitmask_add(svga->shader_id_bm);
454   if (variant->id == UTIL_BITMASK_INVALID_INDEX) {
455      return PIPE_ERROR_OUT_OF_MEMORY;
456   }
457
458   /* Create gb memory for the shader and upload the shader code */
459   variant->gb_shader = swc->shader_create(swc,
460                                           variant->id, variant->type,
461                                           variant->tokens, codeLen,
462                                           variant->signature,
463                                           variant->signatureLen);
464
465   svga->hud.shader_mem_used += len;
466
467   if (!variant->gb_shader) {
468      /* Free the shader ID */
469      assert(variant->id != UTIL_BITMASK_INVALID_INDEX);
470      goto fail_no_allocation;
471   }
472
473   /**
474    * Since we don't want to do any flush within state emission to avoid
475    * partial state in a command buffer, it's important to make sure that
476    * there is enough room to send both the DXDefineShader & DXBindShader
477    * commands in the same command buffer. So let's send both
478    * commands in one command reservation. If it fails, we'll undo
479    * the shader creation and return an error.
480    */
481   ret = SVGA3D_vgpu10_DefineAndBindShader(swc, variant->gb_shader,
482                                           variant->id, variant->type,
483                                           len);
484
485   if (ret != PIPE_OK)
486      goto fail;
487
488   return PIPE_OK;
489
490fail:
491   swc->shader_destroy(swc, variant->gb_shader);
492   variant->gb_shader = NULL;
493
494fail_no_allocation:
495   util_bitmask_clear(svga->shader_id_bm, variant->id);
496   variant->id = UTIL_BITMASK_INVALID_INDEX;
497
498   return PIPE_ERROR_OUT_OF_MEMORY;
499}
500
501/**
502 * Issue the SVGA3D commands to define a new shader.
503 * \param variant  contains the shader tokens, etc.  The result->id field will
504 *                 be set here.
505 */
506enum pipe_error
507svga_define_shader(struct svga_context *svga,
508                   struct svga_shader_variant *variant)
509{
510   unsigned codeLen = variant->nr_tokens * sizeof(variant->tokens[0]);
511   enum pipe_error ret;
512
513   SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_DEFINESHADER);
514
515   variant->id = UTIL_BITMASK_INVALID_INDEX;
516
517   if (svga_have_gb_objects(svga)) {
518      if (svga_have_vgpu10(svga))
519         ret = define_gb_shader_vgpu10(svga, variant, codeLen);
520      else
521         ret = define_gb_shader_vgpu9(svga, variant, codeLen);
522   }
523   else {
524      /* Allocate an integer ID for the shader */
525      variant->id = util_bitmask_add(svga->shader_id_bm);
526      if (variant->id == UTIL_BITMASK_INVALID_INDEX) {
527         ret = PIPE_ERROR_OUT_OF_MEMORY;
528         goto done;
529      }
530
531      /* Issue SVGA3D device command to define the shader */
532      ret = SVGA3D_DefineShader(svga->swc,
533                                variant->id,
534                                variant->type,
535                                variant->tokens,
536                                codeLen);
537      if (ret != PIPE_OK) {
538         /* free the ID */
539         assert(variant->id != UTIL_BITMASK_INVALID_INDEX);
540         util_bitmask_clear(svga->shader_id_bm, variant->id);
541         variant->id = UTIL_BITMASK_INVALID_INDEX;
542      }
543   }
544
545done:
546   SVGA_STATS_TIME_POP(svga_sws(svga));
547   return ret;
548}
549
550
551/**
552 * Issue the SVGA3D commands to set/bind a shader.
553 * \param result  the shader to bind.
554 */
555enum pipe_error
556svga_set_shader(struct svga_context *svga,
557                SVGA3dShaderType type,
558                struct svga_shader_variant *variant)
559{
560   enum pipe_error ret;
561   unsigned id = variant ? variant->id : SVGA3D_INVALID_ID;
562
563   assert(type == SVGA3D_SHADERTYPE_VS ||
564          type == SVGA3D_SHADERTYPE_GS ||
565          type == SVGA3D_SHADERTYPE_PS ||
566          type == SVGA3D_SHADERTYPE_HS ||
567          type == SVGA3D_SHADERTYPE_DS ||
568          type == SVGA3D_SHADERTYPE_CS);
569
570   if (svga_have_gb_objects(svga)) {
571      struct svga_winsys_gb_shader *gbshader =
572         variant ? variant->gb_shader : NULL;
573
574      if (svga_have_vgpu10(svga))
575         ret = SVGA3D_vgpu10_SetShader(svga->swc, type, gbshader, id);
576      else
577         ret = SVGA3D_SetGBShader(svga->swc, type, gbshader);
578   }
579   else {
580      ret = SVGA3D_SetShader(svga->swc, type, id);
581   }
582
583   return ret;
584}
585
586
587struct svga_shader_variant *
588svga_new_shader_variant(struct svga_context *svga, enum pipe_shader_type type)
589{
590   struct svga_shader_variant *variant;
591
592   switch (type) {
593   case PIPE_SHADER_FRAGMENT:
594      variant = CALLOC(1, sizeof(struct svga_fs_variant));
595      break;
596   case PIPE_SHADER_GEOMETRY:
597      variant = CALLOC(1, sizeof(struct svga_gs_variant));
598      break;
599   case PIPE_SHADER_VERTEX:
600      variant = CALLOC(1, sizeof(struct svga_vs_variant));
601      break;
602   case PIPE_SHADER_TESS_EVAL:
603      variant = CALLOC(1, sizeof(struct svga_tes_variant));
604      break;
605   case PIPE_SHADER_TESS_CTRL:
606      variant = CALLOC(1, sizeof(struct svga_tcs_variant));
607      break;
608   default:
609      return NULL;
610   }
611
612   if (variant) {
613      variant->type = svga_shader_type(type);
614      svga->hud.num_shaders++;
615   }
616   return variant;
617}
618
619
/**
 * Destroy a shader variant: release its device-side shader object and/or
 * shader ID, free the CPU-side token and signature buffers, and decrement
 * the HUD shader count.
 */
void
svga_destroy_shader_variant(struct svga_context *svga,
                            struct svga_shader_variant *variant)
{
   if (svga_have_gb_objects(svga) && variant->gb_shader) {
      if (svga_have_vgpu10(svga)) {
         /* Destroy the winsys shader object, issue the device
          * DestroyShader command, then recycle the integer shader ID.
          */
         struct svga_winsys_context *swc = svga->swc;
         swc->shader_destroy(swc, variant->gb_shader);
         SVGA_RETRY(svga, SVGA3D_vgpu10_DestroyShader(svga->swc, variant->id));
         util_bitmask_clear(svga->shader_id_bm, variant->id);
      }
      else {
         /* Non-vgpu10 gb path: the winsys screen owns the gb shader */
         struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
         sws->shader_destroy(sws, variant->gb_shader);
      }
      variant->gb_shader = NULL;
   }
   else {
      /* Non-gb path: only a device shader ID was allocated (if any) */
      if (variant->id != UTIL_BITMASK_INVALID_INDEX) {
         SVGA_RETRY(svga, SVGA3D_DestroyShader(svga->swc, variant->id,
                                               variant->type));
         util_bitmask_clear(svga->shader_id_bm, variant->id);
      }
   }

   /* Free CPU-side copies of the signature and shader tokens */
   FREE(variant->signature);
   FREE((unsigned *)variant->tokens);
   FREE(variant);

   svga->hud.num_shaders--;
}
651
652/*
653 * Rebind shaders.
654 * Called at the beginning of every new command buffer to ensure that
655 * shaders are properly paged-in. Instead of sending the SetShader
656 * command, this function sends a private allocation command to
657 * page in a shader. This avoids emitting redundant state to the device
658 * just to page in a resource.
659 */
enum pipe_error
svga_rebind_shaders(struct svga_context *svga)
{
   struct svga_winsys_context *swc = svga->swc;
   struct svga_hw_draw_state *hw = &svga->state.hw_draw;
   enum pipe_error ret;

   assert(svga_have_vgpu10(svga));

   /**
    * If the underlying winsys layer does not need resource rebinding,
    * just clear the rebind flags and return.
    */
   if (swc->resource_rebind == NULL) {
      svga->rebind.flags.vs = 0;
      svga->rebind.flags.gs = 0;
      svga->rebind.flags.fs = 0;
      svga->rebind.flags.tcs = 0;
      svga->rebind.flags.tes = 0;

      return PIPE_OK;
   }

   /* For each stage: page in the currently bound gb shader (if any),
    * then clear the stage's rebind flag.  Note the flag is cleared even
    * when no rebind was issued (no flag, no bound shader, or no gb
    * shader object) so stale flags don't persist.
    */
   if (svga->rebind.flags.vs && hw->vs && hw->vs->gb_shader) {
      ret = swc->resource_rebind(swc, NULL, hw->vs->gb_shader, SVGA_RELOC_READ);
      if (ret != PIPE_OK)
         return ret;
   }
   svga->rebind.flags.vs = 0;

   if (svga->rebind.flags.gs && hw->gs && hw->gs->gb_shader) {
      ret = swc->resource_rebind(swc, NULL, hw->gs->gb_shader, SVGA_RELOC_READ);
      if (ret != PIPE_OK)
         return ret;
   }
   svga->rebind.flags.gs = 0;

   if (svga->rebind.flags.fs && hw->fs && hw->fs->gb_shader) {
      ret = swc->resource_rebind(swc, NULL, hw->fs->gb_shader, SVGA_RELOC_READ);
      if (ret != PIPE_OK)
         return ret;
   }
   svga->rebind.flags.fs = 0;

   if (svga->rebind.flags.tcs && hw->tcs && hw->tcs->gb_shader) {
      ret = swc->resource_rebind(swc, NULL, hw->tcs->gb_shader, SVGA_RELOC_READ);
      if (ret != PIPE_OK)
         return ret;
   }
   svga->rebind.flags.tcs = 0;

   if (svga->rebind.flags.tes && hw->tes && hw->tes->gb_shader) {
      ret = swc->resource_rebind(swc, NULL, hw->tes->gb_shader, SVGA_RELOC_READ);
      if (ret != PIPE_OK)
         return ret;
   }
   svga->rebind.flags.tes = 0;

   return PIPE_OK;
}
720