virgl_context.c revision b8e80941
1/*
2 * Copyright 2014, 2015 Red Hat.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24#include <libsync.h>
25#include "pipe/p_shader_tokens.h"
26
27#include "pipe/p_context.h"
28#include "pipe/p_defines.h"
29#include "pipe/p_screen.h"
30#include "pipe/p_state.h"
31#include "util/u_inlines.h"
32#include "util/u_memory.h"
33#include "util/u_format.h"
34#include "util/u_prim.h"
35#include "util/u_transfer.h"
36#include "util/u_helpers.h"
37#include "util/slab.h"
38#include "util/u_upload_mgr.h"
39#include "util/u_blitter.h"
40#include "tgsi/tgsi_text.h"
41#include "indices/u_primconvert.h"
42
43#include "pipebuffer/pb_buffer.h"
44
45#include "virgl_encode.h"
46#include "virgl_context.h"
47#include "virgl_protocol.h"
48#include "virgl_resource.h"
49#include "virgl_screen.h"
50
/* CSO wrapper for a vertex-elements object living on the host side. */
struct virgl_vertex_elements_state {
   uint32_t handle;                        /* host object handle */
   /* Used only when an instance divisor forces a 1:1 element:binding
    * duplication (see virgl_create_vertex_elements_state); maps each
    * duplicated binding slot back to the original vertex buffer index. */
   uint8_t binding_map[PIPE_MAX_ATTRIBS];
   uint8_t num_bindings;                   /* 0 = no remapping in effect */
};
56
/* Monotonically increasing source of host object handles; 0 is never
 * returned, so 0 can be used as "no object".
 * NOTE(review): not atomic — presumably callers are serialized per
 * process; confirm before using from multiple threads. */
static uint32_t next_handle;
uint32_t virgl_object_assign_handle(void)
{
   return ++next_handle;
}
62
63static void virgl_attach_res_framebuffer(struct virgl_context *vctx)
64{
65   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
66   struct pipe_surface *surf;
67   struct virgl_resource *res;
68   unsigned i;
69
70   surf = vctx->framebuffer.zsbuf;
71   if (surf) {
72      res = virgl_resource(surf->texture);
73      if (res) {
74         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
75         virgl_resource_dirty(res, surf->u.tex.level);
76      }
77   }
78   for (i = 0; i < vctx->framebuffer.nr_cbufs; i++) {
79      surf = vctx->framebuffer.cbufs[i];
80      if (surf) {
81         res = virgl_resource(surf->texture);
82         if (res) {
83            vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
84            virgl_resource_dirty(res, surf->u.tex.level);
85         }
86      }
87   }
88}
89
90static void virgl_attach_res_sampler_views(struct virgl_context *vctx,
91                                           enum pipe_shader_type shader_type)
92{
93   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
94   struct virgl_textures_info *tinfo = &vctx->samplers[shader_type];
95   struct virgl_resource *res;
96   uint32_t remaining_mask = tinfo->enabled_mask;
97   unsigned i;
98   while (remaining_mask) {
99      i = u_bit_scan(&remaining_mask);
100      assert(tinfo->views[i]);
101
102      res = virgl_resource(tinfo->views[i]->base.texture);
103      if (res)
104         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
105   }
106}
107
108static void virgl_attach_res_vertex_buffers(struct virgl_context *vctx)
109{
110   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
111   struct virgl_resource *res;
112   unsigned i;
113
114   for (i = 0; i < vctx->num_vertex_buffers; i++) {
115      res = virgl_resource(vctx->vertex_buffer[i].buffer.resource);
116      if (res)
117         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
118   }
119}
120
121static void virgl_attach_res_index_buffer(struct virgl_context *vctx,
122					  struct virgl_indexbuf *ib)
123{
124   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
125   struct virgl_resource *res;
126
127   res = virgl_resource(ib->buffer);
128   if (res)
129      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
130}
131
132static void virgl_attach_res_so_targets(struct virgl_context *vctx)
133{
134   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
135   struct virgl_resource *res;
136   unsigned i;
137
138   for (i = 0; i < vctx->num_so_targets; i++) {
139      res = virgl_resource(vctx->so_targets[i].base.buffer);
140      if (res)
141         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
142   }
143}
144
145static void virgl_attach_res_uniform_buffers(struct virgl_context *vctx,
146                                             enum pipe_shader_type shader_type)
147{
148   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
149   struct virgl_resource *res;
150   unsigned i;
151   for (i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++) {
152      res = virgl_resource(vctx->ubos[shader_type][i]);
153      if (res) {
154         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
155      }
156   }
157}
158
159static void virgl_attach_res_shader_buffers(struct virgl_context *vctx,
160                                            enum pipe_shader_type shader_type)
161{
162   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
163   struct virgl_resource *res;
164   unsigned i;
165   for (i = 0; i < PIPE_MAX_SHADER_BUFFERS; i++) {
166      res = virgl_resource(vctx->ssbos[shader_type][i]);
167      if (res) {
168         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
169      }
170   }
171}
172
173static void virgl_attach_res_shader_images(struct virgl_context *vctx,
174                                           enum pipe_shader_type shader_type)
175{
176   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
177   struct virgl_resource *res;
178   unsigned i;
179   for (i = 0; i < PIPE_MAX_SHADER_IMAGES; i++) {
180      res = virgl_resource(vctx->images[shader_type][i]);
181      if (res) {
182         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
183      }
184   }
185}
186
187static void virgl_attach_res_atomic_buffers(struct virgl_context *vctx)
188{
189   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
190   struct virgl_resource *res;
191   unsigned i;
192   for (i = 0; i < PIPE_MAX_HW_ATOMIC_BUFFERS; i++) {
193      res = virgl_resource(vctx->atomic_buffers[i]);
194      if (res) {
195         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
196      }
197   }
198}
199
/*
 * after flushing, the hw context still has a bunch of
 * resources bound, so we need to rebind those here.
 */
static void virgl_reemit_res(struct virgl_context *vctx)
{
   enum pipe_shader_type shader_type;

   /* reattach any flushed resources */
   /* framebuffer, sampler views, vertex/index/uniform/stream buffers */
   virgl_attach_res_framebuffer(vctx);

   /* Per-stage bindings for every shader type. */
   for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
      virgl_attach_res_sampler_views(vctx, shader_type);
      virgl_attach_res_uniform_buffers(vctx, shader_type);
      virgl_attach_res_shader_buffers(vctx, shader_type);
      virgl_attach_res_shader_images(vctx, shader_type);
   }
   /* Stage-independent bindings. */
   virgl_attach_res_atomic_buffers(vctx);
   virgl_attach_res_vertex_buffers(vctx);
   virgl_attach_res_so_targets(vctx);
}
222
/* Create a pipe_surface view of a resource and mirror it on the host.
 * Returns the new surface (refcount 1) or NULL on allocation failure. */
static struct pipe_surface *virgl_create_surface(struct pipe_context *ctx,
                                                struct pipe_resource *resource,
                                                const struct pipe_surface *templ)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_surface *surf;
   struct virgl_resource *res = virgl_resource(resource);
   uint32_t handle;

   surf = CALLOC_STRUCT(virgl_surface);
   if (!surf)
      return NULL;

   /* Without host sRGB-control support, the view format must agree with
    * the resource format on sRGB-ness. */
   assert(ctx->screen->get_param(ctx->screen,
                                 PIPE_CAP_DEST_SURFACE_SRGB_CONTROL) ||
          (util_format_is_srgb(templ->format) ==
           util_format_is_srgb(resource->format)));

   /* Creating a renderable view may lead to host-side writes. */
   virgl_resource_dirty(res, 0);
   handle = virgl_object_assign_handle();
   pipe_reference_init(&surf->base.reference, 1);
   pipe_resource_reference(&surf->base.texture, resource);
   surf->base.context = ctx;
   surf->base.format = templ->format;
   if (resource->target != PIPE_BUFFER) {
      /* Texture view: size derives from the selected mip level. */
      surf->base.width = u_minify(resource->width0, templ->u.tex.level);
      surf->base.height = u_minify(resource->height0, templ->u.tex.level);
      surf->base.u.tex.level = templ->u.tex.level;
      surf->base.u.tex.first_layer = templ->u.tex.first_layer;
      surf->base.u.tex.last_layer = templ->u.tex.last_layer;
   } else {
      /* Buffer view: width is the (inclusive) element range. */
      surf->base.width = templ->u.buf.last_element - templ->u.buf.first_element + 1;
      surf->base.height = resource->height0;
      surf->base.u.buf.first_element = templ->u.buf.first_element;
      surf->base.u.buf.last_element = templ->u.buf.last_element;
   }
   virgl_encoder_create_surface(vctx, handle, res, &surf->base);
   surf->handle = handle;
   return &surf->base;
}
263
264static void virgl_surface_destroy(struct pipe_context *ctx,
265                                 struct pipe_surface *psurf)
266{
267   struct virgl_context *vctx = virgl_context(ctx);
268   struct virgl_surface *surf = virgl_surface(psurf);
269
270   pipe_resource_reference(&surf->base.texture, NULL);
271   virgl_encode_delete_object(vctx, surf->handle, VIRGL_OBJECT_SURFACE);
272   FREE(surf);
273}
274
275static void *virgl_create_blend_state(struct pipe_context *ctx,
276                                              const struct pipe_blend_state *blend_state)
277{
278   struct virgl_context *vctx = virgl_context(ctx);
279   uint32_t handle;
280   handle = virgl_object_assign_handle();
281
282   virgl_encode_blend_state(vctx, handle, blend_state);
283   return (void *)(unsigned long)handle;
284
285}
286
287static void virgl_bind_blend_state(struct pipe_context *ctx,
288                                           void *blend_state)
289{
290   struct virgl_context *vctx = virgl_context(ctx);
291   uint32_t handle = (unsigned long)blend_state;
292   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_BLEND);
293}
294
295static void virgl_delete_blend_state(struct pipe_context *ctx,
296                                     void *blend_state)
297{
298   struct virgl_context *vctx = virgl_context(ctx);
299   uint32_t handle = (unsigned long)blend_state;
300   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_BLEND);
301}
302
303static void *virgl_create_depth_stencil_alpha_state(struct pipe_context *ctx,
304                                                   const struct pipe_depth_stencil_alpha_state *blend_state)
305{
306   struct virgl_context *vctx = virgl_context(ctx);
307   uint32_t handle;
308   handle = virgl_object_assign_handle();
309
310   virgl_encode_dsa_state(vctx, handle, blend_state);
311   return (void *)(unsigned long)handle;
312}
313
314static void virgl_bind_depth_stencil_alpha_state(struct pipe_context *ctx,
315                                                void *blend_state)
316{
317   struct virgl_context *vctx = virgl_context(ctx);
318   uint32_t handle = (unsigned long)blend_state;
319   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_DSA);
320}
321
322static void virgl_delete_depth_stencil_alpha_state(struct pipe_context *ctx,
323                                                  void *dsa_state)
324{
325   struct virgl_context *vctx = virgl_context(ctx);
326   uint32_t handle = (unsigned long)dsa_state;
327   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_DSA);
328}
329
330static void *virgl_create_rasterizer_state(struct pipe_context *ctx,
331                                                   const struct pipe_rasterizer_state *rs_state)
332{
333   struct virgl_context *vctx = virgl_context(ctx);
334   struct virgl_rasterizer_state *vrs = CALLOC_STRUCT(virgl_rasterizer_state);
335
336   if (!vrs)
337      return NULL;
338   vrs->rs = *rs_state;
339   vrs->handle = virgl_object_assign_handle();
340
341   virgl_encode_rasterizer_state(vctx, vrs->handle, rs_state);
342   return (void *)vrs;
343}
344
345static void virgl_bind_rasterizer_state(struct pipe_context *ctx,
346                                                void *rs_state)
347{
348   struct virgl_context *vctx = virgl_context(ctx);
349   uint32_t handle = 0;
350   if (rs_state) {
351      struct virgl_rasterizer_state *vrs = rs_state;
352      vctx->rs_state = *vrs;
353      handle = vrs->handle;
354   }
355   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_RASTERIZER);
356}
357
358static void virgl_delete_rasterizer_state(struct pipe_context *ctx,
359                                         void *rs_state)
360{
361   struct virgl_context *vctx = virgl_context(ctx);
362   struct virgl_rasterizer_state *vrs = rs_state;
363   virgl_encode_delete_object(vctx, vrs->handle, VIRGL_OBJECT_RASTERIZER);
364   FREE(vrs);
365}
366
367static void virgl_set_framebuffer_state(struct pipe_context *ctx,
368                                                const struct pipe_framebuffer_state *state)
369{
370   struct virgl_context *vctx = virgl_context(ctx);
371
372   vctx->framebuffer = *state;
373   virgl_encoder_set_framebuffer_state(vctx, state);
374   virgl_attach_res_framebuffer(vctx);
375}
376
/* Viewports are pure host state; forward straight to the encoder. */
static void virgl_set_viewport_states(struct pipe_context *ctx,
                                     unsigned start_slot,
                                     unsigned num_viewports,
                                     const struct pipe_viewport_state *state)
{
   virgl_encoder_set_viewport_states(virgl_context(ctx), start_slot,
                                     num_viewports, state);
}
385
386static void *virgl_create_vertex_elements_state(struct pipe_context *ctx,
387                                                        unsigned num_elements,
388                                                        const struct pipe_vertex_element *elements)
389{
390   struct pipe_vertex_element new_elements[PIPE_MAX_ATTRIBS];
391   struct virgl_context *vctx = virgl_context(ctx);
392   struct virgl_vertex_elements_state *state =
393      CALLOC_STRUCT(virgl_vertex_elements_state);
394
395   for (int i = 0; i < num_elements; ++i) {
396      if (elements[i].instance_divisor) {
397	 /* Virglrenderer doesn't deal with instance_divisor correctly if
398	  * there isn't a 1:1 relationship between elements and bindings.
399	  * So let's make sure there is, by duplicating bindings.
400	  */
401	 for (int j = 0; j < num_elements; ++j) {
402            new_elements[j] = elements[j];
403            new_elements[j].vertex_buffer_index = j;
404            state->binding_map[j] = elements[j].vertex_buffer_index;
405	 }
406	 elements = new_elements;
407	 state->num_bindings = num_elements;
408	 break;
409      }
410   }
411
412   state->handle = virgl_object_assign_handle();
413   virgl_encoder_create_vertex_elements(vctx, state->handle,
414                                       num_elements, elements);
415   return state;
416}
417
418static void virgl_delete_vertex_elements_state(struct pipe_context *ctx,
419                                              void *ve)
420{
421   struct virgl_context *vctx = virgl_context(ctx);
422   struct virgl_vertex_elements_state *state =
423      (struct virgl_vertex_elements_state *)ve;
424   virgl_encode_delete_object(vctx, state->handle, VIRGL_OBJECT_VERTEX_ELEMENTS);
425   FREE(state);
426}
427
/* Bind a vertex-elements CSO; NULL encodes handle 0 (unbind). Marks the
 * vertex array dirty so buffers are re-emitted (possibly remapped via
 * binding_map) at the next draw. */
static void virgl_bind_vertex_elements_state(struct pipe_context *ctx,
                                                     void *ve)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_vertex_elements_state *state =
      (struct virgl_vertex_elements_state *)ve;
   vctx->vertex_elements = state;
   virgl_encode_bind_object(vctx, state ? state->handle : 0,
                            VIRGL_OBJECT_VERTEX_ELEMENTS);
   vctx->vertex_array_dirty = TRUE;
}
439
/* Cache the new vertex buffer bindings (reference management is handled
 * by the util helper) and defer the actual encode until draw time. */
static void virgl_set_vertex_buffers(struct pipe_context *ctx,
                                    unsigned start_slot,
                                    unsigned num_buffers,
                                    const struct pipe_vertex_buffer *buffers)
{
   struct virgl_context *vctx = virgl_context(ctx);

   util_set_vertex_buffers_count(vctx->vertex_buffer,
                                 &vctx->num_vertex_buffers,
                                 buffers, start_slot, num_buffers);

   vctx->vertex_array_dirty = TRUE;
}
453
/* Flush pending vertex-buffer state to the host before a draw.
 * If the bound vertex-elements CSO duplicated bindings (num_bindings != 0),
 * buffers are reordered through binding_map first.
 * NOTE(review): dereferences vctx->vertex_elements without a NULL check —
 * assumes a CSO is always bound when the array is dirty; confirm. */
static void virgl_hw_set_vertex_buffers(struct virgl_context *vctx)
{
   if (vctx->vertex_array_dirty) {
      struct virgl_vertex_elements_state *ve = vctx->vertex_elements;

      if (ve->num_bindings) {
         struct pipe_vertex_buffer vertex_buffers[PIPE_MAX_ATTRIBS];
         for (int i = 0; i < ve->num_bindings; ++i)
            vertex_buffers[i] = vctx->vertex_buffer[ve->binding_map[i]];

         virgl_encoder_set_vertex_buffers(vctx, ve->num_bindings, vertex_buffers);
      } else
         virgl_encoder_set_vertex_buffers(vctx, vctx->num_vertex_buffers, vctx->vertex_buffer);

      virgl_attach_res_vertex_buffers(vctx);

      vctx->vertex_array_dirty = FALSE;
   }
}
473
/* Stencil reference values are pure host state; forward to the encoder. */
static void virgl_set_stencil_ref(struct pipe_context *ctx,
                                 const struct pipe_stencil_ref *ref)
{
   virgl_encoder_set_stencil_ref(virgl_context(ctx), ref);
}
480
/* Blend color is pure host state; forward to the encoder. */
static void virgl_set_blend_color(struct pipe_context *ctx,
                                 const struct pipe_blend_color *color)
{
   virgl_encoder_set_blend_color(virgl_context(ctx), color);
}
487
/* Encode the index buffer binding and add its backing resource to the
 * relocation list. */
static void virgl_hw_set_index_buffer(struct virgl_context *vctx,
                                     struct virgl_indexbuf *ib)
{
   virgl_encoder_set_index_buffer(vctx, ib);
   virgl_attach_res_index_buffer(vctx, ib);
}
494
/* Bind a constant buffer slot for one shader stage.
 * A real pipe_resource is encoded as a uniform-buffer binding and a
 * reference kept in vctx->ubos for later re-emission; a user buffer is
 * copied inline into the command stream (size converted to dwords).
 * A NULL buf clears the slot. */
static void virgl_set_constant_buffer(struct pipe_context *ctx,
                                     enum pipe_shader_type shader, uint index,
                                     const struct pipe_constant_buffer *buf)
{
   struct virgl_context *vctx = virgl_context(ctx);

   if (buf) {
      if (!buf->user_buffer){
         struct virgl_resource *res = virgl_resource(buf->buffer);
         virgl_encoder_set_uniform_buffer(vctx, shader, index, buf->buffer_offset,
                                          buf->buffer_size, res);
         pipe_resource_reference(&vctx->ubos[shader][index], buf->buffer);
         return;
      }
      /* User data path: nothing to reference; drop any old binding. */
      pipe_resource_reference(&vctx->ubos[shader][index], NULL);
      virgl_encoder_write_constant_buffer(vctx, shader, index, buf->buffer_size / 4, buf->user_buffer);
   } else {
      /* Clear the slot: zero-sized write plus reference drop. */
      virgl_encoder_write_constant_buffer(vctx, shader, index, 0, NULL);
      pipe_resource_reference(&vctx->ubos[shader][index], NULL);
   }
}
516
/* Write data into a resource by encoding it inline in the command stream.
 * If the destination range is still referenced by queued commands, the
 * context is flushed and the resource waited on first to avoid the host
 * reading the new data for old commands.
 * NOTE(review): trans.offset = box->x presumably matters only for the
 * buffer path of virgl_res_needs_flush — confirm. */
void virgl_transfer_inline_write(struct pipe_context *ctx,
                                struct pipe_resource *res,
                                unsigned level,
                                unsigned usage,
                                const struct pipe_box *box,
                                const void *data,
                                unsigned stride,
                                unsigned layer_stride)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *vs = virgl_screen(ctx->screen);
   struct virgl_resource *grres = virgl_resource(res);
   struct virgl_transfer trans = { 0 };

   trans.base.resource = res;
   trans.base.level = level;
   trans.base.usage = usage;
   trans.base.box = *box;
   trans.base.stride = stride;
   trans.base.layer_stride = layer_stride;
   trans.offset = box->x;

   virgl_resource_dirty(grres, 0);

   if (virgl_res_needs_flush(vctx, &trans)) {
      ctx->flush(ctx, NULL, 0);
      vs->vws->resource_wait(vs->vws, grres->hw_res);
   }

   virgl_encoder_inline_write(vctx, grres, level, usage,
                              box, data, stride, layer_stride);
}
549
550static void *virgl_shader_encoder(struct pipe_context *ctx,
551                                  const struct pipe_shader_state *shader,
552                                  unsigned type)
553{
554   struct virgl_context *vctx = virgl_context(ctx);
555   uint32_t handle;
556   struct tgsi_token *new_tokens;
557   int ret;
558
559   new_tokens = virgl_tgsi_transform(vctx, shader->tokens);
560   if (!new_tokens)
561      return NULL;
562
563   handle = virgl_object_assign_handle();
564   /* encode VS state */
565   ret = virgl_encode_shader_state(vctx, handle, type,
566                                   &shader->stream_output, 0,
567                                   new_tokens);
568   if (ret) {
569      return NULL;
570   }
571
572   FREE(new_tokens);
573   return (void *)(unsigned long)handle;
574
575}
/* Thin wrapper: encode a vertex shader. */
static void *virgl_create_vs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_VERTEX);
}
581
/* Thin wrapper: encode a tessellation control shader. */
static void *virgl_create_tcs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_TESS_CTRL);
}
587
/* Thin wrapper: encode a tessellation evaluation shader. */
static void *virgl_create_tes_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_TESS_EVAL);
}
593
/* Thin wrapper: encode a geometry shader. */
static void *virgl_create_gs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_GEOMETRY);
}
599
/* Thin wrapper: encode a fragment shader. */
static void *virgl_create_fs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_FRAGMENT);
}
605
606static void
607virgl_delete_fs_state(struct pipe_context *ctx,
608                     void *fs)
609{
610   uint32_t handle = (unsigned long)fs;
611   struct virgl_context *vctx = virgl_context(ctx);
612
613   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
614}
615
616static void
617virgl_delete_gs_state(struct pipe_context *ctx,
618                     void *gs)
619{
620   uint32_t handle = (unsigned long)gs;
621   struct virgl_context *vctx = virgl_context(ctx);
622
623   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
624}
625
626static void
627virgl_delete_vs_state(struct pipe_context *ctx,
628                     void *vs)
629{
630   uint32_t handle = (unsigned long)vs;
631   struct virgl_context *vctx = virgl_context(ctx);
632
633   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
634}
635
636static void
637virgl_delete_tcs_state(struct pipe_context *ctx,
638                       void *tcs)
639{
640   uint32_t handle = (unsigned long)tcs;
641   struct virgl_context *vctx = virgl_context(ctx);
642
643   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
644}
645
646static void
647virgl_delete_tes_state(struct pipe_context *ctx,
648                      void *tes)
649{
650   uint32_t handle = (unsigned long)tes;
651   struct virgl_context *vctx = virgl_context(ctx);
652
653   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
654}
655
656static void virgl_bind_vs_state(struct pipe_context *ctx,
657                                        void *vss)
658{
659   uint32_t handle = (unsigned long)vss;
660   struct virgl_context *vctx = virgl_context(ctx);
661
662   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_VERTEX);
663}
664
665static void virgl_bind_tcs_state(struct pipe_context *ctx,
666                               void *vss)
667{
668   uint32_t handle = (unsigned long)vss;
669   struct virgl_context *vctx = virgl_context(ctx);
670
671   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_TESS_CTRL);
672}
673
674static void virgl_bind_tes_state(struct pipe_context *ctx,
675                               void *vss)
676{
677   uint32_t handle = (unsigned long)vss;
678   struct virgl_context *vctx = virgl_context(ctx);
679
680   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_TESS_EVAL);
681}
682
683static void virgl_bind_gs_state(struct pipe_context *ctx,
684                               void *vss)
685{
686   uint32_t handle = (unsigned long)vss;
687   struct virgl_context *vctx = virgl_context(ctx);
688
689   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_GEOMETRY);
690}
691
692
693static void virgl_bind_fs_state(struct pipe_context *ctx,
694                                        void *vss)
695{
696   uint32_t handle = (unsigned long)vss;
697   struct virgl_context *vctx = virgl_context(ctx);
698
699   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_FRAGMENT);
700}
701
/* Clears happen host-side; just encode the command. */
static void virgl_clear(struct pipe_context *ctx,
                                unsigned buffers,
                                const union pipe_color_union *color,
                                double depth, unsigned stencil)
{
   virgl_encode_clear(virgl_context(ctx), buffers, color, depth, stencil);
}
711
/* Emit a draw: trims degenerate primitives, falls back to
 * util_primconvert for primitive types the host doesn't support, uploads
 * user index data, then encodes vertex/index state and the draw itself.
 * NOTE(review): u_trim_pipe_prim writes through a cast-away-const pointer
 * to dinfo->count — presumably safe because callers pass writable
 * storage; confirm. */
static void virgl_draw_vbo(struct pipe_context *ctx,
                                   const struct pipe_draw_info *dinfo)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   struct virgl_indexbuf ib = {};
   struct pipe_draw_info info = *dinfo;

   /* Nothing to draw after trimming incomplete primitives. */
   if (!dinfo->count_from_stream_output && !dinfo->indirect &&
       !dinfo->primitive_restart &&
       !u_trim_pipe_prim(dinfo->mode, (unsigned*)&dinfo->count))
      return;

   /* Host can't rasterize this primitive type: convert on the guest. */
   if (!(rs->caps.caps.v1.prim_mask & (1 << dinfo->mode))) {
      util_primconvert_save_rasterizer_state(vctx->primconvert, &vctx->rs_state.rs);
      util_primconvert_draw_vbo(vctx->primconvert, dinfo);
      return;
   }
   if (info.index_size) {
           pipe_resource_reference(&ib.buffer, info.has_user_indices ? NULL : info.index.resource);
           ib.user_buffer = info.has_user_indices ? info.index.user : NULL;
           ib.index_size = dinfo->index_size;
           ib.offset = info.start * ib.index_size;

           /* User index data must live in a GPU buffer for the host. */
           if (ib.user_buffer) {
                   u_upload_data(vctx->uploader, 0, info.count * ib.index_size, 4,
                                 ib.user_buffer, &ib.offset, &ib.buffer);
                   ib.user_buffer = NULL;
           }
   }

   vctx->num_draws++;
   virgl_hw_set_vertex_buffers(vctx);
   if (info.index_size)
      virgl_hw_set_index_buffer(vctx, &ib);

   virgl_encoder_draw_vbo(vctx, &info);

   /* Drop the (possibly upload-allocated) index buffer reference. */
   pipe_resource_reference(&ib.buffer, NULL);

}
753
/* Submit the accumulated command buffer to the host.
 * Skips entirely when nothing was encoded and no fence is requested.
 * After submission the fresh command buffer is re-primed: transfer space
 * reserved, sub-context selected, and all bound resources re-emitted onto
 * the new relocation list. */
static void virgl_flush_eq(struct virgl_context *ctx, void *closure,
			   struct pipe_fence_handle **fence)
{
   struct virgl_screen *rs = virgl_screen(ctx->base.screen);

   /* skip empty cbuf */
   if (ctx->cbuf->cdw == ctx->cbuf_initial_cdw &&
       ctx->queue.num_dwords == 0 &&
       !fence)
      return;

   if (ctx->num_draws)
      u_upload_unmap(ctx->uploader);

   /* send the buffer to the remote side for decoding */
   ctx->num_draws = ctx->num_compute = 0;

   virgl_transfer_queue_clear(&ctx->queue, ctx->cbuf);
   rs->vws->submit_cmd(rs->vws, ctx->cbuf, fence);

   /* Reserve some space for transfers. */
   if (ctx->encoded_transfers)
      ctx->cbuf->cdw = VIRGL_MAX_TBUF_DWORDS;

   virgl_encoder_set_sub_ctx(ctx, ctx->hw_sub_ctx_id);

   /* add back current framebuffer resources to reference list? */
   virgl_reemit_res(ctx);

   /* Remember the primed size so the next empty-cbuf check works. */
   ctx->cbuf_initial_cdw = ctx->cbuf->cdw;
}
785
/* pipe_context::flush entry point. 'flags' is unused; every flush takes
 * the same path, and the context doubles as the closure argument. */
static void virgl_flush_from_st(struct pipe_context *ctx,
                               struct pipe_fence_handle **fence,
                               enum pipe_flush_flags flags)
{
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_flush_eq(vctx, vctx, fence);
}
794
/* Create a sampler view of a texture and mirror it on the host.
 * Returns the new view (refcount 1) or NULL on failure. */
static struct pipe_sampler_view *virgl_create_sampler_view(struct pipe_context *ctx,
                                      struct pipe_resource *texture,
                                      const struct pipe_sampler_view *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_sampler_view *grview;
   uint32_t handle;
   struct virgl_resource *res;

   if (!state)
      return NULL;

   grview = CALLOC_STRUCT(virgl_sampler_view);
   if (!grview)
      return NULL;

   res = virgl_resource(texture);
   handle = virgl_object_assign_handle();
   virgl_encode_sampler_view(vctx, handle, res, state);

   grview->base = *state;
   grview->base.reference.count = 1;

   /* Clear the copied pointer before taking our own reference. */
   grview->base.texture = NULL;
   grview->base.context = ctx;
   pipe_resource_reference(&grview->base.texture, texture);
   grview->handle = handle;
   return &grview->base;
}
824
/* Update the sampler-view bindings for one shader stage: views beyond
 * num_views are released, changed slots are re-referenced, and the
 * enabled_mask is recomputed before encoding the full set for the host.
 * NOTE(review): the local bookkeeping indexes tinfo->views from 0 and
 * does not offset by start_slot — presumably callers always pass
 * start_slot == 0 here; confirm. */
static void virgl_set_sampler_views(struct pipe_context *ctx,
                                   enum pipe_shader_type shader_type,
                                   unsigned start_slot,
                                   unsigned num_views,
                                   struct pipe_sampler_view **views)
{
   struct virgl_context *vctx = virgl_context(ctx);
   int i;
   uint32_t disable_mask = ~((1ull << num_views) - 1);
   struct virgl_textures_info *tinfo = &vctx->samplers[shader_type];
   uint32_t new_mask = 0;
   uint32_t remaining_mask;

   /* Release every currently-enabled view above the new count. */
   remaining_mask = tinfo->enabled_mask & disable_mask;

   while (remaining_mask) {
      i = u_bit_scan(&remaining_mask);
      assert(tinfo->views[i]);

      pipe_sampler_view_reference((struct pipe_sampler_view **)&tinfo->views[i], NULL);
   }

   for (i = 0; i < num_views; i++) {
      struct virgl_sampler_view *grview = virgl_sampler_view(views[i]);

      /* Unchanged slot: keep the existing reference. */
      if (views[i] == (struct pipe_sampler_view *)tinfo->views[i])
         continue;

      if (grview) {
         new_mask |= 1 << i;
         pipe_sampler_view_reference((struct pipe_sampler_view **)&tinfo->views[i], views[i]);
      } else {
         pipe_sampler_view_reference((struct pipe_sampler_view **)&tinfo->views[i], NULL);
         disable_mask |= 1 << i;
      }
   }

   tinfo->enabled_mask &= ~disable_mask;
   tinfo->enabled_mask |= new_mask;
   virgl_encode_set_sampler_views(vctx, shader_type, start_slot, num_views, tinfo->views);
   virgl_attach_res_sampler_views(vctx, shader_type);
}
867
868static void
869virgl_texture_barrier(struct pipe_context *ctx, unsigned flags)
870{
871   struct virgl_context *vctx = virgl_context(ctx);
872   struct virgl_screen *rs = virgl_screen(ctx->screen);
873
874   if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_TEXTURE_BARRIER))
875      return;
876   virgl_encode_texture_barrier(vctx, flags);
877}
878
879static void virgl_destroy_sampler_view(struct pipe_context *ctx,
880                                 struct pipe_sampler_view *view)
881{
882   struct virgl_context *vctx = virgl_context(ctx);
883   struct virgl_sampler_view *grview = virgl_sampler_view(view);
884
885   virgl_encode_delete_object(vctx, grview->handle, VIRGL_OBJECT_SAMPLER_VIEW);
886   pipe_resource_reference(&view->texture, NULL);
887   FREE(view);
888}
889
890static void *virgl_create_sampler_state(struct pipe_context *ctx,
891                                        const struct pipe_sampler_state *state)
892{
893   struct virgl_context *vctx = virgl_context(ctx);
894   uint32_t handle;
895
896   handle = virgl_object_assign_handle();
897
898   virgl_encode_sampler_state(vctx, handle, state);
899   return (void *)(unsigned long)handle;
900}
901
902static void virgl_delete_sampler_state(struct pipe_context *ctx,
903                                      void *ss)
904{
905   struct virgl_context *vctx = virgl_context(ctx);
906   uint32_t handle = (unsigned long)ss;
907
908   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SAMPLER_STATE);
909}
910
911static void virgl_bind_sampler_states(struct pipe_context *ctx,
912                                     enum pipe_shader_type shader,
913                                     unsigned start_slot,
914                                     unsigned num_samplers,
915                                     void **samplers)
916{
917   struct virgl_context *vctx = virgl_context(ctx);
918   uint32_t handles[32];
919   int i;
920   for (i = 0; i < num_samplers; i++) {
921      handles[i] = (unsigned long)(samplers[i]);
922   }
923   virgl_encode_bind_sampler_states(vctx, shader, start_slot, num_samplers, handles);
924}
925
static void virgl_set_polygon_stipple(struct pipe_context *ctx,
                                     const struct pipe_poly_stipple *ps)
{
   /* The stipple pattern is not shadowed locally; forward it to the host. */
   virgl_encoder_set_polygon_stipple(virgl_context(ctx), ps);
}
932
static void virgl_set_scissor_states(struct pipe_context *ctx,
                                    unsigned start_slot,
                                    unsigned num_scissor,
                                   const struct pipe_scissor_state *ss)
{
   /* Scissor state is encoded straight to the host command stream. */
   virgl_encoder_set_scissor_state(virgl_context(ctx), start_slot, num_scissor, ss);
}
941
static void virgl_set_sample_mask(struct pipe_context *ctx,
                                 unsigned sample_mask)
{
   /* No local shadowing; the sample mask goes directly to the host. */
   virgl_encoder_set_sample_mask(virgl_context(ctx), sample_mask);
}
948
949static void virgl_set_min_samples(struct pipe_context *ctx,
950                                 unsigned min_samples)
951{
952   struct virgl_context *vctx = virgl_context(ctx);
953   struct virgl_screen *rs = virgl_screen(ctx->screen);
954
955   if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_SET_MIN_SAMPLES))
956      return;
957   virgl_encoder_set_min_samples(vctx, min_samples);
958}
959
static void virgl_set_clip_state(struct pipe_context *ctx,
                                const struct pipe_clip_state *clip)
{
   /* User clip planes are encoded directly; no guest-side copy is kept. */
   virgl_encoder_set_clip_state(virgl_context(ctx), clip);
}
966
967static void virgl_set_tess_state(struct pipe_context *ctx,
968                                 const float default_outer_level[4],
969                                 const float default_inner_level[2])
970{
971   struct virgl_context *vctx = virgl_context(ctx);
972   struct virgl_screen *rs = virgl_screen(ctx->screen);
973
974   if (!rs->caps.caps.v1.bset.has_tessellation_shaders)
975      return;
976   virgl_encode_set_tess_state(vctx, default_outer_level, default_inner_level);
977}
978
static void virgl_resource_copy_region(struct pipe_context *ctx,
                                      struct pipe_resource *dst,
                                      unsigned dst_level,
                                      unsigned dstx, unsigned dsty, unsigned dstz,
                                      struct pipe_resource *src,
                                      unsigned src_level,
                                      const struct pipe_box *src_box)
{
   /* Mark the destination level dirty on the guest side, then let the host
    * perform the actual copy. */
   struct virgl_resource *vdst = virgl_resource(dst);
   struct virgl_resource *vsrc = virgl_resource(src);

   virgl_resource_dirty(vdst, dst_level);
   virgl_encode_resource_copy_region(virgl_context(ctx), vdst,
                                     dst_level, dstx, dsty, dstz,
                                     vsrc, src_level, src_box);
}
997
static void
virgl_flush_resource(struct pipe_context *pipe,
                    struct pipe_resource *resource)
{
   /* Intentionally empty: virgl needs no explicit per-resource flush here.
    * The hook must still exist because the state tracker calls it
    * unconditionally. */
}
1003
1004static void virgl_blit(struct pipe_context *ctx,
1005                      const struct pipe_blit_info *blit)
1006{
1007   struct virgl_context *vctx = virgl_context(ctx);
1008   struct virgl_resource *dres = virgl_resource(blit->dst.resource);
1009   struct virgl_resource *sres = virgl_resource(blit->src.resource);
1010
1011   assert(ctx->screen->get_param(ctx->screen,
1012                                 PIPE_CAP_DEST_SURFACE_SRGB_CONTROL) ||
1013          (util_format_is_srgb(blit->dst.resource->format) ==
1014            util_format_is_srgb(blit->dst.format)));
1015
1016   virgl_resource_dirty(dres, blit->dst.level);
1017   virgl_encode_blit(vctx, dres, sres,
1018                    blit);
1019}
1020
1021static void virgl_set_hw_atomic_buffers(struct pipe_context *ctx,
1022                                        unsigned start_slot,
1023                                        unsigned count,
1024                                        const struct pipe_shader_buffer *buffers)
1025{
1026   struct virgl_context *vctx = virgl_context(ctx);
1027
1028   for (unsigned i = 0; i < count; i++) {
1029      unsigned idx = start_slot + i;
1030
1031      if (buffers) {
1032         if (buffers[i].buffer) {
1033            pipe_resource_reference(&vctx->atomic_buffers[idx],
1034                                    buffers[i].buffer);
1035            continue;
1036         }
1037      }
1038      pipe_resource_reference(&vctx->atomic_buffers[idx], NULL);
1039   }
1040   virgl_encode_set_hw_atomic_buffers(vctx, start_slot, count, buffers);
1041}
1042
1043static void virgl_set_shader_buffers(struct pipe_context *ctx,
1044                                     enum pipe_shader_type shader,
1045                                     unsigned start_slot, unsigned count,
1046                                     const struct pipe_shader_buffer *buffers,
1047                                     unsigned writable_bitmask)
1048{
1049   struct virgl_context *vctx = virgl_context(ctx);
1050   struct virgl_screen *rs = virgl_screen(ctx->screen);
1051
1052   for (unsigned i = 0; i < count; i++) {
1053      unsigned idx = start_slot + i;
1054
1055      if (buffers) {
1056         if (buffers[i].buffer) {
1057            pipe_resource_reference(&vctx->ssbos[shader][idx], buffers[i].buffer);
1058            continue;
1059         }
1060      }
1061      pipe_resource_reference(&vctx->ssbos[shader][idx], NULL);
1062   }
1063
1064   uint32_t max_shader_buffer = (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE) ?
1065      rs->caps.caps.v2.max_shader_buffer_frag_compute :
1066      rs->caps.caps.v2.max_shader_buffer_other_stages;
1067   if (!max_shader_buffer)
1068      return;
1069   virgl_encode_set_shader_buffers(vctx, shader, start_slot, count, buffers);
1070}
1071
1072static void virgl_create_fence_fd(struct pipe_context *ctx,
1073                                  struct pipe_fence_handle **fence,
1074                                  int fd,
1075                                  enum pipe_fd_type type)
1076{
1077   assert(type == PIPE_FD_TYPE_NATIVE_SYNC);
1078   struct virgl_screen *rs = virgl_screen(ctx->screen);
1079
1080   if (rs->vws->cs_create_fence)
1081      *fence = rs->vws->cs_create_fence(rs->vws, fd);
1082}
1083
1084static void virgl_fence_server_sync(struct pipe_context *ctx,
1085			            struct pipe_fence_handle *fence)
1086{
1087   struct virgl_context *vctx = virgl_context(ctx);
1088   struct virgl_screen *rs = virgl_screen(ctx->screen);
1089
1090   if (rs->vws->fence_server_sync)
1091      rs->vws->fence_server_sync(rs->vws, vctx->cbuf, fence);
1092}
1093
1094static void virgl_set_shader_images(struct pipe_context *ctx,
1095                                    enum pipe_shader_type shader,
1096                                    unsigned start_slot, unsigned count,
1097                                    const struct pipe_image_view *images)
1098{
1099   struct virgl_context *vctx = virgl_context(ctx);
1100   struct virgl_screen *rs = virgl_screen(ctx->screen);
1101
1102   for (unsigned i = 0; i < count; i++) {
1103      unsigned idx = start_slot + i;
1104
1105      if (images) {
1106         if (images[i].resource) {
1107            pipe_resource_reference(&vctx->images[shader][idx], images[i].resource);
1108            continue;
1109         }
1110      }
1111      pipe_resource_reference(&vctx->images[shader][idx], NULL);
1112   }
1113
1114   uint32_t max_shader_images = (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE) ?
1115     rs->caps.caps.v2.max_shader_image_frag_compute :
1116     rs->caps.caps.v2.max_shader_image_other_stages;
1117   if (!max_shader_images)
1118      return;
1119   virgl_encode_set_shader_images(vctx, shader, start_slot, count, images);
1120}
1121
1122static void virgl_memory_barrier(struct pipe_context *ctx,
1123                                 unsigned flags)
1124{
1125   struct virgl_context *vctx = virgl_context(ctx);
1126   struct virgl_screen *rs = virgl_screen(ctx->screen);
1127
1128   if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_MEMORY_BARRIER))
1129      return;
1130   virgl_encode_memory_barrier(vctx, flags);
1131}
1132
1133static void *virgl_create_compute_state(struct pipe_context *ctx,
1134                                        const struct pipe_compute_state *state)
1135{
1136   struct virgl_context *vctx = virgl_context(ctx);
1137   uint32_t handle;
1138   const struct tgsi_token *new_tokens = state->prog;
1139   struct pipe_stream_output_info so_info = {};
1140   int ret;
1141
1142   handle = virgl_object_assign_handle();
1143   ret = virgl_encode_shader_state(vctx, handle, PIPE_SHADER_COMPUTE,
1144                                   &so_info,
1145                                   state->req_local_mem,
1146                                   new_tokens);
1147   if (ret) {
1148      return NULL;
1149   }
1150
1151   return (void *)(unsigned long)handle;
1152}
1153
1154static void virgl_bind_compute_state(struct pipe_context *ctx, void *state)
1155{
1156   uint32_t handle = (unsigned long)state;
1157   struct virgl_context *vctx = virgl_context(ctx);
1158
1159   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_COMPUTE);
1160}
1161
1162static void virgl_delete_compute_state(struct pipe_context *ctx, void *state)
1163{
1164   uint32_t handle = (unsigned long)state;
1165   struct virgl_context *vctx = virgl_context(ctx);
1166
1167   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
1168}
1169
1170static void virgl_launch_grid(struct pipe_context *ctx,
1171                              const struct pipe_grid_info *info)
1172{
1173   struct virgl_context *vctx = virgl_context(ctx);
1174   virgl_encode_launch_grid(vctx, info);
1175   vctx->num_compute++;
1176}
1177
static void
virgl_context_destroy( struct pipe_context *ctx )
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   /* Clear local framebuffer bindings before tearing the context down. */
   vctx->framebuffer.zsbuf = NULL;
   vctx->framebuffer.nr_cbufs = 0;
   /* Tell the host to destroy our hardware sub-context, then flush so the
    * command actually reaches it before the command buffer goes away. */
   virgl_encoder_destroy_sub_ctx(vctx, vctx->hw_sub_ctx_id);
   virgl_flush_eq(vctx, vctx, NULL);

   /* Teardown order matters: the command buffer must outlive the flush
    * above; pools are destroyed after everything that allocates from them. */
   rs->vws->cmd_buf_destroy(vctx->cbuf);
   if (vctx->uploader)
      u_upload_destroy(vctx->uploader);
   util_primconvert_destroy(vctx->primconvert);
   virgl_transfer_queue_fini(&vctx->queue);

   slab_destroy_child(&vctx->transfer_pool);
   FREE(vctx);
}
1198
1199static void virgl_get_sample_position(struct pipe_context *ctx,
1200				      unsigned sample_count,
1201				      unsigned index,
1202				      float *out_value)
1203{
1204   struct virgl_context *vctx = virgl_context(ctx);
1205   struct virgl_screen *vs = virgl_screen(vctx->base.screen);
1206
1207   if (sample_count > vs->caps.caps.v1.max_samples) {
1208      debug_printf("VIRGL: requested %d MSAA samples, but only %d supported\n",
1209		   sample_count, vs->caps.caps.v1.max_samples);
1210      return;
1211   }
1212
1213   /* The following is basically copied from dri/i965gen6_get_sample_position
1214    * The only addition is that we hold the msaa positions for all sample
1215    * counts in a flat array. */
1216   uint32_t bits = 0;
1217   if (sample_count == 1) {
1218      out_value[0] = out_value[1] = 0.5f;
1219      return;
1220   } else if (sample_count == 2) {
1221      bits = vs->caps.caps.v2.sample_locations[0] >> (8 * index);
1222   } else if (sample_count <= 4) {
1223      bits = vs->caps.caps.v2.sample_locations[1] >> (8 * index);
1224   } else if (sample_count <= 8) {
1225      bits = vs->caps.caps.v2.sample_locations[2 + (index >> 2)] >> (8 * (index & 3));
1226   } else if (sample_count <= 16) {
1227      bits = vs->caps.caps.v2.sample_locations[4 + (index >> 2)] >> (8 * (index & 3));
1228   }
1229   out_value[0] = ((bits >> 4) & 0xf) / 16.0f;
1230   out_value[1] = (bits & 0xf) / 16.0f;
1231
1232   if (virgl_debug & VIRGL_DEBUG_VERBOSE)
1233      debug_printf("VIRGL: sample postion [%2d/%2d] = (%f, %f)\n",
1234                   index, sample_count, out_value[0], out_value[1]);
1235}
1236
1237struct pipe_context *virgl_context_create(struct pipe_screen *pscreen,
1238                                          void *priv,
1239                                          unsigned flags)
1240{
1241   struct virgl_context *vctx;
1242   struct virgl_screen *rs = virgl_screen(pscreen);
1243   vctx = CALLOC_STRUCT(virgl_context);
1244   const char *host_debug_flagstring;
1245
1246   vctx->cbuf = rs->vws->cmd_buf_create(rs->vws, VIRGL_MAX_CMDBUF_DWORDS);
1247   if (!vctx->cbuf) {
1248      FREE(vctx);
1249      return NULL;
1250   }
1251
1252   vctx->base.destroy = virgl_context_destroy;
1253   vctx->base.create_surface = virgl_create_surface;
1254   vctx->base.surface_destroy = virgl_surface_destroy;
1255   vctx->base.set_framebuffer_state = virgl_set_framebuffer_state;
1256   vctx->base.create_blend_state = virgl_create_blend_state;
1257   vctx->base.bind_blend_state = virgl_bind_blend_state;
1258   vctx->base.delete_blend_state = virgl_delete_blend_state;
1259   vctx->base.create_depth_stencil_alpha_state = virgl_create_depth_stencil_alpha_state;
1260   vctx->base.bind_depth_stencil_alpha_state = virgl_bind_depth_stencil_alpha_state;
1261   vctx->base.delete_depth_stencil_alpha_state = virgl_delete_depth_stencil_alpha_state;
1262   vctx->base.create_rasterizer_state = virgl_create_rasterizer_state;
1263   vctx->base.bind_rasterizer_state = virgl_bind_rasterizer_state;
1264   vctx->base.delete_rasterizer_state = virgl_delete_rasterizer_state;
1265
1266   vctx->base.set_viewport_states = virgl_set_viewport_states;
1267   vctx->base.create_vertex_elements_state = virgl_create_vertex_elements_state;
1268   vctx->base.bind_vertex_elements_state = virgl_bind_vertex_elements_state;
1269   vctx->base.delete_vertex_elements_state = virgl_delete_vertex_elements_state;
1270   vctx->base.set_vertex_buffers = virgl_set_vertex_buffers;
1271   vctx->base.set_constant_buffer = virgl_set_constant_buffer;
1272
1273   vctx->base.set_tess_state = virgl_set_tess_state;
1274   vctx->base.create_vs_state = virgl_create_vs_state;
1275   vctx->base.create_tcs_state = virgl_create_tcs_state;
1276   vctx->base.create_tes_state = virgl_create_tes_state;
1277   vctx->base.create_gs_state = virgl_create_gs_state;
1278   vctx->base.create_fs_state = virgl_create_fs_state;
1279
1280   vctx->base.bind_vs_state = virgl_bind_vs_state;
1281   vctx->base.bind_tcs_state = virgl_bind_tcs_state;
1282   vctx->base.bind_tes_state = virgl_bind_tes_state;
1283   vctx->base.bind_gs_state = virgl_bind_gs_state;
1284   vctx->base.bind_fs_state = virgl_bind_fs_state;
1285
1286   vctx->base.delete_vs_state = virgl_delete_vs_state;
1287   vctx->base.delete_tcs_state = virgl_delete_tcs_state;
1288   vctx->base.delete_tes_state = virgl_delete_tes_state;
1289   vctx->base.delete_gs_state = virgl_delete_gs_state;
1290   vctx->base.delete_fs_state = virgl_delete_fs_state;
1291
1292   vctx->base.create_compute_state = virgl_create_compute_state;
1293   vctx->base.bind_compute_state = virgl_bind_compute_state;
1294   vctx->base.delete_compute_state = virgl_delete_compute_state;
1295   vctx->base.launch_grid = virgl_launch_grid;
1296
1297   vctx->base.clear = virgl_clear;
1298   vctx->base.draw_vbo = virgl_draw_vbo;
1299   vctx->base.flush = virgl_flush_from_st;
1300   vctx->base.screen = pscreen;
1301   vctx->base.create_sampler_view = virgl_create_sampler_view;
1302   vctx->base.sampler_view_destroy = virgl_destroy_sampler_view;
1303   vctx->base.set_sampler_views = virgl_set_sampler_views;
1304   vctx->base.texture_barrier = virgl_texture_barrier;
1305
1306   vctx->base.create_sampler_state = virgl_create_sampler_state;
1307   vctx->base.delete_sampler_state = virgl_delete_sampler_state;
1308   vctx->base.bind_sampler_states = virgl_bind_sampler_states;
1309
1310   vctx->base.set_polygon_stipple = virgl_set_polygon_stipple;
1311   vctx->base.set_scissor_states = virgl_set_scissor_states;
1312   vctx->base.set_sample_mask = virgl_set_sample_mask;
1313   vctx->base.set_min_samples = virgl_set_min_samples;
1314   vctx->base.set_stencil_ref = virgl_set_stencil_ref;
1315   vctx->base.set_clip_state = virgl_set_clip_state;
1316
1317   vctx->base.set_blend_color = virgl_set_blend_color;
1318
1319   vctx->base.get_sample_position = virgl_get_sample_position;
1320
1321   vctx->base.resource_copy_region = virgl_resource_copy_region;
1322   vctx->base.flush_resource = virgl_flush_resource;
1323   vctx->base.blit =  virgl_blit;
1324   vctx->base.create_fence_fd = virgl_create_fence_fd;
1325   vctx->base.fence_server_sync = virgl_fence_server_sync;
1326
1327   vctx->base.set_shader_buffers = virgl_set_shader_buffers;
1328   vctx->base.set_hw_atomic_buffers = virgl_set_hw_atomic_buffers;
1329   vctx->base.set_shader_images = virgl_set_shader_images;
1330   vctx->base.memory_barrier = virgl_memory_barrier;
1331
1332   virgl_init_context_resource_functions(&vctx->base);
1333   virgl_init_query_functions(vctx);
1334   virgl_init_so_functions(vctx);
1335
1336   slab_create_child(&vctx->transfer_pool, &rs->transfer_pool);
1337   virgl_transfer_queue_init(&vctx->queue, rs, &vctx->transfer_pool);
1338   vctx->encoded_transfers = (rs->vws->supports_encoded_transfers &&
1339                       (rs->caps.caps.v2.capability_bits & VIRGL_CAP_TRANSFER));
1340
1341   /* Reserve some space for transfers. */
1342   if (vctx->encoded_transfers)
1343      vctx->cbuf->cdw = VIRGL_MAX_TBUF_DWORDS;
1344
1345   vctx->primconvert = util_primconvert_create(&vctx->base, rs->caps.caps.v1.prim_mask);
1346   vctx->uploader = u_upload_create(&vctx->base, 1024 * 1024,
1347                                     PIPE_BIND_INDEX_BUFFER, PIPE_USAGE_STREAM, 0);
1348   if (!vctx->uploader)
1349           goto fail;
1350   vctx->base.stream_uploader = vctx->uploader;
1351   vctx->base.const_uploader = vctx->uploader;
1352
1353   vctx->hw_sub_ctx_id = rs->sub_ctx_id++;
1354   virgl_encoder_create_sub_ctx(vctx, vctx->hw_sub_ctx_id);
1355
1356   virgl_encoder_set_sub_ctx(vctx, vctx->hw_sub_ctx_id);
1357
1358   if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_GUEST_MAY_INIT_LOG) {
1359      host_debug_flagstring = getenv("VIRGL_HOST_DEBUG");
1360      if (host_debug_flagstring)
1361         virgl_encode_host_debug_flagstring(vctx, host_debug_flagstring);
1362   }
1363
1364   return &vctx->base;
1365fail:
1366   return NULL;
1367}
1368