1/*
2 * Copyright © 2019 Red Hat.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24/* use a gallium context to execute a command buffer */
25
26#include "lvp_private.h"
27
28#include "pipe/p_context.h"
29#include "pipe/p_state.h"
30#include "lvp_conv.h"
31
32#include "pipe/p_shader_tokens.h"
33#include "tgsi/tgsi_text.h"
34#include "tgsi/tgsi_parse.h"
35
36#include "util/format/u_format.h"
37#include "util/u_surface.h"
38#include "util/u_sampler.h"
39#include "util/u_box.h"
40#include "util/u_inlines.h"
41#include "util/u_prim.h"
42#include "util/u_prim_restart.h"
43#include "util/format/u_format_zs.h"
44
45#include "vk_util.h"
46
47#define VK_PROTOTYPES
48#include <vulkan/vulkan.h>
49
50#define DOUBLE_EQ(a, b) (fabs((a) - (b)) < DBL_EPSILON)
51
52enum gs_output {
53  GS_OUTPUT_NONE,
54  GS_OUTPUT_NOT_LINES,
55  GS_OUTPUT_LINES,
56};
57
58struct rendering_state {
59   struct pipe_context *pctx;
60   struct cso_context *cso;
61
62   bool blend_dirty;
63   bool rs_dirty;
64   bool dsa_dirty;
65   bool stencil_ref_dirty;
66   bool clip_state_dirty;
67   bool blend_color_dirty;
68   bool ve_dirty;
69   bool vb_dirty;
70   bool constbuf_dirty[PIPE_SHADER_TYPES];
71   bool pcbuf_dirty[PIPE_SHADER_TYPES];
72   bool vp_dirty;
73   bool scissor_dirty;
74   bool ib_dirty;
75   bool sample_mask_dirty;
76   bool min_samples_dirty;
77   struct pipe_draw_indirect_info indirect_info;
78   struct pipe_draw_info info;
79
80   struct pipe_grid_info dispatch_info;
81   struct pipe_framebuffer_state framebuffer;
82
83   struct pipe_blend_state blend_state;
84   struct {
85      float offset_units;
86      float offset_scale;
87      float offset_clamp;
88      bool enabled;
89   } depth_bias;
90   struct pipe_rasterizer_state rs_state;
91   struct pipe_depth_stencil_alpha_state dsa_state;
92
93   struct pipe_blend_color blend_color;
94   struct pipe_stencil_ref stencil_ref;
95   struct pipe_clip_state clip_state;
96
97   int num_scissors;
98   struct pipe_scissor_state scissors[16];
99
100   int num_viewports;
101   struct pipe_viewport_state viewports[16];
102
103   uint8_t patch_vertices;
104   ubyte index_size;
105   unsigned index_offset;
106   struct pipe_resource *index_buffer;
107   struct pipe_constant_buffer pc_buffer[PIPE_SHADER_TYPES];
108   struct pipe_constant_buffer const_buffer[PIPE_SHADER_TYPES][16];
109   int num_const_bufs[PIPE_SHADER_TYPES];
110   int num_vb;
111   unsigned start_vb;
112   struct pipe_vertex_buffer vb[PIPE_MAX_ATTRIBS];
113   struct cso_velems_state velem;
114
115   struct pipe_sampler_view *sv[PIPE_SHADER_TYPES][PIPE_MAX_SAMPLERS];
116   int num_sampler_views[PIPE_SHADER_TYPES];
117   struct pipe_sampler_state ss[PIPE_SHADER_TYPES][PIPE_MAX_SAMPLERS];
118   /* cso_context api is stupid */
119   const struct pipe_sampler_state *cso_ss_ptr[PIPE_SHADER_TYPES][PIPE_MAX_SAMPLERS];
120   int num_sampler_states[PIPE_SHADER_TYPES];
121   bool sv_dirty[PIPE_SHADER_TYPES];
122   bool ss_dirty[PIPE_SHADER_TYPES];
123
124   struct pipe_image_view iv[PIPE_SHADER_TYPES][PIPE_MAX_SHADER_IMAGES];
125   int num_shader_images[PIPE_SHADER_TYPES];
126   struct pipe_shader_buffer sb[PIPE_SHADER_TYPES][PIPE_MAX_SHADER_BUFFERS];
127   int num_shader_buffers[PIPE_SHADER_TYPES];
128   bool iv_dirty[PIPE_SHADER_TYPES];
129   bool sb_dirty[PIPE_SHADER_TYPES];
130   bool disable_multisample;
131   enum gs_output gs_output_lines : 2;
132
133   uint32_t color_write_disables:8;
134   bool has_color_write_disables:1;
135   uint32_t pad:13;
136
137   void *ss_cso[PIPE_SHADER_TYPES][PIPE_MAX_SAMPLERS];
138   void *velems_cso;
139
140   uint8_t push_constants[128 * 4];
141
142   const struct lvp_render_pass *pass;
143   uint32_t subpass;
144   const struct lvp_framebuffer *vk_framebuffer;
145   VkRect2D render_area;
146
147   uint32_t sample_mask;
148   unsigned min_samples;
149
150   struct lvp_image_view **imageless_views;
151   struct lvp_attachment_state *attachments;
152   VkImageAspectFlags *pending_clear_aspects;
153   uint32_t *cleared_views;
154   int num_pending_aspects;
155
156   uint32_t num_so_targets;
157   struct pipe_stream_output_target *so_targets[PIPE_MAX_SO_BUFFERS];
158   uint32_t so_offsets[PIPE_MAX_SO_BUFFERS];
159};
160
161ALWAYS_INLINE static void
162assert_subresource_layers(const struct pipe_resource *pres, const VkImageSubresourceLayers *layers, const VkOffset3D *offsets)
163{
164#ifndef NDEBUG
165   if (pres->target == PIPE_TEXTURE_3D) {
166      assert(layers->baseArrayLayer == 0);
167      assert(layers->layerCount == 1);
168      assert(offsets[0].z <= pres->depth0);
169      assert(offsets[1].z <= pres->depth0);
170   } else {
171      assert(layers->baseArrayLayer < pres->array_size);
172      assert(layers->baseArrayLayer + layers->layerCount <= pres->array_size);
173      assert(offsets[0].z == 0);
174      assert(offsets[1].z == 1);
175   }
176#endif
177}
178
179static void emit_compute_state(struct rendering_state *state)
180{
181   if (state->iv_dirty[PIPE_SHADER_COMPUTE]) {
182      state->pctx->set_shader_images(state->pctx, PIPE_SHADER_COMPUTE,
183                                     0, state->num_shader_images[PIPE_SHADER_COMPUTE],
184                                     0, state->iv[PIPE_SHADER_COMPUTE]);
185      state->iv_dirty[PIPE_SHADER_COMPUTE] = false;
186   }
187
188   if (state->pcbuf_dirty[PIPE_SHADER_COMPUTE]) {
189      state->pctx->set_constant_buffer(state->pctx, PIPE_SHADER_COMPUTE,
190                                       0, false, &state->pc_buffer[PIPE_SHADER_COMPUTE]);
191      state->pcbuf_dirty[PIPE_SHADER_COMPUTE] = false;
192   }
193
194   if (state->constbuf_dirty[PIPE_SHADER_COMPUTE]) {
195      for (unsigned i = 0; i < state->num_const_bufs[PIPE_SHADER_COMPUTE]; i++)
196         state->pctx->set_constant_buffer(state->pctx, PIPE_SHADER_COMPUTE,
197                                          i + 1, false, &state->const_buffer[PIPE_SHADER_COMPUTE][i]);
198      state->constbuf_dirty[PIPE_SHADER_COMPUTE] = false;
199   }
200
201   if (state->sb_dirty[PIPE_SHADER_COMPUTE]) {
202      state->pctx->set_shader_buffers(state->pctx, PIPE_SHADER_COMPUTE,
203                                      0, state->num_shader_buffers[PIPE_SHADER_COMPUTE],
204                                      state->sb[PIPE_SHADER_COMPUTE], 0);
205      state->sb_dirty[PIPE_SHADER_COMPUTE] = false;
206   }
207
208   if (state->sv_dirty[PIPE_SHADER_COMPUTE]) {
209      state->pctx->set_sampler_views(state->pctx, PIPE_SHADER_COMPUTE, 0, state->num_sampler_views[PIPE_SHADER_COMPUTE],
210                                     0, false, state->sv[PIPE_SHADER_COMPUTE]);
211      state->sv_dirty[PIPE_SHADER_COMPUTE] = false;
212   }
213
214   if (state->ss_dirty[PIPE_SHADER_COMPUTE]) {
215      for (unsigned i = 0; i < state->num_sampler_states[PIPE_SHADER_COMPUTE]; i++) {
216         if (state->ss_cso[PIPE_SHADER_COMPUTE][i])
217            state->pctx->delete_sampler_state(state->pctx, state->ss_cso[PIPE_SHADER_COMPUTE][i]);
218         state->ss_cso[PIPE_SHADER_COMPUTE][i] = state->pctx->create_sampler_state(state->pctx, &state->ss[PIPE_SHADER_COMPUTE][i]);
219      }
220      state->pctx->bind_sampler_states(state->pctx, PIPE_SHADER_COMPUTE, 0, state->num_sampler_states[PIPE_SHADER_COMPUTE], state->ss_cso[PIPE_SHADER_COMPUTE]);
221      state->ss_dirty[PIPE_SHADER_COMPUTE] = false;
222   }
223}
224
225static void emit_state(struct rendering_state *state)
226{
227   int sh;
228   if (state->blend_dirty) {
229      uint32_t mask = 0;
230      /* zero out the colormask values for disabled attachments */
231      if (state->has_color_write_disables && state->color_write_disables) {
232         u_foreach_bit(att, state->color_write_disables) {
233            mask |= state->blend_state.rt[att].colormask << (att * 4);
234            state->blend_state.rt[att].colormask = 0;
235         }
236      }
237      cso_set_blend(state->cso, &state->blend_state);
238      /* reset colormasks using saved bitmask */
239      if (state->has_color_write_disables && state->color_write_disables) {
240         const uint32_t att_mask = BITFIELD_MASK(4);
241         u_foreach_bit(att, state->color_write_disables) {
242            state->blend_state.rt[att].colormask = (mask >> (att * 4)) & att_mask;
243         }
244      }
245      state->blend_dirty = false;
246   }
247
248   if (state->rs_dirty) {
249      bool ms = state->rs_state.multisample;
250      if (state->disable_multisample &&
251          (state->gs_output_lines == GS_OUTPUT_LINES ||
252           (state->gs_output_lines == GS_OUTPUT_NONE && u_reduced_prim(state->info.mode) == PIPE_PRIM_LINES)))
253         state->rs_state.multisample = false;
254      assert(offsetof(struct pipe_rasterizer_state, offset_clamp) - offsetof(struct pipe_rasterizer_state, offset_units) == sizeof(float) * 2);
255      if (state->depth_bias.enabled) {
256         memcpy(&state->rs_state.offset_units, &state->depth_bias, sizeof(float) * 3);
257      } else {
258         memset(&state->rs_state.offset_units, 0, sizeof(float) * 3);
259      }
260      cso_set_rasterizer(state->cso, &state->rs_state);
261      state->rs_dirty = false;
262      state->rs_state.multisample = ms;
263   }
264
265   if (state->dsa_dirty) {
266      cso_set_depth_stencil_alpha(state->cso, &state->dsa_state);
267      state->dsa_dirty = false;
268   }
269
270   if (state->sample_mask_dirty) {
271      cso_set_sample_mask(state->cso, state->sample_mask);
272      state->sample_mask_dirty = false;
273   }
274
275   if (state->min_samples_dirty) {
276      cso_set_min_samples(state->cso, state->min_samples);
277      state->min_samples_dirty = false;
278   }
279
280   if (state->blend_color_dirty) {
281      state->pctx->set_blend_color(state->pctx, &state->blend_color);
282      state->blend_color_dirty = false;
283   }
284
285   if (state->stencil_ref_dirty) {
286      cso_set_stencil_ref(state->cso, state->stencil_ref);
287      state->stencil_ref_dirty = false;
288   }
289
290   if (state->vb_dirty) {
291      cso_set_vertex_buffers(state->cso, state->start_vb, state->num_vb, state->vb);
292      state->vb_dirty = false;
293   }
294
295   if (state->ve_dirty) {
296      cso_set_vertex_elements(state->cso, &state->velem);
297      state->ve_dirty = false;
298   }
299
300
301   for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
302      if (state->constbuf_dirty[sh]) {
303         for (unsigned idx = 0; idx < state->num_const_bufs[sh]; idx++)
304            state->pctx->set_constant_buffer(state->pctx, sh,
305                                             idx + 1, false, &state->const_buffer[sh][idx]);
306      }
307      state->constbuf_dirty[sh] = false;
308   }
309
310   for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
311      if (state->pcbuf_dirty[sh]) {
312         state->pctx->set_constant_buffer(state->pctx, sh,
313                                          0, false, &state->pc_buffer[sh]);
314      }
315   }
316
317   for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
318      if (state->sb_dirty[sh]) {
319         state->pctx->set_shader_buffers(state->pctx, sh,
320                                         0, state->num_shader_buffers[sh],
321                                         state->sb[sh], 0);
322      }
323   }
324
325   for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
326      if (state->iv_dirty[sh]) {
327         state->pctx->set_shader_images(state->pctx, sh,
328                                        0, state->num_shader_images[sh], 0,
329                                        state->iv[sh]);
330      }
331   }
332
333   for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
334
335      if (!state->sv_dirty[sh])
336         continue;
337
338      state->pctx->set_sampler_views(state->pctx, sh, 0, state->num_sampler_views[sh],
339                                     0, false, state->sv[sh]);
340      state->sv_dirty[sh] = false;
341   }
342
343   for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
344      if (!state->ss_dirty[sh])
345         continue;
346
347      cso_set_samplers(state->cso, sh, state->num_sampler_states[sh], state->cso_ss_ptr[sh]);
348   }
349
350   if (state->vp_dirty) {
351      state->pctx->set_viewport_states(state->pctx, 0, state->num_viewports, state->viewports);
352      state->vp_dirty = false;
353   }
354
355   if (state->scissor_dirty) {
356      state->pctx->set_scissor_states(state->pctx, 0, state->num_scissors, state->scissors);
357      state->scissor_dirty = false;
358   }
359}
360
361static void handle_compute_pipeline(struct vk_cmd_queue_entry *cmd,
362                                    struct rendering_state *state)
363{
364   LVP_FROM_HANDLE(lvp_pipeline, pipeline, cmd->u.bind_pipeline.pipeline);
365
366   state->dispatch_info.block[0] = pipeline->pipeline_nir[MESA_SHADER_COMPUTE]->info.workgroup_size[0];
367   state->dispatch_info.block[1] = pipeline->pipeline_nir[MESA_SHADER_COMPUTE]->info.workgroup_size[1];
368   state->dispatch_info.block[2] = pipeline->pipeline_nir[MESA_SHADER_COMPUTE]->info.workgroup_size[2];
369   state->pctx->bind_compute_state(state->pctx, pipeline->shader_cso[PIPE_SHADER_COMPUTE]);
370}
371
372static void
373get_viewport_xform(const VkViewport *viewport,
374                   float scale[3], float translate[3])
375{
376   float x = viewport->x;
377   float y = viewport->y;
378   float half_width = 0.5f * viewport->width;
379   float half_height = 0.5f * viewport->height;
380   double n = viewport->minDepth;
381   double f = viewport->maxDepth;
382
383   scale[0] = half_width;
384   translate[0] = half_width + x;
385   scale[1] = half_height;
386   translate[1] = half_height + y;
387
388   scale[2] = (f - n);
389   translate[2] = n;
390}
391
392/* enum re-indexing:
393
394    VK_DYNAMIC_STATE_VIEWPORT
395    VK_DYNAMIC_STATE_SCISSOR
396    VK_DYNAMIC_STATE_LINE_WIDTH
397    VK_DYNAMIC_STATE_DEPTH_BIAS
398    VK_DYNAMIC_STATE_BLEND_CONSTANTS
399    VK_DYNAMIC_STATE_DEPTH_BOUNDS
400    VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK
401    VK_DYNAMIC_STATE_STENCIL_WRITE_MASK
402    VK_DYNAMIC_STATE_STENCIL_REFERENCE
403
404    VK_DYNAMIC_STATE_LINE_STIPPLE_EXT
405
406    VK_DYNAMIC_STATE_CULL_MODE_EXT
407    VK_DYNAMIC_STATE_FRONT_FACE_EXT
408    VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT
409    VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT
410    VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT
411    VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT
412    VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT
413    VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT
414    VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT
415    VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE_EXT
416    VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT
417    VK_DYNAMIC_STATE_STENCIL_OP_EXT
418
419    VK_DYNAMIC_STATE_VERTEX_INPUT_EXT
420
421    VK_DYNAMIC_STATE_PATCH_CONTROL_POINTS_EXT
422    VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE_EXT
423    VK_DYNAMIC_STATE_DEPTH_BIAS_ENABLE_EXT
424    VK_DYNAMIC_STATE_LOGIC_OP_EXT
425    VK_DYNAMIC_STATE_PRIMITIVE_RESTART_ENABLE_EXT
426
427    VK_DYNAMIC_STATE_COLOR_WRITE_ENABLE_EXT
428*/
429static int conv_dynamic_state_idx(VkDynamicState dyn_state)
430{
431   if (dyn_state <= VK_DYNAMIC_STATE_STENCIL_REFERENCE)
432      return dyn_state;
433   if (dyn_state == VK_DYNAMIC_STATE_LINE_STIPPLE_EXT)
434      /* this one has a weird id, map after the normal dynamic state ones */
435      return VK_DYNAMIC_STATE_STENCIL_REFERENCE + 1;
436   if (dyn_state >= VK_DYNAMIC_STATE_CULL_MODE_EXT &&
437       dyn_state <= VK_DYNAMIC_STATE_STENCIL_OP_EXT)
438      return dyn_state - VK_DYNAMIC_STATE_CULL_MODE_EXT + VK_DYNAMIC_STATE_STENCIL_REFERENCE + 2;
439   if (dyn_state == VK_DYNAMIC_STATE_VERTEX_INPUT_EXT)
440      return (VK_DYNAMIC_STATE_STENCIL_OP_EXT - VK_DYNAMIC_STATE_CULL_MODE_EXT) + VK_DYNAMIC_STATE_STENCIL_REFERENCE + 2 + 1;
441   if (dyn_state >= VK_DYNAMIC_STATE_PATCH_CONTROL_POINTS_EXT &&
442       dyn_state <= VK_DYNAMIC_STATE_PRIMITIVE_RESTART_ENABLE_EXT)
443      return dyn_state - VK_DYNAMIC_STATE_PATCH_CONTROL_POINTS_EXT +
444             VK_DYNAMIC_STATE_STENCIL_OP_EXT - VK_DYNAMIC_STATE_CULL_MODE_EXT +
445             VK_DYNAMIC_STATE_STENCIL_REFERENCE + 2 + 1 + 1;
446   if (dyn_state == VK_DYNAMIC_STATE_COLOR_WRITE_ENABLE_EXT)
447      return VK_DYNAMIC_STATE_PRIMITIVE_RESTART_ENABLE_EXT - VK_DYNAMIC_STATE_PATCH_CONTROL_POINTS_EXT +
448             VK_DYNAMIC_STATE_STENCIL_OP_EXT - VK_DYNAMIC_STATE_CULL_MODE_EXT +
449             VK_DYNAMIC_STATE_STENCIL_REFERENCE + 2 + 1 + 1 + 1;
450   assert(0);
451   return -1;
452}
453
454static void handle_graphics_pipeline(struct vk_cmd_queue_entry *cmd,
455                                     struct rendering_state *state)
456{
457   LVP_FROM_HANDLE(lvp_pipeline, pipeline, cmd->u.bind_pipeline.pipeline);
458   bool dynamic_states[VK_DYNAMIC_STATE_STENCIL_REFERENCE+32];
459   unsigned fb_samples = 0;
460
461   memset(dynamic_states, 0, sizeof(dynamic_states));
462   if (pipeline->graphics_create_info.pDynamicState)
463   {
464      const VkPipelineDynamicStateCreateInfo *dyn = pipeline->graphics_create_info.pDynamicState;
465      int i;
466      for (i = 0; i < dyn->dynamicStateCount; i++) {
467         int idx = conv_dynamic_state_idx(dyn->pDynamicStates[i]);
468         if (idx == -1)
469            continue;
470         dynamic_states[idx] = true;
471      }
472   }
473   state->has_color_write_disables = dynamic_states[conv_dynamic_state_idx(VK_DYNAMIC_STATE_COLOR_WRITE_ENABLE_EXT)];
474
475   bool has_stage[PIPE_SHADER_TYPES] = { false };
476
477   state->pctx->bind_gs_state(state->pctx, NULL);
478   if (state->pctx->bind_tcs_state)
479      state->pctx->bind_tcs_state(state->pctx, NULL);
480   if (state->pctx->bind_tes_state)
481      state->pctx->bind_tes_state(state->pctx, NULL);
482   state->gs_output_lines = GS_OUTPUT_NONE;
483   {
484      int i;
485      for (i = 0; i < pipeline->graphics_create_info.stageCount; i++) {
486         const VkPipelineShaderStageCreateInfo *sh = &pipeline->graphics_create_info.pStages[i];
487         switch (sh->stage) {
488         case VK_SHADER_STAGE_FRAGMENT_BIT:
489            state->pctx->bind_fs_state(state->pctx, pipeline->shader_cso[PIPE_SHADER_FRAGMENT]);
490            has_stage[PIPE_SHADER_FRAGMENT] = true;
491            break;
492         case VK_SHADER_STAGE_VERTEX_BIT:
493            state->pctx->bind_vs_state(state->pctx, pipeline->shader_cso[PIPE_SHADER_VERTEX]);
494            has_stage[PIPE_SHADER_VERTEX] = true;
495            break;
496         case VK_SHADER_STAGE_GEOMETRY_BIT:
497            state->pctx->bind_gs_state(state->pctx, pipeline->shader_cso[PIPE_SHADER_GEOMETRY]);
498            state->gs_output_lines = pipeline->gs_output_lines ? GS_OUTPUT_LINES : GS_OUTPUT_NOT_LINES;
499            has_stage[PIPE_SHADER_GEOMETRY] = true;
500            break;
501         case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
502            state->pctx->bind_tcs_state(state->pctx, pipeline->shader_cso[PIPE_SHADER_TESS_CTRL]);
503            has_stage[PIPE_SHADER_TESS_CTRL] = true;
504            break;
505         case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
506            state->pctx->bind_tes_state(state->pctx, pipeline->shader_cso[PIPE_SHADER_TESS_EVAL]);
507            has_stage[PIPE_SHADER_TESS_EVAL] = true;
508            break;
509         default:
510            assert(0);
511            break;
512         }
513      }
514   }
515
516   /* there should always be a dummy fs. */
517   if (!has_stage[PIPE_SHADER_FRAGMENT])
518      state->pctx->bind_fs_state(state->pctx, pipeline->shader_cso[PIPE_SHADER_FRAGMENT]);
519   if (state->pctx->bind_gs_state && !has_stage[PIPE_SHADER_GEOMETRY])
520      state->pctx->bind_gs_state(state->pctx, NULL);
521   if (state->pctx->bind_tcs_state && !has_stage[PIPE_SHADER_TESS_CTRL])
522      state->pctx->bind_tcs_state(state->pctx, NULL);
523   if (state->pctx->bind_tes_state && !has_stage[PIPE_SHADER_TESS_EVAL])
524      state->pctx->bind_tes_state(state->pctx, NULL);
525
526   /* rasterization state */
527   if (pipeline->graphics_create_info.pRasterizationState) {
528      const VkPipelineRasterizationStateCreateInfo *rsc = pipeline->graphics_create_info.pRasterizationState;
529      const VkPipelineRasterizationDepthClipStateCreateInfoEXT *depth_clip_state =
530         vk_find_struct_const(rsc->pNext, PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT);
531      state->rs_state.depth_clamp = rsc->depthClampEnable;
532      if (!depth_clip_state)
533         state->rs_state.depth_clip_near = state->rs_state.depth_clip_far = !rsc->depthClampEnable;
534      else
535         state->rs_state.depth_clip_near = state->rs_state.depth_clip_far = depth_clip_state->depthClipEnable;
536
537      if (!dynamic_states[conv_dynamic_state_idx(VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE_EXT)])
538         state->rs_state.rasterizer_discard = rsc->rasterizerDiscardEnable;
539
540      state->rs_state.line_smooth = pipeline->line_smooth;
541      state->rs_state.line_stipple_enable = pipeline->line_stipple_enable;
542      state->rs_state.fill_front = vk_polygon_mode_to_pipe(rsc->polygonMode);
543      state->rs_state.fill_back = vk_polygon_mode_to_pipe(rsc->polygonMode);
544      state->rs_state.point_size_per_vertex = true;
545      state->rs_state.flatshade_first = !pipeline->provoking_vertex_last;
546      state->rs_state.point_quad_rasterization = true;
547      state->rs_state.clip_halfz = true;
548      state->rs_state.half_pixel_center = true;
549      state->rs_state.scissor = true;
550      state->rs_state.no_ms_sample_mask_out = true;
551      state->rs_state.line_rectangular = pipeline->line_rectangular;
552
553      if (!dynamic_states[VK_DYNAMIC_STATE_LINE_WIDTH])
554         state->rs_state.line_width = rsc->lineWidth;
555      if (!dynamic_states[conv_dynamic_state_idx(VK_DYNAMIC_STATE_LINE_STIPPLE_EXT)]) {
556         state->rs_state.line_stipple_factor = pipeline->line_stipple_factor;
557         state->rs_state.line_stipple_pattern = pipeline->line_stipple_pattern;
558      }
559
560      if (!dynamic_states[conv_dynamic_state_idx(VK_DYNAMIC_STATE_DEPTH_BIAS_ENABLE_EXT)])
561         state->depth_bias.enabled = pipeline->graphics_create_info.pRasterizationState->depthBiasEnable;
562      if (!dynamic_states[VK_DYNAMIC_STATE_DEPTH_BIAS]) {
563         state->depth_bias.offset_units = rsc->depthBiasConstantFactor;
564         state->depth_bias.offset_scale = rsc->depthBiasSlopeFactor;
565         state->depth_bias.offset_clamp = rsc->depthBiasClamp;
566      }
567
568      if (!dynamic_states[conv_dynamic_state_idx(VK_DYNAMIC_STATE_CULL_MODE_EXT)])
569         state->rs_state.cull_face = vk_cull_to_pipe(rsc->cullMode);
570
571      if (!dynamic_states[conv_dynamic_state_idx(VK_DYNAMIC_STATE_FRONT_FACE_EXT)])
572         state->rs_state.front_ccw = (rsc->frontFace == VK_FRONT_FACE_COUNTER_CLOCKWISE);
573      state->rs_dirty = true;
574   }
575
576   state->disable_multisample = pipeline->disable_multisample;
577   if (pipeline->graphics_create_info.pMultisampleState) {
578      const VkPipelineMultisampleStateCreateInfo *ms = pipeline->graphics_create_info.pMultisampleState;
579      state->rs_state.multisample = ms->rasterizationSamples > 1;
580      state->sample_mask = ms->pSampleMask ? ms->pSampleMask[0] : 0xffffffff;
581      state->blend_state.alpha_to_coverage = ms->alphaToCoverageEnable;
582      state->blend_state.alpha_to_one = ms->alphaToOneEnable;
583      state->blend_dirty = true;
584      state->rs_dirty = true;
585      state->min_samples = 1;
586      state->sample_mask_dirty = true;
587      fb_samples = ms->rasterizationSamples;
588      if (ms->sampleShadingEnable) {
589         state->min_samples = ceil(ms->rasterizationSamples * ms->minSampleShading);
590         if (state->min_samples > 1)
591            state->min_samples = ms->rasterizationSamples;
592         if (state->min_samples < 1)
593            state->min_samples = 1;
594      }
595      if (pipeline->force_min_sample)
596         state->min_samples = ms->rasterizationSamples;
597      state->min_samples_dirty = true;
598   } else {
599      state->rs_state.multisample = false;
600      state->sample_mask_dirty = state->sample_mask != 0xffffffff;
601      state->sample_mask = 0xffffffff;
602      state->min_samples_dirty = state->min_samples;
603      state->min_samples = 0;
604      state->blend_dirty |= state->blend_state.alpha_to_coverage || state->blend_state.alpha_to_one;
605      state->blend_state.alpha_to_coverage = false;
606      state->blend_state.alpha_to_one = false;
607      state->rs_dirty = true;
608   }
609
610   if (pipeline->graphics_create_info.pDepthStencilState) {
611      const VkPipelineDepthStencilStateCreateInfo *dsa = pipeline->graphics_create_info.pDepthStencilState;
612
613      if (!dynamic_states[conv_dynamic_state_idx(VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT)])
614         state->dsa_state.depth_enabled = dsa->depthTestEnable;
615      if (!dynamic_states[conv_dynamic_state_idx(VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT)])
616         state->dsa_state.depth_writemask = dsa->depthWriteEnable;
617      if (!dynamic_states[conv_dynamic_state_idx(VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT)])
618         state->dsa_state.depth_func = dsa->depthCompareOp;
619      if (!dynamic_states[conv_dynamic_state_idx(VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE_EXT)])
620         state->dsa_state.depth_bounds_test = dsa->depthBoundsTestEnable;
621
622      if (!dynamic_states[VK_DYNAMIC_STATE_DEPTH_BOUNDS]) {
623         state->dsa_state.depth_bounds_min = dsa->minDepthBounds;
624         state->dsa_state.depth_bounds_max = dsa->maxDepthBounds;
625      }
626
627      if (!dynamic_states[conv_dynamic_state_idx(VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT)]) {
628         state->dsa_state.stencil[0].enabled = dsa->stencilTestEnable;
629         state->dsa_state.stencil[1].enabled = dsa->stencilTestEnable;
630      }
631
632      if (!dynamic_states[conv_dynamic_state_idx(VK_DYNAMIC_STATE_STENCIL_OP_EXT)]) {
633         state->dsa_state.stencil[0].func = dsa->front.compareOp;
634         state->dsa_state.stencil[0].fail_op = vk_conv_stencil_op(dsa->front.failOp);
635         state->dsa_state.stencil[0].zpass_op = vk_conv_stencil_op(dsa->front.passOp);
636         state->dsa_state.stencil[0].zfail_op = vk_conv_stencil_op(dsa->front.depthFailOp);
637
638         state->dsa_state.stencil[1].func = dsa->back.compareOp;
639         state->dsa_state.stencil[1].fail_op = vk_conv_stencil_op(dsa->back.failOp);
640         state->dsa_state.stencil[1].zpass_op = vk_conv_stencil_op(dsa->back.passOp);
641         state->dsa_state.stencil[1].zfail_op = vk_conv_stencil_op(dsa->back.depthFailOp);
642      }
643
644      if (!dynamic_states[VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK]) {
645         state->dsa_state.stencil[0].valuemask = dsa->front.compareMask;
646         state->dsa_state.stencil[1].valuemask = dsa->back.compareMask;
647      }
648
649      if (!dynamic_states[VK_DYNAMIC_STATE_STENCIL_WRITE_MASK]) {
650         state->dsa_state.stencil[0].writemask = dsa->front.writeMask;
651         state->dsa_state.stencil[1].writemask = dsa->back.writeMask;
652      }
653
654      if (dsa->stencilTestEnable) {
655         if (!dynamic_states[VK_DYNAMIC_STATE_STENCIL_REFERENCE]) {
656            state->stencil_ref.ref_value[0] = dsa->front.reference;
657            state->stencil_ref.ref_value[1] = dsa->back.reference;
658            state->stencil_ref_dirty = true;
659         }
660      }
661   } else
662      memset(&state->dsa_state, 0, sizeof(state->dsa_state));
663   state->dsa_dirty = true;
664
665   if (pipeline->graphics_create_info.pColorBlendState) {
666      const VkPipelineColorBlendStateCreateInfo *cb = pipeline->graphics_create_info.pColorBlendState;
667      int i;
668
669      if (cb->logicOpEnable) {
670         state->blend_state.logicop_enable = VK_TRUE;
671         if (!dynamic_states[conv_dynamic_state_idx(VK_DYNAMIC_STATE_LOGIC_OP_EXT)])
672            state->blend_state.logicop_func = vk_conv_logic_op(cb->logicOp);
673      }
674
675      if (cb->attachmentCount > 1)
676         state->blend_state.independent_blend_enable = true;
677      for (i = 0; i < cb->attachmentCount; i++) {
678         state->blend_state.rt[i].colormask = cb->pAttachments[i].colorWriteMask;
679         state->blend_state.rt[i].blend_enable = cb->pAttachments[i].blendEnable;
680         state->blend_state.rt[i].rgb_func = vk_conv_blend_func(cb->pAttachments[i].colorBlendOp);
681         state->blend_state.rt[i].rgb_src_factor = vk_conv_blend_factor(cb->pAttachments[i].srcColorBlendFactor);
682         state->blend_state.rt[i].rgb_dst_factor = vk_conv_blend_factor(cb->pAttachments[i].dstColorBlendFactor);
683         state->blend_state.rt[i].alpha_func = vk_conv_blend_func(cb->pAttachments[i].alphaBlendOp);
684         state->blend_state.rt[i].alpha_src_factor = vk_conv_blend_factor(cb->pAttachments[i].srcAlphaBlendFactor);
685         state->blend_state.rt[i].alpha_dst_factor = vk_conv_blend_factor(cb->pAttachments[i].dstAlphaBlendFactor);
686
687         /* At least llvmpipe applies the blend factor prior to the blend function,
688          * regardless of what function is used. (like i965 hardware).
689          * It means for MIN/MAX the blend factor has to be stomped to ONE.
690          */
691         if (cb->pAttachments[i].colorBlendOp == VK_BLEND_OP_MIN ||
692             cb->pAttachments[i].colorBlendOp == VK_BLEND_OP_MAX) {
693            state->blend_state.rt[i].rgb_src_factor = PIPE_BLENDFACTOR_ONE;
694            state->blend_state.rt[i].rgb_dst_factor = PIPE_BLENDFACTOR_ONE;
695         }
696
697         if (cb->pAttachments[i].alphaBlendOp == VK_BLEND_OP_MIN ||
698             cb->pAttachments[i].alphaBlendOp == VK_BLEND_OP_MAX) {
699            state->blend_state.rt[i].alpha_src_factor = PIPE_BLENDFACTOR_ONE;
700            state->blend_state.rt[i].alpha_dst_factor = PIPE_BLENDFACTOR_ONE;
701         }
702      }
703      state->blend_dirty = true;
704      if (!dynamic_states[VK_DYNAMIC_STATE_BLEND_CONSTANTS]) {
705         memcpy(state->blend_color.color, cb->blendConstants, 4 * sizeof(float));
706         state->blend_color_dirty = true;
707      }
708   } else {
709      memset(&state->blend_state, 0, sizeof(state->blend_state));
710      state->blend_dirty = true;
711   }
712
713   if (!dynamic_states[conv_dynamic_state_idx(VK_DYNAMIC_STATE_VERTEX_INPUT_EXT)]) {
714      const VkPipelineVertexInputStateCreateInfo *vi = pipeline->graphics_create_info.pVertexInputState;
715      int i;
716      const VkPipelineVertexInputDivisorStateCreateInfoEXT *div_state =
717         vk_find_struct_const(vi->pNext,
718                              PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT);
719
720      if (!dynamic_states[conv_dynamic_state_idx(VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT)]) {
721         for (i = 0; i < vi->vertexBindingDescriptionCount; i++) {
722            state->vb[vi->pVertexBindingDescriptions[i].binding].stride = vi->pVertexBindingDescriptions[i].stride;
723         }
724      }
725
726      int max_location = -1;
727      for (i = 0; i < vi->vertexAttributeDescriptionCount; i++) {
728         unsigned location = vi->pVertexAttributeDescriptions[i].location;
729         unsigned binding = vi->pVertexAttributeDescriptions[i].binding;
730         const struct VkVertexInputBindingDescription *desc_binding = NULL;
731         for (unsigned j = 0; j < vi->vertexBindingDescriptionCount; j++) {
732            const struct VkVertexInputBindingDescription *b = &vi->pVertexBindingDescriptions[j];
733            if (b->binding == binding) {
734               desc_binding = b;
735               break;
736            }
737         }
738         assert(desc_binding);
739         state->velem.velems[location].src_offset = vi->pVertexAttributeDescriptions[i].offset;
740         state->velem.velems[location].vertex_buffer_index = binding;
741         state->velem.velems[location].src_format = lvp_vk_format_to_pipe_format(vi->pVertexAttributeDescriptions[i].format);
742         state->velem.velems[location].dual_slot = false;
743
744         switch (desc_binding->inputRate) {
745         case VK_VERTEX_INPUT_RATE_VERTEX:
746            state->velem.velems[location].instance_divisor = 0;
747            break;
748         case VK_VERTEX_INPUT_RATE_INSTANCE:
749            if (div_state) {
750               for (unsigned j = 0; j < div_state->vertexBindingDivisorCount; j++) {
751                  const VkVertexInputBindingDivisorDescriptionEXT *desc =
752                     &div_state->pVertexBindingDivisors[j];
753                  if (desc->binding == state->velem.velems[location].vertex_buffer_index) {
754                     state->velem.velems[location].instance_divisor = desc->divisor;
755                     break;
756                  }
757               }
758            } else
759               state->velem.velems[location].instance_divisor = 1;
760            break;
761         default:
762            assert(0);
763            break;
764         }
765
766         if ((int)location > max_location)
767            max_location = location;
768      }
769      state->velem.count = max_location + 1;
770      state->vb_dirty = true;
771      state->ve_dirty = true;
772   }
773
774   {
775      const VkPipelineInputAssemblyStateCreateInfo *ia = pipeline->graphics_create_info.pInputAssemblyState;
776
777      if (!dynamic_states[conv_dynamic_state_idx(VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT)]) {
778         state->info.mode = vk_conv_topology(ia->topology);
779         state->rs_dirty = true;
780      }
781      if (!dynamic_states[conv_dynamic_state_idx(VK_DYNAMIC_STATE_PRIMITIVE_RESTART_ENABLE_EXT)])
782         state->info.primitive_restart = ia->primitiveRestartEnable;
783   }
784
785   if (pipeline->graphics_create_info.pTessellationState) {
786      if (!dynamic_states[conv_dynamic_state_idx(VK_DYNAMIC_STATE_PATCH_CONTROL_POINTS_EXT)]) {
787         const VkPipelineTessellationStateCreateInfo *ts = pipeline->graphics_create_info.pTessellationState;
788         state->patch_vertices = ts->patchControlPoints;
789      }
790   } else
791      state->patch_vertices = 0;
792
793   if (pipeline->graphics_create_info.pViewportState) {
794      const VkPipelineViewportStateCreateInfo *vpi= pipeline->graphics_create_info.pViewportState;
795      int i;
796
797      if (!dynamic_states[conv_dynamic_state_idx(VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT)]) {
798         state->num_viewports = vpi->viewportCount;
799         state->vp_dirty = true;
800      }
801      if (!dynamic_states[conv_dynamic_state_idx(VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT)]) {
802         state->num_scissors = vpi->scissorCount;
803         state->scissor_dirty = true;
804      }
805
806      if (!dynamic_states[VK_DYNAMIC_STATE_VIEWPORT] &&
807          !dynamic_states[conv_dynamic_state_idx(VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT)]) {
808         for (i = 0; i < vpi->viewportCount; i++)
809            get_viewport_xform(&vpi->pViewports[i], state->viewports[i].scale, state->viewports[i].translate);
810         state->vp_dirty = true;
811      }
812      if (!dynamic_states[VK_DYNAMIC_STATE_SCISSOR] &&
813          !dynamic_states[conv_dynamic_state_idx(VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT)]) {
814         for (i = 0; i < vpi->scissorCount; i++) {
815            const VkRect2D *ss = &vpi->pScissors[i];
816            state->scissors[i].minx = ss->offset.x;
817            state->scissors[i].miny = ss->offset.y;
818            state->scissors[i].maxx = ss->offset.x + ss->extent.width;
819            state->scissors[i].maxy = ss->offset.y + ss->extent.height;
820            state->scissor_dirty = true;
821         }
822
823      }
824   }
825
826   if (fb_samples != state->framebuffer.samples) {
827      state->framebuffer.samples = fb_samples;
828      state->pctx->set_framebuffer_state(state->pctx, &state->framebuffer);
829   }
830}
831
832static void handle_pipeline(struct vk_cmd_queue_entry *cmd,
833                            struct rendering_state *state)
834{
835   LVP_FROM_HANDLE(lvp_pipeline, pipeline, cmd->u.bind_pipeline.pipeline);
836   if (pipeline->is_compute_pipeline)
837      handle_compute_pipeline(cmd, state);
838   else
839      handle_graphics_pipeline(cmd, state);
840}
841
842static void vertex_buffers(uint32_t first_binding,
843                           uint32_t binding_count,
844                           const VkBuffer *buffers,
845                           const VkDeviceSize *offsets,
846                           const VkDeviceSize *strides,
847                           struct rendering_state *state)
848{
849   int i;
850   for (i = 0; i < binding_count; i++) {
851      int idx = i + first_binding;
852
853      state->vb[idx].buffer_offset = offsets[i];
854      state->vb[idx].buffer.resource = buffers[i] ? lvp_buffer_from_handle(buffers[i])->bo : NULL;
855
856      if (strides)
857         state->vb[idx].stride = strides[i];
858   }
859   if (first_binding < state->start_vb)
860      state->start_vb = first_binding;
861   if (first_binding + binding_count >= state->num_vb)
862      state->num_vb = first_binding + binding_count;
863   state->vb_dirty = true;
864}
865
866static void handle_vertex_buffers(struct vk_cmd_queue_entry *cmd,
867                                  struct rendering_state *state)
868{
869   struct vk_cmd_bind_vertex_buffers *vcb = &cmd->u.bind_vertex_buffers;
870
871   vertex_buffers(vcb->first_binding,
872                  vcb->binding_count,
873                  vcb->buffers,
874                  vcb->offsets,
875                  NULL,
876                  state);
877}
878
879static void handle_vertex_buffers2(struct vk_cmd_queue_entry *cmd,
880                                   struct rendering_state *state)
881{
882   struct vk_cmd_bind_vertex_buffers2_ext *vcb = &cmd->u.bind_vertex_buffers2_ext;
883
884   vertex_buffers(vcb->first_binding,
885                  vcb->binding_count,
886                  vcb->buffers,
887                  vcb->offsets,
888                  vcb->strides,
889                  state);
890}
891
892struct dyn_info {
893   struct {
894      uint16_t const_buffer_count;
895      uint16_t shader_buffer_count;
896      uint16_t sampler_count;
897      uint16_t sampler_view_count;
898      uint16_t image_count;
899   } stage[MESA_SHADER_STAGES];
900
901   uint32_t dyn_index;
902   const uint32_t *dynamic_offsets;
903   uint32_t dynamic_offset_count;
904};
905
906static void fill_sampler(struct pipe_sampler_state *ss,
907                         struct lvp_sampler *samp)
908{
909   ss->wrap_s = vk_conv_wrap_mode(samp->create_info.addressModeU);
910   ss->wrap_t = vk_conv_wrap_mode(samp->create_info.addressModeV);
911   ss->wrap_r = vk_conv_wrap_mode(samp->create_info.addressModeW);
912   ss->min_img_filter = samp->create_info.minFilter == VK_FILTER_LINEAR ? PIPE_TEX_FILTER_LINEAR : PIPE_TEX_FILTER_NEAREST;
913   ss->min_mip_filter = samp->create_info.mipmapMode == VK_SAMPLER_MIPMAP_MODE_LINEAR ? PIPE_TEX_MIPFILTER_LINEAR : PIPE_TEX_MIPFILTER_NEAREST;
914   ss->mag_img_filter = samp->create_info.magFilter == VK_FILTER_LINEAR ? PIPE_TEX_FILTER_LINEAR : PIPE_TEX_FILTER_NEAREST;
915   ss->min_lod = samp->create_info.minLod;
916   ss->max_lod = samp->create_info.maxLod;
917   ss->lod_bias = samp->create_info.mipLodBias;
918   if (samp->create_info.anisotropyEnable)
919      ss->max_anisotropy = samp->create_info.maxAnisotropy;
920   else
921      ss->max_anisotropy = 1;
922   ss->normalized_coords = !samp->create_info.unnormalizedCoordinates;
923   ss->compare_mode = samp->create_info.compareEnable ? PIPE_TEX_COMPARE_R_TO_TEXTURE : PIPE_TEX_COMPARE_NONE;
924   ss->compare_func = samp->create_info.compareOp;
925   ss->seamless_cube_map = true;
926   ss->reduction_mode = samp->reduction_mode;
927   memcpy(&ss->border_color, &samp->border_color,
928          sizeof(union pipe_color_union));
929}
930
931static void fill_sampler_stage(struct rendering_state *state,
932                               struct dyn_info *dyn_info,
933                               gl_shader_stage stage,
934                               enum pipe_shader_type p_stage,
935                               int array_idx,
936                               const union lvp_descriptor_info *descriptor,
937                               const struct lvp_descriptor_set_binding_layout *binding)
938{
939   int ss_idx = binding->stage[stage].sampler_index;
940   if (ss_idx == -1)
941      return;
942   ss_idx += array_idx;
943   ss_idx += dyn_info->stage[stage].sampler_count;
944   fill_sampler(&state->ss[p_stage][ss_idx], binding->immutable_samplers ? binding->immutable_samplers[array_idx] : descriptor->sampler);
945   if (state->num_sampler_states[p_stage] <= ss_idx)
946      state->num_sampler_states[p_stage] = ss_idx + 1;
947   state->ss_dirty[p_stage] = true;
948}
949
950#define fix_depth_swizzle(x) do { \
951  if (x > PIPE_SWIZZLE_X && x < PIPE_SWIZZLE_0) \
952    x = PIPE_SWIZZLE_0;				\
953  } while (0)
954#define fix_depth_swizzle_a(x) do { \
955  if (x > PIPE_SWIZZLE_X && x < PIPE_SWIZZLE_0) \
956    x = PIPE_SWIZZLE_1;				\
957  } while (0)
958
959static void fill_sampler_view_stage(struct rendering_state *state,
960                                    struct dyn_info *dyn_info,
961                                    gl_shader_stage stage,
962                                    enum pipe_shader_type p_stage,
963                                    int array_idx,
964                                    const union lvp_descriptor_info *descriptor,
965                                    const struct lvp_descriptor_set_binding_layout *binding)
966{
967   int sv_idx = binding->stage[stage].sampler_view_index;
968   if (sv_idx == -1)
969      return;
970   sv_idx += array_idx;
971   sv_idx += dyn_info->stage[stage].sampler_view_count;
972   struct lvp_image_view *iv = descriptor->iview;
973   struct pipe_sampler_view templ;
974
975   enum pipe_format pformat;
976   if (iv->subresourceRange.aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT)
977      pformat = lvp_vk_format_to_pipe_format(iv->format);
978   else if (iv->subresourceRange.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT)
979      pformat = util_format_stencil_only(lvp_vk_format_to_pipe_format(iv->format));
980   else
981      pformat = lvp_vk_format_to_pipe_format(iv->format);
982   u_sampler_view_default_template(&templ,
983                                   iv->image->bo,
984                                   pformat);
985   if (iv->view_type == VK_IMAGE_VIEW_TYPE_1D)
986      templ.target = PIPE_TEXTURE_1D;
987   if (iv->view_type == VK_IMAGE_VIEW_TYPE_2D)
988      templ.target = PIPE_TEXTURE_2D;
989   if (iv->view_type == VK_IMAGE_VIEW_TYPE_CUBE)
990      templ.target = PIPE_TEXTURE_CUBE;
991   if (iv->view_type == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
992      templ.target = PIPE_TEXTURE_CUBE_ARRAY;
993   templ.u.tex.first_layer = iv->subresourceRange.baseArrayLayer;
994   templ.u.tex.last_layer = iv->subresourceRange.baseArrayLayer + lvp_get_layerCount(iv->image, &iv->subresourceRange) - 1;
995   templ.u.tex.first_level = iv->subresourceRange.baseMipLevel;
996   templ.u.tex.last_level = iv->subresourceRange.baseMipLevel + lvp_get_levelCount(iv->image, &iv->subresourceRange) - 1;
997   if (iv->components.r != VK_COMPONENT_SWIZZLE_IDENTITY)
998      templ.swizzle_r = vk_conv_swizzle(iv->components.r);
999   if (iv->components.g != VK_COMPONENT_SWIZZLE_IDENTITY)
1000      templ.swizzle_g = vk_conv_swizzle(iv->components.g);
1001   if (iv->components.b != VK_COMPONENT_SWIZZLE_IDENTITY)
1002      templ.swizzle_b = vk_conv_swizzle(iv->components.b);
1003   if (iv->components.a != VK_COMPONENT_SWIZZLE_IDENTITY)
1004      templ.swizzle_a = vk_conv_swizzle(iv->components.a);
1005
1006   /* depth stencil swizzles need special handling to pass VK CTS
1007    * but also for zink GL tests.
1008    * piping A swizzle into R fixes GL_ALPHA depth texture mode
1009    * only swizzling from R/0/1 (for alpha) fixes VK CTS tests
1010    * and a bunch of zink tests.
1011   */
1012   if (iv->subresourceRange.aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT ||
1013       iv->subresourceRange.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT) {
1014      if (templ.swizzle_a == PIPE_SWIZZLE_X)
1015         templ.swizzle_r = PIPE_SWIZZLE_X;
1016      fix_depth_swizzle(templ.swizzle_r);
1017      fix_depth_swizzle(templ.swizzle_g);
1018      fix_depth_swizzle(templ.swizzle_b);
1019      fix_depth_swizzle_a(templ.swizzle_a);
1020   }
1021
1022   if (state->sv[p_stage][sv_idx])
1023      pipe_sampler_view_reference(&state->sv[p_stage][sv_idx], NULL);
1024   state->sv[p_stage][sv_idx] = state->pctx->create_sampler_view(state->pctx, iv->image->bo, &templ);
1025   if (state->num_sampler_views[p_stage] <= sv_idx)
1026      state->num_sampler_views[p_stage] = sv_idx + 1;
1027   state->sv_dirty[p_stage] = true;
1028}
1029
1030static void fill_sampler_buffer_view_stage(struct rendering_state *state,
1031                                           struct dyn_info *dyn_info,
1032                                           gl_shader_stage stage,
1033                                           enum pipe_shader_type p_stage,
1034                                           int array_idx,
1035                                           const union lvp_descriptor_info *descriptor,
1036                                           const struct lvp_descriptor_set_binding_layout *binding)
1037{
1038   int sv_idx = binding->stage[stage].sampler_view_index;
1039   if (sv_idx == -1)
1040      return;
1041   sv_idx += array_idx;
1042   sv_idx += dyn_info->stage[stage].sampler_view_count;
1043   struct lvp_buffer_view *bv = descriptor->buffer_view;
1044   struct pipe_sampler_view templ;
1045   memset(&templ, 0, sizeof(templ));
1046   templ.target = PIPE_BUFFER;
1047   templ.swizzle_r = PIPE_SWIZZLE_X;
1048   templ.swizzle_g = PIPE_SWIZZLE_Y;
1049   templ.swizzle_b = PIPE_SWIZZLE_Z;
1050   templ.swizzle_a = PIPE_SWIZZLE_W;
1051   templ.format = bv->pformat;
1052   templ.u.buf.offset = bv->offset + bv->buffer->offset;
1053   templ.u.buf.size = bv->range == VK_WHOLE_SIZE ? (bv->buffer->size - bv->offset) : bv->range;
1054   templ.texture = bv->buffer->bo;
1055   templ.context = state->pctx;
1056
1057   if (state->sv[p_stage][sv_idx])
1058      pipe_sampler_view_reference(&state->sv[p_stage][sv_idx], NULL);
1059   state->sv[p_stage][sv_idx] = state->pctx->create_sampler_view(state->pctx, bv->buffer->bo, &templ);
1060   if (state->num_sampler_views[p_stage] <= sv_idx)
1061      state->num_sampler_views[p_stage] = sv_idx + 1;
1062   state->sv_dirty[p_stage] = true;
1063}
1064
1065static void fill_image_view_stage(struct rendering_state *state,
1066                                  struct dyn_info *dyn_info,
1067                                  gl_shader_stage stage,
1068                                  enum pipe_shader_type p_stage,
1069                                  int array_idx,
1070                                  const union lvp_descriptor_info *descriptor,
1071                                  const struct lvp_descriptor_set_binding_layout *binding)
1072{
1073   struct lvp_image_view *iv = descriptor->iview;
1074   int idx = binding->stage[stage].image_index;
1075   if (idx == -1)
1076      return;
1077   idx += array_idx;
1078   idx += dyn_info->stage[stage].image_count;
1079   state->iv[p_stage][idx].resource = iv->image->bo;
1080   if (iv->subresourceRange.aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT)
1081      state->iv[p_stage][idx].format = lvp_vk_format_to_pipe_format(iv->format);
1082   else if (iv->subresourceRange.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT)
1083      state->iv[p_stage][idx].format = util_format_stencil_only(lvp_vk_format_to_pipe_format(iv->format));
1084   else
1085      state->iv[p_stage][idx].format = lvp_vk_format_to_pipe_format(iv->format);
1086
1087   if (iv->view_type == VK_IMAGE_VIEW_TYPE_3D) {
1088      state->iv[p_stage][idx].u.tex.first_layer = 0;
1089      state->iv[p_stage][idx].u.tex.last_layer = u_minify(iv->image->bo->depth0, iv->subresourceRange.baseMipLevel) - 1;
1090   } else {
1091      state->iv[p_stage][idx].u.tex.first_layer = iv->subresourceRange.baseArrayLayer;
1092      state->iv[p_stage][idx].u.tex.last_layer = iv->subresourceRange.baseArrayLayer + lvp_get_layerCount(iv->image, &iv->subresourceRange) - 1;
1093   }
1094   state->iv[p_stage][idx].u.tex.level = iv->subresourceRange.baseMipLevel;
1095   if (state->num_shader_images[p_stage] <= idx)
1096      state->num_shader_images[p_stage] = idx + 1;
1097   state->iv_dirty[p_stage] = true;
1098}
1099
1100static void fill_image_buffer_view_stage(struct rendering_state *state,
1101                                         struct dyn_info *dyn_info,
1102                                         gl_shader_stage stage,
1103                                         enum pipe_shader_type p_stage,
1104                                         int array_idx,
1105                                         const union lvp_descriptor_info *descriptor,
1106                                         const struct lvp_descriptor_set_binding_layout *binding)
1107{
1108   struct lvp_buffer_view *bv = descriptor->buffer_view;
1109   int idx = binding->stage[stage].image_index;
1110   if (idx == -1)
1111      return;
1112   idx += array_idx;
1113   idx += dyn_info->stage[stage].image_count;
1114   state->iv[p_stage][idx].resource = bv->buffer->bo;
1115   state->iv[p_stage][idx].format = bv->pformat;
1116   state->iv[p_stage][idx].u.buf.offset = bv->offset + bv->buffer->offset;
1117   state->iv[p_stage][idx].u.buf.size = bv->range == VK_WHOLE_SIZE ? (bv->buffer->size - bv->offset): bv->range;
1118   if (state->num_shader_images[p_stage] <= idx)
1119      state->num_shader_images[p_stage] = idx + 1;
1120   state->iv_dirty[p_stage] = true;
1121}
1122
1123static void handle_descriptor(struct rendering_state *state,
1124                              struct dyn_info *dyn_info,
1125                              const struct lvp_descriptor_set_binding_layout *binding,
1126                              gl_shader_stage stage,
1127                              enum pipe_shader_type p_stage,
1128                              int array_idx,
1129                              VkDescriptorType type,
1130                              const union lvp_descriptor_info *descriptor)
1131{
1132   bool is_dynamic = type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
1133      type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
1134
1135   switch (type) {
1136   case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
1137   case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
1138      fill_image_view_stage(state, dyn_info, stage, p_stage, array_idx, descriptor, binding);
1139      break;
1140   }
1141   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1142   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: {
1143      int idx = binding->stage[stage].const_buffer_index;
1144      if (idx == -1)
1145         return;
1146      idx += array_idx;
1147      idx += dyn_info->stage[stage].const_buffer_count;
1148      state->const_buffer[p_stage][idx].buffer = descriptor->buffer->bo;
1149      state->const_buffer[p_stage][idx].buffer_offset = descriptor->offset + descriptor->buffer->offset;
1150      if (is_dynamic) {
1151         uint32_t offset = dyn_info->dynamic_offsets[dyn_info->dyn_index + binding->dynamic_index + array_idx];
1152         state->const_buffer[p_stage][idx].buffer_offset += offset;
1153      }
1154      if (descriptor->range == VK_WHOLE_SIZE)
1155         state->const_buffer[p_stage][idx].buffer_size = descriptor->buffer->bo->width0 - state->const_buffer[p_stage][idx].buffer_offset;
1156      else
1157         state->const_buffer[p_stage][idx].buffer_size = descriptor->range;
1158      if (state->num_const_bufs[p_stage] <= idx)
1159         state->num_const_bufs[p_stage] = idx + 1;
1160      state->constbuf_dirty[p_stage] = true;
1161      break;
1162   }
1163   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1164   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
1165      int idx = binding->stage[stage].shader_buffer_index;
1166      if (idx == -1)
1167         return;
1168      idx += array_idx;
1169      idx += dyn_info->stage[stage].shader_buffer_count;
1170      state->sb[p_stage][idx].buffer = descriptor->buffer->bo;
1171      state->sb[p_stage][idx].buffer_offset = descriptor->offset + descriptor->buffer->offset;
1172      if (is_dynamic) {
1173         uint32_t offset = dyn_info->dynamic_offsets[dyn_info->dyn_index + binding->dynamic_index + array_idx];
1174         state->sb[p_stage][idx].buffer_offset += offset;
1175      }
1176      if (descriptor->range == VK_WHOLE_SIZE)
1177         state->sb[p_stage][idx].buffer_size = descriptor->buffer->bo->width0 - state->sb[p_stage][idx].buffer_offset;
1178      else
1179         state->sb[p_stage][idx].buffer_size = descriptor->range;
1180      if (state->num_shader_buffers[p_stage] <= idx)
1181         state->num_shader_buffers[p_stage] = idx + 1;
1182      state->sb_dirty[p_stage] = true;
1183      break;
1184   }
1185   case VK_DESCRIPTOR_TYPE_SAMPLER:
1186      if (!descriptor->sampler)
1187         return;
1188      fill_sampler_stage(state, dyn_info, stage, p_stage, array_idx, descriptor, binding);
1189      break;
1190   case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
1191      fill_sampler_view_stage(state, dyn_info, stage, p_stage, array_idx, descriptor, binding);
1192      break;
1193   case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1194      fill_sampler_stage(state, dyn_info, stage, p_stage, array_idx, descriptor, binding);
1195      fill_sampler_view_stage(state, dyn_info, stage, p_stage, array_idx, descriptor, binding);
1196      break;
1197   case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1198      fill_sampler_buffer_view_stage(state, dyn_info, stage, p_stage, array_idx, descriptor, binding);
1199      break;
1200   case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1201      fill_image_buffer_view_stage(state, dyn_info, stage, p_stage, array_idx, descriptor, binding);
1202      break;
1203   default:
1204      fprintf(stderr, "Unhandled descriptor set %d\n", type);
1205      break;
1206   }
1207}
1208
1209static void handle_set_stage(struct rendering_state *state,
1210                             struct dyn_info *dyn_info,
1211                             const struct lvp_descriptor_set *set,
1212                             gl_shader_stage stage,
1213                             enum pipe_shader_type p_stage)
1214{
1215   int j;
1216   for (j = 0; j < set->layout->binding_count; j++) {
1217      const struct lvp_descriptor_set_binding_layout *binding;
1218      const struct lvp_descriptor *descriptor;
1219      binding = &set->layout->binding[j];
1220
1221      if (binding->valid) {
1222         for (int i = 0; i < binding->array_size; i++) {
1223            descriptor = &set->descriptors[binding->descriptor_index + i];
1224            handle_descriptor(state, dyn_info, binding, stage, p_stage, i, descriptor->type, &descriptor->info);
1225         }
1226      }
1227   }
1228}
1229
1230static void increment_dyn_info(struct dyn_info *dyn_info,
1231                               struct lvp_descriptor_set_layout *layout, bool inc_dyn)
1232{
1233   for (gl_shader_stage stage = MESA_SHADER_VERTEX; stage < MESA_SHADER_STAGES; stage++) {
1234      dyn_info->stage[stage].const_buffer_count += layout->stage[stage].const_buffer_count;
1235      dyn_info->stage[stage].shader_buffer_count += layout->stage[stage].shader_buffer_count;
1236      dyn_info->stage[stage].sampler_count += layout->stage[stage].sampler_count;
1237      dyn_info->stage[stage].sampler_view_count += layout->stage[stage].sampler_view_count;
1238      dyn_info->stage[stage].image_count += layout->stage[stage].image_count;
1239   }
1240   if (inc_dyn)
1241      dyn_info->dyn_index += layout->dynamic_offset_count;
1242}
1243
1244static void handle_compute_descriptor_sets(struct vk_cmd_queue_entry *cmd,
1245                                           struct dyn_info *dyn_info,
1246                                           struct rendering_state *state)
1247{
1248   struct vk_cmd_bind_descriptor_sets *bds = &cmd->u.bind_descriptor_sets;
1249   struct lvp_descriptor_set_layout **set_layout = cmd->driver_data;
1250   int i;
1251
1252   for (i = 0; i < bds->first_set; i++) {
1253      increment_dyn_info(dyn_info, set_layout[i], false);
1254   }
1255   for (i = 0; i < bds->descriptor_set_count; i++) {
1256      const struct lvp_descriptor_set *set = lvp_descriptor_set_from_handle(bds->descriptor_sets[i]);
1257
1258      if (set->layout->shader_stages & VK_SHADER_STAGE_COMPUTE_BIT)
1259         handle_set_stage(state, dyn_info, set, MESA_SHADER_COMPUTE, PIPE_SHADER_COMPUTE);
1260      increment_dyn_info(dyn_info, set_layout[bds->first_set + i], true);
1261   }
1262}
1263
1264static void handle_descriptor_sets(struct vk_cmd_queue_entry *cmd,
1265                                   struct rendering_state *state)
1266{
1267   struct vk_cmd_bind_descriptor_sets *bds = &cmd->u.bind_descriptor_sets;
1268   struct lvp_descriptor_set_layout **set_layout = cmd->driver_data;
1269   int i;
1270   struct dyn_info dyn_info;
1271
1272   dyn_info.dyn_index = 0;
1273   dyn_info.dynamic_offsets = bds->dynamic_offsets;
1274   dyn_info.dynamic_offset_count = bds->dynamic_offset_count;
1275
1276   memset(dyn_info.stage, 0, sizeof(dyn_info.stage));
1277   if (bds->pipeline_bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
1278      handle_compute_descriptor_sets(cmd, &dyn_info, state);
1279      return;
1280   }
1281
1282   for (i = 0; i < bds->first_set; i++) {
1283      increment_dyn_info(&dyn_info, set_layout[i], false);
1284   }
1285
1286   for (i = 0; i < bds->descriptor_set_count; i++) {
1287      const struct lvp_descriptor_set *set = lvp_descriptor_set_from_handle(bds->descriptor_sets[i]);
1288
1289      if (set->layout->shader_stages & VK_SHADER_STAGE_VERTEX_BIT)
1290         handle_set_stage(state, &dyn_info, set, MESA_SHADER_VERTEX, PIPE_SHADER_VERTEX);
1291
1292      if (set->layout->shader_stages & VK_SHADER_STAGE_GEOMETRY_BIT)
1293         handle_set_stage(state, &dyn_info, set, MESA_SHADER_GEOMETRY, PIPE_SHADER_GEOMETRY);
1294
1295      if (set->layout->shader_stages & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT)
1296         handle_set_stage(state, &dyn_info, set, MESA_SHADER_TESS_CTRL, PIPE_SHADER_TESS_CTRL);
1297
1298      if (set->layout->shader_stages & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)
1299         handle_set_stage(state, &dyn_info, set, MESA_SHADER_TESS_EVAL, PIPE_SHADER_TESS_EVAL);
1300
1301      if (set->layout->shader_stages & VK_SHADER_STAGE_FRAGMENT_BIT)
1302         handle_set_stage(state, &dyn_info, set, MESA_SHADER_FRAGMENT, PIPE_SHADER_FRAGMENT);
1303
1304      increment_dyn_info(&dyn_info, set_layout[bds->first_set + i], true);
1305   }
1306}
1307
1308static struct pipe_surface *create_img_surface_bo(struct rendering_state *state,
1309                                                  VkImageSubresourceRange *range,
1310                                                  struct pipe_resource *bo,
1311                                                  enum pipe_format pformat,
1312                                                  int width,
1313                                                  int height,
1314                                                  int base_layer, int layer_count,
1315                                                  int level)
1316{
1317   struct pipe_surface template;
1318
1319   memset(&template, 0, sizeof(struct pipe_surface));
1320
1321   template.format = pformat;
1322   template.width = width;
1323   template.height = height;
1324   template.u.tex.first_layer = range->baseArrayLayer + base_layer;
1325   template.u.tex.last_layer = range->baseArrayLayer + layer_count;
1326   template.u.tex.level = range->baseMipLevel + level;
1327
1328   if (template.format == PIPE_FORMAT_NONE)
1329      return NULL;
1330   return state->pctx->create_surface(state->pctx,
1331                                      bo, &template);
1332
1333}
1334static struct pipe_surface *create_img_surface(struct rendering_state *state,
1335                                               struct lvp_image_view *imgv,
1336                                               VkFormat format, int width,
1337                                               int height,
1338                                               int base_layer, int layer_count)
1339{
1340   return create_img_surface_bo(state, &imgv->subresourceRange, imgv->image->bo,
1341                                lvp_vk_format_to_pipe_format(format), width, height, base_layer, layer_count, 0);
1342}
1343
1344static void add_img_view_surface(struct rendering_state *state,
1345                                 struct lvp_image_view *imgv, VkFormat format, int width, int height)
1346{
1347   if (!imgv->surface) {
1348      imgv->surface = create_img_surface(state, imgv, format,
1349                                         width, height,
1350                                         0, lvp_get_layerCount(imgv->image, &imgv->subresourceRange) - 1);
1351   }
1352}
1353
1354static inline bool
1355attachment_needs_clear(struct rendering_state *state,
1356                       uint32_t a)
1357{
1358   const struct lvp_subpass *subpass = &state->pass->subpasses[state->subpass];
1359   uint32_t view_mask = subpass->view_mask;
1360   return (a != VK_ATTACHMENT_UNUSED &&
1361           state->pending_clear_aspects[a] &&
1362           (!view_mask || (view_mask & ~state->cleared_views[a])));
1363}
1364
1365static bool
1366subpass_needs_clear(struct rendering_state *state)
1367{
1368   uint32_t a;
1369   const struct lvp_subpass *subpass = &state->pass->subpasses[state->subpass];
1370   for (uint32_t i = 0; i < subpass->color_count; i++) {
1371      a = subpass->color_attachments[i].attachment;
1372      if (attachment_needs_clear(state, a))
1373         return true;
1374   }
1375   if (subpass->depth_stencil_attachment) {
1376      a = subpass->depth_stencil_attachment->attachment;
1377      if (attachment_needs_clear(state, a))
1378         return true;
1379   }
1380   return false;
1381}
1382
1383static void clear_attachment_layers(struct rendering_state *state,
1384                                    struct lvp_image_view *imgv,
1385                                    VkRect2D *rect,
1386                                    unsigned base_layer, unsigned layer_count,
1387                                    unsigned ds_clear_flags, double dclear_val,
1388                                    uint32_t sclear_val,
1389                                    union pipe_color_union *col_val)
1390{
1391   struct pipe_surface *clear_surf = create_img_surface(state,
1392                                                        imgv,
1393                                                        imgv->format,
1394                                                        state->framebuffer.width,
1395                                                        state->framebuffer.height,
1396                                                        base_layer,
1397                                                        base_layer + layer_count - 1);
1398
1399   if (ds_clear_flags) {
1400      state->pctx->clear_depth_stencil(state->pctx,
1401                                       clear_surf,
1402                                       ds_clear_flags,
1403                                       dclear_val, sclear_val,
1404                                       rect->offset.x, rect->offset.y,
1405                                       rect->extent.width, rect->extent.height,
1406                                       true);
1407   } else {
1408      state->pctx->clear_render_target(state->pctx, clear_surf,
1409                                       col_val,
1410                                       rect->offset.x, rect->offset.y,
1411                                       rect->extent.width, rect->extent.height,
1412                                       true);
1413   }
1414   state->pctx->surface_destroy(state->pctx, clear_surf);
1415}
1416
1417static struct lvp_image_view *
1418get_attachment(struct rendering_state *state,
1419               unsigned idx)
1420{
1421   if (state->imageless_views)
1422      return state->imageless_views[idx];
1423   else
1424      return state->vk_framebuffer->attachments[idx];
1425}
1426
1427static void render_subpass_clear(struct rendering_state *state)
1428{
1429   const struct lvp_subpass *subpass = &state->pass->subpasses[state->subpass];
1430
1431   for (unsigned i = 0; i < subpass->color_count; i++) {
1432      uint32_t a = subpass->color_attachments[i].attachment;
1433
1434      if (!attachment_needs_clear(state, a))
1435         continue;
1436
1437      union pipe_color_union color_clear_val = { 0 };
1438      const VkClearValue value = state->attachments[a].clear_value;
1439      color_clear_val.ui[0] = value.color.uint32[0];
1440      color_clear_val.ui[1] = value.color.uint32[1];
1441      color_clear_val.ui[2] = value.color.uint32[2];
1442      color_clear_val.ui[3] = value.color.uint32[3];
1443
1444      struct lvp_image_view *imgv = get_attachment(state, a);
1445
1446      assert(imgv->surface);
1447
1448      if (subpass->view_mask) {
1449         u_foreach_bit(i, subpass->view_mask)
1450            clear_attachment_layers(state, imgv, &state->render_area,
1451                                    i, 1, 0, 0, 0, &color_clear_val);
1452         state->cleared_views[a] |= subpass->view_mask;
1453      } else {
1454         state->pctx->clear_render_target(state->pctx,
1455                                          imgv->surface,
1456                                          &color_clear_val,
1457                                          state->render_area.offset.x, state->render_area.offset.y,
1458                                          state->render_area.extent.width, state->render_area.extent.height,
1459                                          false);
1460         state->pending_clear_aspects[a] = 0;
1461      }
1462   }
1463
1464   if (subpass->depth_stencil_attachment) {
1465      uint32_t ds = subpass->depth_stencil_attachment->attachment;
1466
1467      if (!attachment_needs_clear(state, ds))
1468         return;
1469
1470      struct lvp_render_pass_attachment *att = &state->pass->attachments[ds];
1471      struct lvp_image_view *imgv = get_attachment(state, ds);
1472
1473      assert (util_format_is_depth_or_stencil(imgv->surface->format));
1474
1475      const struct util_format_description *desc = util_format_description(imgv->surface->format);
1476      double dclear_val = 0;
1477      uint32_t sclear_val = 0;
1478      uint32_t ds_clear_flags = 0;
1479
1480      if ((util_format_has_stencil(desc) && att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) ||
1481          (util_format_is_depth_and_stencil(imgv->surface->format) && att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_DONT_CARE)) {
1482         ds_clear_flags |= PIPE_CLEAR_STENCIL;
1483         if (att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
1484            sclear_val = state->attachments[ds].clear_value.depthStencil.stencil;
1485      }
1486      if ((util_format_has_depth(desc) && att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) ||
1487          (util_format_is_depth_and_stencil(imgv->surface->format) && att->load_op == VK_ATTACHMENT_LOAD_OP_DONT_CARE)) {
1488         ds_clear_flags |= PIPE_CLEAR_DEPTH;
1489         if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
1490            dclear_val = state->attachments[ds].clear_value.depthStencil.depth;
1491      }
1492
1493      assert(imgv->surface);
1494      if (ds_clear_flags) {
1495         if (subpass->view_mask) {
1496            u_foreach_bit(i, subpass->view_mask)
1497               clear_attachment_layers(state, imgv, &state->render_area,
1498                                       i, 1, ds_clear_flags, dclear_val, sclear_val, NULL);
1499            state->cleared_views[ds] |= subpass->view_mask;
1500         } else {
1501            state->pctx->clear_depth_stencil(state->pctx,
1502                                             imgv->surface,
1503                                             ds_clear_flags,
1504                                             dclear_val, sclear_val,
1505                                             state->render_area.offset.x, state->render_area.offset.y,
1506                                             state->render_area.extent.width, state->render_area.extent.height,
1507                                             false);
1508            state->pending_clear_aspects[ds] = 0;
1509         }
1510      }
1511
1512   }
1513
1514}
1515
1516static void render_subpass_clear_fast(struct rendering_state *state)
1517{
1518   /* attempt to use the clear interface first, then fallback to per-attchment clears */
1519   const struct lvp_subpass *subpass = &state->pass->subpasses[state->subpass];
1520   bool has_color_value = false;
1521   uint32_t buffers = 0;
1522   VkClearValue color_value = {0};
1523   double dclear_val = 0;
1524   uint32_t sclear_val = 0;
1525
1526   /*
1527    * the state tracker clear interface only works if all the attachments have the same
1528    * clear color.
1529    */
1530   /* llvmpipe doesn't support scissored clears yet */
1531   if (state->render_area.offset.x || state->render_area.offset.y)
1532      goto slow_clear;
1533
1534   if (state->render_area.extent.width != state->framebuffer.width ||
1535       state->render_area.extent.height != state->framebuffer.height)
1536      goto slow_clear;
1537
1538   if (subpass->view_mask)
1539      goto slow_clear;
1540   for (unsigned i = 0; i < subpass->color_count; i++) {
1541      uint32_t a = subpass->color_attachments[i].attachment;
1542
1543      if (!attachment_needs_clear(state, a))
1544         continue;
1545
1546      if (has_color_value) {
1547         if (memcmp(&color_value, &state->attachments[a].clear_value, sizeof(VkClearValue)))
1548            goto slow_clear;
1549      } else {
1550         memcpy(&color_value, &state->attachments[a].clear_value, sizeof(VkClearValue));
1551         has_color_value = true;
1552      }
1553   }
1554
1555   for (unsigned i = 0; i < subpass->color_count; i++) {
1556      uint32_t a = subpass->color_attachments[i].attachment;
1557
1558      if (!attachment_needs_clear(state, a))
1559         continue;
1560      buffers |= (PIPE_CLEAR_COLOR0 << i);
1561      state->pending_clear_aspects[a] = 0;
1562   }
1563
1564   if (subpass->depth_stencil_attachment &&
1565       attachment_needs_clear(state, subpass->depth_stencil_attachment->attachment)) {
1566      uint32_t ds = subpass->depth_stencil_attachment->attachment;
1567
1568      struct lvp_render_pass_attachment *att = &state->pass->attachments[ds];
1569      struct lvp_image_view *imgv = get_attachment(state, ds);
1570      const struct util_format_description *desc = util_format_description(imgv->surface->format);
1571
1572      /* also clear stencil for don't care to avoid RMW */
1573      if ((util_format_has_stencil(desc) && att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) ||
1574          (util_format_is_depth_and_stencil(imgv->surface->format) && att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_DONT_CARE))
1575         buffers |= PIPE_CLEAR_STENCIL;
1576      if (util_format_has_depth(desc) && att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
1577         buffers |= PIPE_CLEAR_DEPTH;
1578
1579      dclear_val = state->attachments[ds].clear_value.depthStencil.depth;
1580      sclear_val = state->attachments[ds].clear_value.depthStencil.stencil;
1581      state->pending_clear_aspects[ds] = 0;
1582   }
1583
1584   union pipe_color_union col_val;
1585   for (unsigned i = 0; i < 4; i++)
1586      col_val.ui[i] = color_value.color.uint32[i];
1587
1588   state->pctx->clear(state->pctx, buffers,
1589                      NULL, &col_val,
1590                      dclear_val, sclear_val);
1591   return;
1592slow_clear:
1593   render_subpass_clear(state);
1594}
1595
1596static void render_pass_resolve(struct rendering_state *state)
1597{
1598   const struct lvp_subpass *subpass = &state->pass->subpasses[state->subpass];
1599
1600   if (subpass->depth_stencil_attachment && subpass->ds_resolve_attachment) {
1601      struct lvp_subpass_attachment src_att = *subpass->depth_stencil_attachment;
1602      struct lvp_subpass_attachment dst_att = *subpass->ds_resolve_attachment;
1603      if (dst_att.attachment != VK_ATTACHMENT_UNUSED) {
1604         int num_blits = 1;
1605         if (subpass->depth_resolve_mode != subpass->stencil_resolve_mode)
1606            num_blits = 2;
1607
1608         for (unsigned i = 0; i < num_blits; i++) {
1609
1610            if (i == 0 && subpass->depth_resolve_mode == VK_RESOLVE_MODE_NONE)
1611               continue;
1612
1613            if (i == 1 && subpass->stencil_resolve_mode == VK_RESOLVE_MODE_NONE)
1614               continue;
1615
1616            struct lvp_image_view *src_imgv = get_attachment(state, src_att.attachment);
1617            struct lvp_image_view *dst_imgv = get_attachment(state, dst_att.attachment);
1618
1619            struct pipe_blit_info info;
1620            memset(&info, 0, sizeof(info));
1621
1622            info.src.resource = src_imgv->image->bo;
1623            info.dst.resource = dst_imgv->image->bo;
1624            info.src.format = src_imgv->pformat;
1625            info.dst.format = dst_imgv->pformat;
1626            info.filter = PIPE_TEX_FILTER_NEAREST;
1627
1628            if (num_blits == 1)
1629               info.mask = PIPE_MASK_ZS;
1630            else if (i == 0)
1631               info.mask = PIPE_MASK_Z;
1632            else
1633               info.mask = PIPE_MASK_S;
1634
1635            if (i == 0 && subpass->depth_resolve_mode == VK_RESOLVE_MODE_SAMPLE_ZERO_BIT)
1636               info.sample0_only = true;
1637            if (i == 1 && subpass->stencil_resolve_mode == VK_RESOLVE_MODE_SAMPLE_ZERO_BIT)
1638               info.sample0_only = true;
1639
1640            info.src.box.x = state->render_area.offset.x;
1641            info.src.box.y = state->render_area.offset.y;
1642            info.src.box.width = state->render_area.extent.width;
1643            info.src.box.height = state->render_area.extent.height;
1644            info.src.box.depth = state->vk_framebuffer->layers;
1645
1646            info.dst.box = info.src.box;
1647
1648            state->pctx->blit(state->pctx, &info);
1649         }
1650      }
1651   }
1652
1653   if (!subpass->has_color_resolve)
1654      return;
1655   for (uint32_t i = 0; i < subpass->color_count; i++) {
1656      struct lvp_subpass_attachment src_att = subpass->color_attachments[i];
1657      struct lvp_subpass_attachment dst_att = subpass->resolve_attachments[i];
1658
1659      if (dst_att.attachment == VK_ATTACHMENT_UNUSED)
1660         continue;
1661
1662      struct lvp_image_view *src_imgv = get_attachment(state, src_att.attachment);
1663      struct lvp_image_view *dst_imgv = get_attachment(state, dst_att.attachment);
1664
1665      struct pipe_blit_info info;
1666      memset(&info, 0, sizeof(info));
1667
1668      info.src.resource = src_imgv->image->bo;
1669      info.dst.resource = dst_imgv->image->bo;
1670      info.src.format = src_imgv->pformat;
1671      info.dst.format = dst_imgv->pformat;
1672      info.filter = PIPE_TEX_FILTER_NEAREST;
1673      info.mask = PIPE_MASK_RGBA;
1674      info.src.box.x = state->render_area.offset.x;
1675      info.src.box.y = state->render_area.offset.y;
1676      info.src.box.width = state->render_area.extent.width;
1677      info.src.box.height = state->render_area.extent.height;
1678      info.src.box.depth = state->vk_framebuffer->layers;
1679
1680      info.dst.box = info.src.box;
1681
1682      info.src.level = src_imgv->subresourceRange.baseMipLevel;
1683      info.dst.level = dst_imgv->subresourceRange.baseMipLevel;
1684
1685      state->pctx->blit(state->pctx, &info);
1686   }
1687}
1688
1689static void begin_render_subpass(struct rendering_state *state,
1690                                 int subpass_idx)
1691{
1692   state->subpass = subpass_idx;
1693
1694   state->framebuffer.nr_cbufs = 0;
1695
1696   const struct lvp_subpass *subpass = &state->pass->subpasses[subpass_idx];
1697   for (unsigned i = 0; i < subpass->color_count; i++) {
1698      struct lvp_subpass_attachment *color_att = &subpass->color_attachments[i];
1699      if (color_att->attachment != VK_ATTACHMENT_UNUSED) {
1700         struct lvp_image_view *imgv = get_attachment(state, color_att->attachment);
1701         add_img_view_surface(state, imgv, state->pass->attachments[color_att->attachment].format, state->framebuffer.width, state->framebuffer.height);
1702         state->framebuffer.cbufs[state->framebuffer.nr_cbufs] = imgv->surface;
1703      } else
1704         state->framebuffer.cbufs[state->framebuffer.nr_cbufs] = NULL;
1705      state->framebuffer.nr_cbufs++;
1706   }
1707
1708   if (subpass->depth_stencil_attachment) {
1709      struct lvp_subpass_attachment *ds_att = subpass->depth_stencil_attachment;
1710
1711      if (ds_att->attachment != VK_ATTACHMENT_UNUSED) {
1712         struct lvp_image_view *imgv = get_attachment(state, ds_att->attachment);
1713         add_img_view_surface(state, imgv, state->pass->attachments[ds_att->attachment].format, state->framebuffer.width, state->framebuffer.height);
1714         state->framebuffer.zsbuf = imgv->surface;
1715      }
1716   }
1717
1718   state->pctx->set_framebuffer_state(state->pctx,
1719                                      &state->framebuffer);
1720
1721   if (subpass_needs_clear(state))
1722      render_subpass_clear_fast(state);
1723}
1724
1725static void begin_render_pass(const VkRenderPassBeginInfo *render_pass_begin,
1726                              struct rendering_state *state)
1727{
1728   LVP_FROM_HANDLE(lvp_render_pass, pass, render_pass_begin->renderPass);
1729   LVP_FROM_HANDLE(lvp_framebuffer, framebuffer, render_pass_begin->framebuffer);
1730   const struct VkRenderPassAttachmentBeginInfo *attachment_info =
1731      vk_find_struct_const(render_pass_begin->pNext,
1732                           RENDER_PASS_ATTACHMENT_BEGIN_INFO);
1733
1734   state->pass = pass;
1735   state->vk_framebuffer = framebuffer;
1736   state->render_area = render_pass_begin->renderArea;
1737
1738   if (attachment_info) {
1739      state->imageless_views = realloc(state->imageless_views, sizeof(*state->imageless_views) * attachment_info->attachmentCount);
1740      for (unsigned i = 0; i < attachment_info->attachmentCount; i++)
1741         state->imageless_views[i] = lvp_image_view_from_handle(attachment_info->pAttachments[i]);
1742   }
1743
1744   state->framebuffer.width = state->vk_framebuffer->width;
1745   state->framebuffer.height = state->vk_framebuffer->height;
1746   state->framebuffer.layers = state->vk_framebuffer->layers;
1747
1748   if (state->num_pending_aspects < state->pass->attachment_count) {
1749      state->pending_clear_aspects = realloc(state->pending_clear_aspects, sizeof(VkImageAspectFlags) * state->pass->attachment_count);
1750      state->cleared_views = realloc(state->cleared_views, sizeof(uint32_t) * state->pass->attachment_count);
1751      state->num_pending_aspects = state->pass->attachment_count;
1752   }
1753
1754   state->attachments = realloc(state->attachments, sizeof(*state->attachments) * pass->attachment_count);
1755   for (unsigned i = 0; i < state->pass->attachment_count; i++) {
1756      struct lvp_render_pass_attachment *att = &pass->attachments[i];
1757      VkImageAspectFlags att_aspects = vk_format_aspects(att->format);
1758      VkImageAspectFlags clear_aspects = 0;
1759      if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
1760         /* color attachment */
1761         if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
1762            clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
1763         }
1764      } else {
1765         /* depthstencil attachment */
1766         if ((att_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
1767             att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
1768            clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
1769            if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
1770                att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_DONT_CARE)
1771               clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
1772         }
1773         if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
1774             att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
1775            clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
1776         }
1777      }
1778      state->attachments[i].pending_clear_aspects = clear_aspects;
1779      if (clear_aspects)
1780         state->attachments[i].clear_value = render_pass_begin->pClearValues[i];
1781
1782      state->pending_clear_aspects[i] = state->attachments[i].pending_clear_aspects;
1783      state->cleared_views[i] = 0;
1784   }
1785   begin_render_subpass(state, 0);
1786}
1787
1788
1789static void handle_begin_render_pass(struct vk_cmd_queue_entry *cmd,
1790                                     struct rendering_state *state)
1791{
1792   begin_render_pass(cmd->u.begin_render_pass.render_pass_begin, state);
1793}
1794
1795static void handle_begin_render_pass2(struct vk_cmd_queue_entry *cmd,
1796                                      struct rendering_state *state)
1797{
1798   begin_render_pass(cmd->u.begin_render_pass2.render_pass_begin, state);
1799}
1800
1801static void handle_end_render_pass2(struct vk_cmd_queue_entry *cmd,
1802                                    struct rendering_state *state)
1803{
1804   state->pctx->flush(state->pctx, NULL, 0);
1805
1806   render_pass_resolve(state);
1807
1808   free(state->attachments);
1809   state->attachments = NULL;
1810   state->pass = NULL;
1811   state->subpass = 0;
1812}
1813
1814static void handle_next_subpass2(struct vk_cmd_queue_entry *cmd,
1815                                struct rendering_state *state)
1816{
1817   state->pctx->flush(state->pctx, NULL, 0);
1818   render_pass_resolve(state);
1819   state->subpass++;
1820   begin_render_subpass(state, state->subpass);
1821}
1822
1823static void handle_draw(struct vk_cmd_queue_entry *cmd,
1824                        struct rendering_state *state)
1825{
1826   const struct lvp_subpass *subpass = &state->pass->subpasses[state->subpass];
1827   struct pipe_draw_start_count_bias draw;
1828
1829   state->info.index_size = 0;
1830   state->info.index.resource = NULL;
1831   state->info.start_instance = cmd->u.draw.first_instance;
1832   state->info.instance_count = cmd->u.draw.instance_count;
1833   state->info.view_mask = subpass->view_mask;
1834
1835   draw.start = cmd->u.draw.first_vertex;
1836   draw.count = cmd->u.draw.vertex_count;
1837
1838   state->pctx->set_patch_vertices(state->pctx, state->patch_vertices);
1839   state->pctx->draw_vbo(state->pctx, &state->info, 0, NULL, &draw, 1);
1840}
1841
1842static void handle_draw_multi(struct vk_cmd_queue_entry *cmd,
1843                              struct rendering_state *state)
1844{
1845   const struct lvp_subpass *subpass = &state->pass->subpasses[state->subpass];
1846   struct pipe_draw_start_count_bias *draws = calloc(cmd->u.draw_multi_ext.draw_count,
1847                                                     sizeof(*draws));
1848
1849   state->info.index_size = 0;
1850   state->info.index.resource = NULL;
1851   state->info.start_instance = cmd->u.draw_multi_ext.first_instance;
1852   state->info.instance_count = cmd->u.draw_multi_ext.instance_count;
1853   state->info.view_mask = subpass->view_mask;
1854   if (cmd->u.draw_multi_ext.draw_count > 1)
1855      state->info.increment_draw_id = true;
1856
1857   for(unsigned i = 0; i < cmd->u.draw_multi_ext.draw_count; i++) {
1858      draws[i].start = cmd->u.draw_multi_ext.vertex_info[i].firstVertex;
1859      draws[i].count = cmd->u.draw_multi_ext.vertex_info[i].vertexCount;
1860      draws[i].index_bias = 0;
1861   }
1862
1863   state->pctx->set_patch_vertices(state->pctx, state->patch_vertices);
1864
1865   if (cmd->u.draw_multi_indexed_ext.draw_count)
1866      state->pctx->draw_vbo(state->pctx, &state->info, 0, NULL, draws, cmd->u.draw_multi_ext.draw_count);
1867
1868   free(draws);
1869}
1870
1871static void set_viewport(unsigned first_viewport, unsigned viewport_count,
1872                         const VkViewport* viewports,
1873                         struct rendering_state *state)
1874{
1875   int i;
1876   unsigned base = 0;
1877   if (first_viewport == UINT32_MAX)
1878      state->num_viewports = viewport_count;
1879   else
1880      base = first_viewport;
1881
1882   for (i = 0; i < viewport_count; i++) {
1883      int idx = i + base;
1884      const VkViewport *vp = &viewports[i];
1885      get_viewport_xform(vp, state->viewports[idx].scale, state->viewports[idx].translate);
1886   }
1887   state->vp_dirty = true;
1888}
1889
1890static void handle_set_viewport(struct vk_cmd_queue_entry *cmd,
1891                                struct rendering_state *state)
1892{
1893   set_viewport(cmd->u.set_viewport.first_viewport,
1894                cmd->u.set_viewport.viewport_count,
1895                cmd->u.set_viewport.viewports,
1896                state);
1897}
1898
1899static void handle_set_viewport_with_count(struct vk_cmd_queue_entry *cmd,
1900                                           struct rendering_state *state)
1901{
1902   set_viewport(UINT32_MAX,
1903                cmd->u.set_viewport_with_count_ext.viewport_count,
1904                cmd->u.set_viewport_with_count_ext.viewports,
1905                state);
1906}
1907
1908static void set_scissor(unsigned first_scissor,
1909                        unsigned scissor_count,
1910                        const VkRect2D *scissors,
1911                        struct rendering_state *state)
1912{
1913   int i;
1914   unsigned base = 0;
1915   if (first_scissor == UINT32_MAX)
1916      state->num_scissors = scissor_count;
1917   else
1918      base = first_scissor;
1919
1920   for (i = 0; i < scissor_count; i++) {
1921      int idx = i + base;
1922      const VkRect2D *ss = &scissors[i];
1923      state->scissors[idx].minx = ss->offset.x;
1924      state->scissors[idx].miny = ss->offset.y;
1925      state->scissors[idx].maxx = ss->offset.x + ss->extent.width;
1926      state->scissors[idx].maxy = ss->offset.y + ss->extent.height;
1927   }
1928   state->scissor_dirty = true;
1929}
1930
1931static void handle_set_scissor(struct vk_cmd_queue_entry *cmd,
1932                               struct rendering_state *state)
1933{
1934   set_scissor(cmd->u.set_scissor.first_scissor,
1935               cmd->u.set_scissor.scissor_count,
1936               cmd->u.set_scissor.scissors,
1937               state);
1938}
1939
1940static void handle_set_scissor_with_count(struct vk_cmd_queue_entry *cmd,
1941                                          struct rendering_state *state)
1942{
1943   set_scissor(UINT32_MAX,
1944               cmd->u.set_scissor_with_count_ext.scissor_count,
1945               cmd->u.set_scissor_with_count_ext.scissors,
1946               state);
1947}
1948
1949static void handle_set_line_width(struct vk_cmd_queue_entry *cmd,
1950                                  struct rendering_state *state)
1951{
1952   state->rs_state.line_width = cmd->u.set_line_width.line_width;
1953   state->rs_dirty = true;
1954}
1955
1956static void handle_set_depth_bias(struct vk_cmd_queue_entry *cmd,
1957                                  struct rendering_state *state)
1958{
1959   state->depth_bias.offset_units = cmd->u.set_depth_bias.depth_bias_constant_factor;
1960   state->depth_bias.offset_scale = cmd->u.set_depth_bias.depth_bias_slope_factor;
1961   state->depth_bias.offset_clamp = cmd->u.set_depth_bias.depth_bias_clamp;
1962   state->rs_dirty = true;
1963}
1964
1965static void handle_set_blend_constants(struct vk_cmd_queue_entry *cmd,
1966                                       struct rendering_state *state)
1967{
1968   memcpy(state->blend_color.color, cmd->u.set_blend_constants.blend_constants, 4 * sizeof(float));
1969   state->blend_color_dirty = true;
1970}
1971
1972static void handle_set_depth_bounds(struct vk_cmd_queue_entry *cmd,
1973                                    struct rendering_state *state)
1974{
1975   state->dsa_dirty |= !DOUBLE_EQ(state->dsa_state.depth_bounds_min, cmd->u.set_depth_bounds.min_depth_bounds);
1976   state->dsa_dirty |= !DOUBLE_EQ(state->dsa_state.depth_bounds_max, cmd->u.set_depth_bounds.max_depth_bounds);
1977   state->dsa_state.depth_bounds_min = cmd->u.set_depth_bounds.min_depth_bounds;
1978   state->dsa_state.depth_bounds_max = cmd->u.set_depth_bounds.max_depth_bounds;
1979}
1980
1981static void handle_set_stencil_compare_mask(struct vk_cmd_queue_entry *cmd,
1982                                            struct rendering_state *state)
1983{
1984   if (cmd->u.set_stencil_compare_mask.face_mask & VK_STENCIL_FACE_FRONT_BIT)
1985      state->dsa_state.stencil[0].valuemask = cmd->u.set_stencil_compare_mask.compare_mask;
1986   if (cmd->u.set_stencil_compare_mask.face_mask & VK_STENCIL_FACE_BACK_BIT)
1987      state->dsa_state.stencil[1].valuemask = cmd->u.set_stencil_compare_mask.compare_mask;
1988   state->dsa_dirty = true;
1989}
1990
1991static void handle_set_stencil_write_mask(struct vk_cmd_queue_entry *cmd,
1992                                          struct rendering_state *state)
1993{
1994   if (cmd->u.set_stencil_write_mask.face_mask & VK_STENCIL_FACE_FRONT_BIT)
1995      state->dsa_state.stencil[0].writemask = cmd->u.set_stencil_write_mask.write_mask;
1996   if (cmd->u.set_stencil_write_mask.face_mask & VK_STENCIL_FACE_BACK_BIT)
1997      state->dsa_state.stencil[1].writemask = cmd->u.set_stencil_write_mask.write_mask;
1998   state->dsa_dirty = true;
1999}
2000
2001static void handle_set_stencil_reference(struct vk_cmd_queue_entry *cmd,
2002                                         struct rendering_state *state)
2003{
2004   if (cmd->u.set_stencil_reference.face_mask & VK_STENCIL_FACE_FRONT_BIT)
2005      state->stencil_ref.ref_value[0] = cmd->u.set_stencil_reference.reference;
2006   if (cmd->u.set_stencil_reference.face_mask & VK_STENCIL_FACE_BACK_BIT)
2007      state->stencil_ref.ref_value[1] = cmd->u.set_stencil_reference.reference;
2008   state->stencil_ref_dirty = true;
2009}
2010
2011static void
2012copy_depth_rect(ubyte * dst,
2013                enum pipe_format dst_format,
2014                unsigned dst_stride,
2015                unsigned dst_x,
2016                unsigned dst_y,
2017                unsigned width,
2018                unsigned height,
2019                const ubyte * src,
2020                enum pipe_format src_format,
2021                int src_stride,
2022                unsigned src_x,
2023                unsigned src_y)
2024{
2025   int src_stride_pos = src_stride < 0 ? -src_stride : src_stride;
2026   int src_blocksize = util_format_get_blocksize(src_format);
2027   int src_blockwidth = util_format_get_blockwidth(src_format);
2028   int src_blockheight = util_format_get_blockheight(src_format);
2029   int dst_blocksize = util_format_get_blocksize(dst_format);
2030   int dst_blockwidth = util_format_get_blockwidth(dst_format);
2031   int dst_blockheight = util_format_get_blockheight(dst_format);
2032
2033   assert(src_blocksize > 0);
2034   assert(src_blockwidth > 0);
2035   assert(src_blockheight > 0);
2036
2037   dst_x /= dst_blockwidth;
2038   dst_y /= dst_blockheight;
2039   width = (width + src_blockwidth - 1)/src_blockwidth;
2040   height = (height + src_blockheight - 1)/src_blockheight;
2041   src_x /= src_blockwidth;
2042   src_y /= src_blockheight;
2043
2044   dst += dst_x * dst_blocksize;
2045   src += src_x * src_blocksize;
2046   dst += dst_y * dst_stride;
2047   src += src_y * src_stride_pos;
2048
2049   if (dst_format == PIPE_FORMAT_S8_UINT) {
2050      if (src_format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) {
2051         util_format_z32_float_s8x24_uint_unpack_s_8uint(dst, dst_stride,
2052                                                         src, src_stride,
2053                                                         width, height);
2054      } else if (src_format == PIPE_FORMAT_Z24_UNORM_S8_UINT) {
2055         util_format_z24_unorm_s8_uint_unpack_s_8uint(dst, dst_stride,
2056                                                      src, src_stride,
2057                                                      width, height);
2058      } else {
2059      }
2060   } else if (dst_format == PIPE_FORMAT_Z24X8_UNORM) {
2061      util_format_z24_unorm_s8_uint_unpack_z24(dst, dst_stride,
2062                                               src, src_stride,
2063                                               width, height);
2064   } else if (dst_format == PIPE_FORMAT_Z32_FLOAT) {
2065      if (src_format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) {
2066         util_format_z32_float_s8x24_uint_unpack_z_float((float *)dst, dst_stride,
2067                                                         src, src_stride,
2068                                                         width, height);
2069      }
2070   } else if (dst_format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) {
2071      if (src_format == PIPE_FORMAT_Z32_FLOAT)
2072         util_format_z32_float_s8x24_uint_pack_z_float(dst, dst_stride,
2073                                                       (float *)src, src_stride,
2074                                                       width, height);
2075      else if (src_format == PIPE_FORMAT_S8_UINT)
2076         util_format_z32_float_s8x24_uint_pack_s_8uint(dst, dst_stride,
2077                                                       src, src_stride,
2078                                                       width, height);
2079   } else if (dst_format == PIPE_FORMAT_Z24_UNORM_S8_UINT) {
2080      if (src_format == PIPE_FORMAT_S8_UINT)
2081         util_format_z24_unorm_s8_uint_pack_s_8uint(dst, dst_stride,
2082                                                    src, src_stride,
2083                                                    width, height);
2084      if (src_format == PIPE_FORMAT_Z24X8_UNORM)
2085         util_format_z24_unorm_s8_uint_pack_z24(dst, dst_stride,
2086                                                src, src_stride,
2087                                                width, height);
2088   }
2089}
2090
2091static void
2092copy_depth_box(ubyte *dst,
2093               enum pipe_format dst_format,
2094               unsigned dst_stride, unsigned dst_slice_stride,
2095               unsigned dst_x, unsigned dst_y, unsigned dst_z,
2096               unsigned width, unsigned height, unsigned depth,
2097               const ubyte * src,
2098               enum pipe_format src_format,
2099               int src_stride, unsigned src_slice_stride,
2100               unsigned src_x, unsigned src_y, unsigned src_z)
2101{
2102   unsigned z;
2103   dst += dst_z * dst_slice_stride;
2104   src += src_z * src_slice_stride;
2105   for (z = 0; z < depth; ++z) {
2106      copy_depth_rect(dst,
2107                      dst_format,
2108                      dst_stride,
2109                      dst_x, dst_y,
2110                      width, height,
2111                      src,
2112                      src_format,
2113                      src_stride,
2114                      src_x, src_y);
2115
2116      dst += dst_slice_stride;
2117      src += src_slice_stride;
2118   }
2119}
2120
2121static void handle_copy_image_to_buffer2_khr(struct vk_cmd_queue_entry *cmd,
2122                                             struct rendering_state *state)
2123{
2124   int i;
2125   struct VkCopyImageToBufferInfo2KHR *copycmd = cmd->u.copy_image_to_buffer2_khr.copy_image_to_buffer_info;
2126   LVP_FROM_HANDLE(lvp_image, src_image, copycmd->srcImage);
2127   struct pipe_box box, dbox;
2128   struct pipe_transfer *src_t, *dst_t;
2129   ubyte *src_data, *dst_data;
2130
2131   state->pctx->flush(state->pctx, NULL, 0);
2132
2133   for (i = 0; i < copycmd->regionCount; i++) {
2134
2135      box.x = copycmd->pRegions[i].imageOffset.x;
2136      box.y = copycmd->pRegions[i].imageOffset.y;
2137      box.z = src_image->vk.image_type == VK_IMAGE_TYPE_3D ? copycmd->pRegions[i].imageOffset.z : copycmd->pRegions[i].imageSubresource.baseArrayLayer;
2138      box.width = copycmd->pRegions[i].imageExtent.width;
2139      box.height = copycmd->pRegions[i].imageExtent.height;
2140      box.depth = src_image->vk.image_type == VK_IMAGE_TYPE_3D ? copycmd->pRegions[i].imageExtent.depth : copycmd->pRegions[i].imageSubresource.layerCount;
2141
2142      src_data = state->pctx->texture_map(state->pctx,
2143                                           src_image->bo,
2144                                           copycmd->pRegions[i].imageSubresource.mipLevel,
2145                                           PIPE_MAP_READ,
2146                                           &box,
2147                                           &src_t);
2148
2149      dbox.x = copycmd->pRegions[i].bufferOffset;
2150      dbox.y = 0;
2151      dbox.z = 0;
2152      dbox.width = lvp_buffer_from_handle(copycmd->dstBuffer)->bo->width0;
2153      dbox.height = 1;
2154      dbox.depth = 1;
2155      dst_data = state->pctx->buffer_map(state->pctx,
2156                                           lvp_buffer_from_handle(copycmd->dstBuffer)->bo,
2157                                           0,
2158                                           PIPE_MAP_WRITE,
2159                                           &dbox,
2160                                           &dst_t);
2161
2162      enum pipe_format src_format = src_image->bo->format;
2163      enum pipe_format dst_format = src_format;
2164      if (util_format_is_depth_or_stencil(src_format)) {
2165         if (copycmd->pRegions[i].imageSubresource.aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT) {
2166            dst_format = util_format_get_depth_only(src_format);
2167         } else if (copycmd->pRegions[i].imageSubresource.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT) {
2168            dst_format = PIPE_FORMAT_S8_UINT;
2169         }
2170      }
2171
2172      unsigned buffer_row_len = util_format_get_stride(dst_format, copycmd->pRegions[i].bufferRowLength);
2173      if (buffer_row_len == 0)
2174         buffer_row_len = util_format_get_stride(dst_format, copycmd->pRegions[i].imageExtent.width);
2175      unsigned buffer_image_height = copycmd->pRegions[i].bufferImageHeight;
2176      if (buffer_image_height == 0)
2177         buffer_image_height = copycmd->pRegions[i].imageExtent.height;
2178
2179      unsigned img_stride = util_format_get_2d_size(dst_format, buffer_row_len, buffer_image_height);
2180      if (src_format != dst_format) {
2181         copy_depth_box(dst_data, dst_format,
2182                        buffer_row_len, img_stride,
2183                        0, 0, 0,
2184                        copycmd->pRegions[i].imageExtent.width,
2185                        copycmd->pRegions[i].imageExtent.height,
2186                        box.depth,
2187                        src_data, src_format, src_t->stride, src_t->layer_stride, 0, 0, 0);
2188      } else {
2189         util_copy_box((ubyte *)dst_data, src_format,
2190                       buffer_row_len, img_stride,
2191                       0, 0, 0,
2192                       copycmd->pRegions[i].imageExtent.width,
2193                       copycmd->pRegions[i].imageExtent.height,
2194                       box.depth,
2195                       src_data, src_t->stride, src_t->layer_stride, 0, 0, 0);
2196      }
2197      state->pctx->texture_unmap(state->pctx, src_t);
2198      state->pctx->buffer_unmap(state->pctx, dst_t);
2199   }
2200}
2201
2202static void handle_copy_buffer_to_image(struct vk_cmd_queue_entry *cmd,
2203                                        struct rendering_state *state)
2204{
2205   int i;
2206   struct VkCopyBufferToImageInfo2KHR *copycmd = cmd->u.copy_buffer_to_image2_khr.copy_buffer_to_image_info;
2207   LVP_FROM_HANDLE(lvp_image, dst_image, copycmd->dstImage);
2208   struct pipe_box box, sbox;
2209   struct pipe_transfer *src_t, *dst_t;
2210   void *src_data, *dst_data;
2211
2212   state->pctx->flush(state->pctx, NULL, 0);
2213
2214   for (i = 0; i < copycmd->regionCount; i++) {
2215
2216      sbox.x = copycmd->pRegions[i].bufferOffset;
2217      sbox.y = 0;
2218      sbox.z = 0;
2219      sbox.width = lvp_buffer_from_handle(copycmd->srcBuffer)->bo->width0;
2220      sbox.height = 1;
2221      sbox.depth = 1;
2222      src_data = state->pctx->buffer_map(state->pctx,
2223                                           lvp_buffer_from_handle(copycmd->srcBuffer)->bo,
2224                                           0,
2225                                           PIPE_MAP_READ,
2226                                           &sbox,
2227                                           &src_t);
2228
2229
2230      box.x = copycmd->pRegions[i].imageOffset.x;
2231      box.y = copycmd->pRegions[i].imageOffset.y;
2232      box.z = dst_image->vk.image_type == VK_IMAGE_TYPE_3D ? copycmd->pRegions[i].imageOffset.z : copycmd->pRegions[i].imageSubresource.baseArrayLayer;
2233      box.width = copycmd->pRegions[i].imageExtent.width;
2234      box.height = copycmd->pRegions[i].imageExtent.height;
2235      box.depth = dst_image->vk.image_type == VK_IMAGE_TYPE_3D ? copycmd->pRegions[i].imageExtent.depth : copycmd->pRegions[i].imageSubresource.layerCount;
2236
2237      dst_data = state->pctx->texture_map(state->pctx,
2238                                           dst_image->bo,
2239                                           copycmd->pRegions[i].imageSubresource.mipLevel,
2240                                           PIPE_MAP_WRITE,
2241                                           &box,
2242                                           &dst_t);
2243
2244      enum pipe_format dst_format = dst_image->bo->format;
2245      enum pipe_format src_format = dst_format;
2246      if (util_format_is_depth_or_stencil(dst_format)) {
2247         if (copycmd->pRegions[i].imageSubresource.aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT) {
2248            src_format = util_format_get_depth_only(dst_image->bo->format);
2249         } else if (copycmd->pRegions[i].imageSubresource.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT) {
2250            src_format = PIPE_FORMAT_S8_UINT;
2251         }
2252      }
2253
2254      unsigned buffer_row_len = util_format_get_stride(src_format, copycmd->pRegions[i].bufferRowLength);
2255      if (buffer_row_len == 0)
2256         buffer_row_len = util_format_get_stride(src_format, copycmd->pRegions[i].imageExtent.width);
2257      unsigned buffer_image_height = copycmd->pRegions[i].bufferImageHeight;
2258      if (buffer_image_height == 0)
2259         buffer_image_height = copycmd->pRegions[i].imageExtent.height;
2260
2261      unsigned img_stride = util_format_get_2d_size(src_format, buffer_row_len, buffer_image_height);
2262      if (src_format != dst_format) {
2263         copy_depth_box(dst_data, dst_format,
2264                        dst_t->stride, dst_t->layer_stride,
2265                        0, 0, 0,
2266                        copycmd->pRegions[i].imageExtent.width,
2267                        copycmd->pRegions[i].imageExtent.height,
2268                        box.depth,
2269                        src_data, src_format,
2270                        buffer_row_len, img_stride, 0, 0, 0);
2271      } else {
2272         util_copy_box(dst_data, dst_format,
2273                       dst_t->stride, dst_t->layer_stride,
2274                       0, 0, 0,
2275                       copycmd->pRegions[i].imageExtent.width,
2276                       copycmd->pRegions[i].imageExtent.height,
2277                       box.depth,
2278                       src_data,
2279                       buffer_row_len, img_stride, 0, 0, 0);
2280      }
2281      state->pctx->buffer_unmap(state->pctx, src_t);
2282      state->pctx->texture_unmap(state->pctx, dst_t);
2283   }
2284}
2285
2286static void handle_copy_image(struct vk_cmd_queue_entry *cmd,
2287                              struct rendering_state *state)
2288{
2289   int i;
2290   struct VkCopyImageInfo2KHR *copycmd = cmd->u.copy_image2_khr.copy_image_info;
2291   LVP_FROM_HANDLE(lvp_image, src_image, copycmd->srcImage);
2292   LVP_FROM_HANDLE(lvp_image, dst_image, copycmd->dstImage);
2293
2294   state->pctx->flush(state->pctx, NULL, 0);
2295
2296   for (i = 0; i < copycmd->regionCount; i++) {
2297      struct pipe_box src_box;
2298      src_box.x = copycmd->pRegions[i].srcOffset.x;
2299      src_box.y = copycmd->pRegions[i].srcOffset.y;
2300      src_box.width = copycmd->pRegions[i].extent.width;
2301      src_box.height = copycmd->pRegions[i].extent.height;
2302      if (src_image->bo->target == PIPE_TEXTURE_3D) {
2303         src_box.depth = copycmd->pRegions[i].extent.depth;
2304         src_box.z = copycmd->pRegions[i].srcOffset.z;
2305      } else {
2306         src_box.depth = copycmd->pRegions[i].srcSubresource.layerCount;
2307         src_box.z = copycmd->pRegions[i].srcSubresource.baseArrayLayer;
2308      }
2309
2310      unsigned dstz = dst_image->bo->target == PIPE_TEXTURE_3D ?
2311                      copycmd->pRegions[i].dstOffset.z :
2312                      copycmd->pRegions[i].dstSubresource.baseArrayLayer;
2313      state->pctx->resource_copy_region(state->pctx, dst_image->bo,
2314                                        copycmd->pRegions[i].dstSubresource.mipLevel,
2315                                        copycmd->pRegions[i].dstOffset.x,
2316                                        copycmd->pRegions[i].dstOffset.y,
2317                                        dstz,
2318                                        src_image->bo,
2319                                        copycmd->pRegions[i].srcSubresource.mipLevel,
2320                                        &src_box);
2321   }
2322}
2323
2324static void handle_copy_buffer(struct vk_cmd_queue_entry *cmd,
2325                               struct rendering_state *state)
2326{
2327   int i;
2328   struct VkCopyBufferInfo2KHR *copycmd = cmd->u.copy_buffer2_khr.copy_buffer_info;
2329
2330   for (i = 0; i < copycmd->regionCount; i++) {
2331      struct pipe_box box = { 0 };
2332      u_box_1d(copycmd->pRegions[i].srcOffset, copycmd->pRegions[i].size, &box);
2333      state->pctx->resource_copy_region(state->pctx, lvp_buffer_from_handle(copycmd->dstBuffer)->bo, 0,
2334                                        copycmd->pRegions[i].dstOffset, 0, 0,
2335                                        lvp_buffer_from_handle(copycmd->srcBuffer)->bo, 0, &box);
2336   }
2337}
2338
2339static void handle_blit_image(struct vk_cmd_queue_entry *cmd,
2340                              struct rendering_state *state)
2341{
2342   int i;
2343   struct VkBlitImageInfo2KHR *blitcmd = cmd->u.blit_image2_khr.blit_image_info;
2344   LVP_FROM_HANDLE(lvp_image, src_image, blitcmd->srcImage);
2345   LVP_FROM_HANDLE(lvp_image, dst_image, blitcmd->dstImage);
2346   struct pipe_blit_info info;
2347
2348   memset(&info, 0, sizeof(info));
2349
2350   state->pctx->flush(state->pctx, NULL, 0);
2351   info.src.resource = src_image->bo;
2352   info.dst.resource = dst_image->bo;
2353   info.src.format = src_image->bo->format;
2354   info.dst.format = dst_image->bo->format;
2355   info.mask = util_format_is_depth_or_stencil(info.src.format) ? PIPE_MASK_ZS : PIPE_MASK_RGBA;
2356   info.filter = blitcmd->filter == VK_FILTER_NEAREST ? PIPE_TEX_FILTER_NEAREST : PIPE_TEX_FILTER_LINEAR;
2357   for (i = 0; i < blitcmd->regionCount; i++) {
2358      int srcX0, srcX1, srcY0, srcY1, srcZ0, srcZ1;
2359      unsigned dstX0, dstX1, dstY0, dstY1, dstZ0, dstZ1;
2360
2361      srcX0 = blitcmd->pRegions[i].srcOffsets[0].x;
2362      srcX1 = blitcmd->pRegions[i].srcOffsets[1].x;
2363      srcY0 = blitcmd->pRegions[i].srcOffsets[0].y;
2364      srcY1 = blitcmd->pRegions[i].srcOffsets[1].y;
2365      srcZ0 = blitcmd->pRegions[i].srcOffsets[0].z;
2366      srcZ1 = blitcmd->pRegions[i].srcOffsets[1].z;
2367
2368      dstX0 = blitcmd->pRegions[i].dstOffsets[0].x;
2369      dstX1 = blitcmd->pRegions[i].dstOffsets[1].x;
2370      dstY0 = blitcmd->pRegions[i].dstOffsets[0].y;
2371      dstY1 = blitcmd->pRegions[i].dstOffsets[1].y;
2372      dstZ0 = blitcmd->pRegions[i].dstOffsets[0].z;
2373      dstZ1 = blitcmd->pRegions[i].dstOffsets[1].z;
2374
2375      if (dstX0 < dstX1) {
2376         info.dst.box.x = dstX0;
2377         info.src.box.x = srcX0;
2378         info.dst.box.width = dstX1 - dstX0;
2379         info.src.box.width = srcX1 - srcX0;
2380      } else {
2381         info.dst.box.x = dstX1;
2382         info.src.box.x = srcX1;
2383         info.dst.box.width = dstX0 - dstX1;
2384         info.src.box.width = srcX0 - srcX1;
2385      }
2386
2387      if (dstY0 < dstY1) {
2388         info.dst.box.y = dstY0;
2389         info.src.box.y = srcY0;
2390         info.dst.box.height = dstY1 - dstY0;
2391         info.src.box.height = srcY1 - srcY0;
2392      } else {
2393         info.dst.box.y = dstY1;
2394         info.src.box.y = srcY1;
2395         info.dst.box.height = dstY0 - dstY1;
2396         info.src.box.height = srcY0 - srcY1;
2397      }
2398
2399      assert_subresource_layers(info.src.resource, &blitcmd->pRegions[i].srcSubresource, blitcmd->pRegions[i].srcOffsets);
2400      assert_subresource_layers(info.dst.resource, &blitcmd->pRegions[i].dstSubresource, blitcmd->pRegions[i].dstOffsets);
2401      if (src_image->bo->target == PIPE_TEXTURE_3D) {
2402         if (dstZ0 < dstZ1) {
2403            info.dst.box.z = dstZ0;
2404            info.src.box.z = srcZ0;
2405            info.dst.box.depth = dstZ1 - dstZ0;
2406            info.src.box.depth = srcZ1 - srcZ0;
2407         } else {
2408            info.dst.box.z = dstZ1;
2409            info.src.box.z = srcZ1;
2410            info.dst.box.depth = dstZ0 - dstZ1;
2411            info.src.box.depth = srcZ0 - srcZ1;
2412         }
2413      } else {
2414         info.src.box.z = blitcmd->pRegions[i].srcSubresource.baseArrayLayer;
2415         info.dst.box.z = blitcmd->pRegions[i].dstSubresource.baseArrayLayer;
2416         info.src.box.depth = blitcmd->pRegions[i].srcSubresource.layerCount;
2417         info.dst.box.depth = blitcmd->pRegions[i].dstSubresource.layerCount;
2418      }
2419
2420      info.src.level = blitcmd->pRegions[i].srcSubresource.mipLevel;
2421      info.dst.level = blitcmd->pRegions[i].dstSubresource.mipLevel;
2422      state->pctx->blit(state->pctx, &info);
2423   }
2424}
2425
2426static void handle_fill_buffer(struct vk_cmd_queue_entry *cmd,
2427                               struct rendering_state *state)
2428{
2429   struct vk_cmd_fill_buffer *fillcmd = &cmd->u.fill_buffer;
2430   uint32_t size = fillcmd->size;
2431
2432   if (fillcmd->size == VK_WHOLE_SIZE) {
2433      size = lvp_buffer_from_handle(fillcmd->dst_buffer)->bo->width0 - fillcmd->dst_offset;
2434      size = ROUND_DOWN_TO(size, 4);
2435   }
2436
2437   state->pctx->clear_buffer(state->pctx,
2438                             lvp_buffer_from_handle(fillcmd->dst_buffer)->bo,
2439                             fillcmd->dst_offset,
2440                             size,
2441                             &fillcmd->data,
2442                             4);
2443}
2444
2445static void handle_update_buffer(struct vk_cmd_queue_entry *cmd,
2446                                 struct rendering_state *state)
2447{
2448   struct vk_cmd_update_buffer *updcmd = &cmd->u.update_buffer;
2449   uint32_t *dst;
2450   struct pipe_transfer *dst_t;
2451   struct pipe_box box;
2452
2453   u_box_1d(updcmd->dst_offset, updcmd->data_size, &box);
2454   dst = state->pctx->buffer_map(state->pctx,
2455                                   lvp_buffer_from_handle(updcmd->dst_buffer)->bo,
2456                                   0,
2457                                   PIPE_MAP_WRITE,
2458                                   &box,
2459                                   &dst_t);
2460
2461   memcpy(dst, updcmd->data, updcmd->data_size);
2462   state->pctx->buffer_unmap(state->pctx, dst_t);
2463}
2464
2465static void handle_draw_indexed(struct vk_cmd_queue_entry *cmd,
2466                                struct rendering_state *state)
2467{
2468   const struct lvp_subpass *subpass = &state->pass->subpasses[state->subpass];
2469   struct pipe_draw_start_count_bias draw = {0};
2470
2471   state->info.index_bounds_valid = false;
2472   state->info.min_index = 0;
2473   state->info.max_index = ~0;
2474   state->info.index_size = state->index_size;
2475   state->info.index.resource = state->index_buffer;
2476   state->info.start_instance = cmd->u.draw_indexed.first_instance;
2477   state->info.instance_count = cmd->u.draw_indexed.instance_count;
2478   state->info.view_mask = subpass->view_mask;
2479
2480   if (state->info.primitive_restart)
2481      state->info.restart_index = util_prim_restart_index_from_size(state->info.index_size);
2482
2483   draw.count = cmd->u.draw_indexed.index_count;
2484   draw.index_bias = cmd->u.draw_indexed.vertex_offset;
2485   /* TODO: avoid calculating multiple times if cmdbuf is submitted again */
2486   draw.start = (state->index_offset / state->index_size) + cmd->u.draw_indexed.first_index;
2487
2488   state->info.index_bias_varies = !cmd->u.draw_indexed.vertex_offset;
2489   state->pctx->set_patch_vertices(state->pctx, state->patch_vertices);
2490   state->pctx->draw_vbo(state->pctx, &state->info, 0, NULL, &draw, 1);
2491}
2492
2493static void handle_draw_multi_indexed(struct vk_cmd_queue_entry *cmd,
2494                                      struct rendering_state *state)
2495{
2496   const struct lvp_subpass *subpass = &state->pass->subpasses[state->subpass];
2497   struct pipe_draw_start_count_bias *draws = calloc(cmd->u.draw_multi_indexed_ext.draw_count,
2498                                                     sizeof(*draws));
2499
2500   state->info.index_bounds_valid = false;
2501   state->info.min_index = 0;
2502   state->info.max_index = ~0;
2503   state->info.index_size = state->index_size;
2504   state->info.index.resource = state->index_buffer;
2505   state->info.start_instance = cmd->u.draw_multi_indexed_ext.first_instance;
2506   state->info.instance_count = cmd->u.draw_multi_indexed_ext.instance_count;
2507   state->info.view_mask = subpass->view_mask;
2508   if (cmd->u.draw_multi_indexed_ext.draw_count > 1)
2509      state->info.increment_draw_id = true;
2510
2511   if (state->info.primitive_restart)
2512      state->info.restart_index = util_prim_restart_index_from_size(state->info.index_size);
2513
2514   unsigned size = cmd->u.draw_multi_indexed_ext.draw_count * sizeof(struct pipe_draw_start_count_bias);
2515   memcpy(draws, cmd->u.draw_multi_indexed_ext.index_info, size);
2516
2517   /* only the first member is read if index_bias_varies is true */
2518   if (cmd->u.draw_multi_indexed_ext.draw_count &&
2519       cmd->u.draw_multi_indexed_ext.vertex_offset)
2520      draws[0].index_bias = *cmd->u.draw_multi_indexed_ext.vertex_offset;
2521
2522   /* TODO: avoid calculating multiple times if cmdbuf is submitted again */
2523   for (unsigned i = 0; i < cmd->u.draw_multi_indexed_ext.draw_count; i++)
2524      draws[i].start = (state->index_offset / state->index_size) + draws[i].start;
2525
2526   state->info.index_bias_varies = !cmd->u.draw_multi_indexed_ext.vertex_offset;
2527   state->pctx->set_patch_vertices(state->pctx, state->patch_vertices);
2528
2529   if (cmd->u.draw_multi_indexed_ext.draw_count)
2530      state->pctx->draw_vbo(state->pctx, &state->info, 0, NULL, draws, cmd->u.draw_multi_indexed_ext.draw_count);
2531
2532   free(draws);
2533}
2534
2535static void handle_draw_indirect(struct vk_cmd_queue_entry *cmd,
2536                                 struct rendering_state *state, bool indexed)
2537{
2538   const struct lvp_subpass *subpass = &state->pass->subpasses[state->subpass];
2539   struct pipe_draw_start_count_bias draw = {0};
2540   if (indexed) {
2541      state->info.index_bounds_valid = false;
2542      state->info.index_size = state->index_size;
2543      state->info.index.resource = state->index_buffer;
2544      state->info.max_index = ~0;
2545      if (state->info.primitive_restart)
2546         state->info.restart_index = util_prim_restart_index_from_size(state->info.index_size);
2547   } else
2548      state->info.index_size = 0;
2549   state->indirect_info.offset = cmd->u.draw_indirect.offset;
2550   state->indirect_info.stride = cmd->u.draw_indirect.stride;
2551   state->indirect_info.draw_count = cmd->u.draw_indirect.draw_count;
2552   state->indirect_info.buffer = lvp_buffer_from_handle(cmd->u.draw_indirect.buffer)->bo;
2553   state->info.view_mask = subpass->view_mask;
2554
2555   state->pctx->set_patch_vertices(state->pctx, state->patch_vertices);
2556   state->pctx->draw_vbo(state->pctx, &state->info, 0, &state->indirect_info, &draw, 1);
2557}
2558
2559static void handle_index_buffer(struct vk_cmd_queue_entry *cmd,
2560                                struct rendering_state *state)
2561{
2562   struct vk_cmd_bind_index_buffer *ib = &cmd->u.bind_index_buffer;
2563   switch (ib->index_type) {
2564   case VK_INDEX_TYPE_UINT8_EXT:
2565      state->index_size = 1;
2566      break;
2567   case VK_INDEX_TYPE_UINT16:
2568      state->index_size = 2;
2569      break;
2570   case VK_INDEX_TYPE_UINT32:
2571      state->index_size = 4;
2572      break;
2573   default:
2574      break;
2575   }
2576   state->index_offset = ib->offset;
2577   if (ib->buffer)
2578      state->index_buffer = lvp_buffer_from_handle(ib->buffer)->bo;
2579   else
2580      state->index_buffer = NULL;
2581
2582   state->ib_dirty = true;
2583}
2584
2585static void handle_dispatch(struct vk_cmd_queue_entry *cmd,
2586                            struct rendering_state *state)
2587{
2588   state->dispatch_info.grid[0] = cmd->u.dispatch.group_count_x;
2589   state->dispatch_info.grid[1] = cmd->u.dispatch.group_count_y;
2590   state->dispatch_info.grid[2] = cmd->u.dispatch.group_count_z;
2591   state->dispatch_info.grid_base[0] = 0;
2592   state->dispatch_info.grid_base[1] = 0;
2593   state->dispatch_info.grid_base[2] = 0;
2594   state->dispatch_info.indirect = NULL;
2595   state->pctx->launch_grid(state->pctx, &state->dispatch_info);
2596}
2597
2598static void handle_dispatch_base(struct vk_cmd_queue_entry *cmd,
2599                                 struct rendering_state *state)
2600{
2601   state->dispatch_info.grid[0] = cmd->u.dispatch_base.group_count_x;
2602   state->dispatch_info.grid[1] = cmd->u.dispatch_base.group_count_y;
2603   state->dispatch_info.grid[2] = cmd->u.dispatch_base.group_count_z;
2604   state->dispatch_info.grid_base[0] = cmd->u.dispatch_base.base_group_x;
2605   state->dispatch_info.grid_base[1] = cmd->u.dispatch_base.base_group_y;
2606   state->dispatch_info.grid_base[2] = cmd->u.dispatch_base.base_group_z;
2607   state->dispatch_info.indirect = NULL;
2608   state->pctx->launch_grid(state->pctx, &state->dispatch_info);
2609}
2610
2611static void handle_dispatch_indirect(struct vk_cmd_queue_entry *cmd,
2612                                     struct rendering_state *state)
2613{
2614   state->dispatch_info.indirect = lvp_buffer_from_handle(cmd->u.dispatch_indirect.buffer)->bo;
2615   state->dispatch_info.indirect_offset = cmd->u.dispatch_indirect.offset;
2616   state->pctx->launch_grid(state->pctx, &state->dispatch_info);
2617}
2618
2619static void handle_push_constants(struct vk_cmd_queue_entry *cmd,
2620                                  struct rendering_state *state)
2621{
2622   memcpy(state->push_constants + cmd->u.push_constants.offset, cmd->u.push_constants.values, cmd->u.push_constants.size);
2623
2624   state->pc_buffer[PIPE_SHADER_VERTEX].buffer_size = 128 * 4;
2625   state->pc_buffer[PIPE_SHADER_VERTEX].buffer_offset = 0;
2626   state->pc_buffer[PIPE_SHADER_VERTEX].user_buffer = state->push_constants;
2627   state->pcbuf_dirty[PIPE_SHADER_VERTEX] = true;
2628   state->pc_buffer[PIPE_SHADER_FRAGMENT].buffer_size = 128 * 4;
2629   state->pc_buffer[PIPE_SHADER_FRAGMENT].buffer_offset = 0;
2630   state->pc_buffer[PIPE_SHADER_FRAGMENT].user_buffer = state->push_constants;
2631   state->pcbuf_dirty[PIPE_SHADER_FRAGMENT] = true;
2632   state->pc_buffer[PIPE_SHADER_GEOMETRY].buffer_size = 128 * 4;
2633   state->pc_buffer[PIPE_SHADER_GEOMETRY].buffer_offset = 0;
2634   state->pc_buffer[PIPE_SHADER_GEOMETRY].user_buffer = state->push_constants;
2635   state->pcbuf_dirty[PIPE_SHADER_GEOMETRY] = true;
2636   state->pc_buffer[PIPE_SHADER_TESS_CTRL].buffer_size = 128 * 4;
2637   state->pc_buffer[PIPE_SHADER_TESS_CTRL].buffer_offset = 0;
2638   state->pc_buffer[PIPE_SHADER_TESS_CTRL].user_buffer = state->push_constants;
2639   state->pcbuf_dirty[PIPE_SHADER_TESS_CTRL] = true;
2640   state->pc_buffer[PIPE_SHADER_TESS_EVAL].buffer_size = 128 * 4;
2641   state->pc_buffer[PIPE_SHADER_TESS_EVAL].buffer_offset = 0;
2642   state->pc_buffer[PIPE_SHADER_TESS_EVAL].user_buffer = state->push_constants;
2643   state->pcbuf_dirty[PIPE_SHADER_TESS_EVAL] = true;
2644   state->pc_buffer[PIPE_SHADER_COMPUTE].buffer_size = 128 * 4;
2645   state->pc_buffer[PIPE_SHADER_COMPUTE].buffer_offset = 0;
2646   state->pc_buffer[PIPE_SHADER_COMPUTE].user_buffer = state->push_constants;
2647   state->pcbuf_dirty[PIPE_SHADER_COMPUTE] = true;
2648}
2649
2650static void lvp_execute_cmd_buffer(struct lvp_cmd_buffer *cmd_buffer,
2651                                   struct rendering_state *state);
2652
2653static void handle_execute_commands(struct vk_cmd_queue_entry *cmd,
2654                                    struct rendering_state *state)
2655{
2656   for (unsigned i = 0; i < cmd->u.execute_commands.command_buffer_count; i++) {
2657      LVP_FROM_HANDLE(lvp_cmd_buffer, secondary_buf, cmd->u.execute_commands.command_buffers[i]);
2658      lvp_execute_cmd_buffer(secondary_buf, state);
2659   }
2660}
2661
2662static void handle_event_set(struct vk_cmd_queue_entry *cmd,
2663                             struct rendering_state *state)
2664{
2665   LVP_FROM_HANDLE(lvp_event, event, cmd->u.set_event.event);
2666
2667   if (cmd->u.reset_event.stage_mask == VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT)
2668      state->pctx->flush(state->pctx, NULL, 0);
2669   event->event_storage = 1;
2670}
2671
2672static void handle_event_reset(struct vk_cmd_queue_entry *cmd,
2673                               struct rendering_state *state)
2674{
2675   LVP_FROM_HANDLE(lvp_event, event, cmd->u.reset_event.event);
2676
2677   if (cmd->u.reset_event.stage_mask == VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT)
2678      state->pctx->flush(state->pctx, NULL, 0);
2679   event->event_storage = 0;
2680}
2681
2682static void handle_wait_events(struct vk_cmd_queue_entry *cmd,
2683                               struct rendering_state *state)
2684{
2685   for (unsigned i = 0; i < cmd->u.wait_events.event_count; i++) {
2686      LVP_FROM_HANDLE(lvp_event, event, cmd->u.wait_events.events[i]);
2687
2688      while (event->event_storage != true);
2689   }
2690}
2691
2692static void handle_pipeline_barrier(struct vk_cmd_queue_entry *cmd,
2693                                    struct rendering_state *state)
2694{
2695   /* why hello nail, I'm a hammer. - TODO */
2696   state->pctx->flush(state->pctx, NULL, 0);
2697}
2698
2699static void handle_begin_query(struct vk_cmd_queue_entry *cmd,
2700                               struct rendering_state *state)
2701{
2702   struct vk_cmd_begin_query *qcmd = &cmd->u.begin_query;
2703   LVP_FROM_HANDLE(lvp_query_pool, pool, qcmd->query_pool);
2704
2705   if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
2706       pool->pipeline_stats & VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT)
2707      emit_compute_state(state);
2708
2709   emit_state(state);
2710
2711   if (!pool->queries[qcmd->query]) {
2712      enum pipe_query_type qtype = pool->base_type;
2713      pool->queries[qcmd->query] = state->pctx->create_query(state->pctx,
2714                                                             qtype, 0);
2715   }
2716
2717   state->pctx->begin_query(state->pctx, pool->queries[qcmd->query]);
2718}
2719
2720static void handle_end_query(struct vk_cmd_queue_entry *cmd,
2721                             struct rendering_state *state)
2722{
2723   struct vk_cmd_end_query *qcmd = &cmd->u.end_query;
2724   LVP_FROM_HANDLE(lvp_query_pool, pool, qcmd->query_pool);
2725   assert(pool->queries[qcmd->query]);
2726
2727   state->pctx->end_query(state->pctx, pool->queries[qcmd->query]);
2728}
2729
2730
2731static void handle_begin_query_indexed_ext(struct vk_cmd_queue_entry *cmd,
2732                                           struct rendering_state *state)
2733{
2734   struct vk_cmd_begin_query_indexed_ext *qcmd = &cmd->u.begin_query_indexed_ext;
2735   LVP_FROM_HANDLE(lvp_query_pool, pool, qcmd->query_pool);
2736
2737   if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
2738       pool->pipeline_stats & VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT)
2739      emit_compute_state(state);
2740
2741   emit_state(state);
2742
2743   if (!pool->queries[qcmd->query]) {
2744      enum pipe_query_type qtype = pool->base_type;
2745      pool->queries[qcmd->query] = state->pctx->create_query(state->pctx,
2746                                                             qtype, qcmd->index);
2747   }
2748
2749   state->pctx->begin_query(state->pctx, pool->queries[qcmd->query]);
2750}
2751
2752static void handle_end_query_indexed_ext(struct vk_cmd_queue_entry *cmd,
2753                                         struct rendering_state *state)
2754{
2755   struct vk_cmd_end_query_indexed_ext *qcmd = &cmd->u.end_query_indexed_ext;
2756   LVP_FROM_HANDLE(lvp_query_pool, pool, qcmd->query_pool);
2757   assert(pool->queries[qcmd->query]);
2758
2759   state->pctx->end_query(state->pctx, pool->queries[qcmd->query]);
2760}
2761
2762static void handle_reset_query_pool(struct vk_cmd_queue_entry *cmd,
2763                                    struct rendering_state *state)
2764{
2765   struct vk_cmd_reset_query_pool *qcmd = &cmd->u.reset_query_pool;
2766   LVP_FROM_HANDLE(lvp_query_pool, pool, qcmd->query_pool);
2767   for (unsigned i = qcmd->first_query; i < qcmd->first_query + qcmd->query_count; i++) {
2768      if (pool->queries[i]) {
2769         state->pctx->destroy_query(state->pctx, pool->queries[i]);
2770         pool->queries[i] = NULL;
2771      }
2772   }
2773}
2774
2775static void handle_write_timestamp(struct vk_cmd_queue_entry *cmd,
2776                                   struct rendering_state *state)
2777{
2778   struct vk_cmd_write_timestamp *qcmd = &cmd->u.write_timestamp;
2779   LVP_FROM_HANDLE(lvp_query_pool, pool, qcmd->query_pool);
2780   if (!pool->queries[qcmd->query]) {
2781      pool->queries[qcmd->query] = state->pctx->create_query(state->pctx,
2782                                                             PIPE_QUERY_TIMESTAMP, 0);
2783   }
2784
2785   if (!(qcmd->pipeline_stage == VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT))
2786      state->pctx->flush(state->pctx, NULL, 0);
2787   state->pctx->end_query(state->pctx, pool->queries[qcmd->query]);
2788
2789}
2790
2791static void handle_copy_query_pool_results(struct vk_cmd_queue_entry *cmd,
2792                                           struct rendering_state *state)
2793{
2794   struct vk_cmd_copy_query_pool_results *copycmd = &cmd->u.copy_query_pool_results;
2795   LVP_FROM_HANDLE(lvp_query_pool, pool, copycmd->query_pool);
2796
2797   for (unsigned i = copycmd->first_query; i < copycmd->first_query + copycmd->query_count; i++) {
2798      unsigned offset = copycmd->dst_offset + lvp_buffer_from_handle(copycmd->dst_buffer)->offset + (copycmd->stride * (i - copycmd->first_query));
2799      if (pool->queries[i]) {
2800         if (copycmd->flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)
2801            state->pctx->get_query_result_resource(state->pctx,
2802                                                   pool->queries[i],
2803                                                   copycmd->flags & VK_QUERY_RESULT_WAIT_BIT,
2804                                                   copycmd->flags & VK_QUERY_RESULT_64_BIT ? PIPE_QUERY_TYPE_U64 : PIPE_QUERY_TYPE_U32,
2805                                                   -1,
2806                                                   lvp_buffer_from_handle(copycmd->dst_buffer)->bo,
2807                                                   offset + (copycmd->flags & VK_QUERY_RESULT_64_BIT ? 8 : 4));
2808         if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
2809            unsigned num_results = 0;
2810            unsigned result_size = copycmd->flags & VK_QUERY_RESULT_64_BIT ? 8 : 4;
2811            u_foreach_bit(bit, pool->pipeline_stats)
2812               state->pctx->get_query_result_resource(state->pctx,
2813                                                      pool->queries[i],
2814                                                      copycmd->flags & VK_QUERY_RESULT_WAIT_BIT,
2815                                                      copycmd->flags & VK_QUERY_RESULT_64_BIT ? PIPE_QUERY_TYPE_U64 : PIPE_QUERY_TYPE_U32,
2816                                                      bit,
2817                                                      lvp_buffer_from_handle(copycmd->dst_buffer)->bo,
2818                                                      offset + num_results++ * result_size);
2819         } else {
2820            state->pctx->get_query_result_resource(state->pctx,
2821                                                   pool->queries[i],
2822                                                   copycmd->flags & VK_QUERY_RESULT_WAIT_BIT,
2823                                                   copycmd->flags & VK_QUERY_RESULT_64_BIT ? PIPE_QUERY_TYPE_U64 : PIPE_QUERY_TYPE_U32,
2824                                                   0,
2825                                                   lvp_buffer_from_handle(copycmd->dst_buffer)->bo,
2826                                                   offset);
2827         }
2828      } else {
2829         /* if no queries emitted yet, just reset the buffer to 0 so avail is reported correctly */
2830         if (copycmd->flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
2831            struct pipe_transfer *src_t;
2832            uint32_t *map;
2833
2834            struct pipe_box box = {0};
2835            box.x = offset;
2836            box.width = copycmd->stride;
2837            box.height = 1;
2838            box.depth = 1;
2839            map = state->pctx->buffer_map(state->pctx,
2840                                            lvp_buffer_from_handle(copycmd->dst_buffer)->bo, 0, PIPE_MAP_READ, &box,
2841                                            &src_t);
2842
2843            memset(map, 0, box.width);
2844            state->pctx->buffer_unmap(state->pctx, src_t);
2845         }
2846      }
2847   }
2848}
2849
2850static void handle_clear_color_image(struct vk_cmd_queue_entry *cmd,
2851                                     struct rendering_state *state)
2852{
2853   LVP_FROM_HANDLE(lvp_image, image, cmd->u.clear_color_image.image);
2854   union util_color uc;
2855   uint32_t *col_val = uc.ui;
2856   util_pack_color_union(image->bo->format, &uc, (void*)cmd->u.clear_color_image.color);
2857   for (unsigned i = 0; i < cmd->u.clear_color_image.range_count; i++) {
2858      VkImageSubresourceRange *range = &cmd->u.clear_color_image.ranges[i];
2859      struct pipe_box box;
2860      box.x = 0;
2861      box.y = 0;
2862      box.z = 0;
2863
2864      uint32_t level_count = lvp_get_levelCount(image, range);
2865      for (unsigned j = range->baseMipLevel; j < range->baseMipLevel + level_count; j++) {
2866         box.width = u_minify(image->bo->width0, j);
2867         box.height = u_minify(image->bo->height0, j);
2868         box.depth = 1;
2869         if (image->bo->target == PIPE_TEXTURE_3D)
2870            box.depth = u_minify(image->bo->depth0, j);
2871         else if (image->bo->target == PIPE_TEXTURE_1D_ARRAY) {
2872            box.y = range->baseArrayLayer;
2873            box.height = lvp_get_layerCount(image, range);
2874            box.depth = 1;
2875         } else {
2876            box.z = range->baseArrayLayer;
2877            box.depth = lvp_get_layerCount(image, range);
2878         }
2879
2880         state->pctx->clear_texture(state->pctx, image->bo,
2881                                    j, &box, (void *)col_val);
2882      }
2883   }
2884}
2885
2886static void handle_clear_ds_image(struct vk_cmd_queue_entry *cmd,
2887                                  struct rendering_state *state)
2888{
2889   LVP_FROM_HANDLE(lvp_image, image, cmd->u.clear_depth_stencil_image.image);
2890   for (unsigned i = 0; i < cmd->u.clear_depth_stencil_image.range_count; i++) {
2891      VkImageSubresourceRange *range = &cmd->u.clear_depth_stencil_image.ranges[i];
2892      uint32_t ds_clear_flags = 0;
2893      if (range->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT)
2894         ds_clear_flags |= PIPE_CLEAR_DEPTH;
2895      if (range->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT)
2896         ds_clear_flags |= PIPE_CLEAR_STENCIL;
2897
2898      uint32_t level_count = lvp_get_levelCount(image, range);
2899      for (unsigned j = 0; j < level_count; j++) {
2900         struct pipe_surface *surf;
2901         unsigned width, height;
2902
2903         width = u_minify(image->bo->width0, range->baseMipLevel + j);
2904         height = u_minify(image->bo->height0, range->baseMipLevel + j);
2905
2906         surf = create_img_surface_bo(state, range,
2907                                      image->bo, image->bo->format,
2908                                      width, height,
2909                                      0, lvp_get_layerCount(image, range) - 1, j);
2910
2911         state->pctx->clear_depth_stencil(state->pctx,
2912                                          surf,
2913                                          ds_clear_flags,
2914                                          cmd->u.clear_depth_stencil_image.depth_stencil->depth,
2915                                          cmd->u.clear_depth_stencil_image.depth_stencil->stencil,
2916                                          0, 0,
2917                                          width, height, true);
2918         state->pctx->surface_destroy(state->pctx, surf);
2919      }
2920   }
2921}
2922
2923static void handle_clear_attachments(struct vk_cmd_queue_entry *cmd,
2924                                     struct rendering_state *state)
2925{
2926   for (uint32_t a = 0; a < cmd->u.clear_attachments.attachment_count; a++) {
2927      VkClearAttachment *att = &cmd->u.clear_attachments.attachments[a];
2928      const struct lvp_subpass *subpass = &state->pass->subpasses[state->subpass];
2929      struct lvp_image_view *imgv;
2930
2931      if (att->aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) {
2932         struct lvp_subpass_attachment *color_att = &subpass->color_attachments[att->colorAttachment];
2933         if (!color_att || color_att->attachment == VK_ATTACHMENT_UNUSED)
2934            continue;
2935         imgv = get_attachment(state, color_att->attachment);
2936      } else {
2937         struct lvp_subpass_attachment *ds_att = subpass->depth_stencil_attachment;
2938         if (!ds_att || ds_att->attachment == VK_ATTACHMENT_UNUSED)
2939            continue;
2940         imgv = get_attachment(state, ds_att->attachment);
2941      }
2942      union pipe_color_union col_val;
2943      double dclear_val = 0;
2944      uint32_t sclear_val = 0;
2945      uint32_t ds_clear_flags = 0;
2946      if (att->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) {
2947         ds_clear_flags |= PIPE_CLEAR_DEPTH;
2948         dclear_val = att->clearValue.depthStencil.depth;
2949      }
2950      if (att->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) {
2951         ds_clear_flags |= PIPE_CLEAR_STENCIL;
2952         sclear_val = att->clearValue.depthStencil.stencil;
2953      }
2954      if (att->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
2955         for (unsigned i = 0; i < 4; i++)
2956            col_val.ui[i] = att->clearValue.color.uint32[i];
2957      }
2958
2959      for (uint32_t r = 0; r < cmd->u.clear_attachments.rect_count; r++) {
2960
2961         VkClearRect *rect = &cmd->u.clear_attachments.rects[r];
2962         if (subpass->view_mask) {
2963            u_foreach_bit(i, subpass->view_mask)
2964               clear_attachment_layers(state, imgv, &rect->rect,
2965                                       i, 1,
2966                                       ds_clear_flags, dclear_val, sclear_val,
2967                                       &col_val);
2968         } else
2969            clear_attachment_layers(state, imgv, &rect->rect,
2970                                    rect->baseArrayLayer, rect->layerCount,
2971                                    ds_clear_flags, dclear_val, sclear_val,
2972                                    &col_val);
2973      }
2974   }
2975}
2976
2977static void handle_resolve_image(struct vk_cmd_queue_entry *cmd,
2978                                 struct rendering_state *state)
2979{
2980   int i;
2981   struct VkResolveImageInfo2KHR *resolvecmd = cmd->u.resolve_image2_khr.resolve_image_info;
2982   LVP_FROM_HANDLE(lvp_image, src_image, resolvecmd->srcImage);
2983   LVP_FROM_HANDLE(lvp_image, dst_image, resolvecmd->dstImage);
2984   struct pipe_blit_info info;
2985
2986   memset(&info, 0, sizeof(info));
2987
2988   state->pctx->flush(state->pctx, NULL, 0);
2989   info.src.resource = src_image->bo;
2990   info.dst.resource = dst_image->bo;
2991   info.src.format = src_image->bo->format;
2992   info.dst.format = dst_image->bo->format;
2993   info.mask = util_format_is_depth_or_stencil(info.src.format) ? PIPE_MASK_ZS : PIPE_MASK_RGBA;
2994   info.filter = PIPE_TEX_FILTER_NEAREST;
2995   for (i = 0; i < resolvecmd->regionCount; i++) {
2996      int srcX0, srcY0;
2997      unsigned dstX0, dstY0;
2998
2999      srcX0 = resolvecmd->pRegions[i].srcOffset.x;
3000      srcY0 = resolvecmd->pRegions[i].srcOffset.y;
3001
3002      dstX0 = resolvecmd->pRegions[i].dstOffset.x;
3003      dstY0 = resolvecmd->pRegions[i].dstOffset.y;
3004
3005      info.dst.box.x = dstX0;
3006      info.dst.box.y = dstY0;
3007      info.src.box.x = srcX0;
3008      info.src.box.y = srcY0;
3009
3010      info.dst.box.width = resolvecmd->pRegions[i].extent.width;
3011      info.src.box.width = resolvecmd->pRegions[i].extent.width;
3012      info.dst.box.height = resolvecmd->pRegions[i].extent.height;
3013      info.src.box.height = resolvecmd->pRegions[i].extent.height;
3014
3015      info.dst.box.depth = resolvecmd->pRegions[i].dstSubresource.layerCount;
3016      info.src.box.depth = resolvecmd->pRegions[i].srcSubresource.layerCount;
3017
3018      info.src.level = resolvecmd->pRegions[i].srcSubresource.mipLevel;
3019      info.src.box.z = resolvecmd->pRegions[i].srcOffset.z + resolvecmd->pRegions[i].srcSubresource.baseArrayLayer;
3020
3021      info.dst.level = resolvecmd->pRegions[i].dstSubresource.mipLevel;
3022      info.dst.box.z = resolvecmd->pRegions[i].dstOffset.z + resolvecmd->pRegions[i].dstSubresource.baseArrayLayer;
3023
3024      state->pctx->blit(state->pctx, &info);
3025   }
3026}
3027
3028static void handle_draw_indirect_count(struct vk_cmd_queue_entry *cmd,
3029                                       struct rendering_state *state, bool indexed)
3030{
3031   const struct lvp_subpass *subpass = &state->pass->subpasses[state->subpass];
3032   struct pipe_draw_start_count_bias draw = {0};
3033   if (indexed) {
3034      state->info.index_bounds_valid = false;
3035      state->info.index_size = state->index_size;
3036      state->info.index.resource = state->index_buffer;
3037      state->info.max_index = ~0;
3038   } else
3039      state->info.index_size = 0;
3040   state->indirect_info.offset = cmd->u.draw_indirect_count.offset;
3041   state->indirect_info.stride = cmd->u.draw_indirect_count.stride;
3042   state->indirect_info.draw_count = cmd->u.draw_indirect_count.max_draw_count;
3043   state->indirect_info.buffer = lvp_buffer_from_handle(cmd->u.draw_indirect_count.buffer)->bo;
3044   state->indirect_info.indirect_draw_count_offset = cmd->u.draw_indirect_count.count_buffer_offset;
3045   state->indirect_info.indirect_draw_count = lvp_buffer_from_handle(cmd->u.draw_indirect_count.count_buffer)->bo;
3046   state->info.view_mask = subpass->view_mask;
3047
3048   state->pctx->set_patch_vertices(state->pctx, state->patch_vertices);
3049   state->pctx->draw_vbo(state->pctx, &state->info, 0, &state->indirect_info, &draw, 1);
3050}
3051
3052static void handle_compute_push_descriptor_set(struct lvp_cmd_push_descriptor_set *pds,
3053                                               struct dyn_info *dyn_info,
3054                                               struct rendering_state *state)
3055{
3056   struct lvp_descriptor_set_layout *layout = pds->layout->set[pds->set].layout;
3057
3058   if (!(layout->shader_stages & VK_SHADER_STAGE_COMPUTE_BIT))
3059      return;
3060   for (unsigned i = 0; i < pds->set; i++) {
3061      increment_dyn_info(dyn_info, pds->layout->set[i].layout, false);
3062   }
3063   unsigned info_idx = 0;
3064   for (unsigned i = 0; i < pds->descriptor_write_count; i++) {
3065      struct lvp_write_descriptor *desc = &pds->descriptors[i];
3066      struct lvp_descriptor_set_binding_layout *binding = &layout->binding[desc->dst_binding];
3067
3068      if (!binding->valid)
3069         continue;
3070
3071      for (unsigned j = 0; j < desc->descriptor_count; j++) {
3072         union lvp_descriptor_info *info = &pds->infos[info_idx + j];
3073
3074         handle_descriptor(state, dyn_info, binding,
3075                           MESA_SHADER_COMPUTE, PIPE_SHADER_COMPUTE,
3076                           j, desc->descriptor_type,
3077                           info);
3078      }
3079      info_idx += desc->descriptor_count;
3080   }
3081}
3082
3083static struct lvp_cmd_push_descriptor_set *create_push_descriptor_set(struct vk_cmd_push_descriptor_set_khr *in_cmd)
3084{
3085   LVP_FROM_HANDLE(lvp_pipeline_layout, layout, in_cmd->layout);
3086   struct lvp_cmd_push_descriptor_set *out_cmd;
3087   int count_descriptors = 0;
3088   int cmd_size = sizeof(*out_cmd);
3089
3090   for (unsigned i = 0; i < in_cmd->descriptor_write_count; i++) {
3091      count_descriptors += in_cmd->descriptor_writes[i].descriptorCount;
3092   }
3093   cmd_size += count_descriptors * sizeof(union lvp_descriptor_info);
3094
3095   cmd_size += in_cmd->descriptor_write_count * sizeof(struct lvp_write_descriptor);
3096
3097   out_cmd = calloc(1, cmd_size);
3098   if (!out_cmd)
3099      return NULL;
3100
3101   out_cmd->bind_point = in_cmd->pipeline_bind_point;
3102   out_cmd->layout = layout;
3103   out_cmd->set = in_cmd->set;
3104   out_cmd->descriptor_write_count = in_cmd->descriptor_write_count;
3105   out_cmd->descriptors = (struct lvp_write_descriptor *)(out_cmd + 1);
3106   out_cmd->infos = (union lvp_descriptor_info *)(out_cmd->descriptors + in_cmd->descriptor_write_count);
3107
3108   unsigned descriptor_index = 0;
3109
3110   for (unsigned i = 0; i < in_cmd->descriptor_write_count; i++) {
3111      struct lvp_write_descriptor *desc = &out_cmd->descriptors[i];
3112
3113      /* dstSet is ignored */
3114      desc->dst_binding = in_cmd->descriptor_writes[i].dstBinding;
3115      desc->dst_array_element = in_cmd->descriptor_writes[i].dstArrayElement;
3116      desc->descriptor_count = in_cmd->descriptor_writes[i].descriptorCount;
3117      desc->descriptor_type = in_cmd->descriptor_writes[i].descriptorType;
3118
3119      for (unsigned j = 0; j < desc->descriptor_count; j++) {
3120         union lvp_descriptor_info *info = &out_cmd->infos[descriptor_index + j];
3121         switch (desc->descriptor_type) {
3122         case VK_DESCRIPTOR_TYPE_SAMPLER:
3123            info->sampler = lvp_sampler_from_handle(in_cmd->descriptor_writes[i].pImageInfo[j].sampler);
3124            break;
3125         case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
3126            info->sampler = lvp_sampler_from_handle(in_cmd->descriptor_writes[i].pImageInfo[j].sampler);
3127            info->iview = lvp_image_view_from_handle(in_cmd->descriptor_writes[i].pImageInfo[j].imageView);
3128            info->image_layout = in_cmd->descriptor_writes[i].pImageInfo[j].imageLayout;
3129            break;
3130         case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
3131         case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
3132         case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
3133            info->iview = lvp_image_view_from_handle(in_cmd->descriptor_writes[i].pImageInfo[j].imageView);
3134            info->image_layout = in_cmd->descriptor_writes[i].pImageInfo[j].imageLayout;
3135            break;
3136         case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
3137         case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
3138            info->buffer_view = lvp_buffer_view_from_handle(in_cmd->descriptor_writes[i].pTexelBufferView[j]);
3139            break;
3140         case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
3141         case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
3142         case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
3143         case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
3144         default:
3145            info->buffer = lvp_buffer_from_handle(in_cmd->descriptor_writes[i].pBufferInfo[j].buffer);
3146            info->offset = in_cmd->descriptor_writes[i].pBufferInfo[j].offset;
3147            info->range = in_cmd->descriptor_writes[i].pBufferInfo[j].range;
3148            break;
3149         }
3150      }
3151      descriptor_index += desc->descriptor_count;
3152   }
3153
3154   return out_cmd;
3155}
3156
3157static void handle_push_descriptor_set_generic(struct vk_cmd_push_descriptor_set_khr *_pds,
3158                                               struct rendering_state *state)
3159{
3160   struct lvp_cmd_push_descriptor_set *pds;
3161   struct lvp_descriptor_set_layout *layout;
3162   struct dyn_info dyn_info;
3163
3164   pds = create_push_descriptor_set(_pds);
3165   layout = pds->layout->set[pds->set].layout;
3166
3167   memset(&dyn_info.stage, 0, sizeof(dyn_info.stage));
3168   dyn_info.dyn_index = 0;
3169   if (pds->bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
3170      handle_compute_push_descriptor_set(pds, &dyn_info, state);
3171   }
3172
3173   for (unsigned i = 0; i < pds->set; i++) {
3174      increment_dyn_info(&dyn_info, pds->layout->set[i].layout, false);
3175   }
3176
3177   unsigned info_idx = 0;
3178   for (unsigned i = 0; i < pds->descriptor_write_count; i++) {
3179      struct lvp_write_descriptor *desc = &pds->descriptors[i];
3180      struct lvp_descriptor_set_binding_layout *binding = &layout->binding[desc->dst_binding];
3181
3182      if (!binding->valid)
3183         continue;
3184
3185      for (unsigned j = 0; j < desc->descriptor_count; j++) {
3186         union lvp_descriptor_info *info = &pds->infos[info_idx + j];
3187
3188         if (layout->shader_stages & VK_SHADER_STAGE_VERTEX_BIT)
3189            handle_descriptor(state, &dyn_info, binding,
3190                              MESA_SHADER_VERTEX, PIPE_SHADER_VERTEX,
3191                              j, desc->descriptor_type,
3192                              info);
3193         if (layout->shader_stages & VK_SHADER_STAGE_FRAGMENT_BIT)
3194            handle_descriptor(state, &dyn_info, binding,
3195                              MESA_SHADER_FRAGMENT, PIPE_SHADER_FRAGMENT,
3196                              j, desc->descriptor_type,
3197                              info);
3198         if (layout->shader_stages & VK_SHADER_STAGE_GEOMETRY_BIT)
3199            handle_descriptor(state, &dyn_info, binding,
3200                              MESA_SHADER_GEOMETRY, PIPE_SHADER_GEOMETRY,
3201                              j, desc->descriptor_type,
3202                              info);
3203         if (layout->shader_stages & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT)
3204            handle_descriptor(state, &dyn_info, binding,
3205                              MESA_SHADER_TESS_CTRL, PIPE_SHADER_TESS_CTRL,
3206                              j, desc->descriptor_type,
3207                              info);
3208         if (layout->shader_stages & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)
3209            handle_descriptor(state, &dyn_info, binding,
3210                              MESA_SHADER_TESS_EVAL, PIPE_SHADER_TESS_EVAL,
3211                              j, desc->descriptor_type,
3212                              info);
3213      }
3214      info_idx += desc->descriptor_count;
3215   }
3216   free(pds);
3217}
3218
3219static void handle_push_descriptor_set(struct vk_cmd_queue_entry *cmd,
3220                                       struct rendering_state *state)
3221{
3222   handle_push_descriptor_set_generic(&cmd->u.push_descriptor_set_khr, state);
3223}
3224
3225static void handle_push_descriptor_set_with_template(struct vk_cmd_queue_entry *cmd,
3226                                                     struct rendering_state *state)
3227{
3228   LVP_FROM_HANDLE(lvp_descriptor_update_template, templ, cmd->u.push_descriptor_set_with_template_khr.descriptor_update_template);
3229   struct vk_cmd_push_descriptor_set_khr *pds;
3230   int pds_size = sizeof(*pds);
3231
3232   pds_size += templ->entry_count * sizeof(struct VkWriteDescriptorSet);
3233
3234   for (unsigned i = 0; i < templ->entry_count; i++) {
3235      VkDescriptorUpdateTemplateEntry *entry = &templ->entry[i];
3236      switch (entry->descriptorType) {
3237      case VK_DESCRIPTOR_TYPE_SAMPLER:
3238      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
3239      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
3240      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
3241      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
3242         pds_size += sizeof(VkDescriptorImageInfo) * entry->descriptorCount;
3243         break;
3244      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
3245      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
3246         pds_size += sizeof(VkBufferView) * entry->descriptorCount;
3247         break;
3248      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
3249      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
3250      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
3251      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
3252      default:
3253         pds_size += sizeof(VkDescriptorBufferInfo) * entry->descriptorCount;
3254         break;
3255      }
3256   }
3257
3258   pds = calloc(1, pds_size);
3259   if (!pds)
3260      return;
3261
3262   pds->pipeline_bind_point = templ->bind_point;
3263   pds->layout = lvp_pipeline_layout_to_handle(templ->pipeline_layout);
3264   pds->set = templ->set;
3265   pds->descriptor_write_count = templ->entry_count;
3266   pds->descriptor_writes = (struct VkWriteDescriptorSet *)(pds + 1);
3267   const uint8_t *next_info = (const uint8_t *) (pds->descriptor_writes + templ->entry_count);
3268
3269   const uint8_t *pSrc = cmd->u.push_descriptor_set_with_template_khr.data;
3270   for (unsigned i = 0; i < templ->entry_count; i++) {
3271      struct VkWriteDescriptorSet *desc = &pds->descriptor_writes[i];
3272      struct VkDescriptorUpdateTemplateEntry *entry = &templ->entry[i];
3273
3274      /* dstSet is ignored */
3275      desc->dstBinding = entry->dstBinding;
3276      desc->dstArrayElement = entry->dstArrayElement;
3277      desc->descriptorCount = entry->descriptorCount;
3278      desc->descriptorType = entry->descriptorType;
3279      desc->pImageInfo = (const VkDescriptorImageInfo *) next_info;
3280      desc->pTexelBufferView = (const VkBufferView *) next_info;
3281      desc->pBufferInfo = (const VkDescriptorBufferInfo *) next_info;
3282
3283      for (unsigned j = 0; j < desc->descriptorCount; j++) {
3284         switch (desc->descriptorType) {
3285         case VK_DESCRIPTOR_TYPE_SAMPLER:
3286         case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
3287         case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
3288         case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
3289         case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
3290            memcpy((VkDescriptorImageInfo*)&desc->pImageInfo[j], pSrc, sizeof(VkDescriptorImageInfo));
3291            next_info += sizeof(VkDescriptorImageInfo);
3292            pSrc += sizeof(VkDescriptorImageInfo);
3293            break;
3294         case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
3295         case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
3296            memcpy((VkBufferView*)&desc->pTexelBufferView[j], pSrc, sizeof(VkBufferView));
3297            next_info += sizeof(VkBufferView);
3298            pSrc += sizeof(VkBufferView);
3299            break;
3300         case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
3301         case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
3302         case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
3303         case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
3304         default:
3305            memcpy((VkDescriptorBufferInfo*)&desc->pBufferInfo[j], pSrc, sizeof(VkDescriptorBufferInfo));
3306            next_info += sizeof(VkDescriptorBufferInfo);
3307            pSrc += sizeof(VkDescriptorBufferInfo);
3308            break;
3309         }
3310      }
3311   }
3312   handle_push_descriptor_set_generic(pds, state);
3313   free(pds);
3314}
3315
3316static void handle_bind_transform_feedback_buffers(struct vk_cmd_queue_entry *cmd,
3317                                                   struct rendering_state *state)
3318{
3319   struct vk_cmd_bind_transform_feedback_buffers_ext *btfb = &cmd->u.bind_transform_feedback_buffers_ext;
3320
3321   for (unsigned i = 0; i < btfb->binding_count; i++) {
3322      int idx = i + btfb->first_binding;
3323      uint32_t size;
3324      if (btfb->sizes && btfb->sizes[i] != VK_WHOLE_SIZE)
3325         size = btfb->sizes[i];
3326      else
3327         size = lvp_buffer_from_handle(btfb->buffers[i])->size - btfb->offsets[i];
3328
3329      if (state->so_targets[idx])
3330         state->pctx->stream_output_target_destroy(state->pctx, state->so_targets[idx]);
3331
3332      state->so_targets[idx] = state->pctx->create_stream_output_target(state->pctx,
3333                                                                        lvp_buffer_from_handle(btfb->buffers[i])->bo,
3334                                                                        btfb->offsets[i],
3335                                                                        size);
3336   }
3337   state->num_so_targets = btfb->first_binding + btfb->binding_count;
3338}
3339
3340static void handle_begin_transform_feedback(struct vk_cmd_queue_entry *cmd,
3341                                            struct rendering_state *state)
3342{
3343   struct vk_cmd_begin_transform_feedback_ext *btf = &cmd->u.begin_transform_feedback_ext;
3344   uint32_t offsets[4];
3345
3346   memset(offsets, 0, sizeof(uint32_t)*4);
3347
3348   for (unsigned i = 0; i < btf->counter_buffer_count; i++) {
3349      if (!btf->counter_buffers[i])
3350         continue;
3351
3352      pipe_buffer_read(state->pctx,
3353                       btf->counter_buffers ? lvp_buffer_from_handle(btf->counter_buffers[i])->bo : NULL,
3354                       btf->counter_buffer_offsets ? btf->counter_buffer_offsets[i] : 0,
3355                       4,
3356                       &offsets[i]);
3357   }
3358   state->pctx->set_stream_output_targets(state->pctx, state->num_so_targets,
3359                                          state->so_targets, offsets);
3360}
3361
3362static void handle_end_transform_feedback(struct vk_cmd_queue_entry *cmd,
3363                                          struct rendering_state *state)
3364{
3365   struct vk_cmd_end_transform_feedback_ext *etf = &cmd->u.end_transform_feedback_ext;
3366
3367   if (etf->counter_buffer_count) {
3368      for (unsigned i = 0; i < etf->counter_buffer_count; i++) {
3369         if (!etf->counter_buffers[i])
3370            continue;
3371
3372         uint32_t offset;
3373         offset = state->pctx->stream_output_target_offset(state->so_targets[i]);
3374
3375         pipe_buffer_write(state->pctx,
3376                           etf->counter_buffers ? lvp_buffer_from_handle(etf->counter_buffers[i])->bo : NULL,
3377                           etf->counter_buffer_offsets ? etf->counter_buffer_offsets[i] : 0,
3378                           4,
3379                           &offset);
3380      }
3381   }
3382   state->pctx->set_stream_output_targets(state->pctx, 0, NULL, NULL);
3383}
3384
3385static void handle_draw_indirect_byte_count(struct vk_cmd_queue_entry *cmd,
3386                                            struct rendering_state *state)
3387{
3388   struct vk_cmd_draw_indirect_byte_count_ext *dibc = &cmd->u.draw_indirect_byte_count_ext;
3389   const struct lvp_subpass *subpass = &state->pass->subpasses[state->subpass];
3390   struct pipe_draw_start_count_bias draw = {0};
3391
3392   pipe_buffer_read(state->pctx,
3393                    lvp_buffer_from_handle(dibc->counter_buffer)->bo,
3394                    lvp_buffer_from_handle(dibc->counter_buffer)->offset + dibc->counter_buffer_offset,
3395                    4, &draw.count);
3396
3397   state->info.start_instance = cmd->u.draw_indirect_byte_count_ext.first_instance;
3398   state->info.instance_count = cmd->u.draw_indirect_byte_count_ext.instance_count;
3399   state->info.index_size = 0;
3400
3401   draw.count /= cmd->u.draw_indirect_byte_count_ext.vertex_stride;
3402   state->info.view_mask = subpass->view_mask;
3403   state->pctx->set_patch_vertices(state->pctx, state->patch_vertices);
3404   state->pctx->draw_vbo(state->pctx, &state->info, 0, &state->indirect_info, &draw, 1);
3405}
3406
3407static void handle_begin_conditional_rendering(struct vk_cmd_queue_entry *cmd,
3408                                               struct rendering_state *state)
3409{
3410   struct VkConditionalRenderingBeginInfoEXT *bcr = cmd->u.begin_conditional_rendering_ext.conditional_rendering_begin;
3411   state->pctx->render_condition_mem(state->pctx,
3412                                     lvp_buffer_from_handle(bcr->buffer)->bo,
3413                                     lvp_buffer_from_handle(bcr->buffer)->offset + bcr->offset,
3414                                     bcr->flags & VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT);
3415}
3416
3417static void handle_end_conditional_rendering(struct rendering_state *state)
3418{
3419   state->pctx->render_condition_mem(state->pctx, NULL, 0, false);
3420}
3421
3422static void handle_set_vertex_input(struct vk_cmd_queue_entry *cmd,
3423                                    struct rendering_state *state)
3424{
3425   const struct vk_cmd_set_vertex_input_ext *vertex_input = &cmd->u.set_vertex_input_ext;
3426   const struct VkVertexInputBindingDescription2EXT *bindings = vertex_input->vertex_binding_descriptions;
3427   const struct VkVertexInputAttributeDescription2EXT *attrs = vertex_input->vertex_attribute_descriptions;
3428   int max_location = -1;
3429   for (unsigned i = 0; i < vertex_input->vertex_attribute_description_count; i++) {
3430      const struct VkVertexInputBindingDescription2EXT *binding = NULL;
3431      unsigned location = attrs[i].location;
3432
3433      for (unsigned j = 0; j < vertex_input->vertex_binding_description_count; j++) {
3434         const struct VkVertexInputBindingDescription2EXT *b = &bindings[j];
3435         if (b->binding == attrs[i].binding) {
3436            binding = b;
3437            break;
3438         }
3439      }
3440      assert(binding);
3441      state->velem.velems[location].src_offset = attrs[i].offset;
3442      state->velem.velems[location].vertex_buffer_index = attrs[i].binding;
3443      state->velem.velems[location].src_format = lvp_vk_format_to_pipe_format(attrs[i].format);
3444      state->vb[attrs[i].binding].stride = binding->stride;
3445
3446      switch (binding->inputRate) {
3447      case VK_VERTEX_INPUT_RATE_VERTEX:
3448         state->velem.velems[location].instance_divisor = 0;
3449         break;
3450      case VK_VERTEX_INPUT_RATE_INSTANCE:
3451         state->velem.velems[location].instance_divisor = binding->divisor;
3452         break;
3453      default:
3454         assert(0);
3455         break;
3456      }
3457
3458      if ((int)location > max_location)
3459         max_location = location;
3460   }
3461   state->velem.count = max_location + 1;
3462   state->vb_dirty = true;
3463   state->ve_dirty = true;
3464}
3465
3466static void handle_set_cull_mode(struct vk_cmd_queue_entry *cmd,
3467                                 struct rendering_state *state)
3468{
3469   state->rs_state.cull_face = vk_cull_to_pipe(cmd->u.set_cull_mode_ext.cull_mode);
3470   state->rs_dirty = true;
3471}
3472
3473static void handle_set_front_face(struct vk_cmd_queue_entry *cmd,
3474                                  struct rendering_state *state)
3475{
3476   state->rs_state.front_ccw = (cmd->u.set_front_face_ext.front_face == VK_FRONT_FACE_COUNTER_CLOCKWISE);
3477   state->rs_dirty = true;
3478}
3479
3480static void handle_set_primitive_topology(struct vk_cmd_queue_entry *cmd,
3481                                          struct rendering_state *state)
3482{
3483   state->info.mode = vk_conv_topology(cmd->u.set_primitive_topology_ext.primitive_topology);
3484   state->rs_dirty = true;
3485}
3486
3487
3488static void handle_set_depth_test_enable(struct vk_cmd_queue_entry *cmd,
3489                                         struct rendering_state *state)
3490{
3491   state->dsa_dirty |= state->dsa_state.depth_enabled != cmd->u.set_depth_test_enable_ext.depth_test_enable;
3492   state->dsa_state.depth_enabled = cmd->u.set_depth_test_enable_ext.depth_test_enable;
3493}
3494
3495static void handle_set_depth_write_enable(struct vk_cmd_queue_entry *cmd,
3496                                          struct rendering_state *state)
3497{
3498   state->dsa_dirty |= state->dsa_state.depth_writemask != cmd->u.set_depth_write_enable_ext.depth_write_enable;
3499   state->dsa_state.depth_writemask = cmd->u.set_depth_write_enable_ext.depth_write_enable;
3500}
3501
3502static void handle_set_depth_compare_op(struct vk_cmd_queue_entry *cmd,
3503                                        struct rendering_state *state)
3504{
3505   state->dsa_dirty |= state->dsa_state.depth_func != cmd->u.set_depth_compare_op_ext.depth_compare_op;
3506   state->dsa_state.depth_func = cmd->u.set_depth_compare_op_ext.depth_compare_op;
3507}
3508
3509static void handle_set_depth_bounds_test_enable(struct vk_cmd_queue_entry *cmd,
3510                                                struct rendering_state *state)
3511{
3512   state->dsa_dirty |= state->dsa_state.depth_bounds_test != cmd->u.set_depth_bounds_test_enable_ext.depth_bounds_test_enable;
3513   state->dsa_state.depth_bounds_test = cmd->u.set_depth_bounds_test_enable_ext.depth_bounds_test_enable;
3514}
3515
3516static void handle_set_stencil_test_enable(struct vk_cmd_queue_entry *cmd,
3517                                           struct rendering_state *state)
3518{
3519   state->dsa_dirty |= state->dsa_state.stencil[0].enabled != cmd->u.set_stencil_test_enable_ext.stencil_test_enable ||
3520                       state->dsa_state.stencil[1].enabled != cmd->u.set_stencil_test_enable_ext.stencil_test_enable;
3521   state->dsa_state.stencil[0].enabled = cmd->u.set_stencil_test_enable_ext.stencil_test_enable;
3522   state->dsa_state.stencil[1].enabled = cmd->u.set_stencil_test_enable_ext.stencil_test_enable;
3523}
3524
3525static void handle_set_stencil_op(struct vk_cmd_queue_entry *cmd,
3526                                  struct rendering_state *state)
3527{
3528   if (cmd->u.set_stencil_op_ext.face_mask & VK_STENCIL_FACE_FRONT_BIT) {
3529      state->dsa_state.stencil[0].func = cmd->u.set_stencil_op_ext.compare_op;
3530      state->dsa_state.stencil[0].fail_op = vk_conv_stencil_op(cmd->u.set_stencil_op_ext.fail_op);
3531      state->dsa_state.stencil[0].zpass_op = vk_conv_stencil_op(cmd->u.set_stencil_op_ext.pass_op);
3532      state->dsa_state.stencil[0].zfail_op = vk_conv_stencil_op(cmd->u.set_stencil_op_ext.depth_fail_op);
3533   }
3534
3535   if (cmd->u.set_stencil_op_ext.face_mask & VK_STENCIL_FACE_BACK_BIT) {
3536      state->dsa_state.stencil[1].func = cmd->u.set_stencil_op_ext.compare_op;
3537      state->dsa_state.stencil[1].fail_op = vk_conv_stencil_op(cmd->u.set_stencil_op_ext.fail_op);
3538      state->dsa_state.stencil[1].zpass_op = vk_conv_stencil_op(cmd->u.set_stencil_op_ext.pass_op);
3539      state->dsa_state.stencil[1].zfail_op = vk_conv_stencil_op(cmd->u.set_stencil_op_ext.depth_fail_op);
3540   }
3541   state->dsa_dirty = true;
3542}
3543
3544static void handle_set_line_stipple(struct vk_cmd_queue_entry *cmd,
3545                                    struct rendering_state *state)
3546{
3547   state->rs_state.line_stipple_factor = cmd->u.set_line_stipple_ext.line_stipple_factor - 1;
3548   state->rs_state.line_stipple_pattern = cmd->u.set_line_stipple_ext.line_stipple_pattern;
3549   state->rs_dirty = true;
3550}
3551
3552static void handle_set_depth_bias_enable(struct vk_cmd_queue_entry *cmd,
3553                                         struct rendering_state *state)
3554{
3555   state->rs_dirty |= state->depth_bias.enabled != cmd->u.set_depth_bias_enable_ext.depth_bias_enable;
3556   state->depth_bias.enabled = cmd->u.set_depth_bias_enable_ext.depth_bias_enable;
3557}
3558
3559static void handle_set_logic_op(struct vk_cmd_queue_entry *cmd,
3560                                struct rendering_state *state)
3561{
3562   unsigned op = vk_conv_logic_op(cmd->u.set_logic_op_ext.logic_op);
3563   state->rs_dirty |= state->blend_state.logicop_func != op;
3564   state->blend_state.logicop_func = op;
3565}
3566
3567static void handle_set_patch_control_points(struct vk_cmd_queue_entry *cmd,
3568                                            struct rendering_state *state)
3569{
3570   state->patch_vertices = cmd->u.set_patch_control_points_ext.patch_control_points;
3571}
3572
3573static void handle_set_primitive_restart_enable(struct vk_cmd_queue_entry *cmd,
3574                                                struct rendering_state *state)
3575{
3576   state->info.primitive_restart = cmd->u.set_primitive_restart_enable_ext.primitive_restart_enable;
3577}
3578
3579static void handle_set_rasterizer_discard_enable(struct vk_cmd_queue_entry *cmd,
3580                                                 struct rendering_state *state)
3581{
3582   state->rs_dirty |= state->rs_state.rasterizer_discard != cmd->u.set_rasterizer_discard_enable_ext.rasterizer_discard_enable;
3583   state->rs_state.rasterizer_discard = cmd->u.set_rasterizer_discard_enable_ext.rasterizer_discard_enable;
3584}
3585
3586static void handle_set_color_write_enable(struct vk_cmd_queue_entry *cmd,
3587                                          struct rendering_state *state)
3588{
3589   uint8_t disable_mask = 0; //PIPE_MAX_COLOR_BUFS is max attachment count
3590
3591   for (unsigned i = 0; i < cmd->u.set_color_write_enable_ext.attachment_count; i++) {
3592      /* this is inverted because cmdbufs are zero-initialized, meaning only 'true'
3593       * can be detected with a bool, and the default is to enable color writes
3594       */
3595      if (cmd->u.set_color_write_enable_ext.color_write_enables[i] != VK_TRUE)
3596         disable_mask |= BITFIELD_BIT(i);
3597   }
3598
3599   state->blend_dirty |= state->color_write_disables != disable_mask;
3600   state->color_write_disables = disable_mask;
3601}
3602
3603static void lvp_execute_cmd_buffer(struct lvp_cmd_buffer *cmd_buffer,
3604                                   struct rendering_state *state)
3605{
3606   struct vk_cmd_queue_entry *cmd;
3607   bool first = true;
3608   bool did_flush = false;
3609
3610   LIST_FOR_EACH_ENTRY(cmd, &cmd_buffer->queue.cmds, cmd_link) {
3611      switch (cmd->type) {
3612      case VK_CMD_BIND_PIPELINE:
3613         handle_pipeline(cmd, state);
3614         break;
3615      case VK_CMD_SET_VIEWPORT:
3616         handle_set_viewport(cmd, state);
3617         break;
3618      case VK_CMD_SET_VIEWPORT_WITH_COUNT_EXT:
3619         handle_set_viewport_with_count(cmd, state);
3620         break;
3621      case VK_CMD_SET_SCISSOR:
3622         handle_set_scissor(cmd, state);
3623         break;
3624      case VK_CMD_SET_SCISSOR_WITH_COUNT_EXT:
3625         handle_set_scissor_with_count(cmd, state);
3626         break;
3627      case VK_CMD_SET_LINE_WIDTH:
3628         handle_set_line_width(cmd, state);
3629         break;
3630      case VK_CMD_SET_DEPTH_BIAS:
3631         handle_set_depth_bias(cmd, state);
3632         break;
3633      case VK_CMD_SET_BLEND_CONSTANTS:
3634         handle_set_blend_constants(cmd, state);
3635         break;
3636      case VK_CMD_SET_DEPTH_BOUNDS:
3637         handle_set_depth_bounds(cmd, state);
3638         break;
3639      case VK_CMD_SET_STENCIL_COMPARE_MASK:
3640         handle_set_stencil_compare_mask(cmd, state);
3641         break;
3642      case VK_CMD_SET_STENCIL_WRITE_MASK:
3643         handle_set_stencil_write_mask(cmd, state);
3644         break;
3645      case VK_CMD_SET_STENCIL_REFERENCE:
3646         handle_set_stencil_reference(cmd, state);
3647         break;
3648      case VK_CMD_BIND_DESCRIPTOR_SETS:
3649         handle_descriptor_sets(cmd, state);
3650         break;
3651      case VK_CMD_BIND_INDEX_BUFFER:
3652         handle_index_buffer(cmd, state);
3653         break;
3654      case VK_CMD_BIND_VERTEX_BUFFERS:
3655         handle_vertex_buffers(cmd, state);
3656         break;
3657      case VK_CMD_BIND_VERTEX_BUFFERS2_EXT:
3658         handle_vertex_buffers2(cmd, state);
3659         break;
3660      case VK_CMD_DRAW:
3661         emit_state(state);
3662         handle_draw(cmd, state);
3663         break;
3664      case VK_CMD_DRAW_MULTI_EXT:
3665         emit_state(state);
3666         handle_draw_multi(cmd, state);
3667         break;
3668      case VK_CMD_DRAW_INDEXED:
3669         emit_state(state);
3670         handle_draw_indexed(cmd, state);
3671         break;
3672      case VK_CMD_DRAW_INDIRECT:
3673         emit_state(state);
3674         handle_draw_indirect(cmd, state, false);
3675         break;
3676      case VK_CMD_DRAW_INDEXED_INDIRECT:
3677         emit_state(state);
3678         handle_draw_indirect(cmd, state, true);
3679         break;
3680      case VK_CMD_DRAW_MULTI_INDEXED_EXT:
3681         emit_state(state);
3682         handle_draw_multi_indexed(cmd, state);
3683         break;
3684      case VK_CMD_DISPATCH:
3685         emit_compute_state(state);
3686         handle_dispatch(cmd, state);
3687         break;
3688      case VK_CMD_DISPATCH_BASE:
3689         emit_compute_state(state);
3690         handle_dispatch_base(cmd, state);
3691         break;
3692      case VK_CMD_DISPATCH_INDIRECT:
3693         emit_compute_state(state);
3694         handle_dispatch_indirect(cmd, state);
3695         break;
3696      case VK_CMD_COPY_BUFFER2_KHR:
3697         handle_copy_buffer(cmd, state);
3698         break;
3699      case VK_CMD_COPY_IMAGE2_KHR:
3700         handle_copy_image(cmd, state);
3701         break;
3702      case VK_CMD_BLIT_IMAGE2_KHR:
3703         handle_blit_image(cmd, state);
3704         break;
3705      case VK_CMD_COPY_BUFFER_TO_IMAGE2_KHR:
3706         handle_copy_buffer_to_image(cmd, state);
3707         break;
3708      case VK_CMD_COPY_IMAGE_TO_BUFFER2_KHR:
3709         handle_copy_image_to_buffer2_khr(cmd, state);
3710         break;
3711      case VK_CMD_UPDATE_BUFFER:
3712         handle_update_buffer(cmd, state);
3713         break;
3714      case VK_CMD_FILL_BUFFER:
3715         handle_fill_buffer(cmd, state);
3716         break;
3717      case VK_CMD_CLEAR_COLOR_IMAGE:
3718         handle_clear_color_image(cmd, state);
3719         break;
3720      case VK_CMD_CLEAR_DEPTH_STENCIL_IMAGE:
3721         handle_clear_ds_image(cmd, state);
3722         break;
3723      case VK_CMD_CLEAR_ATTACHMENTS:
3724         handle_clear_attachments(cmd, state);
3725         break;
3726      case VK_CMD_RESOLVE_IMAGE2_KHR:
3727         handle_resolve_image(cmd, state);
3728         break;
3729      case VK_CMD_SET_EVENT:
3730         handle_event_set(cmd, state);
3731         break;
3732      case VK_CMD_RESET_EVENT:
3733         handle_event_reset(cmd, state);
3734         break;
3735      case VK_CMD_WAIT_EVENTS:
3736         handle_wait_events(cmd, state);
3737         break;
3738      case VK_CMD_PIPELINE_BARRIER:
3739         /* skip flushes since every cmdbuf does a flush
3740            after iterating its cmds and so this is redundant
3741          */
3742         if (first || did_flush || cmd->cmd_link.next == &cmd_buffer->queue.cmds)
3743            continue;
3744         handle_pipeline_barrier(cmd, state);
3745         did_flush = true;
3746         continue;
3747      case VK_CMD_BEGIN_QUERY_INDEXED_EXT:
3748         handle_begin_query_indexed_ext(cmd, state);
3749         break;
3750      case VK_CMD_END_QUERY_INDEXED_EXT:
3751         handle_end_query_indexed_ext(cmd, state);
3752         break;
3753      case VK_CMD_BEGIN_QUERY:
3754         handle_begin_query(cmd, state);
3755         break;
3756      case VK_CMD_END_QUERY:
3757         handle_end_query(cmd, state);
3758         break;
3759      case VK_CMD_RESET_QUERY_POOL:
3760         handle_reset_query_pool(cmd, state);
3761         break;
3762      case VK_CMD_WRITE_TIMESTAMP:
3763         handle_write_timestamp(cmd, state);
3764         break;
3765      case VK_CMD_COPY_QUERY_POOL_RESULTS:
3766         handle_copy_query_pool_results(cmd, state);
3767         break;
3768      case VK_CMD_PUSH_CONSTANTS:
3769         handle_push_constants(cmd, state);
3770         break;
3771      case VK_CMD_BEGIN_RENDER_PASS:
3772         handle_begin_render_pass(cmd, state);
3773         break;
3774      case VK_CMD_BEGIN_RENDER_PASS2:
3775         handle_begin_render_pass2(cmd, state);
3776         break;
3777      case VK_CMD_NEXT_SUBPASS:
3778      case VK_CMD_NEXT_SUBPASS2:
3779         handle_next_subpass2(cmd, state);
3780         break;
3781      case VK_CMD_END_RENDER_PASS:
3782      case VK_CMD_END_RENDER_PASS2:
3783         handle_end_render_pass2(cmd, state);
3784         break;
3785      case VK_CMD_EXECUTE_COMMANDS:
3786         handle_execute_commands(cmd, state);
3787         break;
3788      case VK_CMD_DRAW_INDIRECT_COUNT:
3789         emit_state(state);
3790         handle_draw_indirect_count(cmd, state, false);
3791         break;
3792      case VK_CMD_DRAW_INDEXED_INDIRECT_COUNT:
3793         emit_state(state);
3794         handle_draw_indirect_count(cmd, state, true);
3795         break;
3796      case VK_CMD_PUSH_DESCRIPTOR_SET_KHR:
3797         handle_push_descriptor_set(cmd, state);
3798         break;
3799      case VK_CMD_PUSH_DESCRIPTOR_SET_WITH_TEMPLATE_KHR:
3800         handle_push_descriptor_set_with_template(cmd, state);
3801         break;
3802      case VK_CMD_BIND_TRANSFORM_FEEDBACK_BUFFERS_EXT:
3803         handle_bind_transform_feedback_buffers(cmd, state);
3804         break;
3805      case VK_CMD_BEGIN_TRANSFORM_FEEDBACK_EXT:
3806         handle_begin_transform_feedback(cmd, state);
3807         break;
3808      case VK_CMD_END_TRANSFORM_FEEDBACK_EXT:
3809         handle_end_transform_feedback(cmd, state);
3810         break;
3811      case VK_CMD_DRAW_INDIRECT_BYTE_COUNT_EXT:
3812         emit_state(state);
3813         handle_draw_indirect_byte_count(cmd, state);
3814         break;
3815      case VK_CMD_BEGIN_CONDITIONAL_RENDERING_EXT:
3816         handle_begin_conditional_rendering(cmd, state);
3817         break;
3818      case VK_CMD_END_CONDITIONAL_RENDERING_EXT:
3819         handle_end_conditional_rendering(state);
3820         break;
3821      case VK_CMD_SET_VERTEX_INPUT_EXT:
3822         handle_set_vertex_input(cmd, state);
3823         break;
3824      case VK_CMD_SET_CULL_MODE_EXT:
3825         handle_set_cull_mode(cmd, state);
3826         break;
3827      case VK_CMD_SET_FRONT_FACE_EXT:
3828         handle_set_front_face(cmd, state);
3829         break;
3830      case VK_CMD_SET_PRIMITIVE_TOPOLOGY_EXT:
3831         handle_set_primitive_topology(cmd, state);
3832         break;
3833      case VK_CMD_SET_DEPTH_TEST_ENABLE_EXT:
3834         handle_set_depth_test_enable(cmd, state);
3835         break;
3836      case VK_CMD_SET_DEPTH_WRITE_ENABLE_EXT:
3837         handle_set_depth_write_enable(cmd, state);
3838         break;
3839      case VK_CMD_SET_DEPTH_COMPARE_OP_EXT:
3840         handle_set_depth_compare_op(cmd, state);
3841         break;
3842      case VK_CMD_SET_DEPTH_BOUNDS_TEST_ENABLE_EXT:
3843         handle_set_depth_bounds_test_enable(cmd, state);
3844         break;
3845      case VK_CMD_SET_STENCIL_TEST_ENABLE_EXT:
3846         handle_set_stencil_test_enable(cmd, state);
3847         break;
3848      case VK_CMD_SET_STENCIL_OP_EXT:
3849         handle_set_stencil_op(cmd, state);
3850         break;
3851      case VK_CMD_SET_LINE_STIPPLE_EXT:
3852         handle_set_line_stipple(cmd, state);
3853         break;
3854      case VK_CMD_SET_DEPTH_BIAS_ENABLE_EXT:
3855         handle_set_depth_bias_enable(cmd, state);
3856         break;
3857      case VK_CMD_SET_LOGIC_OP_EXT:
3858         handle_set_logic_op(cmd, state);
3859         break;
3860      case VK_CMD_SET_PATCH_CONTROL_POINTS_EXT:
3861         handle_set_patch_control_points(cmd, state);
3862         break;
3863      case VK_CMD_SET_PRIMITIVE_RESTART_ENABLE_EXT:
3864         handle_set_primitive_restart_enable(cmd, state);
3865         break;
3866      case VK_CMD_SET_RASTERIZER_DISCARD_ENABLE_EXT:
3867         handle_set_rasterizer_discard_enable(cmd, state);
3868         break;
3869      case VK_CMD_SET_COLOR_WRITE_ENABLE_EXT:
3870         handle_set_color_write_enable(cmd, state);
3871         break;
3872      case VK_CMD_SET_DEVICE_MASK:
3873         /* no-op */
3874         break;
3875      default:
3876         fprintf(stderr, "Unsupported command %s\n", vk_cmd_queue_type_names[cmd->type]);
3877         unreachable("Unsupported command");
3878         break;
3879      }
3880      first = false;
3881      did_flush = false;
3882   }
3883}
3884
3885VkResult lvp_execute_cmds(struct lvp_device *device,
3886                          struct lvp_queue *queue,
3887                          struct lvp_cmd_buffer *cmd_buffer)
3888{
3889   struct rendering_state state;
3890   memset(&state, 0, sizeof(state));
3891   state.pctx = queue->ctx;
3892   state.cso = queue->cso;
3893   state.blend_dirty = true;
3894   state.dsa_dirty = true;
3895   state.rs_dirty = true;
3896   state.vp_dirty = true;
3897   for (enum pipe_shader_type s = PIPE_SHADER_VERTEX; s < PIPE_SHADER_TYPES; s++) {
3898      for (unsigned i = 0; i < PIPE_MAX_SAMPLERS; i++)
3899         state.cso_ss_ptr[s][i] = &state.ss[s][i];
3900   }
3901   /* create a gallium context */
3902   lvp_execute_cmd_buffer(cmd_buffer, &state);
3903
3904   state.start_vb = -1;
3905   state.num_vb = 0;
3906   cso_unbind_context(queue->cso);
3907   for (unsigned i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
3908      if (state.so_targets[i]) {
3909         state.pctx->stream_output_target_destroy(state.pctx, state.so_targets[i]);
3910      }
3911   }
3912
3913   for (enum pipe_shader_type s = PIPE_SHADER_VERTEX; s < PIPE_SHADER_TYPES; s++) {
3914      for (unsigned i = 0; i < PIPE_MAX_SAMPLERS; i++) {
3915         if (state.sv[s][i])
3916            pipe_sampler_view_reference(&state.sv[s][i], NULL);
3917      }
3918   }
3919
3920   for (unsigned i = 0; i < PIPE_MAX_SAMPLERS; i++) {
3921      if (state.cso_ss_ptr[PIPE_SHADER_COMPUTE][i])
3922         state.pctx->delete_sampler_state(state.pctx, state.ss_cso[PIPE_SHADER_COMPUTE][i]);
3923   }
3924
3925   free(state.imageless_views);
3926   free(state.pending_clear_aspects);
3927   free(state.cleared_views);
3928   free(state.attachments);
3929   return VK_SUCCESS;
3930}
3931